Diffstat (limited to 'i386/i386at/model_dep.c')
-rw-r--r--  i386/i386at/model_dep.c  310
1 file changed, 50 insertions(+), 260 deletions(-)
diff --git a/i386/i386at/model_dep.c b/i386/i386at/model_dep.c
index 04cf695..e1ad794 100644
--- a/i386/i386at/model_dep.c
+++ b/i386/i386at/model_dep.c
@@ -49,6 +49,7 @@
#include <kern/mach_clock.h>
#include <kern/printf.h>
#include <kern/startup.h>
+#include <kern/thread.h>
#include <sys/time.h>
#include <sys/types.h>
#include <vm/vm_page.h>
@@ -70,6 +71,12 @@
#include <i386at/rtc.h>
#include <i386at/model_dep.h>
#include <i386at/acpihalt.h>
+#define multiboot_module x15_multiboot_module
+#define multiboot_info x15_multiboot_info
+#include <i386/x15/multiboot.h>
+#include <i386at/biosmem.h>
+#undef multiboot_module
+#undef multiboot_info
#ifdef MACH_XEN
#include <xen/console.h>
#include <xen/store.h>
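
The #define/#undef pair in the hunk above lets model_dep.c include the x15 copy of multiboot.h without its multiboot_module and multiboot_info struct tags colliding with the ones Mach already declares. The same rename-on-include pattern in isolation (all names below are illustrative, not taken from either header):

/* mach_defs.h already declares:   struct frob { int a; };     */
/* imported/frob.h also declares:  struct frob { long x, y; }; */

#include "mach_defs.h"

#define frob imported_frob      /* rename the imported tag ...                 */
#include "imported/frob.h"      /* ... so it comes in as struct imported_frob  */
#undef frob                     /* restore the name for the rest of the file   */

/* Both layouts are now usable side by side:
   struct frob m;  struct imported_frob x;  */
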
@@ -127,27 +134,13 @@ struct multiboot_info boot_info;
/* Command line supplied to kernel. */
char *kernel_cmdline = "";
-/* This is used for memory initialization:
- it gets bumped up through physical memory
- that exists and is not occupied by boot gunk.
- It is not necessarily page-aligned. */
-static vm_offset_t avail_next
-#ifndef MACH_HYP
- = RESERVED_BIOS /* XX end of BIOS data area */
-#endif /* MACH_HYP */
- ;
-
-/* Possibly overestimated amount of available memory
- still remaining to be handed to the VM system. */
-static vm_size_t avail_remaining;
-
extern char version[];
/* If set, reboot the system on ctrl-alt-delete. */
boolean_t rebootflag = FALSE; /* exported to kdintr */
-/* XX interrupt stack pointer and highwater mark, for locore.S. */
-vm_offset_t int_stack_top, int_stack_high;
+/* Interrupt stack. */
+vm_offset_t int_stack_top, int_stack_base;
#ifdef LINUX_DEV
extern void linux_init(void);
@@ -275,7 +268,8 @@ void db_reset_cpu(void)
halt_all_cpus(1);
}
-
+#if 0
+/* XXX: Port XEN bits to biosmem. */
/*
* Compute physical memory size and other parameters.
*/
@@ -359,6 +353,7 @@ mem_size_init(void)
- RESERVED_BIOS);
#endif /* MACH_HYP */
}
+#endif /* 0 */
/*
* Basic PC VM initialization.
@@ -370,7 +365,7 @@ i386at_init(void)
/* XXX move to intel/pmap.h */
extern pt_entry_t *kernel_page_dir;
int nb_direct, i;
- vm_offset_t addr, delta;
+ vm_offset_t delta;
/*
* Initialize the PIC prior to any possible call to an spl.
@@ -384,44 +379,7 @@ i386at_init(void)
/*
* Find memory size parameters.
*/
- mem_size_init();
-
-#ifdef MACH_XEN
- kernel_cmdline = (char*) boot_info.cmd_line;
-#else /* MACH_XEN */
- /* Copy content pointed by boot_info before losing access to it when it
- * is too far in physical memory. */
- if (boot_info.flags & MULTIBOOT_CMDLINE) {
- int len = strlen ((char*)phystokv(boot_info.cmdline)) + 1;
- assert(init_alloc_aligned(round_page(len), &addr));
- kernel_cmdline = (char*) phystokv(addr);
- memcpy(kernel_cmdline, (void *)phystokv(boot_info.cmdline), len);
- boot_info.cmdline = addr;
- }
-
- if (boot_info.flags & MULTIBOOT_MODS) {
- struct multiboot_module *m;
- int i;
-
- assert(init_alloc_aligned(round_page(boot_info.mods_count * sizeof(*m)), &addr));
- m = (void*) phystokv(addr);
- memcpy(m, (void*) phystokv(boot_info.mods_addr), boot_info.mods_count * sizeof(*m));
- boot_info.mods_addr = addr;
-
- for (i = 0; i < boot_info.mods_count; i++) {
- vm_size_t size = m[i].mod_end - m[i].mod_start;
- assert(init_alloc_aligned(round_page(size), &addr));
- memcpy((void*) phystokv(addr), (void*) phystokv(m[i].mod_start), size);
- m[i].mod_start = addr;
- m[i].mod_end = addr + size;
-
- size = strlen((char*) phystokv(m[i].string)) + 1;
- assert(init_alloc_aligned(round_page(size), &addr));
- memcpy((void*) phystokv(addr), (void*) phystokv(m[i].string), size);
- m[i].string = addr;
- }
- }
-#endif /* MACH_XEN */
+ biosmem_bootstrap((struct multiboot_raw_info *) &boot_info);
/*
* Initialize kernel physical map, mapping the
@@ -485,10 +443,42 @@ i386at_init(void)
pmap_clear_bootstrap_pagetable((void *)boot_info.pt_base);
#endif /* MACH_PV_PAGETABLES */
- /* Interrupt stacks are allocated in physical memory,
- while kernel stacks are allocated in kernel virtual memory,
- so phys_last_addr serves as a convenient dividing point. */
- int_stack_high = phystokv(phys_last_addr);
+ /* Load the pages into the vm_page module. */
+ biosmem_setup();
+
+ /* Initialize physical memory management. */
+ vm_page_setup();
+ vm_page_info();
+
+ /* Initialize the slab allocator. */
+ slab_bootstrap();
+ slab_init();
+
+ {
+#ifdef MACH_XEN
+ kernel_cmdline = (char*) boot_info.cmd_line;
+#else /* MACH_XEN */
+ /* Copy content pointed by boot_info before losing
+ * access to it when it is too far in physical
+ * memory. */
+ if (boot_info.flags & MULTIBOOT_CMDLINE) {
+ size_t len = strlen((const char *)
+ phystokv(boot_info.cmdline));
+ kernel_cmdline = (char *) kalloc(len);
+ memcpy(kernel_cmdline,
+ (const void *) phystokv(boot_info.cmdline),
+ len + 1);
+ }
+#endif
+ }
+
+ /* Bootstrap the thread module so that we can allocate an
+ interrupt stack. */
+ thread_bootstrap();
+
+ /* Allocate interrupt stack. */
+ int_stack_base = thread_bootstrap_stack_alloc();
+ int_stack_top = int_stack_base + KERNEL_STACK_SIZE - 4;
/*
* Initialize and activate the real i386 protected-mode structures.
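
The replacement bootstrap sequence in this hunk has a strict ordering: biosmem must hand its physical segments to the VM layer before vm_page_setup() runs, the slab allocator must be up before kalloc() can copy the command line, and thread_bootstrap() must run before an interrupt stack can be carved out. A minimal sketch of just that ordering, using the calls from the hunk (the numbered comments are mine; the semantics of each call are assumed from its name and the hunk's own comments):

/* Sketch of the i386at_init() bootstrap ordering introduced above. */
static void bootstrap_order_sketch(void)
{
	/* 1. Register the physical memory map (previous hunk). */
	biosmem_bootstrap((struct multiboot_raw_info *) &boot_info);

	/* 2. Hand the pages to the vm_page module, then build its
	      per-page structures. */
	biosmem_setup();
	vm_page_setup();

	/* 3. Bring up the slab allocator; kalloc() works from here on,
	      e.g. for the kernel_cmdline copy. */
	slab_bootstrap();
	slab_init();

	/* 4. Bootstrap the thread module, then take the interrupt stack
	      from its stack allocator instead of reusing the init stack. */
	thread_bootstrap();
	int_stack_base = thread_bootstrap_stack_alloc();
	int_stack_top = int_stack_base + KERNEL_STACK_SIZE - 4;
}
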
@@ -534,11 +524,6 @@ i386at_init(void)
#ifdef MACH_XEN
hyp_p2m_init();
#endif /* MACH_XEN */
-
- /* XXX We'll just use the initialization stack we're already running on
- as the interrupt stack for now. Later this will have to change,
- because the init stack will get freed after bootup. */
- asm("movl %%esp,%0" : "=m" (int_stack_top));
}
/*
@@ -704,201 +689,6 @@ resettodr(void)
writetodc();
}
-unsigned int pmap_free_pages(void)
-{
- return atop(avail_remaining);
-}
-
-/* Always returns page-aligned regions. */
-boolean_t
-init_alloc_aligned(vm_size_t size, vm_offset_t *addrp)
-{
- vm_offset_t addr;
-
-#ifdef MACH_HYP
- /* There is none */
- if (!avail_next)
- avail_next = _kvtophys(boot_info.pt_base) + (boot_info.nr_pt_frames + 3) * 0x1000;
-#else /* MACH_HYP */
- extern char start[], end[];
- int i;
- static int wrapped = 0;
-
- /* Memory regions to skip. */
- vm_offset_t cmdline_start_pa = boot_info.flags & MULTIBOOT_CMDLINE
- ? boot_info.cmdline : 0;
- vm_offset_t cmdline_end_pa = cmdline_start_pa
- ? cmdline_start_pa+strlen((char*)phystokv(cmdline_start_pa))+1
- : 0;
- vm_offset_t mods_start_pa = boot_info.flags & MULTIBOOT_MODS
- ? boot_info.mods_addr : 0;
- vm_offset_t mods_end_pa = mods_start_pa
- ? mods_start_pa
- + boot_info.mods_count * sizeof(struct multiboot_module)
- : 0;
-
- retry:
-#endif /* MACH_HYP */
-
- /* Page-align the start address. */
- avail_next = round_page(avail_next);
-
-#ifndef MACH_HYP
- /* Start with memory above 16MB, reserving the low memory for later. */
- /* Don't care on Xen */
- if (!wrapped && phys_last_addr > 16 * 1024*1024)
- {
- if (avail_next < 16 * 1024*1024)
- avail_next = 16 * 1024*1024;
- else if (avail_next == phys_last_addr)
- {
- /* We have used all the memory above 16MB, so now start on
- the low memory. This will wind up at the end of the list
- of free pages, so it should not have been allocated to any
- other use in early initialization before the Linux driver
- glue initialization needs to allocate low memory. */
- avail_next = RESERVED_BIOS;
- wrapped = 1;
- }
- }
-#endif /* MACH_HYP */
-
- /* Check if we have reached the end of memory. */
- if (avail_next ==
- (
-#ifndef MACH_HYP
- wrapped ? 16 * 1024*1024 :
-#endif /* MACH_HYP */
- phys_last_addr))
- return FALSE;
-
- /* Tentatively assign the current location to the caller. */
- addr = avail_next;
-
- /* Bump the pointer past the newly allocated region
- and see where that puts us. */
- avail_next += size;
-
-#ifndef MACH_HYP
- /* Skip past the I/O and ROM area. */
- if (boot_info.flags & MULTIBOOT_MEM_MAP)
- {
- struct multiboot_mmap *map, *map_end, *current = NULL, *next = NULL;
- unsigned long long minimum_next = ~0ULL;
-
- map = (void*) phystokv(boot_info.mmap_addr);
- map_end = (void*) map + boot_info.mmap_count;
-
- /* Find both our current map, and the next one */
- while (map + 1 <= map_end)
- {
- if (map->Type == MB_ARD_MEMORY)
- {
- unsigned long long start = map->BaseAddr;
- unsigned long long end = start + map->Length;;
-
- if (start <= addr && avail_next <= end)
- {
- /* Ok, fits in the current map */
- current = map;
- break;
- }
- else if (avail_next <= start && start < minimum_next)
- {
- /* This map is not far from avail_next */
- next = map;
- minimum_next = start;
- }
- }
- map = (void*) map + map->size + sizeof(map->size);
- }
-
- if (!current) {
- /* Area does not fit in the current map, switch to next
- * map if any */
- if (!next || next->BaseAddr >= phys_last_addr)
- {
- /* No further reachable map, we have reached
- * the end of memory, but possibly wrap around
- * 16MiB. */
- avail_next = phys_last_addr;
- goto retry;
- }
-
- /* Start from next map */
- avail_next = next->BaseAddr;
- goto retry;
- }
- }
- else if ((avail_next > (boot_info.mem_lower * 0x400)) && (addr < 0x100000))
- {
- avail_next = 0x100000;
- goto retry;
- }
-
- /* Skip our own kernel code, data, and bss. */
- if ((phystokv(avail_next) > (vm_offset_t)start) && (phystokv(addr) < (vm_offset_t)end))
- {
- avail_next = _kvtophys(end);
- goto retry;
- }
-
- /* Skip any areas occupied by valuable boot_info data. */
- if ((avail_next > cmdline_start_pa) && (addr < cmdline_end_pa))
- {
- avail_next = cmdline_end_pa;
- goto retry;
- }
- if ((avail_next > mods_start_pa) && (addr < mods_end_pa))
- {
- avail_next = mods_end_pa;
- goto retry;
- }
- if ((phystokv(avail_next) > kern_sym_start) && (phystokv(addr) < kern_sym_end))
- {
- avail_next = _kvtophys(kern_sym_end);
- goto retry;
- }
- if (boot_info.flags & MULTIBOOT_MODS)
- {
- struct multiboot_module *m = (struct multiboot_module *)
- phystokv(boot_info.mods_addr);
- for (i = 0; i < boot_info.mods_count; i++)
- {
- if ((avail_next > m[i].mod_start)
- && (addr < m[i].mod_end))
- {
- avail_next = m[i].mod_end;
- goto retry;
- }
- /* XXX string */
- }
- }
-#endif /* MACH_HYP */
-
- avail_remaining -= size;
-
- *addrp = addr;
- return TRUE;
-}
-
-boolean_t pmap_next_page(vm_offset_t *addrp)
-{
- return init_alloc_aligned(PAGE_SIZE, addrp);
-}
-
-/* Grab a physical page:
- the standard memory allocation mechanism
- during system initialization. */
-vm_offset_t
-pmap_grab_page(void)
-{
- vm_offset_t addr;
- if (!pmap_next_page(&addr))
- panic("Not enough memory to initialize Mach");
- return addr;
-}
-
boolean_t pmap_valid_page(vm_offset_t x)
{
/* XXX is this OK? What does it matter for? */
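
For reference, the roughly two hundred removed lines above all implement one pattern: bump avail_next through physical memory and, whenever the tentative region overlaps something that must be preserved (the BIOS/ROM area, the kernel image and symbol table, the command line, the module list and module contents), push avail_next past the obstacle and retry. A condensed sketch of that pattern, leaving out the 16 MiB wrap-around and the multiboot memory-map walk; the reserved_region array and overlaps() helper are illustrative stand-ins for the individual checks in the original:

struct reserved_region { vm_offset_t start, end; };

static boolean_t overlaps(vm_offset_t start, vm_offset_t end,
			  const struct reserved_region *r)
{
	return (end > r->start) && (start < r->end);
}

/* Condensed form of the removed init_alloc_aligned(). */
static boolean_t early_alloc_sketch(vm_size_t size, vm_offset_t *addrp,
				    const struct reserved_region *reserved,
				    int nregions)
{
	vm_offset_t addr;
	int i;

 retry:
	/* Page-align the start address. */
	avail_next = round_page(avail_next);

	/* Check if we have reached the end of memory. */
	if (avail_next == phys_last_addr)
		return FALSE;

	/* Tentatively assign the current location to the caller,
	   then bump the pointer past the requested region. */
	addr = avail_next;
	avail_next += size;

	/* Skip past anything the region would clobber and retry. */
	for (i = 0; i < nregions; i++)
		if (overlaps(addr, avail_next, &reserved[i])) {
			avail_next = reserved[i].end;
			goto retry;
		}

	avail_remaining -= size;
	*addrp = addr;
	return TRUE;
}
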