Diffstat (limited to 'i386')
-rw-r--r--  i386/Makefrag.am                  |   2
-rw-r--r--  i386/i386/db_trace.c              |   4
-rw-r--r--  i386/i386/locore.S                |  14
-rw-r--r--  i386/i386/vm_param.h              |  61
-rw-r--r--  i386/i386at/biosmem.c             |  71
-rw-r--r--  i386/i386at/biosmem.h             |   4
-rw-r--r--  i386/i386at/model_dep.c           | 310
-rw-r--r--  i386/i386at/model_dep.h           |  14
-rw-r--r--  i386/include/mach/i386/vm_types.h |   9
-rw-r--r--  i386/intel/pmap.c                 |  45
-rw-r--r--  i386/ldscript                     |   9
-rw-r--r--  i386/x15/boot.h                   | 127
-rw-r--r--  i386/x15/elf.h                    |  61
-rw-r--r--  i386/x15/multiboot.h              | 111
-rw-r--r--  i386/x15/param.h                  | 185
15 files changed, 700 insertions, 327 deletions
diff --git a/i386/Makefrag.am b/i386/Makefrag.am
index 4dd6a9f..215318d 100644
--- a/i386/Makefrag.am
+++ b/i386/Makefrag.am
@@ -29,6 +29,8 @@ libkernel_a_SOURCES += \
if PLATFORM_at
libkernel_a_SOURCES += \
+ i386/i386at/biosmem.c \
+ i386/i386at/biosmem.h \
i386/i386at/boothdr.S \
i386/i386at/com.c \
i386/i386at/com.h \
diff --git a/i386/i386/db_trace.c b/i386/i386/db_trace.c
index ec33859..c8789e7 100644
--- a/i386/i386/db_trace.c
+++ b/i386/i386/db_trace.c
@@ -37,6 +37,7 @@
#include <machine/machspl.h>
#include <machine/db_interface.h>
#include <machine/db_trace.h>
+#include <i386at/model_dep.h>
#include <ddb/db_access.h>
#include <ddb/db_command.h>
@@ -129,7 +130,6 @@ db_i386_reg_value(
long *dp = 0;
db_expr_t null_reg = 0;
thread_t thread = ap->thread;
- extern unsigned int_stack_high;
if (db_option(ap->modif, 'u')) {
if (thread == THREAD_NULL) {
@@ -139,7 +139,7 @@ db_i386_reg_value(
if (thread == current_thread()) {
if (ddb_regs.cs & 0x3)
dp = vp->valuep;
- else if (ddb_regs.ebp < int_stack_high)
+ else if (ON_INT_STACK(ddb_regs.ebp))
db_error("cannot get/set user registers in nested interrupt\n");
}
} else {
diff --git a/i386/i386/locore.S b/i386/i386/locore.S
index cfda86f..3ff0044 100644
--- a/i386/i386/locore.S
+++ b/i386/i386/locore.S
@@ -542,8 +542,10 @@ trap_from_kernel:
#if MACH_KDB || MACH_TTD
movl %esp,%ebx /* save current stack */
- cmpl EXT(int_stack_high),%esp /* on an interrupt stack? */
- jb 1f /* OK if so */
+ movl %esp,%edx /* on an interrupt stack? */
+ and $(~(KERNEL_STACK_SIZE-1)),%edx
+ cmpl EXT(int_stack_base),%edx
+ je 1f /* OK if so */
CPU_NUMBER(%edx) /* get CPU number */
cmpl CX(EXT(kernel_stack),%edx),%esp
@@ -647,8 +649,10 @@ ENTRY(all_intrs)
pushl %edx
cld /* clear direction flag */
- cmpl %ss:EXT(int_stack_high),%esp /* on an interrupt stack? */
- jb int_from_intstack /* if not: */
+ movl %esp,%edx /* on an interrupt stack? */
+ and $(~(KERNEL_STACK_SIZE-1)),%edx
+ cmpl %ss:EXT(int_stack_base),%edx
+ je int_from_intstack /* if not: */
pushl %ds /* save segment registers */
pushl %es
@@ -707,7 +711,7 @@ LEXT(return_to_iret) /* ( label for kdb_kintr and hardclock) */
iret /* return to caller */
int_from_intstack:
- cmpl $EXT(_intstack),%esp /* seemingly looping? */
+ cmpl $EXT(int_stack_base),%esp /* seemingly looping? */
jb stack_overflowed /* if not: */
call EXT(interrupt) /* call interrupt routine */
_return_to_iret_i: /* ( label for kdb_kintr) */
diff --git a/i386/i386/vm_param.h b/i386/i386/vm_param.h
index ffd91d6..16f9119 100644
--- a/i386/i386/vm_param.h
+++ b/i386/i386/vm_param.h
@@ -25,6 +25,7 @@
/* XXX use xu/vm_param.h */
#include <mach/vm_param.h>
+#include <kern/macros.h>
#ifdef MACH_PV_PAGETABLES
#include <xen/public/xen.h>
#endif
@@ -54,19 +55,65 @@
#define VM_MAX_KERNEL_ADDRESS (LINEAR_MAX_KERNEL_ADDRESS - LINEAR_MIN_KERNEL_ADDRESS + VM_MIN_KERNEL_ADDRESS)
#endif /* MACH_PV_PAGETABLES */
-/* Reserve mapping room for kmem. */
-#ifdef MACH_XEN
-#define VM_KERNEL_MAP_SIZE (224 * 1024 * 1024)
-#else
-#define VM_KERNEL_MAP_SIZE (192 * 1024 * 1024)
-#endif
-
/* The kernel virtual address space is actually located
at high linear addresses.
This is the kernel address range in linear addresses. */
#define LINEAR_MIN_KERNEL_ADDRESS (VM_MAX_ADDRESS)
#define LINEAR_MAX_KERNEL_ADDRESS (0xffffffffUL)
+/*
+ * Direct physical mapping boundaries.
+ */
+#ifdef __LP64__
+#define VM_MIN_DIRECTMAP_ADDRESS VM_MIN_KERNEL_ADDRESS
+#define VM_MAX_DIRECTMAP_ADDRESS DECL_CONST(0xffffc00000000000, UL)
+#else /* __LP64__ */
+#define VM_MIN_DIRECTMAP_ADDRESS VM_MAX_ADDRESS
+#define VM_MAX_DIRECTMAP_ADDRESS DECL_CONST(0xf8000000, UL)
+#endif /* __LP64__ */
+
+/* Reserve mapping room for virtual kernel memory, mainly used for IPC
+ and temporary mappings. */
+#define VM_KERNEL_MAP_SIZE (128 * 1024 * 1024)
+
+/*
+ * Physical memory properties.
+ */
+
+#define VM_PAGE_DMA_LIMIT DECL_CONST(0x1000000, UL)
+
+#ifdef __LP64__
+#define VM_PAGE_MAX_SEGS 4
+#define VM_PAGE_DMA32_LIMIT DECL_CONST(0x100000000, UL)
+#define VM_PAGE_DIRECTMAP_LIMIT DECL_CONST(0x400000000000, UL)
+#define VM_PAGE_HIGHMEM_LIMIT DECL_CONST(0x10000000000000, UL)
+#else /* __LP64__ */
+#define VM_PAGE_DIRECTMAP_LIMIT DECL_CONST(0x38000000, ULL)
+#ifdef X86_PAE
+#define VM_PAGE_MAX_SEGS 3
+#define VM_PAGE_HIGHMEM_LIMIT DECL_CONST(0x10000000000000, ULL)
+#else /* X86_PAE */
+#define VM_PAGE_MAX_SEGS 3
+#define VM_PAGE_HIGHMEM_LIMIT DECL_CONST(0xfffff000, UL)
+#endif /* X86_PAE */
+#endif /* __LP64__ */
+
+/*
+ * Physical segment indexes.
+ */
+#define VM_PAGE_SEG_DMA 0
+
+#ifdef __LP64__
+#define VM_PAGE_SEG_DMA32 1
+#define VM_PAGE_SEG_DIRECTMAP 2
+#define VM_PAGE_SEG_HIGHMEM 3
+#else /* __LP64__ */
+#define VM_PAGE_SEG_DMA32 1 /* Alias for the DIRECTMAP segment */
+#define VM_PAGE_SEG_DIRECTMAP 1
+#define VM_PAGE_SEG_HIGHMEM 2
+#endif /* __LP64__ */
+
+
#ifdef MACH_PV_PAGETABLES
/* need room for mmu updates (2*8bytes) */
#define KERNEL_STACK_SIZE (4*I386_PGBYTES)
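
The segment limits and indexes added above partition physical memory for the new vm_page module. As an illustration only (the helper below is hypothetical and uses the 32-bit non-PAE values from this hunk; the real selection logic lives in the vm_page module):

    typedef unsigned long phys_addr_t;          /* non-PAE, cf. vm_types.h below */

    /* Hypothetical classifier: map a physical address to a segment index,
       using the 32-bit non-PAE limits from i386/i386/vm_param.h. */
    static unsigned int vm_page_seg_index(phys_addr_t pa)
    {
        if (pa < 0x1000000UL)                   /* VM_PAGE_DMA_LIMIT */
            return 0;                           /* VM_PAGE_SEG_DMA */
        if (pa < 0x38000000UL)                  /* VM_PAGE_DIRECTMAP_LIMIT */
            return 1;                           /* VM_PAGE_SEG_DIRECTMAP (== _DMA32) */
        return 2;                               /* VM_PAGE_SEG_HIGHMEM */
    }
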
diff --git a/i386/i386at/biosmem.c b/i386/i386at/biosmem.c
index d666f1b..6181eb3 100644
--- a/i386/i386at/biosmem.c
+++ b/i386/i386at/biosmem.c
@@ -16,23 +16,33 @@
*/
#include <kern/assert.h>
-#include <kern/init.h>
#include <kern/macros.h>
-#include <kern/panic.h>
-#include <kern/param.h>
-#include <kern/printk.h>
-#include <kern/stddef.h>
-#include <kern/stdint.h>
-#include <kern/string.h>
-#include <kern/types.h>
-#include <machine/biosmem.h>
-#include <machine/boot.h>
-#include <machine/cpu.h>
-#include <machine/elf.h>
-#include <machine/multiboot.h>
-#include <vm/vm_kmem.h>
+#include <kern/debug.h>
+#include <kern/printf.h>
+#include <stddef.h>
+#include <string.h>
+#include <sys/types.h>
#include <vm/vm_page.h>
+#include "biosmem.h"
+#include "x15/elf.h"
+#include "x15/multiboot.h"
+
+/* Mach glue. */
+#define __bootdata /* nothing */
+#define __boot /* nothing */
+#define __init /* nothing */
+#define boot_memmove memmove
+#define boot_memset(P,C,S) memset((char *) phystokv(P), C, S)
+#define boot_strlen(P) strlen((char *) phystokv(P))
+#define boot_panic panic
+#define printk printf
+#define BOOT_VTOP(addr) _kvtophys(addr)
+
+/* XXX */
+extern char _boot;
+extern char _end;
+
/*
* Maximum number of entries in the BIOS memory map.
*
@@ -115,7 +125,7 @@ biosmem_map_build(const struct multiboot_raw_info *mbi)
struct biosmem_map_entry *start, *entry, *end;
unsigned long addr;
- addr = mbi->mmap_addr;
+ addr = phystokv(mbi->mmap_addr);
mb_entry = (struct multiboot_raw_mmap_entry *)addr;
mb_end = (struct multiboot_raw_mmap_entry *)(addr + mbi->mmap_length);
start = biosmem_map;
@@ -373,16 +383,16 @@ biosmem_save_cmdline_sizes(struct multiboot_raw_info *mbi)
uint32_t i;
if (mbi->flags & MULTIBOOT_LOADER_CMDLINE)
- mbi->unused0 = boot_strlen((char *)(unsigned long)mbi->cmdline) + 1;
+ mbi->unused0 = boot_strlen((unsigned long)mbi->cmdline) + 1;
if (mbi->flags & MULTIBOOT_LOADER_MODULES) {
unsigned long addr;
- addr = mbi->mods_addr;
+ addr = phystokv(mbi->mods_addr);
for (i = 0; i < mbi->mods_count; i++) {
mod = (struct multiboot_raw_module *)addr + i;
- mod->reserved = boot_strlen((char *)(unsigned long)mod->string) + 1;
+ mod->reserved = boot_strlen((unsigned long)mod->string) + 1;
}
}
}
@@ -391,6 +401,8 @@ static void __boot
biosmem_find_boot_data_update(uint32_t min, uint32_t *start, uint32_t *end,
uint32_t data_start, uint32_t data_end)
{
+ assert (data_start < data_end);
+
if ((min <= data_start) && (data_start < *start)) {
*start = data_start;
*end = data_end;
@@ -419,21 +431,20 @@ biosmem_find_boot_data(const struct multiboot_raw_info *mbi, uint32_t min,
struct elf_shdr *shdr;
uint32_t i, start, end = end;
unsigned long tmp;
-
start = max;
- biosmem_find_boot_data_update(min, &start, &end, (unsigned long)&_boot,
+ biosmem_find_boot_data_update(min, &start, &end,
+ BOOT_VTOP((unsigned long)&_boot),
BOOT_VTOP((unsigned long)&_end));
if ((mbi->flags & MULTIBOOT_LOADER_CMDLINE) && (mbi->cmdline != 0))
biosmem_find_boot_data_update(min, &start, &end, mbi->cmdline,
mbi->cmdline + mbi->unused0);
-
if (mbi->flags & MULTIBOOT_LOADER_MODULES) {
i = mbi->mods_count * sizeof(struct multiboot_raw_module);
biosmem_find_boot_data_update(min, &start, &end, mbi->mods_addr,
mbi->mods_addr + i);
- tmp = mbi->mods_addr;
+ tmp = phystokv(mbi->mods_addr);
for (i = 0; i < mbi->mods_count; i++) {
mod = (struct multiboot_raw_module *)tmp + i;
@@ -450,7 +461,7 @@ biosmem_find_boot_data(const struct multiboot_raw_info *mbi, uint32_t min,
tmp = mbi->shdr_num * mbi->shdr_size;
biosmem_find_boot_data_update(min, &start, &end, mbi->shdr_addr,
mbi->shdr_addr + tmp);
- tmp = mbi->shdr_addr;
+ tmp = phystokv(mbi->shdr_addr);
for (i = 0; i < mbi->shdr_num; i++) {
shdr = (struct elf_shdr *)(tmp + (i * mbi->shdr_size));
@@ -458,7 +469,6 @@ biosmem_find_boot_data(const struct multiboot_raw_info *mbi, uint32_t min,
if ((shdr->type != ELF_SHT_SYMTAB)
&& (shdr->type != ELF_SHT_STRTAB))
continue;
-
biosmem_find_boot_data_update(min, &start, &end, shdr->addr,
shdr->addr + shdr->size);
}
@@ -516,6 +526,10 @@ biosmem_setup_allocator(struct multiboot_raw_info *mbi)
biosmem_heap_start = max_heap_start;
biosmem_heap_end = max_heap_end;
biosmem_heap_cur = biosmem_heap_end;
+
+ /* Mach pmap glue. */
+ extern vm_offset_t phys_last_addr;
+ phys_last_addr = (vm_offset_t) max_heap_end;
}
void __boot
@@ -596,7 +610,7 @@ biosmem_bootalloc(unsigned int nr_pages)
boot_panic(biosmem_panic_nomem_msg);
biosmem_heap_cur = addr;
- return boot_memset((void *)addr, 0, size);
+ return boot_memset(addr, 0, size);
}
phys_addr_t __boot
@@ -688,10 +702,15 @@ biosmem_setup(void)
biosmem_map_show();
+#if notyet
cpu = cpu_current();
max_phys_end = (cpu->phys_addr_width == 0)
? (uint64_t)-1
: (uint64_t)1 << cpu->phys_addr_width;
+#else
+ max_phys_end = (uint64_t)1 << 32;
+ (void) cpu;
+#endif
for (i = 0; i < ARRAY_SIZE(biosmem_segments); i++) {
if (biosmem_segment_size(i) == 0)
@@ -713,7 +732,7 @@ biosmem_free_usable_range(phys_addr_t start, phys_addr_t end)
(unsigned long long)((end - start) >> 10));
while (start < end) {
- page = vm_page_lookup(start);
+ page = vm_page_lookup_pa(start);
assert(page != NULL);
vm_page_manage(page);
start += PAGE_SIZE;
diff --git a/i386/i386at/biosmem.h b/i386/i386at/biosmem.h
index b32e027..c4b59f5 100644
--- a/i386/i386at/biosmem.h
+++ b/i386/i386at/biosmem.h
@@ -18,8 +18,8 @@
#ifndef _X86_BIOSMEM_H
#define _X86_BIOSMEM_H
-#include <kern/types.h>
-#include <machine/multiboot.h>
+#include <sys/types.h>
+#include "x15/multiboot.h"
/*
* Address where the address of the Extended BIOS Data Area segment can be
diff --git a/i386/i386at/model_dep.c b/i386/i386at/model_dep.c
index fdf983b..30adcb0 100644
--- a/i386/i386at/model_dep.c
+++ b/i386/i386at/model_dep.c
@@ -49,6 +49,7 @@
#include <kern/mach_clock.h>
#include <kern/printf.h>
#include <kern/startup.h>
+#include <kern/thread.h>
#include <sys/time.h>
#include <sys/types.h>
#include <vm/vm_page.h>
@@ -70,6 +71,12 @@
#include <i386at/rtc.h>
#include <i386at/model_dep.h>
#include <i386at/acpihalt.h>
+#define multiboot_module x15_multiboot_module
+#define multiboot_info x15_multiboot_info
+#include <i386/x15/multiboot.h>
+#include <i386at/biosmem.h>
+#undef multiboot_module
+#undef multiboot_info
#ifdef MACH_XEN
#include <xen/console.h>
#include <xen/store.h>
@@ -125,27 +132,13 @@ struct multiboot_info boot_info;
/* Command line supplied to kernel. */
char *kernel_cmdline = "";
-/* This is used for memory initialization:
- it gets bumped up through physical memory
- that exists and is not occupied by boot gunk.
- It is not necessarily page-aligned. */
-static vm_offset_t avail_next
-#ifndef MACH_HYP
- = 0x1000 /* XX end of BIOS data area */
-#endif /* MACH_HYP */
- ;
-
-/* Possibly overestimated amount of available memory
- still remaining to be handed to the VM system. */
-static vm_size_t avail_remaining;
-
extern char version[];
/* If set, reboot the system on ctrl-alt-delete. */
boolean_t rebootflag = FALSE; /* exported to kdintr */
-/* XX interrupt stack pointer and highwater mark, for locore.S. */
-vm_offset_t int_stack_top, int_stack_high;
+/* Interrupt stack. */
+vm_offset_t int_stack_top, int_stack_base;
#ifdef LINUX_DEV
extern void linux_init(void);
@@ -273,7 +266,8 @@ void db_reset_cpu(void)
halt_all_cpus(1);
}
-
+#if 0
+/* XXX: Port XEN bits to biosmem. */
/*
* Compute physical memory size and other parameters.
*/
@@ -357,6 +351,7 @@ mem_size_init(void)
- 0x1000);
#endif /* MACH_HYP */
}
+#endif /* 0 */
/*
* Basic PC VM initialization.
@@ -368,7 +363,7 @@ i386at_init(void)
/* XXX move to intel/pmap.h */
extern pt_entry_t *kernel_page_dir;
int nb_direct, i;
- vm_offset_t addr, delta;
+ vm_offset_t delta;
/*
* Initialize the PIC prior to any possible call to an spl.
@@ -382,44 +377,7 @@ i386at_init(void)
/*
* Find memory size parameters.
*/
- mem_size_init();
-
-#ifdef MACH_XEN
- kernel_cmdline = (char*) boot_info.cmd_line;
-#else /* MACH_XEN */
- /* Copy content pointed by boot_info before losing access to it when it
- * is too far in physical memory. */
- if (boot_info.flags & MULTIBOOT_CMDLINE) {
- int len = strlen ((char*)phystokv(boot_info.cmdline)) + 1;
- assert(init_alloc_aligned(round_page(len), &addr));
- kernel_cmdline = (char*) phystokv(addr);
- memcpy(kernel_cmdline, (void *)phystokv(boot_info.cmdline), len);
- boot_info.cmdline = addr;
- }
-
- if (boot_info.flags & MULTIBOOT_MODS) {
- struct multiboot_module *m;
- int i;
-
- assert(init_alloc_aligned(round_page(boot_info.mods_count * sizeof(*m)), &addr));
- m = (void*) phystokv(addr);
- memcpy(m, (void*) phystokv(boot_info.mods_addr), boot_info.mods_count * sizeof(*m));
- boot_info.mods_addr = addr;
-
- for (i = 0; i < boot_info.mods_count; i++) {
- vm_size_t size = m[i].mod_end - m[i].mod_start;
- assert(init_alloc_aligned(round_page(size), &addr));
- memcpy((void*) phystokv(addr), (void*) phystokv(m[i].mod_start), size);
- m[i].mod_start = addr;
- m[i].mod_end = addr + size;
-
- size = strlen((char*) phystokv(m[i].string)) + 1;
- assert(init_alloc_aligned(round_page(size), &addr));
- memcpy((void*) phystokv(addr), (void*) phystokv(m[i].string), size);
- m[i].string = addr;
- }
- }
-#endif /* MACH_XEN */
+ biosmem_bootstrap((struct multiboot_raw_info *) &boot_info);
/*
* Initialize kernel physical map, mapping the
@@ -483,10 +441,42 @@ i386at_init(void)
pmap_clear_bootstrap_pagetable((void *)boot_info.pt_base);
#endif /* MACH_PV_PAGETABLES */
- /* Interrupt stacks are allocated in physical memory,
- while kernel stacks are allocated in kernel virtual memory,
- so phys_last_addr serves as a convenient dividing point. */
- int_stack_high = phystokv(phys_last_addr);
+ /* Load the pages into the vm_page module. */
+ biosmem_setup();
+
+ /* Initialize physical memory management. */
+ vm_page_setup();
+ vm_page_info();
+
+ /* Initialize the slab allocator. */
+ slab_bootstrap();
+ slab_init();
+
+ {
+#ifdef MACH_XEN
+ kernel_cmdline = (char*) boot_info.cmd_line;
+#else /* MACH_XEN */
+ /* Copy content pointed by boot_info before losing
+ * access to it when it is too far in physical
+ * memory. */
+ if (boot_info.flags & MULTIBOOT_CMDLINE) {
+ size_t len = strlen((const char *)
+ phystokv(boot_info.cmdline));
+ kernel_cmdline = (char *) kalloc(len + 1);
+ memcpy(kernel_cmdline,
+ (const void *) phystokv(boot_info.cmdline),
+ len + 1);
+ }
+#endif
+ }
+
+ /* Bootstrap the thread module so that we can allocate an
+ interrupt stack. */
+ thread_bootstrap();
+
+ /* Allocate interrupt stack. */
+ int_stack_base = thread_bootstrap_stack_alloc();
+ int_stack_top = int_stack_base + KERNEL_STACK_SIZE - 4;
/*
* Initialize and activate the real i386 protected-mode structures.
@@ -532,11 +522,6 @@ i386at_init(void)
#ifdef MACH_XEN
hyp_p2m_init();
#endif /* MACH_XEN */
-
- /* XXX We'll just use the initialization stack we're already running on
- as the interrupt stack for now. Later this will have to change,
- because the init stack will get freed after bootup. */
- asm("movl %%esp,%0" : "=m" (int_stack_top));
}
/*
@@ -702,201 +687,6 @@ resettodr(void)
writetodc();
}
-unsigned int pmap_free_pages(void)
-{
- return atop(avail_remaining);
-}
-
-/* Always returns page-aligned regions. */
-boolean_t
-init_alloc_aligned(vm_size_t size, vm_offset_t *addrp)
-{
- vm_offset_t addr;
-
-#ifdef MACH_HYP
- /* There is none */
- if (!avail_next)
- avail_next = _kvtophys(boot_info.pt_base) + (boot_info.nr_pt_frames + 3) * 0x1000;
-#else /* MACH_HYP */
- extern char start[], end[];
- int i;
- static int wrapped = 0;
-
- /* Memory regions to skip. */
- vm_offset_t cmdline_start_pa = boot_info.flags & MULTIBOOT_CMDLINE
- ? boot_info.cmdline : 0;
- vm_offset_t cmdline_end_pa = cmdline_start_pa
- ? cmdline_start_pa+strlen((char*)phystokv(cmdline_start_pa))+1
- : 0;
- vm_offset_t mods_start_pa = boot_info.flags & MULTIBOOT_MODS
- ? boot_info.mods_addr : 0;
- vm_offset_t mods_end_pa = mods_start_pa
- ? mods_start_pa
- + boot_info.mods_count * sizeof(struct multiboot_module)
- : 0;
-
- retry:
-#endif /* MACH_HYP */
-
- /* Page-align the start address. */
- avail_next = round_page(avail_next);
-
-#ifndef MACH_HYP
- /* Start with memory above 16MB, reserving the low memory for later. */
- /* Don't care on Xen */
- if (!wrapped && phys_last_addr > 16 * 1024*1024)
- {
- if (avail_next < 16 * 1024*1024)
- avail_next = 16 * 1024*1024;
- else if (avail_next == phys_last_addr)
- {
- /* We have used all the memory above 16MB, so now start on
- the low memory. This will wind up at the end of the list
- of free pages, so it should not have been allocated to any
- other use in early initialization before the Linux driver
- glue initialization needs to allocate low memory. */
- avail_next = 0x1000;
- wrapped = 1;
- }
- }
-#endif /* MACH_HYP */
-
- /* Check if we have reached the end of memory. */
- if (avail_next ==
- (
-#ifndef MACH_HYP
- wrapped ? 16 * 1024*1024 :
-#endif /* MACH_HYP */
- phys_last_addr))
- return FALSE;
-
- /* Tentatively assign the current location to the caller. */
- addr = avail_next;
-
- /* Bump the pointer past the newly allocated region
- and see where that puts us. */
- avail_next += size;
-
-#ifndef MACH_HYP
- /* Skip past the I/O and ROM area. */
- if (boot_info.flags & MULTIBOOT_MEM_MAP)
- {
- struct multiboot_mmap *map, *map_end, *current = NULL, *next = NULL;
- unsigned long long minimum_next = ~0ULL;
-
- map = (void*) phystokv(boot_info.mmap_addr);
- map_end = (void*) map + boot_info.mmap_count;
-
- /* Find both our current map, and the next one */
- while (map + 1 <= map_end)
- {
- if (map->Type == MB_ARD_MEMORY)
- {
- unsigned long long start = map->BaseAddr;
- unsigned long long end = start + map->Length;;
-
- if (start <= addr && avail_next <= end)
- {
- /* Ok, fits in the current map */
- current = map;
- break;
- }
- else if (avail_next <= start && start < minimum_next)
- {
- /* This map is not far from avail_next */
- next = map;
- minimum_next = start;
- }
- }
- map = (void*) map + map->size + sizeof(map->size);
- }
-
- if (!current) {
- /* Area does not fit in the current map, switch to next
- * map if any */
- if (!next || next->BaseAddr >= phys_last_addr)
- {
- /* No further reachable map, we have reached
- * the end of memory, but possibly wrap around
- * 16MiB. */
- avail_next = phys_last_addr;
- goto retry;
- }
-
- /* Start from next map */
- avail_next = next->BaseAddr;
- goto retry;
- }
- }
- else if ((avail_next > (boot_info.mem_lower * 0x400)) && (addr < 0x100000))
- {
- avail_next = 0x100000;
- goto retry;
- }
-
- /* Skip our own kernel code, data, and bss. */
- if ((phystokv(avail_next) > (vm_offset_t)start) && (phystokv(addr) < (vm_offset_t)end))
- {
- avail_next = _kvtophys(end);
- goto retry;
- }
-
- /* Skip any areas occupied by valuable boot_info data. */
- if ((avail_next > cmdline_start_pa) && (addr < cmdline_end_pa))
- {
- avail_next = cmdline_end_pa;
- goto retry;
- }
- if ((avail_next > mods_start_pa) && (addr < mods_end_pa))
- {
- avail_next = mods_end_pa;
- goto retry;
- }
- if ((phystokv(avail_next) > kern_sym_start) && (phystokv(addr) < kern_sym_end))
- {
- avail_next = _kvtophys(kern_sym_end);
- goto retry;
- }
- if (boot_info.flags & MULTIBOOT_MODS)
- {
- struct multiboot_module *m = (struct multiboot_module *)
- phystokv(boot_info.mods_addr);
- for (i = 0; i < boot_info.mods_count; i++)
- {
- if ((avail_next > m[i].mod_start)
- && (addr < m[i].mod_end))
- {
- avail_next = m[i].mod_end;
- goto retry;
- }
- /* XXX string */
- }
- }
-#endif /* MACH_HYP */
-
- avail_remaining -= size;
-
- *addrp = addr;
- return TRUE;
-}
-
-boolean_t pmap_next_page(vm_offset_t *addrp)
-{
- return init_alloc_aligned(PAGE_SIZE, addrp);
-}
-
-/* Grab a physical page:
- the standard memory allocation mechanism
- during system initialization. */
-vm_offset_t
-pmap_grab_page(void)
-{
- vm_offset_t addr;
- if (!pmap_next_page(&addr))
- panic("Not enough memory to initialize Mach");
- return addr;
-}
-
boolean_t pmap_valid_page(vm_offset_t x)
{
/* XXX is this OK? What does it matter for? */
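
Taken together, the i386at_init() changes above replace the old avail_next/init_alloc_aligned bootstrap allocator with the biosmem/vm_page path. A condensed sketch of the resulting early-boot ordering (not a literal copy of the function; comments added for orientation):

    /* Early boot in i386at_init(), as rearranged by this patch. */
    biosmem_bootstrap((struct multiboot_raw_info *) &boot_info);  /* parse the memory map */
    /* ... kernel page tables are built and switched to ... */
    biosmem_setup();      /* release usable RAM to the vm_page module */
    vm_page_setup();      /* physical page allocator */
    slab_bootstrap();
    slab_init();          /* slab allocator, so kalloc() works */
    /* copy the multiboot command line out of boot data using kalloc() */
    thread_bootstrap();   /* thread module, provides the stack cache */
    int_stack_base = thread_bootstrap_stack_alloc();
    int_stack_top  = int_stack_base + KERNEL_STACK_SIZE - 4;
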
diff --git a/i386/i386at/model_dep.h b/i386/i386at/model_dep.h
index aa24032..a27d9b3 100644
--- a/i386/i386at/model_dep.h
+++ b/i386/i386at/model_dep.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013 Free Software Foundation.
+ * Copyright (c) 2013-2015 Free Software Foundation.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -25,6 +25,16 @@ extern int timemmap(dev_t dev, vm_offset_t off, vm_prot_t prot);
void inittodr(void);
-boolean_t init_alloc_aligned(vm_size_t size, vm_offset_t *addrp);
+/*
+ * Interrupt stack.
+ *
+ * We allocate the interrupt stacks from the kernel's stack cache. As
+ * the stacks are naturally aligned, it is easy to find the base
+ * address given a stack pointer.
+ */
+extern vm_offset_t int_stack_top, int_stack_base;
+
+/* Check whether P points to the interrupt stack. */
+#define ON_INT_STACK(P) (((P) & ~(KERNEL_STACK_SIZE-1)) == int_stack_base)
#endif /* _MODEL_DEP_H_ */
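
Because stacks handed out by the kernel stack cache are naturally aligned to KERNEL_STACK_SIZE, masking any pointer into a stack with ~(KERNEL_STACK_SIZE-1) yields that stack's base; ON_INT_STACK() and the new locore.S checks both rely on this. A minimal standalone sketch of the same trick (the stack size and addresses are example values, and the helper is for illustration only):

    #include <assert.h>
    #include <stdint.h>

    #define KERNEL_STACK_SIZE (4 * 4096)    /* example; the real value is in vm_param.h */

    /* Round a stack pointer down to the base of its naturally aligned stack. */
    static uintptr_t stack_base(uintptr_t sp)
    {
        return sp & ~((uintptr_t)KERNEL_STACK_SIZE - 1);
    }

    int main(void)
    {
        uintptr_t int_stack_base = 0xc1230000;              /* made-up, aligned base */
        assert(stack_base(0xc1233abc) == int_stack_base);   /* ON_INT_STACK() would be true */
        assert(stack_base(0xc1244abc) != int_stack_base);   /* pointer into some other stack */
        return 0;
    }
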
diff --git a/i386/include/mach/i386/vm_types.h b/i386/include/mach/i386/vm_types.h
index 1439940..47badad 100644
--- a/i386/include/mach/i386/vm_types.h
+++ b/i386/include/mach/i386/vm_types.h
@@ -77,6 +77,15 @@ typedef unsigned long vm_offset_t;
typedef vm_offset_t * vm_offset_array_t;
/*
+ * A phys_addr_t is a physical address.
+ */
+#if PAE
+typedef unsigned long long phys_addr_t;
+#else /* PAE */
+typedef unsigned long phys_addr_t;
+#endif /* PAE */
+
+/*
* A vm_size_t is the proper type for e.g.
* expressing the difference between two
* vm_offset_t entities.
diff --git a/i386/intel/pmap.c b/i386/intel/pmap.c
index 3978303..3416037 100644
--- a/i386/intel/pmap.c
+++ b/i386/intel/pmap.c
@@ -83,6 +83,7 @@
#include <i386/proc_reg.h>
#include <i386/locore.h>
#include <i386/model_dep.h>
+#include <i386at/biosmem.h>
#ifdef MACH_PSEUDO_PHYS
#define WRITE_PTE(pte_p, pte_entry) *(pte_p) = pte_entry?pa_to_ma(pte_entry):0;
@@ -627,19 +628,16 @@ void pmap_bootstrap(void)
/* Note: initial Xen mapping holds at least 512kB free mapped page.
* We use that for directly building our linear mapping. */
#if PAE
- {
- vm_offset_t addr;
- init_alloc_aligned(PDPNUM * INTEL_PGBYTES, &addr);
- kernel_pmap->dirbase = kernel_page_dir = (pt_entry_t*)phystokv(addr);
- }
- kernel_pmap->pdpbase = (pt_entry_t*)phystokv(pmap_grab_page());
+ kernel_pmap->dirbase = kernel_page_dir =
+ (pt_entry_t*) biosmem_bootalloc (PDPNUM);
+ kernel_pmap->pdpbase = (pt_entry_t*) biosmem_bootalloc(1);
{
int i;
for (i = 0; i < PDPNUM; i++)
WRITE_PTE(&kernel_pmap->pdpbase[i], pa_to_pte(_kvtophys((void *) kernel_pmap->dirbase + i * INTEL_PGBYTES)) | INTEL_PTE_VALID);
}
#else /* PAE */
- kernel_pmap->dirbase = kernel_page_dir = (pt_entry_t*)phystokv(pmap_grab_page());
+ kernel_pmap->dirbase = kernel_page_dir = biosmem_bootalloc(1);
#endif /* PAE */
{
int i;
@@ -679,7 +677,7 @@ void pmap_bootstrap(void)
struct mmu_update update;
int j, n;
- l1_map[n_l1map] = (pt_entry_t*) phystokv(pmap_grab_page());
+ l1_map[n_l1map] = (pt_entry_t*) biosmem_bootalloc(1);
for (j = 0; j < NPTES; j++)
l1_map[n_l1map][j] = (((pt_entry_t)pfn_to_mfn(lin2pdenum(la - VM_MIN_KERNEL_ADDRESS) * NPTES + j)) << PAGE_SHIFT) | INTEL_PTE_VALID | INTEL_PTE_WRITE;
pmap_set_page_readonly_init(l1_map[n_l1map]);
@@ -719,7 +717,7 @@ void pmap_bootstrap(void)
for (va = phystokv(phys_first_addr); va >= phystokv(phys_first_addr) && va < kernel_virtual_end; )
{
pt_entry_t *pde = kernel_page_dir + lin2pdenum(kvtolin(va));
- pt_entry_t *ptable = (pt_entry_t*)phystokv(pmap_grab_page());
+ pt_entry_t *ptable = (pt_entry_t*) biosmem_bootalloc(1);
pt_entry_t *pte;
/* Initialize the page directory entry. */
@@ -955,9 +953,8 @@ void pmap_init(void)
s = (vm_size_t) (sizeof(struct pv_entry) * npages
+ pv_lock_table_size(npages)
+ npages);
-
- s = round_page(s);
- if (kmem_alloc_wired(kernel_map, &addr, s) != KERN_SUCCESS)
+ addr = kalloc(s);
+ if (! addr)
panic("pmap_init");
memset((void *) addr, 0, s);
@@ -1158,6 +1155,7 @@ pmap_page_table_page_dealloc(vm_offset_t pa)
pmap_t pmap_create(vm_size_t size)
{
pmap_t p;
+ struct vm_page *mem;
pmap_statistics_t stats;
/*
@@ -1177,10 +1175,11 @@ pmap_t pmap_create(vm_size_t size)
if (p == PMAP_NULL)
panic("pmap_create");
- if (kmem_alloc_wired(kernel_map,
- (vm_offset_t *)&p->dirbase, PDPNUM * INTEL_PGBYTES)
- != KERN_SUCCESS)
- panic("pmap_create");
+ mem = vm_page_alloc_p(iorder2(PDPNUM), VM_PAGE_SEL_DIRECTMAP,
+ VM_PAGE_KERNEL);
+ if (! mem)
+ return PMAP_NULL;
+ p->dirbase = (pt_entry_t *) phystokv(mem->phys_addr);
memcpy(p->dirbase, kernel_page_dir, PDPNUM * INTEL_PGBYTES);
#ifdef LINUX_DEV
@@ -1198,10 +1197,10 @@ pmap_t pmap_create(vm_size_t size)
#endif /* MACH_PV_PAGETABLES */
#if PAE
- if (kmem_alloc_wired(kernel_map,
- (vm_offset_t *)&p->pdpbase, INTEL_PGBYTES)
- != KERN_SUCCESS)
- panic("pmap_create");
+ mem = vm_page_alloc_p(0, VM_PAGE_SEL_DIRECTMAP, VM_PAGE_KERNEL);
+ if (! mem)
+ return PMAP_NULL;
+ p->pdpbase = (pt_entry_t *) phystokv(mem->phys_addr);
{
int i;
for (i = 0; i < PDPNUM; i++)
@@ -1286,12 +1285,14 @@ void pmap_destroy(pmap_t p)
pmap_set_page_readwrite((void*) p->dirbase + i * INTEL_PGBYTES);
}
#endif /* MACH_PV_PAGETABLES */
- kmem_free(kernel_map, (vm_offset_t)p->dirbase, PDPNUM * INTEL_PGBYTES);
+ m = vm_page_lookup_pa(_kvtophys(p->dirbase));
+ vm_page_free_p(m, PDPNUM);
#if PAE
#ifdef MACH_PV_PAGETABLES
pmap_set_page_readwrite(p->pdpbase);
#endif /* MACH_PV_PAGETABLES */
- kmem_free(kernel_map, (vm_offset_t)p->pdpbase, INTEL_PGBYTES);
+ m = vm_page_lookup_pa(_kvtophys(p->pdpbase));
+ vm_page_free_p(m, 0);
#endif /* PAE */
kmem_cache_free(&pmap_cache, (vm_offset_t) p);
}
diff --git a/i386/ldscript b/i386/ldscript
index ddbbf91..dcf47a9 100644
--- a/i386/ldscript
+++ b/i386/ldscript
@@ -12,6 +12,10 @@ SECTIONS
* `gnumach_LINKFLAGS' in `i386/Makefrag.am'.
*/
. = _START;
+
+ /* biosmem.c uses this. */
+ _boot = .;
+
.text :
AT (_START_MAP)
{
@@ -34,7 +38,10 @@ SECTIONS
PROVIDE (_etext = .);
PROVIDE (etext = .);
- /* Read-only sections, merged into text segment: */
+ /* biosmem.c uses this. */
+ _end = .;
+
+/* Read-only sections, merged into text segment: */
PROVIDE (__executable_start = .);
.interp : { *(.interp) }
.note.gnu.build-id : { *(.note.gnu.build-id) }
diff --git a/i386/x15/boot.h b/i386/x15/boot.h
new file mode 100644
index 0000000..ab85be0
--- /dev/null
+++ b/i386/x15/boot.h
@@ -0,0 +1,127 @@
+/*
+ * Copyright (c) 2010-2014 Richard Braun.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _X86_BOOT_H
+#define _X86_BOOT_H
+
+#include <kern/macros.h>
+#include <machine/vm_param.h>
+
+#define VM_KERNEL_OFFSET VM_MIN_KERNEL_ADDRESS
+#define STACK_SIZE PAGE_SIZE
+#define pmap_pte_t void // XXX
+
+/*
+ * Macros used by the very early panic functions.
+ */
+#define BOOT_CGAMEM 0xb8000
+#define BOOT_CGACHARS (80 * 25)
+#define BOOT_CGACOLOR 0x7
+
+/*
+ * The kernel is physically loaded at BOOT_OFFSET by the boot loader. It
+ * is divided in two parts: the .boot section which uses physical addresses
+ * and the main kernel code and data at VM_KERNEL_OFFSET.
+ *
+ * See the linker script for more information.
+ */
+#define BOOT_OFFSET DECL_CONST(0x100000, UL)
+
+/*
+ * Virtual to physical address translation macro.
+ */
+#define BOOT_VTOP(addr) ((addr) - VM_KERNEL_OFFSET)
+
+/*
+ * Address where the MP trampoline code is copied and run at.
+ *
+ * It must reside at a free location in the first segment and be page
+ * aligned.
+ */
+#define BOOT_MP_TRAMPOLINE_ADDR 0x7000
+
+#ifndef __ASSEMBLER__
+
+#include "multiboot.h"
+#include <machine/pmap.h>
+
+/*
+ * Functions and data used before paging is enabled must be part of the .boot
+ * and .bootdata sections respectively, so that they use physical addresses.
+ * Once paging is enabled, their access relies on the kernel identity mapping.
+ */
+#define __boot __section(".boot.text")
+#define __bootdata __section(".boot.data")
+
+/*
+ * Boundaries of the .boot section.
+ */
+extern char _boot;
+extern char _eboot;
+
+extern char boot_stack[STACK_SIZE];
+extern char boot_ap_stack[STACK_SIZE];
+
+/*
+ * This variable contains the CPU ID of an AP during early initialization.
+ */
+extern unsigned int boot_ap_id;
+
+/*
+ * Size of the trampoline code used for APs.
+ */
+extern uint32_t boot_mp_trampoline_size;
+
+/*
+ * Address of the MP trampoline code.
+ */
+void boot_mp_trampoline(void);
+
+/*
+ * Helper functions available before paging is enabled.
+ *
+ * Any memory passed to these must also be accessible without paging.
+ */
+void * boot_memmove(void *dest, const void *src, size_t n);
+void * boot_memset(void *s, int c, size_t n);
+size_t boot_strlen(const char *s);
+void __noreturn boot_panic(const char *s);
+
+/*
+ * This function is called by the bootstrap code before paging is enabled.
+ * It establishes a direct mapping of the kernel at virtual addresses and
+ * returns the physical address of the page directory. It is up to the
+ * caller to actually enable paging.
+ *
+ * TODO Update comment.
+ */
+pmap_pte_t * boot_setup_paging(const struct multiboot_raw_info *mbi,
+ unsigned long eax);
+
+/*
+ * Main entry point, called directly after basic paging is initialized.
+ */
+void boot_main(void);
+
+/*
+ * Entry point for APs.
+ */
+void boot_ap_main(void);
+
+#endif /* __ASSEMBLER__ */
+
+#endif /* _X86_BOOT_H */
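
BOOT_VTOP() above simply subtracts the link-time kernel offset; under the 32-bit layout from x15/param.h (VM_KERNEL_OFFSET == VM_MIN_KERNEL_ADDRESS == 0xc0000000), a symbol linked at 0xc0100000 is at physical 0x00100000. Note that for the Mach build, the glue in biosmem.c above overrides BOOT_VTOP(addr) with _kvtophys(addr), so this header's definition is effectively illustrative here. A tiny standalone check of the arithmetic:

    #include <assert.h>

    #define VM_KERNEL_OFFSET 0xc0000000UL       /* 32-bit value, see x15/param.h */
    #define BOOT_VTOP(addr)  ((addr) - VM_KERNEL_OFFSET)

    int main(void)
    {
        assert(BOOT_VTOP(0xc0100000UL) == 0x00100000UL);
        return 0;
    }
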
diff --git a/i386/x15/elf.h b/i386/x15/elf.h
new file mode 100644
index 0000000..e0ea260
--- /dev/null
+++ b/i386/x15/elf.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright (c) 2013 Richard Braun.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _X86_ELF_H
+#define _X86_ELF_H
+
+#define ELF_SHT_SYMTAB 2
+#define ELF_SHT_STRTAB 3
+
+struct elf_shdr {
+ unsigned int name;
+ unsigned int type;
+ unsigned int flags;
+ unsigned long addr;
+ unsigned long offset;
+ unsigned int size;
+ unsigned int link;
+ unsigned int info;
+ unsigned int addralign;
+ unsigned int entsize;
+};
+
+#ifdef __LP64__
+
+struct elf_sym {
+ unsigned int name;
+ unsigned char info;
+ unsigned char other;
+ unsigned short shndx;
+ unsigned long value;
+ unsigned long size;
+};
+
+#else /* __LP64__ */
+
+struct elf_sym {
+ unsigned int name;
+ unsigned long value;
+ unsigned long size;
+ unsigned char info;
+ unsigned char other;
+ unsigned short shndx;
+};
+
+#endif /* __LP64__ */
+
+#endif /* _X86_ELF_H */
diff --git a/i386/x15/multiboot.h b/i386/x15/multiboot.h
new file mode 100644
index 0000000..4a0502c
--- /dev/null
+++ b/i386/x15/multiboot.h
@@ -0,0 +1,111 @@
+/*
+ * Copyright (c) 2010, 2012 Richard Braun.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _X86_MULTIBOOT_H
+#define _X86_MULTIBOOT_H
+
+/*
+ * Magic number provided by the OS to the boot loader.
+ */
+#define MULTIBOOT_OS_MAGIC 0x1badb002
+
+/*
+ * Multiboot flags requesting services from the boot loader.
+ */
+#define MULTIBOOT_OS_MEMORY_INFO 0x2
+
+#define MULTIBOOT_OS_FLAGS MULTIBOOT_OS_MEMORY_INFO
+
+/*
+ * Magic number to identify a multiboot compliant boot loader.
+ */
+#define MULTIBOOT_LOADER_MAGIC 0x2badb002
+
+/*
+ * Multiboot flags set by the boot loader.
+ */
+#define MULTIBOOT_LOADER_MEMORY 0x01
+#define MULTIBOOT_LOADER_CMDLINE 0x04
+#define MULTIBOOT_LOADER_MODULES 0x08
+#define MULTIBOOT_LOADER_SHDR 0x20
+#define MULTIBOOT_LOADER_MMAP 0x40
+
+#ifndef __ASSEMBLER__
+
+#include <kern/macros.h>
+#include <kern/stdint.h>
+
+/*
+ * A multiboot module.
+ */
+struct multiboot_raw_module {
+ uint32_t mod_start;
+ uint32_t mod_end;
+ uint32_t string;
+ uint32_t reserved;
+} __packed;
+
+/*
+ * Memory map entry.
+ */
+struct multiboot_raw_mmap_entry {
+ uint32_t size;
+ uint64_t base_addr;
+ uint64_t length;
+ uint32_t type;
+} __packed;
+
+/*
+ * Multiboot information structure as passed by the boot loader.
+ */
+struct multiboot_raw_info {
+ uint32_t flags;
+ uint32_t mem_lower;
+ uint32_t mem_upper;
+ uint32_t unused0;
+ uint32_t cmdline;
+ uint32_t mods_count;
+ uint32_t mods_addr;
+ uint32_t shdr_num;
+ uint32_t shdr_size;
+ uint32_t shdr_addr;
+ uint32_t shdr_strndx;
+ uint32_t mmap_length;
+ uint32_t mmap_addr;
+ uint32_t unused1[9];
+} __packed;
+
+/*
+ * Versions of the multiboot structures suitable for use with 64-bit pointers.
+ */
+
+struct multiboot_module {
+ void *mod_start;
+ void *mod_end;
+ char *string;
+};
+
+struct multiboot_info {
+ uint32_t flags;
+ char *cmdline;
+ struct multiboot_module *mods_addr;
+ uint32_t mods_count;
+};
+
+#endif /* __ASSEMBLER__ */
+
+#endif /* _X86_MULTIBOOT_H */
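
One non-obvious detail of the raw memory map: a multiboot_raw_mmap_entry's size field does not count the size field itself, so a walk advances by size + sizeof(size). That is what biosmem_map_build() (and the removed init_alloc_aligned() code) does; a minimal standalone sketch, assuming a host build and using only the structure above:

    #include <stdint.h>
    #include <stdio.h>

    struct multiboot_raw_mmap_entry {
        uint32_t size;
        uint64_t base_addr;
        uint64_t length;
        uint32_t type;
    } __attribute__((packed));

    /* Walk the raw map occupying [addr, addr + mmap_length). */
    static void mmap_walk(const void *addr, uint32_t mmap_length)
    {
        const struct multiboot_raw_mmap_entry *entry = addr;
        const void *end = (const char *)addr + mmap_length;

        while ((const void *)entry < end) {
            printf("%#llx-%#llx type %u\n",
                   (unsigned long long)entry->base_addr,
                   (unsigned long long)(entry->base_addr + entry->length),
                   (unsigned)entry->type);
            /* The size field excludes itself, hence the extra sizeof(). */
            entry = (const struct multiboot_raw_mmap_entry *)
                ((const char *)entry + entry->size + sizeof(entry->size));
        }
    }
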
diff --git a/i386/x15/param.h b/i386/x15/param.h
new file mode 100644
index 0000000..f8f9c33
--- /dev/null
+++ b/i386/x15/param.h
@@ -0,0 +1,185 @@
+/*
+ * Copyright (c) 2010-2014 Richard Braun.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ *
+ * This file is a top header in the inclusion hierarchy, and shouldn't include
+ * other headers that may cause circular dependencies.
+ */
+
+#ifndef _X86_PARAM_H
+#define _X86_PARAM_H
+
+#include <kern/macros.h>
+
+#define __LITTLE_ENDIAN__
+
+/*
+ * L1 cache line size.
+ *
+ * XXX Use this value until processor selection is available.
+ */
+#define CPU_L1_SIZE 64
+
+/*
+ * Code/data alignment.
+ */
+#define TEXT_ALIGN 16
+
+#ifdef __LP64__
+#define DATA_ALIGN 8
+#else /* __LP64__ */
+#define DATA_ALIGN 4
+#endif /* __LP64__ */
+
+/*
+ * Attributes for variables that are mostly read and seldom changed.
+ */
+#define __read_mostly __section(".data.read_mostly")
+
+/*
+ * Provide architecture-specific string functions.
+ */
+#define ARCH_STRING_MEMCPY
+#define ARCH_STRING_MEMMOVE
+#define ARCH_STRING_MEMSET
+#define ARCH_STRING_MEMCMP
+#define ARCH_STRING_STRLEN
+#define ARCH_STRING_STRCPY
+#define ARCH_STRING_STRCMP
+
+/*
+ * System timer frequency.
+ *
+ * The selected value of 200 translates to a period of 5ms, small enough to
+ * provide low latency, and is practical as both a dividend and divisor.
+ */
+#define HZ 200
+
+/*
+ * 4 KiB pages.
+ */
+#define PAGE_SHIFT 12
+#define PAGE_SIZE (1 << PAGE_SHIFT)
+#define PAGE_MASK (PAGE_SIZE - 1)
+
+/*
+ * Kernel stack size for threads and interrupt handlers.
+ */
+#define STACK_SIZE PAGE_SIZE
+
+/*
+ * Virtual memory properties.
+ */
+
+/*
+ * User space boundaries.
+ */
+#define VM_MIN_ADDRESS DECL_CONST(0, UL)
+
+#ifdef __LP64__
+#define VM_MAX_ADDRESS DECL_CONST(0x800000000000, UL)
+#else /* __LP64__ */
+#define VM_MAX_ADDRESS DECL_CONST(0xc0000000, UL)
+#endif /* __LP64__ */
+
+/*
+ * Kernel space boundaries.
+ */
+#ifdef __LP64__
+#define VM_MIN_KERNEL_ADDRESS DECL_CONST(0xffff800000000000, UL)
+#define VM_MAX_KERNEL_ADDRESS DECL_CONST(0xfffffffffffff000, UL)
+#else /* __LP64__ */
+#define VM_MIN_KERNEL_ADDRESS VM_MAX_ADDRESS
+#define VM_MAX_KERNEL_ADDRESS DECL_CONST(0xfffff000, UL)
+#endif /* __LP64__ */
+
+/*
+ * Direct physical mapping boundaries.
+ */
+#ifdef __LP64__
+#define VM_MIN_DIRECTMAP_ADDRESS VM_MIN_KERNEL_ADDRESS
+#define VM_MAX_DIRECTMAP_ADDRESS DECL_CONST(0xffffc00000000000, UL)
+#else /* __LP64__ */
+#define VM_MIN_DIRECTMAP_ADDRESS VM_MAX_ADDRESS
+#define VM_MAX_DIRECTMAP_ADDRESS DECL_CONST(0xf8000000, UL)
+#endif /* __LP64__ */
+
+/*
+ * Kernel mapping offset.
+ *
+ * On 32-bits systems, the kernel is linked at addresses included in the
+ * direct physical mapping, whereas on 64-bits systems, it is linked at
+ * -2 GiB because the "kernel" memory model is used when compiling (see
+ * the -mcmodel=kernel gcc option).
+ */
+#ifdef __LP64__
+#define VM_KERNEL_OFFSET DECL_CONST(0xffffffff80000000, UL)
+#else /* __LP64__ */
+#define VM_KERNEL_OFFSET VM_MIN_DIRECTMAP_ADDRESS
+#endif /* __LP64__ */
+
+/*
+ * Kernel virtual space boundaries.
+ *
+ * In addition to the direct physical mapping, the kernel has its own virtual
+ * memory space.
+ */
+#define VM_MIN_KMEM_ADDRESS VM_MAX_DIRECTMAP_ADDRESS
+
+#ifdef __LP64__
+#define VM_MAX_KMEM_ADDRESS VM_KERNEL_OFFSET
+#else /* __LP64__ */
+#define VM_MAX_KMEM_ADDRESS DECL_CONST(0xfffff000, UL)
+#endif /* __LP64__ */
+
+/*
+ * Physical memory properties.
+ */
+
+#define VM_PAGE_DMA_LIMIT DECL_CONST(0x1000000, UL)
+
+#ifdef __LP64__
+#define VM_PAGE_MAX_SEGS 4
+#define VM_PAGE_DMA32_LIMIT DECL_CONST(0x100000000, UL)
+#define VM_PAGE_DIRECTMAP_LIMIT DECL_CONST(0x400000000000, UL)
+#define VM_PAGE_HIGHMEM_LIMIT DECL_CONST(0x10000000000000, UL)
+#else /* __LP64__ */
+#define VM_PAGE_DIRECTMAP_LIMIT DECL_CONST(0x38000000, ULL)
+#ifdef X86_PAE
+#define VM_PAGE_MAX_SEGS 3
+#define VM_PAGE_HIGHMEM_LIMIT DECL_CONST(0x10000000000000, ULL)
+#else /* X86_PAE */
+#define VM_PAGE_MAX_SEGS 3
+#define VM_PAGE_HIGHMEM_LIMIT DECL_CONST(0xfffff000, UL)
+#endif /* X86_PAE */
+#endif /* __LP64__ */
+
+/*
+ * Physical segment indexes.
+ */
+#define VM_PAGE_SEG_DMA 0
+
+#ifdef __LP64__
+#define VM_PAGE_SEG_DMA32 1
+#define VM_PAGE_SEG_DIRECTMAP 2
+#define VM_PAGE_SEG_HIGHMEM 3
+#else /* __LP64__ */
+#define VM_PAGE_SEG_DMA32 1 /* Alias for the DIRECTMAP segment */
+#define VM_PAGE_SEG_DIRECTMAP 1
+#define VM_PAGE_SEG_HIGHMEM 2
+#endif /* __LP64__ */
+
+#endif /* _X86_PARAM_H */