summary refs log tree commit diff
path: root/i386
diff options
context:
space:
mode:
Diffstat (limited to 'i386')
-rw-r--r--  i386/i386/vm_param.h      6
-rw-r--r--  i386/i386at/model_dep.c  12
2 files changed, 9 insertions(+), 9 deletions(-)
diff --git a/i386/i386/vm_param.h b/i386/i386/vm_param.h
index ba8e584..1b79dda 100644
--- a/i386/i386/vm_param.h
+++ b/i386/i386/vm_param.h
@@ -28,7 +28,7 @@
/* The kernel address space is 1GB, starting at virtual address 0. */
#define VM_MIN_KERNEL_ADDRESS ((vm_offset_t) 0x00000000)
-#define VM_MAX_KERNEL_ADDRESS ((vm_offset_t) (LINEAR_MAX_KERNEL_ADDRESS - LINEAR_MIN_KERNEL_ADDRESS))
+#define VM_MAX_KERNEL_ADDRESS ((vm_offset_t) (LINEAR_MAX_KERNEL_ADDRESS - LINEAR_MIN_KERNEL_ADDRESS + VM_MIN_KERNEL_ADDRESS))
/* The kernel virtual address space is actually located
at high linear addresses.
@@ -58,7 +58,7 @@ extern vm_offset_t phys_mem_va;
/*
* Kernel virtual memory is actually at 0xc0000000 in linear addresses.
*/
-#define kvtolin(a) ((vm_offset_t)(a) + LINEAR_MIN_KERNEL_ADDRESS)
-#define lintokv(a) ((vm_offset_t)(a) - LINEAR_MIN_KERNEL_ADDRESS)
+#define kvtolin(a) ((vm_offset_t)(a) - VM_MIN_KERNEL_ADDRESS + LINEAR_MIN_KERNEL_ADDRESS)
+#define lintokv(a) ((vm_offset_t)(a) - LINEAR_MIN_KERNEL_ADDRESS + VM_MIN_KERNEL_ADDRESS)
#endif /* _I386_KERNEL_I386_VM_PARAM_ */
diff --git a/i386/i386at/model_dep.c b/i386/i386at/model_dep.c
index 3929293..7dfeb1c 100644
--- a/i386/i386at/model_dep.c
+++ b/i386/i386at/model_dep.c
@@ -236,8 +236,8 @@ mem_size_init(void)
/* Reserve 1/6 of the memory address space for virtual mappings.
* Yes, this loses memory. Blame i386. */
- if (phys_last_addr > (VM_MAX_KERNEL_ADDRESS / 6) * 5) {
- phys_last_addr = (VM_MAX_KERNEL_ADDRESS / 6) * 5;
+ if (phys_last_addr > ((VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS) / 6) * 5) {
+ phys_last_addr = ((VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS) / 6) * 5;
printf("Truncating memory size to %dMiB\n", (phys_last_addr - phys_first_addr) / (1024 * 1024));
}
@@ -321,10 +321,10 @@ i386at_init(void)
* Also, set the WP bit so that on 486 or better processors
* page-level write protection works in kernel mode.
*/
- kernel_page_dir[lin2pdenum(0)] =
+ kernel_page_dir[lin2pdenum(VM_MIN_KERNEL_ADDRESS)] =
kernel_page_dir[lin2pdenum(LINEAR_MIN_KERNEL_ADDRESS)];
#if PAE
- kernel_page_dir[lin2pdenum(0) + 1] =
+ kernel_page_dir[lin2pdenum(VM_MIN_KERNEL_ADDRESS) + 1] =
kernel_page_dir[lin2pdenum(LINEAR_MIN_KERNEL_ADDRESS) + 1];
set_cr3((unsigned)kernel_pmap->pdpbase);
if (!CPU_HAS_FEATURE(CPU_FEATURE_PAE))
@@ -348,9 +348,9 @@ i386at_init(void)
ktss_init();
/* Get rid of the temporary direct mapping and flush it out of the TLB. */
- kernel_page_dir[lin2pdenum(0)] = 0;
+ kernel_page_dir[lin2pdenum(VM_MIN_KERNEL_ADDRESS)] = 0;
#if PAE
- kernel_page_dir[lin2pdenum(0) + 1] = 0;
+ kernel_page_dir[lin2pdenum(VM_MIN_KERNEL_ADDRESS) + 1] = 0;
#endif /* PAE */
flush_tlb();