author     Samuel Thibault <samuel.thibault@ens-lyon.org>   2009-11-22 21:09:57 +0100
committer  Samuel Thibault <samuel.thibault@ens-lyon.org>   2009-11-22 21:15:02 +0100
commit     a04ce77f750f1655c17394adcdb7919e8f50f5cb (patch)
tree       660e42280bc53312b2348ac74c733b041aa42e16
parent     199183d5babe50b05ef4b91e2bba390fa2d4bf06 (diff)
Add missing phystokv/kvtophys conversions
* i386/i386/mp_desc.c (interrupt_stack_alloc): Apply phystokv to
  stack_start.
* i386/i386at/model_dep.c (i386at_init): Apply phystokv to memory
  allocated to duplicate multiboot information.  Apply _kvtophys before
  calling set_cr3.  Apply phystokv to phys_last_addr before assigning to
  int_stack_high.
* i386/intel/pmap.c (pmap_bootstrap): Apply phystokv to phys_last_addr,
  use kernel_virtual_start instead of phys_last_addr.  Apply phystokv to
  allocated bootstrap page table pages.  Apply _kvtophys to page table
  pointers before writing into the page table.
  (pmap_enter): Apply phystokv to the allocated page table page, apply
  kvtophys to the deallocated page table page.
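Background for the conversions used throughout this change: phystokv maps a
physical address into the range where physical memory is directly mapped into
kernel virtual space, and kvtophys/_kvtophys convert back.  The sketch below is
only an illustration of that idea under an assumed constant-offset direct map
with a made-up KERNEL_MAP_BASE; the actual GNU Mach macros live in the i386
headers and differ in detail (kvtophys in particular may involve a real page
table lookup rather than pure arithmetic).

    /* Illustrative sketch only -- NOT the GNU Mach definitions.
     * Assumes kernel virtual address = physical address + KERNEL_MAP_BASE. */
    #include <stdint.h>

    typedef uintptr_t vm_offset_t;          /* stand-in for the Mach type */

    #define KERNEL_MAP_BASE 0xC0000000UL    /* hypothetical direct-map offset */

    /* Physical address -> kernel virtual address. */
    #define phystokv(pa)  ((vm_offset_t)(pa) + KERNEL_MAP_BASE)

    /* Kernel virtual address -> physical address (inverse of phystokv). */
    #define kvtophys(va)  ((vm_offset_t)(va) - KERNEL_MAP_BASE)

Under such a model the pattern of the fixes is consistent: addresses returned
by the early boot allocators and by pmap_grab_page are physical, so they need
phystokv before the kernel dereferences them, while values loaded into %cr3 or
stored into page-table entries must be physical, hence the kvtophys/_kvtophys
conversions in the opposite direction.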
-rw-r--r--   i386/i386/mp_desc.c        1
-rw-r--r--   i386/i386at/model_dep.c   18
-rw-r--r--   i386/intel/pmap.c         30
3 files changed, 25 insertions, 24 deletions
diff --git a/i386/i386/mp_desc.c b/i386/i386/mp_desc.c
index 7f0b21e..54660d5 100644
--- a/i386/i386/mp_desc.c
+++ b/i386/i386/mp_desc.c
@@ -192,6 +192,7 @@ interrupt_stack_alloc()
*/
if (!init_alloc_aligned(INTSTACK_SIZE*(cpu_count-1), &stack_start))
panic("not enough memory for interrupt stacks");
+ stack_start = phystokv(stack_start);
/*
* Set up pointers to the top of the interrupt stack.
diff --git a/i386/i386at/model_dep.c b/i386/i386at/model_dep.c
index 1dcb8f6..37f43f4 100644
--- a/i386/i386at/model_dep.c
+++ b/i386/i386at/model_dep.c
@@ -272,9 +272,9 @@ i386at_init(void)
vm_offset_t addr;
int len = strlen ((char*)phystokv(boot_info.cmdline)) + 1;
assert(init_alloc_aligned(round_page(len), &addr));
- kernel_cmdline = (char*) addr;
+ kernel_cmdline = (char*) phystokv(addr);
memcpy(kernel_cmdline, (char*)phystokv(boot_info.cmdline), len);
- boot_info.cmdline = (vm_offset_t) kernel_cmdline;
+ boot_info.cmdline = addr;
}
if (boot_info.flags & MULTIBOOT_MODS) {
@@ -283,20 +283,20 @@ i386at_init(void)
int i;
assert(init_alloc_aligned(round_page(boot_info.mods_count * sizeof(*m)), &addr));
- m = (void*) addr;
+ m = (void*) phystokv(addr);
memcpy(m, (void*) phystokv(boot_info.mods_addr), boot_info.mods_count * sizeof(*m));
- boot_info.mods_addr = (vm_offset_t) m;
+ boot_info.mods_addr = addr;
for (i = 0; i < boot_info.mods_count; i++) {
vm_size_t size = m[i].mod_end - m[i].mod_start;
assert(init_alloc_aligned(round_page(size), &addr));
- memcpy((void*) addr, (void*) phystokv(m[i].mod_start), size);
+ memcpy((void*) phystokv(addr), (void*) phystokv(m[i].mod_start), size);
m[i].mod_start = addr;
m[i].mod_end = addr + size;
size = strlen((char*) phystokv(m[i].string)) + 1;
assert(init_alloc_aligned(round_page(size), &addr));
- memcpy((void*) addr, (void*) phystokv(m[i].string), size);
+ memcpy((void*) phystokv(addr), (void*) phystokv(m[i].string), size);
m[i].string = addr;
}
}
@@ -323,12 +323,12 @@ i386at_init(void)
#if PAE
kernel_page_dir[lin2pdenum(VM_MIN_KERNEL_ADDRESS) + 1] =
kernel_page_dir[lin2pdenum(LINEAR_MIN_KERNEL_ADDRESS) + 1];
- set_cr3((unsigned)kernel_pmap->pdpbase);
+ set_cr3((unsigned)_kvtophys(kernel_pmap->pdpbase));
if (!CPU_HAS_FEATURE(CPU_FEATURE_PAE))
panic("CPU doesn't have support for PAE.");
set_cr4(get_cr4() | CR4_PAE);
#else
- set_cr3((unsigned)kernel_page_dir);
+ set_cr3((unsigned)_kvtophys(kernel_page_dir));
#endif /* PAE */
if (CPU_HAS_FEATURE(CPU_FEATURE_PGE))
set_cr4(get_cr4() | CR4_PGE);
@@ -361,7 +361,7 @@ i386at_init(void)
/* Interrupt stacks are allocated in physical memory,
while kernel stacks are allocated in kernel virtual memory,
so phys_last_addr serves as a convenient dividing point. */
- int_stack_high = phys_last_addr;
+ int_stack_high = phystokv(phys_last_addr);
}
/*
diff --git a/i386/intel/pmap.c b/i386/intel/pmap.c
index d1a7c47..e380be7 100644
--- a/i386/intel/pmap.c
+++ b/i386/intel/pmap.c
@@ -578,11 +578,11 @@ void pmap_bootstrap()
* mapped into the kernel address space,
* and extends to a stupid arbitrary limit beyond that.
*/
- kernel_virtual_start = phys_last_addr;
- kernel_virtual_end = phys_last_addr + morevm
+ kernel_virtual_start = phystokv(phys_last_addr);
+ kernel_virtual_end = phystokv(phys_last_addr) + morevm
+ (phys_last_addr - phys_first_addr);
- if (kernel_virtual_end < phys_last_addr
+ if (kernel_virtual_end < kernel_virtual_start
|| kernel_virtual_end > VM_MAX_KERNEL_ADDRESS)
kernel_virtual_end = VM_MAX_KERNEL_ADDRESS;
@@ -593,16 +593,16 @@ void pmap_bootstrap()
{
vm_offset_t addr;
init_alloc_aligned(PDPNUM * INTEL_PGBYTES, &addr);
- kernel_pmap->dirbase = kernel_page_dir = (pt_entry_t*)addr;
+ kernel_pmap->dirbase = kernel_page_dir = (pt_entry_t*)phystokv(addr);
}
- kernel_pmap->pdpbase = (pt_entry_t*)pmap_grab_page();
+ kernel_pmap->pdpbase = (pt_entry_t*)phystokv(pmap_grab_page());
{
int i;
for (i = 0; i < PDPNUM; i++)
- WRITE_PTE(&kernel_pmap->pdpbase[i], pa_to_pte((vm_offset_t) kernel_pmap->dirbase + i * INTEL_PGBYTES) | INTEL_PTE_VALID);
+ WRITE_PTE(&kernel_pmap->pdpbase[i], pa_to_pte(_kvtophys((void *) kernel_pmap->dirbase + i * INTEL_PGBYTES)) | INTEL_PTE_VALID);
}
#else /* PAE */
- kernel_pmap->dirbase = kernel_page_dir = (pt_entry_t*)pmap_grab_page();
+ kernel_pmap->dirbase = kernel_page_dir = (pt_entry_t*)phystokv(pmap_grab_page());
#endif /* PAE */
{
int i;
@@ -628,18 +628,18 @@ void pmap_bootstrap()
* to allocate new kernel page tables later.
* XX fix this
*/
- for (va = phys_first_addr; va < kernel_virtual_end; )
+ for (va = phystokv(phys_first_addr); va < kernel_virtual_end; )
{
pt_entry_t *pde = kernel_page_dir + lin2pdenum(kvtolin(va));
- pt_entry_t *ptable = (pt_entry_t*)pmap_grab_page();
+ pt_entry_t *ptable = (pt_entry_t*)phystokv(pmap_grab_page());
pt_entry_t *pte;
/* Initialize the page directory entry. */
- WRITE_PTE(pde, pa_to_pte((vm_offset_t)ptable)
+ WRITE_PTE(pde, pa_to_pte((vm_offset_t)_kvtophys(ptable))
| INTEL_PTE_VALID | INTEL_PTE_WRITE);
/* Initialize the page table. */
- for (pte = ptable; (va < phys_last_addr) && (pte < ptable+NPTES); pte++)
+ for (pte = ptable; (va < phystokv(phys_last_addr)) && (pte < ptable+NPTES); pte++)
{
if ((pte - ptable) < ptenum(va))
{
@@ -652,12 +652,12 @@ void pmap_bootstrap()
if ((va >= (vm_offset_t)_start)
&& (va + INTEL_PGBYTES <= (vm_offset_t)etext))
{
- WRITE_PTE(pte, pa_to_pte(va)
+ WRITE_PTE(pte, pa_to_pte(_kvtophys(va))
| INTEL_PTE_VALID | global);
}
else
{
- WRITE_PTE(pte, pa_to_pte(va)
+ WRITE_PTE(pte, pa_to_pte(_kvtophys(va))
| INTEL_PTE_VALID | INTEL_PTE_WRITE | global);
}
va += INTEL_PGBYTES;
@@ -1481,7 +1481,7 @@ Retry:
*/
PMAP_READ_UNLOCK(pmap, spl);
- ptp = pmap_page_table_page_alloc();
+ ptp = phystokv(pmap_page_table_page_alloc());
/*
* Re-lock the pmap and check that another thread has
@@ -1496,7 +1496,7 @@ Retry:
* Oops...
*/
PMAP_READ_UNLOCK(pmap, spl);
- pmap_page_table_page_dealloc(ptp);
+ pmap_page_table_page_dealloc(kvtophys(ptp));
PMAP_READ_LOCK(pmap, spl);
continue;
}