author	Samuel Thibault <samuel.thibault@ens-lyon.org>	2012-03-18 17:05:00 +0100
committer	Samuel Thibault <samuel.thibault@ens-lyon.org>	2012-03-18 17:05:00 +0100
commit	ef34957f80c8b973a064cb677d6d29a65711ca6b (patch)
tree	c1555d34be09ceef692c895290d3ecbe47cb0b24 /i386
parent	caaf3ece6d895b326f9b52df207b26d5dc0566f7 (diff)
Fix access above 4GiB in bootstrap page table
* i386/intel/pmap.c (pmap_set_page_readonly_init) [PAE]: Access the bootstrap dirbase with PTEMASK (1 page) instead of PDEMASK (4 pages) through pmap_pde.
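Background sketch (not part of the commit): under PAE each page-directory page holds 512 eight-byte entries, and the commit message implies PTEMASK covers one such page while PDEMASK covers four. The standalone program below only illustrates how the two masks differ when applied to the same pde index; the PDESHIFT/PDEMASK/PTEMASK values are assumptions consistent with that description, not copied from gnumach's headers.

/* Standalone illustration, not gnumach code.  Assumed PAE constants:
 * 512 entries per 4 KiB directory page (PTEMASK), four pages for the
 * whole 4 GiB address space (PDEMASK), pde index starting at bit 21. */
#include <stdio.h>
#include <stdint.h>

#define PDESHIFT 21          /* PAE: pde index starts at bit 21 */
#define PTEMASK  0x1ffU      /* 511: stays within one directory page */
#define PDEMASK  0x7ffU      /* 2047: runs across four directory pages */

int main(void)
{
	uint32_t vaddr  = 0xc1234567;                      /* arbitrary kernel address */
	uint32_t wide   = (vaddr >> PDESHIFT) & PDEMASK;   /* index as pmap_pde computes it */
	uint32_t narrow = (vaddr >> PDESHIFT) & PTEMASK;   /* index within a single page */

	/* If the four bootstrap directory pages are not contiguous, an index
	 * above 511 walks off the end of the page found through the pdp entry. */
	printf("pde index across 4 pages: %u, within 1 page: %u\n", wide, narrow);
	return 0;
}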
Diffstat (limited to 'i386')
-rw-r--r--	i386/intel/pmap.c	17
1 file changed, 9 insertions, 8 deletions
diff --git a/i386/intel/pmap.c b/i386/intel/pmap.c
index 76055f8..7cb990a 100644
--- a/i386/intel/pmap.c
+++ b/i386/intel/pmap.c
@@ -826,26 +826,27 @@ void pmap_set_page_readonly(void *_vaddr) {
 }
 
 /* This needs to be called instead of pmap_set_page_readonly as long as CR3
- * still points to the bootstrap dirbase. */
+ * still points to the bootstrap dirbase, to also fix the bootstrap table. */
 void pmap_set_page_readonly_init(void *_vaddr) {
 	vm_offset_t vaddr = (vm_offset_t) _vaddr;
 #if PAE
 	pt_entry_t *pdpbase = (void*) boot_info.pt_base;
-	vm_offset_t dirbase = ptetokv(pdpbase[lin2pdpnum(vaddr)]);
+	/* The bootstrap table does not necessarily use contiguous pages for the pde tables */
+	pt_entry_t *dirbase = (void*) ptetokv(pdpbase[lin2pdpnum(vaddr)]);
 #else
-	vm_offset_t dirbase = boot_info.pt_base;
+	pt_entry_t *dirbase = (void*) boot_info.pt_base;
 #endif
-	struct pmap linear_pmap = {
-		.dirbase = (void*) dirbase,
-	};
+	pt_entry_t *pte = &dirbase[lin2pdenum(vaddr) & PTEMASK];
 	/* Modify our future kernel map (can't use update_va_mapping for this)... */
-	if (*pmap_pde(kernel_pmap, vaddr) & INTEL_PTE_VALID)
+	if (*pmap_pde(kernel_pmap, vaddr) & INTEL_PTE_VALID) {
 		if (!hyp_mmu_update_la (kvtolin(vaddr), pa_to_pte (kv_to_ma(vaddr)) | INTEL_PTE_VALID))
 			panic("couldn't set hiMMU readonly for vaddr %p(%p)\n", vaddr, (vm_offset_t) kv_to_ma (vaddr));
+	}
 	/* ... and the bootstrap map. */
-	if (*pmap_pde(&linear_pmap, vaddr) & INTEL_PTE_VALID)
+	if (*pte & INTEL_PTE_VALID) {
 		if (hyp_do_update_va_mapping (vaddr, pa_to_pte (kv_to_ma(vaddr)) | INTEL_PTE_VALID, UVMF_NONE))
 			panic("couldn't set MMU readonly for vaddr %p(%p)\n", vaddr, (vm_offset_t) kv_to_ma (vaddr));
+	}
 }
 
 void pmap_clear_bootstrap_pagetable(pt_entry_t *base) {
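For context, the patched lookup can be read as the hypothetical helper below. It is only a sketch of the PAE path: bootstrap_pde() is not a function in the tree, and it assumes gnumach's existing declarations for pt_entry_t, vm_offset_t, boot_info, ptetokv(), lin2pdpnum(), lin2pdenum() and PTEMASK, all of which appear in the diff above.

/* Hypothetical helper (not in the tree): how the patched code locates the
 * bootstrap pde for vaddr under PAE.  Assumes gnumach's pmap declarations. */
static pt_entry_t *bootstrap_pde(vm_offset_t vaddr)
{
	/* Each pdp entry points at its own directory page; the pages of the
	 * bootstrap table are not necessarily contiguous.  */
	pt_entry_t *pdpbase = (void*) boot_info.pt_base;
	pt_entry_t *dirbase = (void*) ptetokv(pdpbase[lin2pdpnum(vaddr)]);

	/* Mask the pde index with PTEMASK (one page of entries) rather than
	 * PDEMASK (four pages), so it stays inside the page just found.  */
	return &dirbase[lin2pdenum(vaddr) & PTEMASK];
}

The design point is that pmap_pde() is only valid for pmaps whose directory pages are contiguous (as the kernel pmap's are); for the Xen-built bootstrap table the directory page must be reached through its pdp entry and indexed one page at a time, which is exactly what the patch does inline.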