summaryrefslogtreecommitdiff
path: root/i386/intel
diff options
context:
space:
mode:
authorSamuel Thibault <samuel.thibault@ens-lyon.org>2012-07-14 14:54:04 +0100
committerSamuel Thibault <samuel.thibault@ens-lyon.org>2012-07-14 14:54:04 +0100
commitd59b5731eaa93348485e2ae77e9b5d90fb1da36b (patch)
treeff007e873d8a35e06736cb71f30eae0154d0c693 /i386/intel
parent8d219eab0dcfbdcf464340630d568c4e16d7acbd (diff)
Fix Xen boot at linear address 0xC0000000
* i386/intel/pmap.c (pmap_bootstrap): Iterate over linear addresses and compute L3 and L2 offsets from them instead of assuming a null L3 offset.
Diffstat (limited to 'i386/intel')
-rw-r--r--i386/intel/pmap.c16
1 files changed, 9 insertions, 7 deletions
diff --git a/i386/intel/pmap.c b/i386/intel/pmap.c
index f0623de..e211bf3 100644
--- a/i386/intel/pmap.c
+++ b/i386/intel/pmap.c
@@ -667,25 +667,27 @@ void pmap_bootstrap()
pt_entry_t *l1_map[NSUP_L1];
{
pt_entry_t *base = (pt_entry_t*) boot_info.pt_base;
- int i;
+ vm_offset_t la;
int n_l1map;
+ for (n_l1map = 0, la = VM_MIN_KERNEL_ADDRESS; la >= VM_MIN_KERNEL_ADDRESS; la += NPTES * PAGE_SIZE) {
#ifdef PAE
- pt_entry_t *l2_map = (pt_entry_t*) ptetokv(base[0]);
+ pt_entry_t *l2_map = (pt_entry_t*) ptetokv(base[lin2pdpnum(la)]);
#else /* PAE */
- pt_entry_t *l2_map = base;
+ pt_entry_t *l2_map = base;
#endif /* PAE */
- for (n_l1map = 0, i = lin2pdenum(VM_MIN_KERNEL_ADDRESS); i < NPTES; i++) {
- if (!(l2_map[i] & INTEL_PTE_VALID)) {
+ /* Like lin2pdenum, but works with non-contiguous boot L3 */
+ l2_map += (la >> PDESHIFT) & PDEMASK;
+ if (!(*l2_map & INTEL_PTE_VALID)) {
struct mmu_update update;
int j, n;
l1_map[n_l1map] = (pt_entry_t*) phystokv(pmap_grab_page());
for (j = 0; j < NPTES; j++)
- l1_map[n_l1map][j] = (((pt_entry_t)pfn_to_mfn((i - lin2pdenum(VM_MIN_KERNEL_ADDRESS)) * NPTES + j)) << PAGE_SHIFT) | INTEL_PTE_VALID | INTEL_PTE_WRITE;
+ l1_map[n_l1map][j] = (((pt_entry_t)pfn_to_mfn(lin2pdenum(la - VM_MIN_KERNEL_ADDRESS) * NPTES + j)) << PAGE_SHIFT) | INTEL_PTE_VALID | INTEL_PTE_WRITE;
pmap_set_page_readonly_init(l1_map[n_l1map]);
if (!hyp_mmuext_op_mfn (MMUEXT_PIN_L1_TABLE, kv_to_mfn (l1_map[n_l1map])))
panic("couldn't pin page %p(%p)", l1_map[n_l1map], (vm_offset_t) kv_to_ma (l1_map[n_l1map]));
- update.ptr = kv_to_ma(&l2_map[i]);
+ update.ptr = kv_to_ma(l2_map);
update.val = kv_to_ma(l1_map[n_l1map]) | INTEL_PTE_VALID | INTEL_PTE_WRITE;
hyp_mmu_update(kv_to_la(&update), 1, kv_to_la(&n), DOMID_SELF);
if (n != 1)