author     Samuel Thibault <samuel.thibault@ens-lyon.org>  2010-04-08 00:11:26 +0200
committer  Samuel Thibault <samuel.thibault@ens-lyon.org>  2010-04-08 00:24:50 +0200
commit     690d2c6a6fd7512dce0761f21a1d18fb49ca1fcb (patch)
tree       a63bdc1189dca0f3a8b21700305ff4ce8bbb1ae7
parent     5a27310a308a7ea56b2ce9020c775c0f64c41b79 (diff)
Extend machine addresses to 64bit for PAE
* i386/i386/pcb.c (switch_ktss): Use uint64_t for descriptor type instead
of unsigned long long.
* i386/i386/seg.h (fill_descriptor): Use uint64_t for descriptor type
instead of unsigned long long. Cast result of kv_to_ma() into vm_offset_t
before printing.
* i386/i386/xen.h (hyp_mmu_update_pte): Use pt_entry_t type instead of
unsigned long long, make pte parameter a pt_entry_t too.
(hyp_mmu_update_la): Compute pte address just using pt_entry_t integers.
(hyp_high): New macro to fix 32bit right shifting.
(hyp_do_update_descriptor): Use pt_entry_t type for machine address
instead of unsigned long. Split it into 32bit parts. Use uint64_t for
descriptor type instead of unsigned long long.
(hyp_do_update_va_mapping): Use pt_entry_t type instead of unsigned long
long. Use hyp_high macro.
(hyp_do_set_timer_op): Use uint64_t for time type instead of unsigned
long long.
* i386/intel/pmap.c (pmap_bootstrap): Use ptetokv instead of pte_to_pa +
phystokv. Cast machine addresses into vm_offset_t before printing.
(pmap_set_page_readwrite): Likewise.
(pmap_set_page_readonly): Likewise.
(pmap_set_page_readonly_init): Likewise.
(pmap_clear_bootstrap_pagetable): Likewise.
(pmap_destroy): Likewise.
(pmap_map_mfn): Use pt_entry_t type for machine addresses. Cast machine
addresses into vm_offset_t before printing.
* i386/intel/pmap.h [PAE] (INTEL_PTE_PFN): Set macro to
0xfffffffffffff000ULL.
* include/mach/xen.h [PAE] (pa_to_ma, ma_to_pa): Use pt_entry_t type for
machine addresses.
[!PAE] (pa_to_ma): Remove cast to vm_offset_t.
(mfn_to_kv): Use mfn_to_pfn + ptoa instead of ptoa + ma_to_pa.
* xen/xen.c (hyp_invalidate_pte): Cast machine addresses into vm_offset_t
before printing.
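The central trick here is the hyp_high() macro. Xen hypercall arguments travel in 32-bit registers, so a 64-bit PAE machine address or descriptor must be split into low/high halves; without PAE the value is only 32 bits wide, and a plain ">> 32" is undefined behaviour in C (the shift count equals the operand width), so the macro must collapse to 0 instead. A minimal, self-contained sketch of the pattern (not kernel source; update_descriptor() below is a hypothetical stand-in for the hypercall stub):

#include <stdint.h>
#include <stdio.h>

#ifdef PAE
typedef uint64_t pt_entry_t;		/* PAE: machine addresses are 64-bit */
#define hyp_high(pte) ((pte) >> 32)
#else
typedef uint32_t pt_entry_t;		/* !PAE: ">> 32" would be undefined */
#define hyp_high(pte) 0
#endif

/* Hypothetical stand-in for the 4-argument hypercall stub: each
   argument fits a 32-bit register, so 64-bit values go as lo/hi pairs. */
static void update_descriptor(uint32_t ma_lo, uint32_t ma_hi,
			      uint32_t desc_lo, uint32_t desc_hi)
{
	printf("ma=%08x%08x desc=%08x%08x\n", ma_hi, ma_lo, desc_hi, desc_lo);
}

int main(void)
{
	pt_entry_t ma = (pt_entry_t) 0x12345 << 12;	/* machine address */
	uint64_t desc = 0x00cf9a000000ffffULL;		/* a GDT descriptor */

	update_descriptor(ma & 0xffffffffU, hyp_high(ma),
			  desc & 0xffffffffU, desc >> 32);
	return 0;
}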
-rw-r--r--  i386/i386/pcb.c      2
-rw-r--r--  i386/i386/seg.h      4
-rw-r--r--  i386/i386/xen.h     24
-rw-r--r--  i386/intel/pmap.c   39
-rw-r--r--  i386/intel/pmap.h    4
-rw-r--r--  include/mach/xen.h   9
-rw-r--r--  xen/xen.c            2
7 files changed, 50 insertions(+), 34 deletions(-)
diff --git a/i386/i386/pcb.c b/i386/i386/pcb.c
index 8be71ae..f687db1 100644
--- a/i386/i386/pcb.c
+++ b/i386/i386/pcb.c
@@ -199,7 +199,7 @@ void switch_ktss(pcb)
if (memcmp(gdt_desc_p (mycpu, USER_GDT + (i << 3)),
&pcb->ims.user_gdt[i], sizeof pcb->ims.user_gdt[i])) {
if (hyp_do_update_descriptor(kv_to_ma(gdt_desc_p (mycpu, USER_GDT + (i << 3))),
- *(unsigned long long *) &pcb->ims.user_gdt[i]))
+ *(uint64_t *) &pcb->ims.user_gdt[i]))
panic("couldn't set user gdt %d\n",i);
}
}
diff --git a/i386/i386/seg.h b/i386/i386/seg.h
index 01b1a2e..7f12cc0 100644
--- a/i386/i386/seg.h
+++ b/i386/i386/seg.h
@@ -180,8 +180,8 @@ fill_descriptor(struct real_descriptor *_desc, unsigned base, unsigned limit,
desc->granularity = sizebits;
desc->base_high = base >> 24;
#ifdef MACH_XEN
- if (hyp_do_update_descriptor(kv_to_ma(_desc), *(unsigned long long*)desc))
- panic("couldn't update descriptor(%p to %08lx%08lx)\n", kv_to_ma(_desc), *(((unsigned long*)desc)+1), *(unsigned long *)desc);
+ if (hyp_do_update_descriptor(kv_to_ma(_desc), *(uint64_t*)desc))
+ panic("couldn't update descriptor(%p to %08lx%08lx)\n", (vm_offset_t) kv_to_ma(_desc), *(((unsigned long*)desc)+1), *(unsigned long *)desc);
#endif /* MACH_XEN */
}
diff --git a/i386/i386/xen.h b/i386/i386/xen.h
index a7fb641..1377a14 100644
--- a/i386/i386/xen.h
+++ b/i386/i386/xen.h
@@ -152,7 +152,7 @@ MACH_INLINE type hyp_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, type
_hypcall1(long, set_trap_table, vm_offset_t /* struct trap_info * */, traps);
_hypcall4(int, mmu_update, vm_offset_t /* struct mmu_update * */, req, int, count, vm_offset_t /* int * */, success_count, domid_t, domid)
-MACH_INLINE int hyp_mmu_update_pte(unsigned long pte, unsigned long long val)
+MACH_INLINE int hyp_mmu_update_pte(pt_entry_t pte, pt_entry_t val)
{
struct mmu_update update =
{
@@ -167,8 +167,8 @@ MACH_INLINE int hyp_mmu_update_pte(unsigned long pte, unsigned long long val)
#define HYP_BATCH_MMU_UPDATES 256
#define hyp_mmu_update_la(la, val) hyp_mmu_update_pte( \
- (unsigned long)(((pt_entry_t*)(kernel_pmap->dirbase[lin2pdenum((unsigned long)la)] & INTEL_PTE_PFN)) \
- + ptenum((unsigned long)la)), val)
+ (kernel_pmap->dirbase[lin2pdenum((vm_offset_t)(la))] & INTEL_PTE_PFN) \
+ + ptenum((vm_offset_t)(la)) * sizeof(pt_entry_t), val)
_hypcall2(long, set_gdt, vm_offset_t /* unsigned long * */, frame_list, unsigned int, entries)
@@ -178,10 +178,16 @@ _hypcall4(long, set_callbacks, unsigned long, es, void *, ea,
unsigned long, fss, void *, fsa);
_hypcall1(long, fpu_taskswitch, int, set);
+#ifdef PAE
+#define hyp_high(pte) ((pte) >> 32)
+#else
+#define hyp_high(pte) 0
+#endif
_hypcall4(long, update_descriptor, unsigned long, ma_lo, unsigned long, ma_hi, unsigned long, desc_lo, unsigned long, desc_hi);
#define hyp_do_update_descriptor(ma, desc) ({ \
- unsigned long long __desc = (desc); \
- hyp_update_descriptor(ma, 0, __desc, __desc >> 32); \
+ pt_entry_t __ma = (ma); \
+ uint64_t __desc = (desc); \
+ hyp_update_descriptor(__ma & 0xffffffffU, hyp_high(__ma), __desc & 0xffffffffU, __desc >> 32); \
})
#include <xen/public/memory.h>
@@ -200,8 +206,8 @@ MACH_INLINE void hyp_free_mfn(unsigned long mfn)
_hypcall4(int, update_va_mapping, unsigned long, va, unsigned long, val_lo, unsigned long, val_hi, unsigned long, flags);
#define hyp_do_update_va_mapping(va, val, flags) ({ \
- unsigned long long __val = (val); \
- hyp_update_va_mapping(va, __val & 0xffffffffU, __val >> 32, flags); \
+ pt_entry_t __val = (val); \
+ hyp_update_va_mapping(va, __val & 0xffffffffU, hyp_high(__val), flags); \
})
MACH_INLINE void hyp_free_page(unsigned long pfn, void *va)
@@ -271,8 +277,8 @@ MACH_INLINE void hyp_invlpg(vm_offset_t lin) {
_hypcall2(long, set_timer_op, unsigned long, absolute_lo, unsigned long, absolute_hi);
#define hyp_do_set_timer_op(absolute_nsec) ({ \
- unsigned long long __absolute = (absolute_nsec); \
- hyp_set_timer_op(__absolute, __absolute >> 32); \
+ uint64_t __absolute = (absolute_nsec); \
+ hyp_set_timer_op(__absolute & 0xffffffffU, __absolute >> 32); \
})
#include <xen/public/event_channel.h>
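An aside on the hyp_mmu_update_la change above: on a 32-bit kernel with PAE, pointers are still 32 bits while machine addresses are 64, so the old code's trick of casting the page table's machine address to a pt_entry_t * and indexing it silently dropped the upper half. A sketch of the new integer-only computation (assumptions: PAE, definitions reduced to the essentials):

#include <stdint.h>

typedef uint64_t pt_entry_t;			/* PAE page-table entry */
#define INTEL_PTE_PFN	0xfffffffffffff000ULL

/* Machine address of the pte for a given page-table index: stay in
   64-bit integer arithmetic instead of 32-bit pointer arithmetic. */
static pt_entry_t pte_ma(pt_entry_t pde, unsigned int idx)
{
	return (pde & INTEL_PTE_PFN) + idx * sizeof(pt_entry_t);
}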
diff --git a/i386/intel/pmap.c b/i386/intel/pmap.c
index 45b956c..aab088c 100644
--- a/i386/intel/pmap.c
+++ b/i386/intel/pmap.c
@@ -672,7 +672,7 @@ void pmap_bootstrap()
int i;
int n_l1map;
#ifdef PAE
- pt_entry_t *l2_map = (pt_entry_t*) phystokv(pte_to_pa(base[0]));
+ pt_entry_t *l2_map = (pt_entry_t*) ptetokv(base[0]);
#else /* PAE */
pt_entry_t *l2_map = base;
#endif /* PAE */
@@ -686,7 +686,7 @@ void pmap_bootstrap()
l1_map[n_l1map][j] = intel_ptob(pfn_to_mfn((i - lin2pdenum(VM_MIN_KERNEL_ADDRESS)) * NPTES + j)) | INTEL_PTE_VALID | INTEL_PTE_WRITE;
pmap_set_page_readonly_init(l1_map[n_l1map]);
if (!hyp_mmuext_op_mfn (MMUEXT_PIN_L1_TABLE, kv_to_mfn (l1_map[n_l1map])))
- panic("couldn't pin page %p(%p)", l1_map[n_l1map], kv_to_ma (l1_map[n_l1map]));
+ panic("couldn't pin page %p(%p)", l1_map[n_l1map], (vm_offset_t) kv_to_ma (l1_map[n_l1map]));
update.ptr = kv_to_ma(&l2_map[i]);
update.val = kv_to_ma(l1_map[n_l1map]) | INTEL_PTE_VALID | INTEL_PTE_WRITE;
hyp_mmu_update(kv_to_la(&update), 1, kv_to_la(&n), DOMID_SELF);
@@ -784,7 +784,7 @@ void pmap_bootstrap()
#ifdef MACH_XEN
pmap_set_page_readonly_init(ptable);
if (!hyp_mmuext_op_mfn (MMUEXT_PIN_L1_TABLE, kv_to_mfn (ptable)))
- panic("couldn't pin page %p(%p)\n", ptable, kv_to_ma (ptable));
+ panic("couldn't pin page %p(%p)\n", ptable, (vm_offset_t) kv_to_ma (ptable));
#endif /* MACH_XEN */
}
}
@@ -802,10 +802,10 @@ void pmap_set_page_readwrite(void *_vaddr) {
vm_offset_t paddr = kvtophys(vaddr);
vm_offset_t canon_vaddr = phystokv(paddr);
if (hyp_do_update_va_mapping (kvtolin(vaddr), pa_to_pte (pa_to_ma(paddr)) | INTEL_PTE_VALID | INTEL_PTE_WRITE, UVMF_NONE))
- panic("couldn't set hiMMU readwrite for addr %p(%p)\n", vaddr, pa_to_ma (paddr));
+ panic("couldn't set hiMMU readwrite for addr %p(%p)\n", vaddr, (vm_offset_t) pa_to_ma (paddr));
if (canon_vaddr != vaddr)
if (hyp_do_update_va_mapping (kvtolin(canon_vaddr), pa_to_pte (pa_to_ma(paddr)) | INTEL_PTE_VALID | INTEL_PTE_WRITE, UVMF_NONE))
- panic("couldn't set hiMMU readwrite for paddr %p(%p)\n", canon_vaddr, pa_to_ma (paddr));
+ panic("couldn't set hiMMU readwrite for paddr %p(%p)\n", canon_vaddr, (vm_offset_t) pa_to_ma (paddr));
}
/* Set a page read only (so as to pin it for instance) */
@@ -815,12 +815,12 @@ void pmap_set_page_readonly(void *_vaddr) {
vm_offset_t canon_vaddr = phystokv(paddr);
if (*pmap_pde(kernel_pmap, vaddr) & INTEL_PTE_VALID) {
if (hyp_do_update_va_mapping (kvtolin(vaddr), pa_to_pte (pa_to_ma(paddr)) | INTEL_PTE_VALID, UVMF_NONE))
- panic("couldn't set hiMMU readonly for vaddr %p(%p)\n", vaddr, pa_to_ma (paddr));
+ panic("couldn't set hiMMU readonly for vaddr %p(%p)\n", vaddr, (vm_offset_t) pa_to_ma (paddr));
}
if (canon_vaddr != vaddr &&
*pmap_pde(kernel_pmap, canon_vaddr) & INTEL_PTE_VALID) {
if (hyp_do_update_va_mapping (kvtolin(canon_vaddr), pa_to_pte (pa_to_ma(paddr)) | INTEL_PTE_VALID, UVMF_NONE))
- panic("couldn't set hiMMU readonly for vaddr %p canon_vaddr %p paddr %p (%p)\n", vaddr, canon_vaddr, paddr, pa_to_ma (paddr));
+ panic("couldn't set hiMMU readonly for vaddr %p canon_vaddr %p paddr %p (%p)\n", vaddr, canon_vaddr, paddr, (vm_offset_t) pa_to_ma (paddr));
}
}
@@ -840,11 +840,11 @@ void pmap_set_page_readonly_init(void *_vaddr) {
/* Modify our future kernel map (can't use update_va_mapping for this)... */
if (*pmap_pde(kernel_pmap, vaddr) & INTEL_PTE_VALID)
if (!hyp_mmu_update_la (kvtolin(vaddr), pa_to_pte (kv_to_ma(vaddr)) | INTEL_PTE_VALID))
- panic("couldn't set hiMMU readonly for vaddr %p(%p)\n", vaddr, kv_to_ma (vaddr));
+ panic("couldn't set hiMMU readonly for vaddr %p(%p)\n", vaddr, (vm_offset_t) kv_to_ma (vaddr));
/* ... and the bootstrap map. */
if (*pmap_pde(&linear_pmap, vaddr) & INTEL_PTE_VALID)
if (hyp_do_update_va_mapping (vaddr, pa_to_pte (kv_to_ma(vaddr)) | INTEL_PTE_VALID, UVMF_NONE))
- panic("couldn't set MMU readonly for vaddr %p(%p)\n", vaddr, kv_to_ma (vaddr));
+ panic("couldn't set MMU readonly for vaddr %p(%p)\n", vaddr, (vm_offset_t) kv_to_ma (vaddr));
}
void pmap_clear_bootstrap_pagetable(pt_entry_t *base) {
@@ -855,13 +855,13 @@ void pmap_clear_bootstrap_pagetable(pt_entry_t *base) {
int j;
#endif /* PAE */
if (!hyp_mmuext_op_mfn (MMUEXT_UNPIN_TABLE, kv_to_mfn(base)))
- panic("pmap_clear_bootstrap_pagetable: couldn't unpin page %p(%p)\n", base, kv_to_ma(base));
+ panic("pmap_clear_bootstrap_pagetable: couldn't unpin page %p(%p)\n", base, (vm_offset_t) kv_to_ma(base));
#if PAE
for (j = 0; j < PDPNUM; j++)
{
pt_entry_t pdpe = base[j];
if (pdpe & INTEL_PTE_VALID) {
- dir = (pt_entry_t *) phystokv(pte_to_pa(pdpe));
+ dir = (pt_entry_t *) ptetokv(pdpe);
#else /* PAE */
dir = base;
#endif /* PAE */
@@ -1046,21 +1046,22 @@ void pmap_map_mfn(void *_addr, unsigned long mfn) {
vm_offset_t addr = (vm_offset_t) _addr;
pt_entry_t *pte, *pdp;
vm_offset_t ptp;
+ pt_entry_t ma = ((pt_entry_t) mfn) << PAGE_SHIFT;
if ((pte = pmap_pte(kernel_pmap, addr)) == PT_ENTRY_NULL) {
ptp = phystokv(pmap_page_table_page_alloc());
pmap_set_page_readonly((void*) ptp);
if (!hyp_mmuext_op_mfn (MMUEXT_PIN_L1_TABLE, pa_to_mfn(ptp)))
- panic("couldn't pin page %p(%p)\n",ptp,kv_to_ma(ptp));
+ panic("couldn't pin page %p(%p)\n",ptp,(vm_offset_t) kv_to_ma(ptp));
pdp = pmap_pde(kernel_pmap, addr);
if (!hyp_mmu_update_pte(kv_to_ma(pdp),
pa_to_pte(kv_to_ma(ptp)) | INTEL_PTE_VALID
| INTEL_PTE_USER
| INTEL_PTE_WRITE))
- panic("%s:%d could not set pde %p(%p) to %p(%p)\n",__FILE__,__LINE__,kvtophys((vm_offset_t)pdp),kv_to_ma(pdp), ptp, pa_to_ma(ptp));
+ panic("%s:%d could not set pde %p(%p) to %p(%p)\n",__FILE__,__LINE__,kvtophys((vm_offset_t)pdp),(vm_offset_t) kv_to_ma(pdp), ptp, (vm_offset_t) pa_to_ma(ptp));
pte = pmap_pte(kernel_pmap, addr);
}
- if (!hyp_mmu_update_pte(kv_to_ma(pte), ptoa(mfn) | INTEL_PTE_VALID | INTEL_PTE_WRITE))
- panic("%s:%d could not set pte %p(%p) to %p(%p)\n",__FILE__,__LINE__,pte,kv_to_ma(pte), ptoa(mfn), pa_to_ma(ptoa(mfn)));
+ if (!hyp_mmu_update_pte(kv_to_ma(pte), ma | INTEL_PTE_VALID | INTEL_PTE_WRITE))
+ panic("%s:%d could not set pte %p(%p) to %p(%p)\n",__FILE__,__LINE__,pte,(vm_offset_t) kv_to_ma(pte), ma, ma_to_pa(ma));
}
#endif /* MACH_XEN */
@@ -1207,7 +1208,7 @@ void pmap_destroy(p)
vm_page_lock_queues();
#ifdef MACH_XEN
if (!hyp_mmuext_op_mfn (MMUEXT_UNPIN_TABLE, pa_to_mfn(pa)))
- panic("pmap_destroy: couldn't unpin page %p(%p)\n", pa, kv_to_ma(pa));
+ panic("pmap_destroy: couldn't unpin page %p(%p)\n", pa, (vm_offset_t) kv_to_ma(pa));
pmap_set_page_readwrite((void*) phystokv(pa));
#endif /* MACH_XEN */
vm_page_free(m);
@@ -1844,12 +1845,12 @@ Retry:
#ifdef MACH_XEN
pmap_set_page_readonly((void *) ptp);
if (!hyp_mmuext_op_mfn (MMUEXT_PIN_L1_TABLE, kv_to_mfn(ptp)))
- panic("couldn't pin page %p(%p)\n",ptp,kv_to_ma(ptp));
+ panic("couldn't pin page %p(%p)\n",ptp,(vm_offset_t) kv_to_ma(ptp));
if (!hyp_mmu_update_pte(pa_to_ma(kvtophys((vm_offset_t)pdp)),
pa_to_pte(pa_to_ma(kvtophys(ptp))) | INTEL_PTE_VALID
| INTEL_PTE_USER
| INTEL_PTE_WRITE))
- panic("%s:%d could not set pde %p(%p,%p) to %p(%p,%p) %p\n",__FILE__,__LINE__, pdp, kvtophys((vm_offset_t)pdp), pa_to_ma(kvtophys((vm_offset_t)pdp)), ptp, kvtophys(ptp), pa_to_ma(kvtophys(ptp)), pa_to_pte(kv_to_ma(ptp)));
+ panic("%s:%d could not set pde %p(%p,%p) to %p(%p,%p) %p\n",__FILE__,__LINE__, pdp, kvtophys((vm_offset_t)pdp), (vm_offset_t) pa_to_ma(kvtophys((vm_offset_t)pdp)), ptp, kvtophys(ptp), (vm_offset_t) pa_to_ma(kvtophys(ptp)), (vm_offset_t) pa_to_pte(kv_to_ma(ptp)));
#else /* MACH_XEN */
*pdp = pa_to_pte(ptp) | INTEL_PTE_VALID
| INTEL_PTE_USER
@@ -2206,7 +2207,7 @@ void pmap_collect(p)
if (!(hyp_mmu_update_pte(pa_to_ma(kvtophys((vm_offset_t)pdep++)), 0)))
panic("%s:%d could not clear pde %p\n",__FILE__,__LINE__,pdep-1);
if (!hyp_mmuext_op_mfn (MMUEXT_UNPIN_TABLE, kv_to_mfn(ptable)))
- panic("couldn't unpin page %p(%p)\n", ptable, pa_to_ma(kvtophys((vm_offset_t)ptable)));
+ panic("couldn't unpin page %p(%p)\n", ptable, (vm_offset_t) pa_to_ma(kvtophys((vm_offset_t)ptable)));
pmap_set_page_readwrite(ptable);
#else /* MACH_XEN */
*pdep++ = 0;
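Most of the pmap.c hunks are the same one-line fix: panic()'s format string is printf-like, and on i386 a %p directive consumes 32 bits from the argument list, so passing a 64-bit pt_entry_t there pulls 8 bytes off the varargs area and misaligns every later argument. Casting to vm_offset_t keeps the walk consistent, at the cost of printing only the low half of the machine address. A userland sketch of the failure mode, assuming ILP32 varargs (void * casts added because standard printf's %p wants a pointer):

#include <stdint.h>
#include <stdio.h>

typedef uint32_t vm_offset_t;
typedef uint64_t pt_entry_t;

int main(void)
{
	pt_entry_t ma = 0x123456042ULL;		/* machine address > 4GiB */

	/* printf("%p then %d\n", ma, 7);	   undefined on ILP32: %p
						   reads 4 of ma's 8 bytes,
						   %d reads the other 4
						   instead of 7 */
	printf("%p then %d\n",
	       (void *) (uintptr_t) (vm_offset_t) ma, 7);	/* consistent */
	return 0;
}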
diff --git a/i386/intel/pmap.h b/i386/intel/pmap.h
index 6046ded..7ba7d2c 100644
--- a/i386/intel/pmap.h
+++ b/i386/intel/pmap.h
@@ -133,7 +133,11 @@ typedef unsigned int pt_entry_t;
#define INTEL_PTE_GLOBAL 0x00000100
#endif /* MACH_XEN */
#define INTEL_PTE_WIRED 0x00000200
+#ifdef PAE
+#define INTEL_PTE_PFN 0xfffffffffffff000ULL
+#else
#define INTEL_PTE_PFN 0xfffff000
+#endif
#define pa_to_pte(a) ((a) & INTEL_PTE_PFN)
#ifdef MACH_PSEUDO_PHYS
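The INTEL_PTE_PFN widening matters for the same reason: with PAE the entry is 64 bits, and masking with a 32-bit constant would silently truncate any machine frame above 4GiB. A two-line illustration (sketch only, constants as in the patch):

#include <stdint.h>

typedef uint64_t pt_entry_t;			/* PAE entry */

#define OLD_PTE_PFN	0xfffff000		/* 32-bit mask: truncates */
#define NEW_PTE_PFN	0xfffffffffffff000ULL	/* covers PAE frame bits */

/* (pt_entry_t) 0x123456000 & OLD_PTE_PFN == 0x023456000  -- frame lost */
/* (pt_entry_t) 0x123456000 & NEW_PTE_PFN == 0x123456000  -- preserved  */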
diff --git a/include/mach/xen.h b/include/mach/xen.h
index f954701..f1d9e41 100644
--- a/include/mach/xen.h
+++ b/include/mach/xen.h
@@ -67,12 +67,17 @@ extern unsigned long *mfn_list;
#endif /* MACH_PSEUDO_PHYS */
#define pa_to_mfn(a) (pfn_to_mfn(atop(a)))
+#ifdef PAE
+#define pa_to_ma(a) ({ vm_offset_t __a = (vm_offset_t) (a); (((pt_entry_t) pa_to_mfn(__a)) << PAGE_SHIFT) | (__a & PAGE_MASK); })
+#define ma_to_pa(a) ({ pt_entry_t __a = (pt_entry_t) (a); (mfn_to_pfn(__a >> PAGE_SHIFT) << PAGE_SHIFT) | (__a & PAGE_MASK); })
+#else
#define pa_to_ma(a) ({ vm_offset_t __a = (vm_offset_t) (a); ptoa(pa_to_mfn(__a)) | (__a & PAGE_MASK); })
-#define ma_to_pa(a) ({ vm_offset_t __a = (vm_offset_t) (a); (mfn_to_pfn(atop((vm_offset_t)(__a))) << PAGE_SHIFT) | (__a & PAGE_MASK); })
+#define ma_to_pa(a) ({ vm_offset_t __a = (vm_offset_t) (a); (mfn_to_pfn(atop((__a))) << PAGE_SHIFT) | (__a & PAGE_MASK); })
+#endif
#define kv_to_mfn(a) pa_to_mfn(_kvtophys(a))
#define kv_to_ma(a) pa_to_ma(_kvtophys(a))
-#define mfn_to_kv(mfn) (phystokv(ma_to_pa(ptoa(mfn))))
+#define mfn_to_kv(mfn) phystokv(ptoa(mfn_to_pfn(mfn)))
#include <machine/xen.h>
diff --git a/xen/xen.c b/xen/xen.c
index b3acef4..062ee4d 100644
--- a/xen/xen.c
+++ b/xen/xen.c
@@ -33,7 +33,7 @@
void hyp_invalidate_pte(pt_entry_t *pte)
{
if (!hyp_mmu_update_pte(kv_to_ma(pte), (*pte) & ~INTEL_PTE_VALID))
- panic("%s:%d could not set pte %p(%p) to %p(%p)\n",__FILE__,__LINE__,pte,kv_to_ma(pte),*pte,pa_to_ma(*pte));
+ panic("%s:%d could not set pte %p(%p) to %p(%p)\n",__FILE__,__LINE__,pte,(vm_offset_t) kv_to_ma(pte),*pte,ma_to_pa(*pte));
hyp_mmuext_op_void(MMUEXT_TLB_FLUSH_LOCAL);
}
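Finally, the include/mach/xen.h change is what ties the rest together: under PAE the pseudo-physical to machine translation must widen the machine frame number to pt_entry_t before shifting. A standalone sketch (GCC statement expressions, as in the patch) with a hypothetical 4-entry pfn/mfn table standing in for Xen's real mfn_list:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_MASK	((1 << PAGE_SHIFT) - 1)
typedef uint64_t pt_entry_t;			/* PAE */
typedef uint32_t vm_offset_t;			/* 32-bit kernel VA/PA */

/* Hypothetical pfn<->mfn tables: pfn 1 maps to a machine frame whose
   machine address needs more than 32 bits. */
static const uint64_t pfn_to_mfn_tab[] = { 7, 0x123456, 2, 3 };
static uint32_t pfn_of_mfn(uint64_t mfn)
{
	uint32_t i;
	for (i = 0; i < 4; i++)
		if (pfn_to_mfn_tab[i] == mfn)
			return i;
	return (uint32_t) -1;
}
#define pfn_to_mfn(pfn)	(pfn_to_mfn_tab[pfn])
#define mfn_to_pfn(mfn)	(pfn_of_mfn(mfn))
#define atop(a)		((a) >> PAGE_SHIFT)

/* PAE versions from the patch: the cast to pt_entry_t happens before
   the shift, so the result keeps all 64 bits. */
#define pa_to_ma(a) ({ vm_offset_t __a = (vm_offset_t) (a); \
	(((pt_entry_t) pfn_to_mfn(atop(__a))) << PAGE_SHIFT) | (__a & PAGE_MASK); })
#define ma_to_pa(a) ({ pt_entry_t __a = (pt_entry_t) (a); \
	(mfn_to_pfn(__a >> PAGE_SHIFT) << PAGE_SHIFT) | (__a & PAGE_MASK); })

int main(void)
{
	vm_offset_t pa = (1 << PAGE_SHIFT) | 0x42;	/* pfn 1, offset 0x42 */
	pt_entry_t ma = pa_to_ma(pa);

	printf("pa %08x -> ma %llx -> pa %llx\n",
	       pa, (unsigned long long) ma,
	       (unsigned long long) ma_to_pa(ma));
	/* prints: pa 00001042 -> ma 123456042 -> pa 1042 */
	return 0;
}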