summaryrefslogtreecommitdiff
path: root/i386
diff options
context:
space:
mode:
authorSamuel Thibault <samuel.thibault@ens-lyon.org>2013-01-28 02:31:44 +0100
committerSamuel Thibault <samuel.thibault@ens-lyon.org>2013-01-28 02:36:44 +0100
commit6b010591cb94032a6fef2cb81bed16446f31f8b6 (patch)
treed0137bddf0a7a1930b37b86f47396cd63d86705f /i386
parent8bb723b45c225eebc3c92b8d4632eaee50b0e496 (diff)
Add initial code for disabling PV pagetables
* xen/configfrag.ac (--disable-pv-pagetables): Add option.
* i386/xen/xen_boothdr.S (XEN_ELFNOTE_FEATURES) [!MACH_PV_PAGETABLES]: Add writable_page_tables.
* i386/i386/gdt.c: Turn appropriate MACH_XEN/MACH_HYP tests into MACH_PV_PAGETABLES tests.
* i386/i386/i386asm.sym: Likewise.
* i386/i386/ldt.c: Likewise.
* i386/i386/locore.S: Likewise.
* i386/i386/proc_reg.h: Likewise.
* i386/i386/user_ldt.c: Likewise.
* i386/i386/vm_param.h: Likewise.
* i386/i386/xen.h: Likewise.
* i386/i386at/model_dep.c: Likewise.
* i386/intel/pmap.h: Likewise.
* include/mach/xen.h: Likewise.
* xen/console.c: Likewise.
* xen/store.c: Likewise.
* i386/intel/pmap.c: Likewise. Define pmap_map_mfn as TODO stub.
Diffstat (limited to 'i386')
-rw-r--r--i386/i386/gdt.c4
-rw-r--r--i386/i386/i386asm.sym2
-rw-r--r--i386/i386/ldt.c2
-rw-r--r--i386/i386/locore.S6
-rw-r--r--i386/i386/proc_reg.h10
-rw-r--r--i386/i386/user_ldt.c4
-rw-r--r--i386/i386/vm_param.h6
-rw-r--r--i386/i386/xen.h6
-rw-r--r--i386/i386at/model_dep.c8
-rw-r--r--i386/intel/pmap.c158
-rw-r--r--i386/intel/pmap.h10
-rw-r--r--i386/xen/xen_boothdr.S6
12 files changed, 124 insertions, 98 deletions
diff --git a/i386/i386/gdt.c b/i386/i386/gdt.c
index b686faa..c3a9a7c 100644
--- a/i386/i386/gdt.c
+++ b/i386/i386/gdt.c
@@ -101,7 +101,7 @@ gdt_init()
"movw %w1,%%es\n"
"movw %w1,%%ss\n"
: : "i" (KERNEL_CS), "r" (KERNEL_DS), "r" (0));
-#ifdef MACH_XEN
+#ifdef MACH_PV_PAGETABLES
#if VM_MIN_KERNEL_ADDRESS != LINEAR_MIN_KERNEL_ADDRESS
/* things now get shifted */
#ifdef MACH_PSEUDO_PHYS
@@ -109,6 +109,6 @@ gdt_init()
#endif /* MACH_PSEUDO_PHYS */
la_shift += LINEAR_MIN_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS;
#endif
-#endif /* MACH_XEN */
+#endif /* MACH_PV_PAGETABLES */
}
diff --git a/i386/i386/i386asm.sym b/i386/i386/i386asm.sym
index 96f5761..dd1a2ed 100644
--- a/i386/i386/i386asm.sym
+++ b/i386/i386/i386asm.sym
@@ -151,5 +151,7 @@ offset shared_info si vcpu_info[0].evtchn_upcall_pending CPU_PENDING
offset shared_info si vcpu_info[0].evtchn_pending_sel CPU_PENDING_SEL
offset shared_info si evtchn_pending PENDING
offset shared_info si evtchn_mask EVTMASK
+#ifdef MACH_PV_PAGETABLES
offset shared_info si vcpu_info[0].arch.cr2 CR2
+#endif /* MACH_PV_PAGETABLES */
#endif /* MACH_XEN */
diff --git a/i386/i386/ldt.c b/i386/i386/ldt.c
index 58af94b..43b9efb 100644
--- a/i386/i386/ldt.c
+++ b/i386/i386/ldt.c
@@ -49,7 +49,9 @@ void
ldt_init()
{
#ifdef MACH_PV_DESCRIPTORS
+#ifdef MACH_PV_PAGETABLES
pmap_set_page_readwrite(ldt);
+#endif /* MACH_PV_PAGETABLES */
#else /* MACH_PV_DESCRIPTORS */
/* Initialize the master LDT descriptor in the GDT. */
fill_gdt_descriptor(KERNEL_LDT,
diff --git a/i386/i386/locore.S b/i386/i386/locore.S
index 0e1462c..d24e172 100644
--- a/i386/i386/locore.S
+++ b/i386/i386/locore.S
@@ -432,11 +432,11 @@ ENTRY(t_debug)
ENTRY(t_page_fault)
pushl $(T_PAGE_FAULT) /* mark a page fault trap */
pusha /* save the general registers */
-#ifdef MACH_XEN
+#ifdef MACH_PV_PAGETABLES
movl %ss:hyp_shared_info+CR2,%eax
-#else /* MACH_XEN */
+#else /* MACH_PV_PAGETABLES */
movl %cr2,%eax /* get the faulting address */
-#endif /* MACH_XEN */
+#endif /* MACH_PV_PAGETABLES */
movl %eax,R_CR2-R_EDI(%esp) /* save in esp save slot */
jmp trap_push_segs /* continue fault */
diff --git a/i386/i386/proc_reg.h b/i386/i386/proc_reg.h
index ca49339..c02b254 100644
--- a/i386/i386/proc_reg.h
+++ b/i386/i386/proc_reg.h
@@ -124,7 +124,7 @@ set_eflags(unsigned long eflags)
_temp__; \
})
-#ifdef MACH_HYP
+#ifdef MACH_PV_PAGETABLES
extern unsigned long cr3;
#define get_cr3() (cr3)
#define set_cr3(value) \
@@ -133,7 +133,7 @@ extern unsigned long cr3;
if (!hyp_set_cr3(value)) \
panic("set_cr3"); \
})
-#else /* MACH_HYP */
+#else /* MACH_PV_PAGETABLES */
#define get_cr3() \
({ \
register unsigned long _temp__; \
@@ -146,11 +146,11 @@ extern unsigned long cr3;
register unsigned long _temp__ = (value); \
asm volatile("mov %0, %%cr3" : : "r" (_temp__) : "memory"); \
})
-#endif /* MACH_HYP */
+#endif /* MACH_PV_PAGETABLES */
#define flush_tlb() set_cr3(get_cr3())
-#ifndef MACH_HYP
+#ifndef MACH_PV_PAGETABLES
#define invlpg(addr) \
({ \
asm volatile("invlpg (%0)" : : "r" (addr)); \
@@ -178,7 +178,7 @@ extern unsigned long cr3;
: "+r" (var) : "r" (end), \
"q" (LINEAR_DS), "q" (KERNEL_DS), "i" (PAGE_SIZE)); \
})
-#endif /* MACH_HYP */
+#endif /* MACH_PV_PAGETABLES */
#define get_cr4() \
({ \
diff --git a/i386/i386/user_ldt.c b/i386/i386/user_ldt.c
index 70ef7cb..74c10a4 100644
--- a/i386/i386/user_ldt.c
+++ b/i386/i386/user_ldt.c
@@ -275,8 +275,10 @@ i386_set_ldt(thread, first_selector, desc_list, count, desc_list_inline)
#ifdef MACH_PV_DESCRIPTORS
{
int i;
+#ifdef MACH_PV_PAGETABLES
for (i=0; i<(new_ldt->desc.limit_low + 1)/sizeof(struct real_descriptor); i+=PAGE_SIZE/sizeof(struct real_descriptor))
pmap_set_page_readwrite(&new_ldt->ldt[i]);
+#endif /* MACH_PV_PAGETABLES*/
kfree(new_ldt->alloc, new_ldt->desc.limit_low + 1
+ PAGE_SIZE + offsetof(struct user_ldt, ldt));
}
@@ -419,8 +421,10 @@ user_ldt_free(user_ldt)
{
#ifdef MACH_PV_DESCRIPTORS
int i;
+#ifdef MACH_PV_PAGETABLES
for (i=0; i<(user_ldt->desc.limit_low + 1)/sizeof(struct real_descriptor); i+=PAGE_SIZE/sizeof(struct real_descriptor))
pmap_set_page_readwrite(&user_ldt->ldt[i]);
+#endif /* MACH_PV_PAGETABLES */
kfree(user_ldt->alloc, user_ldt->desc.limit_low + 1
+ PAGE_SIZE + offsetof(struct user_ldt, ldt));
#else /* MACH_PV_DESCRIPTORS */
diff --git a/i386/i386/vm_param.h b/i386/i386/vm_param.h
index ada3563..df857f8 100644
--- a/i386/i386/vm_param.h
+++ b/i386/i386/vm_param.h
@@ -66,14 +66,14 @@
#define LINEAR_MIN_KERNEL_ADDRESS (VM_MAX_ADDRESS)
#define LINEAR_MAX_KERNEL_ADDRESS (0xffffffffUL)
-#ifdef MACH_XEN
+#ifdef MACH_PV_PAGETABLES
/* need room for mmu updates (2*8bytes) */
#define KERNEL_STACK_SIZE (4*I386_PGBYTES)
#define INTSTACK_SIZE (4*I386_PGBYTES)
-#else /* MACH_XEN */
+#else /* MACH_PV_PAGETABLES */
#define KERNEL_STACK_SIZE (1*I386_PGBYTES)
#define INTSTACK_SIZE (1*I386_PGBYTES)
-#endif /* MACH_XEN */
+#endif /* MACH_PV_PAGETABLES */
/* interrupt stack size */
/*
diff --git a/i386/i386/xen.h b/i386/i386/xen.h
index ba47eb4..5bdaf0b 100644
--- a/i386/i386/xen.h
+++ b/i386/i386/xen.h
@@ -152,6 +152,7 @@ MACH_INLINE type hyp_##name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, type
_hypcall1(long, set_trap_table, vm_offset_t /* struct trap_info * */, traps);
+#ifdef MACH_PV_PAGETABLES
_hypcall4(int, mmu_update, vm_offset_t /* struct mmu_update * */, req, int, count, vm_offset_t /* int * */, success_count, domid_t, domid)
MACH_INLINE int hyp_mmu_update_pte(pt_entry_t pte, pt_entry_t val)
{
@@ -170,6 +171,7 @@ MACH_INLINE int hyp_mmu_update_pte(pt_entry_t pte, pt_entry_t val)
#define hyp_mmu_update_la(la, val) hyp_mmu_update_pte( \
(kernel_pmap->dirbase[lin2pdenum((vm_offset_t)(la))] & INTEL_PTE_PFN) \
+ ptenum((vm_offset_t)(la)) * sizeof(pt_entry_t), val)
+#endif
_hypcall2(long, set_gdt, vm_offset_t /* unsigned long * */, frame_list, unsigned int, entries)
@@ -216,6 +218,7 @@ MACH_INLINE void hyp_free_page(unsigned long pfn, void *va)
/* save mfn */
unsigned long mfn = pfn_to_mfn(pfn);
+#ifdef MACH_PV_PAGETABLES
/* remove from mappings */
if (hyp_do_update_va_mapping(kvtolin(va), 0, UVMF_INVLPG|UVMF_ALL))
panic("couldn't clear page %d at %p\n", pfn, va);
@@ -224,11 +227,13 @@ MACH_INLINE void hyp_free_page(unsigned long pfn, void *va)
/* drop machine page */
mfn_list[pfn] = ~0;
#endif /* MACH_PSEUDO_PHYS */
+#endif
/* and free from Xen */
hyp_free_mfn(mfn);
}
+#ifdef MACH_PV_PAGETABLES
_hypcall4(int, mmuext_op, vm_offset_t /* struct mmuext_op * */, op, int, count, vm_offset_t /* int * */, success_count, domid_t, domid);
MACH_INLINE int hyp_mmuext_op_void(unsigned int cmd)
{
@@ -274,6 +279,7 @@ MACH_INLINE void hyp_invlpg(vm_offset_t lin) {
if (n < 1)
panic("couldn't invlpg\n");
}
+#endif
_hypcall2(long, set_timer_op, unsigned long, absolute_lo, unsigned long, absolute_hi);
#define hyp_do_set_timer_op(absolute_nsec) ({ \
diff --git a/i386/i386at/model_dep.c b/i386/i386at/model_dep.c
index 3761093..3db03d7 100644
--- a/i386/i386at/model_dep.c
+++ b/i386/i386at/model_dep.c
@@ -402,13 +402,13 @@ i386at_init(void)
#endif
#endif
-#ifdef MACH_XEN
+#ifdef MACH_PV_PAGETABLES
for (i = 0; i < PDPNUM; i++)
pmap_set_page_readonly_init((void*) kernel_page_dir + i * INTEL_PGBYTES);
#if PAE
pmap_set_page_readonly_init(kernel_pmap->pdpbase);
#endif /* PAE */
-#endif /* MACH_XEN */
+#endif /* MACH_PV_PAGETABLES */
#if PAE
set_cr3((unsigned)_kvtophys(kernel_pmap->pdpbase));
#ifndef MACH_HYP
@@ -430,9 +430,9 @@ i386at_init(void)
set_cr4(get_cr4() | CR4_PGE);
#endif /* MACH_HYP */
flush_instr_queue();
-#ifdef MACH_XEN
+#ifdef MACH_PV_PAGETABLES
pmap_clear_bootstrap_pagetable((void *)boot_info.pt_base);
-#endif /* MACH_XEN */
+#endif /* MACH_PV_PAGETABLES */
/* Interrupt stacks are allocated in physical memory,
while kernel stacks are allocated in kernel virtual memory,
diff --git a/i386/intel/pmap.c b/i386/intel/pmap.c
index e211bf3..0be1609 100644
--- a/i386/intel/pmap.c
+++ b/i386/intel/pmap.c
@@ -327,7 +327,7 @@ lock_data_t pmap_system_lock;
#define MAX_TBIS_SIZE 32 /* > this -> TBIA */ /* XXX */
-#ifdef MACH_HYP
+#ifdef MACH_PV_PAGETABLES
#if 1
#define INVALIDATE_TLB(pmap, s, e) hyp_mmuext_op_void(MMUEXT_TLB_FLUSH_LOCAL)
#else
@@ -339,7 +339,7 @@ lock_data_t pmap_system_lock;
hyp_mmuext_op_void(MMUEXT_TLB_FLUSH_LOCAL); \
} while(0)
#endif
-#else /* MACH_HYP */
+#else /* MACH_PV_PAGETABLES */
#if 0
/* It is hard to know when a TLB flush becomes less expensive than a bunch of
* invlpgs. But it surely is more expensive than just one invlpg. */
@@ -358,7 +358,7 @@ lock_data_t pmap_system_lock;
flush_tlb(); \
}
#endif
-#endif /* MACH_HYP */
+#endif /* MACH_PV_PAGETABLES */
#if NCPUS > 1
@@ -528,10 +528,10 @@ vm_offset_t pmap_map_bd(virt, start, end, prot)
register pt_entry_t template;
register pt_entry_t *pte;
int spl;
-#ifdef MACH_XEN
+#ifdef MACH_PV_PAGETABLES
int n, i = 0;
struct mmu_update update[HYP_BATCH_MMU_UPDATES];
-#endif /* MACH_XEN */
+#endif /* MACH_PV_PAGETABLES */
template = pa_to_pte(start)
| INTEL_PTE_NCACHE|INTEL_PTE_WTHRU
@@ -546,7 +546,7 @@ vm_offset_t pmap_map_bd(virt, start, end, prot)
pte = pmap_pte(kernel_pmap, virt);
if (pte == PT_ENTRY_NULL)
panic("pmap_map_bd: Invalid kernel address\n");
-#ifdef MACH_XEN
+#ifdef MACH_PV_PAGETABLES
update[i].ptr = kv_to_ma(pte);
update[i].val = pa_to_ma(template);
i++;
@@ -556,20 +556,20 @@ vm_offset_t pmap_map_bd(virt, start, end, prot)
panic("couldn't pmap_map_bd\n");
i = 0;
}
-#else /* MACH_XEN */
+#else /* MACH_PV_PAGETABLES */
WRITE_PTE(pte, template)
-#endif /* MACH_XEN */
+#endif /* MACH_PV_PAGETABLES */
pte_increment_pa(template);
virt += PAGE_SIZE;
start += PAGE_SIZE;
}
-#ifdef MACH_XEN
+#ifdef MACH_PV_PAGETABLES
if (i > HYP_BATCH_MMU_UPDATES)
panic("overflowed array in pmap_map_bd");
hyp_mmu_update(kvtolin(&update), i, kvtolin(&n), DOMID_SELF);
if (n != i)
panic("couldn't pmap_map_bd\n");
-#endif /* MACH_XEN */
+#endif /* MACH_PV_PAGETABLES */
PMAP_READ_UNLOCK(pmap, spl);
return(virt);
}
@@ -649,7 +649,7 @@ void pmap_bootstrap()
kernel_pmap->dirbase[i] = 0;
}
-#ifdef MACH_XEN
+#ifdef MACH_PV_PAGETABLES
/* We don't actually deal with the CR3 register content at all */
hyp_vm_assist(VMASST_CMD_enable, VMASST_TYPE_pae_extended_cr3);
/*
@@ -698,7 +698,7 @@ void pmap_bootstrap()
}
}
}
-#endif /* MACH_XEN */
+#endif /* MACH_PV_PAGETABLES */
/*
* Allocate and set up the kernel page tables.
@@ -736,24 +736,24 @@ void pmap_bootstrap()
WRITE_PTE(pte, 0);
}
else
-#ifdef MACH_XEN
+#ifdef MACH_PV_PAGETABLES
if (va == (vm_offset_t) &hyp_shared_info)
{
*pte = boot_info.shared_info | INTEL_PTE_VALID | INTEL_PTE_WRITE;
va += INTEL_PGBYTES;
}
else
-#endif /* MACH_XEN */
+#endif /* MACH_PV_PAGETABLES */
{
extern char _start[], etext[];
if (((va >= (vm_offset_t) _start)
&& (va + INTEL_PGBYTES <= (vm_offset_t)etext))
-#ifdef MACH_XEN
+#ifdef MACH_PV_PAGETABLES
|| (va >= (vm_offset_t) boot_info.pt_base
&& (va + INTEL_PGBYTES <=
(vm_offset_t) ptable + INTEL_PGBYTES))
-#endif /* MACH_XEN */
+#endif /* MACH_PV_PAGETABLES */
)
{
WRITE_PTE(pte, pa_to_pte(_kvtophys(va))
@@ -761,7 +761,7 @@ void pmap_bootstrap()
}
else
{
-#ifdef MACH_XEN
+#ifdef MACH_PV_PAGETABLES
/* Keep supplementary L1 pages read-only */
int i;
for (i = 0; i < NSUP_L1; i++)
@@ -771,7 +771,7 @@ void pmap_bootstrap()
break;
}
if (i == NSUP_L1)
-#endif /* MACH_XEN */
+#endif /* MACH_PV_PAGETABLES */
WRITE_PTE(pte, pa_to_pte(_kvtophys(va))
| INTEL_PTE_VALID | INTEL_PTE_WRITE | global)
@@ -784,11 +784,11 @@ void pmap_bootstrap()
WRITE_PTE(pte, 0);
va += INTEL_PGBYTES;
}
-#ifdef MACH_XEN
+#ifdef MACH_PV_PAGETABLES
pmap_set_page_readonly_init(ptable);
if (!hyp_mmuext_op_mfn (MMUEXT_PIN_L1_TABLE, kv_to_mfn (ptable)))
panic("couldn't pin page %p(%p)\n", ptable, (vm_offset_t) kv_to_ma (ptable));
-#endif /* MACH_XEN */
+#endif /* MACH_PV_PAGETABLES */
}
}
@@ -796,7 +796,7 @@ void pmap_bootstrap()
soon after we return from here. */
}
-#ifdef MACH_XEN
+#ifdef MACH_PV_PAGETABLES
/* These are only required because of Xen security policies */
/* Set back a page read write */
@@ -889,7 +889,7 @@ void pmap_clear_bootstrap_pagetable(pt_entry_t *base) {
#endif /* PAE */
hyp_free_page(atop(_kvtophys(base)), base);
}
-#endif /* MACH_XEN */
+#endif /* MACH_PV_PAGETABLES */
void pmap_virtual_space(startp, endp)
vm_offset_t *startp;
@@ -1046,6 +1046,7 @@ pmap_page_table_page_alloc()
}
#ifdef MACH_XEN
+#ifdef MACH_PV_PAGETABLES
void pmap_map_mfn(void *_addr, unsigned long mfn) {
vm_offset_t addr = (vm_offset_t) _addr;
pt_entry_t *pte, *pdp;
@@ -1067,6 +1068,11 @@ void pmap_map_mfn(void *_addr, unsigned long mfn) {
if (!hyp_mmu_update_pte(kv_to_ma(pte), ma | INTEL_PTE_VALID | INTEL_PTE_WRITE))
panic("%s:%d could not set pte %p(%p) to %p(%p)\n",__FILE__,__LINE__,pte,(vm_offset_t) kv_to_ma(pte), ma, ma_to_pa(ma));
}
+#else /* MACH_PV_PAGETABLES */
+void pmap_map_mfn(void *_addr, unsigned long mfn) {
+ panic("TODO %s:%d\n",__FILE__,__LINE__);
+}
+#endif /* MACH_PV_PAGETABLES */
#endif /* MACH_XEN */
/*
@@ -1136,13 +1142,13 @@ pmap_t pmap_create(size)
p->dirbase[lin2pdenum(LINEAR_MIN_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS)] = 0;
#endif
#endif
-#ifdef MACH_XEN
+#ifdef MACH_PV_PAGETABLES
{
int i;
for (i = 0; i < PDPNUM; i++)
pmap_set_page_readonly((void*) p->dirbase + i * INTEL_PGBYTES);
}
-#endif /* MACH_XEN */
+#endif /* MACH_PV_PAGETABLES */
#if PAE
if (kmem_alloc_wired(kernel_map,
@@ -1154,9 +1160,9 @@ pmap_t pmap_create(size)
for (i = 0; i < PDPNUM; i++)
WRITE_PTE(&p->pdpbase[i], pa_to_pte(kvtophys((vm_offset_t) p->dirbase + i * INTEL_PGBYTES)) | INTEL_PTE_VALID);
}
-#ifdef MACH_XEN
+#ifdef MACH_PV_PAGETABLES
pmap_set_page_readonly(p->pdpbase);
-#endif /* MACH_XEN */
+#endif /* MACH_PV_PAGETABLES */
#endif /* PAE */
p->ref_count = 1;
@@ -1216,29 +1222,29 @@ void pmap_destroy(p)
if (m == VM_PAGE_NULL)
panic("pmap_destroy: pte page not in object");
vm_page_lock_queues();
-#ifdef MACH_XEN
+#ifdef MACH_PV_PAGETABLES
if (!hyp_mmuext_op_mfn (MMUEXT_UNPIN_TABLE, pa_to_mfn(pa)))
panic("pmap_destroy: couldn't unpin page %p(%p)\n", pa, (vm_offset_t) kv_to_ma(pa));
pmap_set_page_readwrite((void*) phystokv(pa));
-#endif /* MACH_XEN */
+#endif /* MACH_PV_PAGETABLES */
vm_page_free(m);
inuse_ptepages_count--;
vm_page_unlock_queues();
vm_object_unlock(pmap_object);
}
}
-#ifdef MACH_XEN
+#ifdef MACH_PV_PAGETABLES
{
int i;
for (i = 0; i < PDPNUM; i++)
pmap_set_page_readwrite((void*) p->dirbase + i * INTEL_PGBYTES);
}
-#endif /* MACH_XEN */
+#endif /* MACH_PV_PAGETABLES */
kmem_free(kernel_map, (vm_offset_t)p->dirbase, PDPNUM * INTEL_PGBYTES);
#if PAE
-#ifdef MACH_XEN
+#ifdef MACH_PV_PAGETABLES
pmap_set_page_readwrite(p->pdpbase);
-#endif /* MACH_XEN */
+#endif /* MACH_PV_PAGETABLES */
kmem_free(kernel_map, (vm_offset_t)p->pdpbase, INTEL_PGBYTES);
#endif /* PAE */
kmem_cache_free(&pmap_cache, (vm_offset_t) p);
@@ -1284,10 +1290,10 @@ void pmap_remove_range(pmap, va, spte, epte)
int num_removed, num_unwired;
int pai;
vm_offset_t pa;
-#ifdef MACH_XEN
+#ifdef MACH_PV_PAGETABLES
int n, ii = 0;
struct mmu_update update[HYP_BATCH_MMU_UPDATES];
-#endif /* MACH_XEN */
+#endif /* MACH_PV_PAGETABLES */
#if DEBUG_PTE_PAGE
if (pmap != kernel_pmap)
@@ -1316,7 +1322,7 @@ void pmap_remove_range(pmap, va, spte, epte)
register int i = ptes_per_vm_page;
register pt_entry_t *lpte = cpte;
do {
-#ifdef MACH_XEN
+#ifdef MACH_PV_PAGETABLES
update[ii].ptr = kv_to_ma(lpte);
update[ii].val = 0;
ii++;
@@ -1326,9 +1332,9 @@ void pmap_remove_range(pmap, va, spte, epte)
panic("couldn't pmap_remove_range\n");
ii = 0;
}
-#else /* MACH_XEN */
+#else /* MACH_PV_PAGETABLES */
*lpte = 0;
-#endif /* MACH_XEN */
+#endif /* MACH_PV_PAGETABLES */
lpte++;
} while (--i > 0);
continue;
@@ -1349,7 +1355,7 @@ void pmap_remove_range(pmap, va, spte, epte)
do {
pmap_phys_attributes[pai] |=
*lpte & (PHYS_MODIFIED|PHYS_REFERENCED);
-#ifdef MACH_XEN
+#ifdef MACH_PV_PAGETABLES
update[ii].ptr = kv_to_ma(lpte);
update[ii].val = 0;
ii++;
@@ -1359,9 +1365,9 @@ void pmap_remove_range(pmap, va, spte, epte)
panic("couldn't pmap_remove_range\n");
ii = 0;
}
-#else /* MACH_XEN */
+#else /* MACH_PV_PAGETABLES */
*lpte = 0;
-#endif /* MACH_XEN */
+#endif /* MACH_PV_PAGETABLES */
lpte++;
} while (--i > 0);
}
@@ -1407,13 +1413,13 @@ void pmap_remove_range(pmap, va, spte, epte)
}
}
-#ifdef MACH_XEN
+#ifdef MACH_PV_PAGETABLES
if (ii > HYP_BATCH_MMU_UPDATES)
panic("overflowed array in pmap_remove_range");
hyp_mmu_update(kvtolin(&update), ii, kvtolin(&n), DOMID_SELF);
if (n != ii)
panic("couldn't pmap_remove_range\n");
-#endif /* MACH_XEN */
+#endif /* MACH_PV_PAGETABLES */
/*
* Update the counts
@@ -1559,12 +1565,12 @@ void pmap_page_protect(phys, prot)
do {
pmap_phys_attributes[pai] |=
*pte & (PHYS_MODIFIED|PHYS_REFERENCED);
-#ifdef MACH_XEN
+#ifdef MACH_PV_PAGETABLES
if (!hyp_mmu_update_pte(kv_to_ma(pte++), 0))
panic("%s:%d could not clear pte %p\n",__FILE__,__LINE__,pte-1);
-#else /* MACH_XEN */
+#else /* MACH_PV_PAGETABLES */
*pte++ = 0;
-#endif /* MACH_XEN */
+#endif /* MACH_PV_PAGETABLES */
} while (--i > 0);
}
@@ -1594,12 +1600,12 @@ void pmap_page_protect(phys, prot)
register int i = ptes_per_vm_page;
do {
-#ifdef MACH_XEN
+#ifdef MACH_PV_PAGETABLES
if (!hyp_mmu_update_pte(kv_to_ma(pte), *pte & ~INTEL_PTE_WRITE))
panic("%s:%d could not disable write on pte %p\n",__FILE__,__LINE__,pte);
-#else /* MACH_XEN */
+#else /* MACH_PV_PAGETABLES */
*pte &= ~INTEL_PTE_WRITE;
-#endif /* MACH_XEN */
+#endif /* MACH_PV_PAGETABLES */
pte++;
} while (--i > 0);
@@ -1688,14 +1694,14 @@ void pmap_protect(map, s, e, prot)
spte = &spte[ptenum(s)];
epte = &spte[intel_btop(l-s)];
-#ifdef MACH_XEN
+#ifdef MACH_PV_PAGETABLES
int n, i = 0;
struct mmu_update update[HYP_BATCH_MMU_UPDATES];
-#endif /* MACH_XEN */
+#endif /* MACH_PV_PAGETABLES */
while (spte < epte) {
if (*spte & INTEL_PTE_VALID) {
-#ifdef MACH_XEN
+#ifdef MACH_PV_PAGETABLES
update[i].ptr = kv_to_ma(spte);
update[i].val = *spte & ~INTEL_PTE_WRITE;
i++;
@@ -1705,19 +1711,19 @@ void pmap_protect(map, s, e, prot)
panic("couldn't pmap_protect\n");
i = 0;
}
-#else /* MACH_XEN */
+#else /* MACH_PV_PAGETABLES */
*spte &= ~INTEL_PTE_WRITE;
-#endif /* MACH_XEN */
+#endif /* MACH_PV_PAGETABLES */
}
spte++;
}
-#ifdef MACH_XEN
+#ifdef MACH_PV_PAGETABLES
if (i > HYP_BATCH_MMU_UPDATES)
panic("overflowed array in pmap_protect");
hyp_mmu_update(kvtolin(&update), i, kvtolin(&n), DOMID_SELF);
if (n != i)
panic("couldn't pmap_protect\n");
-#endif /* MACH_XEN */
+#endif /* MACH_PV_PAGETABLES */
}
s = l;
pde++;
@@ -1854,7 +1860,7 @@ Retry:
/*XX pdp = &pmap->dirbase[pdenum(v) & ~(i-1)];*/
pdp = pmap_pde(pmap, v);
do {
-#ifdef MACH_XEN
+#ifdef MACH_PV_PAGETABLES
pmap_set_page_readonly((void *) ptp);
if (!hyp_mmuext_op_mfn (MMUEXT_PIN_L1_TABLE, kv_to_mfn(ptp)))
panic("couldn't pin page %p(%p)\n",ptp,(vm_offset_t) kv_to_ma(ptp));
@@ -1863,11 +1869,11 @@ Retry:
| INTEL_PTE_USER
| INTEL_PTE_WRITE))
panic("%s:%d could not set pde %p(%p,%p) to %p(%p,%p) %p\n",__FILE__,__LINE__, pdp, kvtophys((vm_offset_t)pdp), (vm_offset_t) pa_to_ma(kvtophys((vm_offset_t)pdp)), ptp, kvtophys(ptp), (vm_offset_t) pa_to_ma(kvtophys(ptp)), (vm_offset_t) pa_to_pte(kv_to_ma(ptp)));
-#else /* MACH_XEN */
+#else /* MACH_PV_PAGETABLES */
*pdp = pa_to_pte(kvtophys(ptp)) | INTEL_PTE_VALID
| INTEL_PTE_USER
| INTEL_PTE_WRITE;
-#endif /* MACH_XEN */
+#endif /* MACH_PV_PAGETABLES */
pdp++;
ptp += INTEL_PGBYTES;
} while (--i > 0);
@@ -1907,12 +1913,12 @@ Retry:
do {
if (*pte & INTEL_PTE_MOD)
template |= INTEL_PTE_MOD;
-#ifdef MACH_XEN
+#ifdef MACH_PV_PAGETABLES
if (!hyp_mmu_update_pte(kv_to_ma(pte), pa_to_ma(template)))
panic("%s:%d could not set pte %p to %p\n",__FILE__,__LINE__,pte,template);
-#else /* MACH_XEN */
+#else /* MACH_PV_PAGETABLES */
WRITE_PTE(pte, template)
-#endif /* MACH_XEN */
+#endif /* MACH_PV_PAGETABLES */
pte++;
pte_increment_pa(template);
} while (--i > 0);
@@ -2017,12 +2023,12 @@ Retry:
template |= INTEL_PTE_WIRED;
i = ptes_per_vm_page;
do {
-#ifdef MACH_XEN
+#ifdef MACH_PV_PAGETABLES
if (!(hyp_mmu_update_pte(kv_to_ma(pte), pa_to_ma(template))))
panic("%s:%d could not set pte %p to %p\n",__FILE__,__LINE__,pte,template);
-#else /* MACH_XEN */
+#else /* MACH_PV_PAGETABLES */
WRITE_PTE(pte, template)
-#endif /* MACH_XEN */
+#endif /* MACH_PV_PAGETABLES */
pte++;
pte_increment_pa(template);
} while (--i > 0);
@@ -2077,12 +2083,12 @@ void pmap_change_wiring(map, v, wired)
map->stats.wired_count--;
i = ptes_per_vm_page;
do {
-#ifdef MACH_XEN
+#ifdef MACH_PV_PAGETABLES
if (!(hyp_mmu_update_pte(kv_to_ma(pte), *pte & ~INTEL_PTE_WIRED)))
panic("%s:%d could not wire down pte %p\n",__FILE__,__LINE__,pte);
-#else /* MACH_XEN */
+#else /* MACH_PV_PAGETABLES */
*pte &= ~INTEL_PTE_WIRED;
-#endif /* MACH_XEN */
+#endif /* MACH_PV_PAGETABLES */
pte++;
} while (--i > 0);
}
@@ -2213,7 +2219,7 @@ void pmap_collect(p)
register int i = ptes_per_vm_page;
register pt_entry_t *pdep = pdp;
do {
-#ifdef MACH_XEN
+#ifdef MACH_PV_PAGETABLES
unsigned long pte = *pdep;
void *ptable = (void*) ptetokv(pte);
if (!(hyp_mmu_update_pte(pa_to_ma(kvtophys((vm_offset_t)pdep++)), 0)))
@@ -2221,9 +2227,9 @@ void pmap_collect(p)
if (!hyp_mmuext_op_mfn (MMUEXT_UNPIN_TABLE, kv_to_mfn(ptable)))
panic("couldn't unpin page %p(%p)\n", ptable, (vm_offset_t) pa_to_ma(kvtophys((vm_offset_t)ptable)));
pmap_set_page_readwrite(ptable);
-#else /* MACH_XEN */
+#else /* MACH_PV_PAGETABLES */
*pdep++ = 0;
-#endif /* MACH_XEN */
+#endif /* MACH_PV_PAGETABLES */
} while (--i > 0);
}
@@ -2440,12 +2446,12 @@ phys_attribute_clear(phys, bits)
{
register int i = ptes_per_vm_page;
do {
-#ifdef MACH_XEN
+#ifdef MACH_PV_PAGETABLES
if (!(hyp_mmu_update_pte(kv_to_ma(pte), *pte & ~bits)))
panic("%s:%d could not clear bits %lx from pte %p\n",__FILE__,__LINE__,bits,pte);
-#else /* MACH_XEN */
+#else /* MACH_PV_PAGETABLES */
*pte &= ~bits;
-#endif /* MACH_XEN */
+#endif /* MACH_PV_PAGETABLES */
} while (--i > 0);
}
PMAP_UPDATE_TLBS(pmap, va, va + PAGE_SIZE);
@@ -2806,12 +2812,12 @@ pmap_unmap_page_zero ()
if (!pte)
return;
assert (pte);
-#ifdef MACH_XEN
+#ifdef MACH_PV_PAGETABLES
if (!hyp_mmu_update_pte(kv_to_ma(pte), 0))
printf("couldn't unmap page 0\n");
-#else /* MACH_XEN */
+#else /* MACH_PV_PAGETABLES */
*pte = 0;
INVALIDATE_TLB(kernel_pmap, 0, PAGE_SIZE);
-#endif /* MACH_XEN */
+#endif /* MACH_PV_PAGETABLES */
}
#endif /* __i386__ */
diff --git a/i386/intel/pmap.h b/i386/intel/pmap.h
index 2692aae..93293e3 100644
--- a/i386/intel/pmap.h
+++ b/i386/intel/pmap.h
@@ -132,12 +132,12 @@ typedef unsigned int pt_entry_t;
#define INTEL_PTE_NCACHE 0x00000010
#define INTEL_PTE_REF 0x00000020
#define INTEL_PTE_MOD 0x00000040
-#ifdef MACH_XEN
+#ifdef MACH_PV_PAGETABLES
/* Not supported */
#define INTEL_PTE_GLOBAL 0x00000000
-#else /* MACH_XEN */
+#else /* MACH_PV_PAGETABLES */
#define INTEL_PTE_GLOBAL 0x00000100
-#endif /* MACH_XEN */
+#endif /* MACH_PV_PAGETABLES */
#define INTEL_PTE_WIRED 0x00000200
#ifdef PAE
#define INTEL_PTE_PFN 0x00007ffffffff000ULL
@@ -178,13 +178,13 @@ typedef struct pmap *pmap_t;
#define PMAP_NULL ((pmap_t) 0)
-#ifdef MACH_XEN
+#ifdef MACH_PV_PAGETABLES
extern void pmap_set_page_readwrite(void *addr);
extern void pmap_set_page_readonly(void *addr);
extern void pmap_set_page_readonly_init(void *addr);
extern void pmap_map_mfn(void *addr, unsigned long mfn);
extern void pmap_clear_bootstrap_pagetable(pt_entry_t *addr);
-#endif /* MACH_XEN */
+#endif /* MACH_PV_PAGETABLES */
#if PAE
#define set_pmap(pmap) set_cr3(kvtophys((vm_offset_t)(pmap)->pdpbase))
diff --git a/i386/xen/xen_boothdr.S b/i386/xen/xen_boothdr.S
index 617be51..84666a8 100644
--- a/i386/xen/xen_boothdr.S
+++ b/i386/xen/xen_boothdr.S
@@ -36,6 +36,9 @@
#else /* MACH_PSEUDO_PHYS */
.ascii ",FEATURES=!auto_translated_physmap"
#endif
+#ifndef MACH_PV_PAGETABLES
+ .ascii "|writable_page_tables"
+#endif /* MACH_PV_PAGETABLES */
#ifndef MACH_PV_DESCRIPTORS
.ascii "|writable_descriptor_tables"
#endif /* MACH_PV_DESCRIPTORS */
@@ -76,6 +79,9 @@
#else /* MACH_PSEUDO_PHYS */
"!auto_translated_physmap"
#endif
+#ifndef MACH_PV_PAGETABLES
+ "|writable_page_tables"
+#endif /* MACH_PV_PAGETABLES */
#ifndef MACH_PV_DESCRIPTORS
"|writable_descriptor_tables"
#endif /* MACH_PV_DESCRIPTORS */