summaryrefslogtreecommitdiff
path: root/i386/intel/pmap.c
diff options
context:
space:
mode:
Diffstat (limited to 'i386/intel/pmap.c')
-rw-r--r--i386/intel/pmap.c45
1 file changed, 23 insertions(+), 22 deletions(-)
diff --git a/i386/intel/pmap.c b/i386/intel/pmap.c
index 102309f..5b9ccaf 100644
--- a/i386/intel/pmap.c
+++ b/i386/intel/pmap.c
@@ -83,6 +83,7 @@
#include <i386/proc_reg.h>
#include <i386/locore.h>
#include <i386/model_dep.h>
+#include <i386at/biosmem.h>
#ifdef MACH_PSEUDO_PHYS
#define WRITE_PTE(pte_p, pte_entry) *(pte_p) = pte_entry?pa_to_ma(pte_entry):0;
@@ -627,19 +628,16 @@ void pmap_bootstrap(void)
/* Note: initial Xen mapping holds at least 512kB free mapped page.
* We use that for directly building our linear mapping. */
#if PAE
- {
- vm_offset_t addr;
- init_alloc_aligned(PDPNUM * INTEL_PGBYTES, &addr);
- kernel_pmap->dirbase = kernel_page_dir = (pt_entry_t*)phystokv(addr);
- }
- kernel_pmap->pdpbase = (pt_entry_t*)phystokv(pmap_grab_page());
+ kernel_pmap->dirbase = kernel_page_dir =
+ (pt_entry_t*) biosmem_bootalloc (PDPNUM);
+ kernel_pmap->pdpbase = (pt_entry_t*) biosmem_bootalloc(1);
{
int i;
for (i = 0; i < PDPNUM; i++)
WRITE_PTE(&kernel_pmap->pdpbase[i], pa_to_pte(_kvtophys((void *) kernel_pmap->dirbase + i * INTEL_PGBYTES)) | INTEL_PTE_VALID);
}
#else /* PAE */
- kernel_pmap->dirbase = kernel_page_dir = (pt_entry_t*)phystokv(pmap_grab_page());
+ kernel_pmap->dirbase = kernel_page_dir = biosmem_bootalloc(1);
#endif /* PAE */
{
int i;
@@ -679,7 +677,7 @@ void pmap_bootstrap(void)
struct mmu_update update;
int j, n;
- l1_map[n_l1map] = (pt_entry_t*) phystokv(pmap_grab_page());
+ l1_map[n_l1map] = (pt_entry_t*) biosmem_bootalloc(1);
for (j = 0; j < NPTES; j++)
l1_map[n_l1map][j] = (((pt_entry_t)pfn_to_mfn(lin2pdenum(la - VM_MIN_KERNEL_ADDRESS) * NPTES + j)) << PAGE_SHIFT) | INTEL_PTE_VALID | INTEL_PTE_WRITE;
pmap_set_page_readonly_init(l1_map[n_l1map]);
@@ -719,7 +717,7 @@ void pmap_bootstrap(void)
for (va = phystokv(phys_first_addr); va >= phystokv(phys_first_addr) && va < kernel_virtual_end; )
{
pt_entry_t *pde = kernel_page_dir + lin2pdenum(kvtolin(va));
- pt_entry_t *ptable = (pt_entry_t*)phystokv(pmap_grab_page());
+ pt_entry_t *ptable = (pt_entry_t*) biosmem_bootalloc(1);
pt_entry_t *pte;
/* Initialize the page directory entry. */
@@ -955,9 +953,8 @@ void pmap_init(void)
s = (vm_size_t) (sizeof(struct pv_entry) * npages
+ pv_lock_table_size(npages)
+ npages);
-
- s = round_page(s);
- if (kmem_alloc_wired(kernel_map, &addr, s) != KERN_SUCCESS)
+ addr = kalloc(s);
+ if (! addr)
panic("pmap_init");
memset((void *) addr, 0, s);
@@ -1158,6 +1155,7 @@ pmap_page_table_page_dealloc(vm_offset_t pa)
pmap_t pmap_create(vm_size_t size)
{
pmap_t p;
+ struct vm_page *mem;
pmap_statistics_t stats;
/*
@@ -1177,10 +1175,11 @@ pmap_t pmap_create(vm_size_t size)
if (p == PMAP_NULL)
panic("pmap_create");
- if (kmem_alloc_wired(kernel_map,
- (vm_offset_t *)&p->dirbase, PDPNUM * INTEL_PGBYTES)
- != KERN_SUCCESS)
- panic("pmap_create");
+ mem = vm_page_alloc_p(iorder2(PDPNUM), VM_PAGE_SEL_DIRECTMAP,
+ VM_PAGE_KERNEL);
+ if (! mem)
+ return PMAP_NULL;
+ p->dirbase = (pt_entry_t *) phystokv(mem->phys_addr);
memcpy(p->dirbase, kernel_page_dir, PDPNUM * INTEL_PGBYTES);
#ifdef LINUX_DEV
@@ -1198,10 +1197,10 @@ pmap_t pmap_create(vm_size_t size)
#endif /* MACH_PV_PAGETABLES */
#if PAE
- if (kmem_alloc_wired(kernel_map,
- (vm_offset_t *)&p->pdpbase, INTEL_PGBYTES)
- != KERN_SUCCESS)
- panic("pmap_create");
+ mem = vm_page_alloc_p(0, VM_PAGE_SEL_DIRECTMAP, VM_PAGE_KERNEL);
+ if (! mem)
+ return PMAP_NULL;
+ p->pdpbase = (pt_entry_t *) phystokv(mem->phys_addr);
{
int i;
for (i = 0; i < PDPNUM; i++)
@@ -1286,12 +1285,14 @@ void pmap_destroy(pmap_t p)
pmap_set_page_readwrite((void*) p->dirbase + i * INTEL_PGBYTES);
}
#endif /* MACH_PV_PAGETABLES */
- kmem_free(kernel_map, (vm_offset_t)p->dirbase, PDPNUM * INTEL_PGBYTES);
+ m = vm_page_lookup_pa(_kvtophys(p->dirbase));
+ vm_page_free_p(m, PDPNUM);
#if PAE
#ifdef MACH_PV_PAGETABLES
pmap_set_page_readwrite(p->pdpbase);
#endif /* MACH_PV_PAGETABLES */
- kmem_free(kernel_map, (vm_offset_t)p->pdpbase, INTEL_PGBYTES);
+ m = vm_page_lookup_pa(_kvtophys(p->pdpbase));
+ vm_page_free_p(m, 0);
#endif /* PAE */
kmem_cache_free(&pmap_cache, (vm_offset_t) p);
}