author     Samuel Thibault <samuel.thibault@ens-lyon.org>  2008-11-10 15:18:48 +0000
committer  Thomas Schwinge <tschwinge@gnu.org>             2009-06-18 00:27:20 +0200
commit     a652906986e061b69fee367ff20a87d2a6b16dd3 (patch)
tree       48a8d48830041215afe8fe8e27eac5c29a926d6e /i386/intel
parent     039405897aa8245990bc6249e58a6595debcc21e (diff)
2008-11-10 Samuel Thibault <samuel.thibault@ens-lyon.org>
	[task #8135 --- ``PAE for GNU Mach'']
* i386/configfrag.ac: Add --enable-pae option, which defines PAE.
* i386/i386/i386asm.sym (PDPSHIFT, PDEMASK): New assembly macros.
* i386/i386/locore.S [PAE] (copyout_retry): Use page directory pointer
bits.
* i386/i386at/model_dep.c [PAE] (i386at_init): Set second initial 2MB
page. Enable PAE bit. Set cr3 to page directory pointer table instead
of page directory.
* i386/intel/pmap.c [PAE] (pmap_bootstrap, pmap_create): Allocate 4
pages for dirbase. Setup pdpbase.
[PAE] (pmap_destroy): Free 4 pages from dirbase. Free pdpbase.
* i386/intel/pmap.h [PAE] (pt_entry_t): Typedef to unsigned long long.
[PAE] (PDPSHIFT, PDPNUM, PDPMASK): New macros.
[PAE] (PDESHIFT, PDEMASK, PTEMASK): Set to PAE values.
[PAE] (lin2pdenum, NPDES): Make them take the page directory pointer
index into account too.
[PAE] (struct pmap): Add `pdpbase' member.
(set_dirbase): Remove macro, replaced by...
(set_pmap): New macro, taking a pmap instead of the dirbase.
(PMAP_ACTIVATE_USER): Use set_pmap instead of set_dirbase.
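(Note added for orientation; not part of the commit message.) Under PAE, a 32-bit linear address is decoded as 2 + 9 + 9 + 12 bits rather than the usual 10 + 10 + 12, which is what the new PDPSHIFT/PDESHIFT/PTESHIFT values and the widened lin2pdenum() express. A minimal stand-alone sketch using the macro values this change introduces (the example address is arbitrary):

/* Sketch only: mirrors the PAE constants added to i386/intel/pmap.h. */
#include <stdio.h>

#define PDPSHIFT 30          /* page directory pointer index: bits 31..30 */
#define PDPMASK  3
#define PDESHIFT 21          /* page directory index: bits 29..21 */
#define PDEMASK  0x1ff
#define PTESHIFT 12          /* page table index: bits 20..12 */
#define PTEMASK  0x1ff

/* With the PDPNUM page directories laid out contiguously, the commit folds
 * the PDP index into the directory index: lin2pdenum() masks with 0x7ff,
 * an 11-bit index (2 PDP bits + 9 PDE bits) into one 2048-entry array. */
#define lin2pdenum(a) (((a) >> PDESHIFT) & 0x7ff)

int main(void)
{
        unsigned long lin = 0xc0001234UL;    /* arbitrary example address */
        printf("pdp=%lu pde=%lu pte=%lu offset=0x%lx folded_pde=%lu\n",
               (lin >> PDPSHIFT) & PDPMASK,
               (lin >> PDESHIFT) & PDEMASK,
               (lin >> PTESHIFT) & PTEMASK,
               lin & 0xfff,
               (unsigned long) lin2pdenum(lin));
        return 0;
}

Because the four page directories are kept physically contiguous (see pmap_create in the diff below), masking with 0x7ff lets lin2pdenum() and NPDES treat them as a single array, folding the page directory pointer index into the directory index.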
Diffstat (limited to 'i386/intel')
-rw-r--r--   i386/intel/pmap.c   36
-rw-r--r--   i386/intel/pmap.h   39
2 files changed, 66 insertions, 9 deletions
diff --git a/i386/intel/pmap.c b/i386/intel/pmap.c
index b08252e..a648592 100644
--- a/i386/intel/pmap.c
+++ b/i386/intel/pmap.c
@@ -579,7 +579,21 @@ void pmap_bootstrap()
 	/*
 	 *	Allocate and clear a kernel page directory.
 	 */
+#if PAE
+	{
+		vm_offset_t addr;
+		init_alloc_aligned(PDPNUM * INTEL_PGBYTES, &addr);
+		kernel_pmap->dirbase = kernel_page_dir = (pt_entry_t*)addr;
+	}
+	kernel_pmap->pdpbase = (pt_entry_t*)pmap_grab_page();
+	{
+		int i;
+		for (i = 0; i < PDPNUM; i++)
+			kernel_pmap->pdpbase[i] = pa_to_pte((vm_offset_t) kernel_pmap->dirbase + i * INTEL_PGBYTES) | INTEL_PTE_VALID;
+	}
+#else	/* PAE */
 	kernel_pmap->dirbase = kernel_page_dir = (pt_entry_t*)pmap_grab_page();
+#endif	/* PAE */
 	{
 		int i;
 		for (i = 0; i < NPDES; i++)
@@ -859,11 +873,24 @@ pmap_t pmap_create(size)
 		panic("pmap_create");
 
 	if (kmem_alloc_wired(kernel_map,
-			     (vm_offset_t *)&p->dirbase, INTEL_PGBYTES)
+			     (vm_offset_t *)&p->dirbase, PDPNUM * INTEL_PGBYTES)
 					!= KERN_SUCCESS)
 		panic("pmap_create");
 
-	memcpy(p->dirbase, kernel_page_dir, INTEL_PGBYTES);
+	memcpy(p->dirbase, kernel_page_dir, PDPNUM * INTEL_PGBYTES);
+
+#if PAE
+	if (kmem_alloc_wired(kernel_map,
+			     (vm_offset_t *)&p->pdpbase, INTEL_PGBYTES)
+					!= KERN_SUCCESS)
+		panic("pmap_create");
+	{
+		int i;
+		for (i = 0; i < PDPNUM; i++)
+			p->pdpbase[i] = pa_to_pte(kvtophys((vm_offset_t) p->dirbase + i * INTEL_PGBYTES)) | INTEL_PTE_VALID;
+	}
+#endif	/* PAE */
+
 	p->ref_count = 1;
 
 	simple_lock_init(&p->lock);
@@ -927,7 +954,10 @@ void pmap_destroy(p)
 		vm_object_unlock(pmap_object);
 	    }
 	}
 
-	kmem_free(kernel_map, (vm_offset_t)p->dirbase, INTEL_PGBYTES);
+	kmem_free(kernel_map, (vm_offset_t)p->dirbase, PDPNUM * INTEL_PGBYTES);
+#if PAE
+	kmem_free(kernel_map, (vm_offset_t)p->pdpbase, INTEL_PGBYTES);
+#endif	/* PAE */
 	zfree(pmap_zone, (vm_offset_t) p);
 }
diff --git a/i386/intel/pmap.h b/i386/intel/pmap.h
index c1d741b..7354a0f 100644
--- a/i386/intel/pmap.h
+++ b/i386/intel/pmap.h
@@ -65,21 +65,41 @@
  *	i386/i486 Page Table Entry
  */
 
+#if PAE
+typedef unsigned long long	pt_entry_t;
+#else	/* PAE */
 typedef unsigned int	pt_entry_t;
+#endif	/* PAE */
 #define PT_ENTRY_NULL	((pt_entry_t *) 0)
 
 #endif	/* __ASSEMBLER__ */
 
 #define INTEL_OFFMASK	0xfff	/* offset within page */
+#if PAE
+#define PDPSHIFT	30	/* page directory pointer */
+#define PDPNUM		4	/* number of page directory pointers */
+#define PDPMASK		3	/* mask for page directory pointer index */
+#define PDESHIFT	21	/* page descriptor shift */
+#define PDEMASK		0x1ff	/* mask for page descriptor index */
+#define PTESHIFT	12	/* page table shift */
+#define PTEMASK		0x1ff	/* mask for page table index */
+#else	/* PAE */
+#define PDPNUM		1	/* number of page directory pointers */
 #define PDESHIFT	22	/* page descriptor shift */
 #define PDEMASK		0x3ff	/* mask for page descriptor index */
 #define PTESHIFT	12	/* page table shift */
 #define PTEMASK		0x3ff	/* mask for page table index */
+#endif	/* PAE */
 
 /*
  *	Convert linear offset to page descriptor index
  */
+#if PAE
+/* Making it include the page directory pointer table index too */
+#define lin2pdenum(a)	(((a) >> PDESHIFT) & 0x7ff)
+#else
 #define lin2pdenum(a)	(((a) >> PDESHIFT) & PDEMASK)
+#endif
 
 /*
 *	Convert page descriptor index to linear address
@@ -92,7 +112,7 @@ typedef unsigned int	pt_entry_t;
 #define ptenum(a)	(((a) >> PTESHIFT) & PTEMASK)
 
 #define NPTES	(intel_ptob(1)/sizeof(pt_entry_t))
-#define NPDES	(intel_ptob(1)/sizeof(pt_entry_t))
+#define NPDES	(PDPNUM * (intel_ptob(1)/sizeof(pt_entry_t)))
 
 /*
 *	Hardware pte bit definitions (to be used directly on the ptes
@@ -124,7 +144,10 @@ typedef	volatile long	cpu_set;	/* set of CPUs - must be <= 32 */
 					/* changed by other processors */
 
 struct pmap {
-	pt_entry_t	*dirbase;	/* page directory pointer register */
+	pt_entry_t	*dirbase;	/* page directory table */
+#if PAE
+	pt_entry_t	*pdpbase;	/* page directory pointer table */
+#endif	/* PAE */
 	int		ref_count;	/* reference count */
 	decl_simple_lock_data(,lock)
 					/* lock on map */
@@ -136,7 +159,11 @@ typedef struct pmap	*pmap_t;
 
 #define PMAP_NULL	((pmap_t) 0)
 
-#define	set_dirbase(dirbase)	set_cr3(dirbase)
+#if PAE
+#define	set_pmap(pmap)	set_cr3(kvtophys((vm_offset_t)(pmap)->pdpbase))
+#else	/* PAE */
+#define	set_pmap(pmap)	set_cr3(kvtophys((vm_offset_t)(pmap)->dirbase))
+#endif	/* PAE */
 
 #if	NCPUS > 1
 /*
@@ -234,7 +261,7 @@ pt_entry_t *pmap_pte(pmap_t pmap, vm_offset_t addr);
 	/*								\
 	 *	If this is the kernel pmap, switch to its page tables.	\
 	 */								\
-	set_dirbase(kvtophys((vm_offset_t)tpmap->dirbase));		\
+	set_pmap(tpmap);						\
     }									\
     else {								\
 	/*								\
@@ -252,7 +279,7 @@ pt_entry_t *pmap_pte(pmap_t pmap, vm_offset_t addr);
 	 *	No need to invalidate the TLB - the entire user pmap	\
 	 *	will be invalidated by reloading dirbase.		\
 	 */								\
-	set_dirbase(kvtophys((vm_offset_t)tpmap->dirbase));		\
+	set_pmap(tpmap);						\
 									\
 	/*								\
 	 *	Mark that this cpu is using the pmap.			\
 	 */								\
@@ -341,7 +368,7 @@ pt_entry_t *pmap_pte(pmap_t pmap, vm_offset_t addr);
 #define PMAP_ACTIVATE_USER(pmap, th, my_cpu)	{			\
 	register pmap_t		tpmap = (pmap);				\
 									\
-	set_dirbase(kvtophys((vm_offset_t)tpmap->dirbase));		\
+	set_pmap(tpmap);						\
 	if (tpmap != kernel_pmap) {					\
 	    tpmap->cpus_using = TRUE;					\
 	}								\
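(Illustration only; not kernel code and not part of the commit.) A minimal user-space sketch of the invariant the PAE branch of pmap_create() establishes above: dirbase is PDPNUM physically contiguous page directory pages, and each entry of the page directory pointer table (pdpbase) points at one of them, which is why set_pmap() only has to load cr3 with the PDPT. kvtophys() and pa_to_pte() are mocked with an identity mapping here; the real kernel versions are the ones used in the diff.

/* Toy illustration of pmap_create()'s PAE wiring of pdpbase to dirbase. */
#include <assert.h>
#include <stdint.h>
#include <stdlib.h>

#define PDPNUM          4
#define INTEL_PGBYTES   4096
#define INTEL_PTE_VALID 0x1ULL

typedef uint64_t pt_entry_t;              /* 64-bit PTEs under PAE */

/* Mocks: identity "physical" mapping, good enough for the illustration. */
static uint64_t kvtophys(uintptr_t va)    { return (uint64_t) va; }
static pt_entry_t pa_to_pte(uint64_t pa)  { return pa & ~0xfffULL; }

int main(void)
{
        /* dirbase: PDPNUM page directories, contiguous and page-aligned. */
        pt_entry_t *dirbase = aligned_alloc(INTEL_PGBYTES, PDPNUM * INTEL_PGBYTES);
        pt_entry_t pdpbase[PDPNUM];        /* the PDPT: one entry per 1 GB */
        int i;

        if (dirbase == NULL)
                return 1;

        for (i = 0; i < PDPNUM; i++)
                pdpbase[i] = pa_to_pte(kvtophys((uintptr_t) dirbase + i * INTEL_PGBYTES))
                             | INTEL_PTE_VALID;

        /* Each PDPT entry must point at the i-th page of the directory. */
        for (i = 0; i < PDPNUM; i++)
                assert((pdpbase[i] & ~0xfffULL)
                       == (uint64_t) (uintptr_t) (dirbase + i * (INTEL_PGBYTES / sizeof(pt_entry_t))));

        free(dirbase);
        return 0;
}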