summary | refs | log | tree | commit | diff
path: root/i386
diff options
context:
space:
mode:
author    Samuel Thibault <samuel.thibault@ens-lyon.org>  2014-03-03 00:50:33 +0100
committer Samuel Thibault <samuel.thibault@ens-lyon.org>  2014-03-03 00:57:24 +0100
commit    b9a59e483225093dec4e28ee5841b5b7f5e73562 (patch)
tree      3b4b57251c769a90792e4cc10433863497635c89 /i386
parent    241317985705f7a4cc81286692322c23fce03104 (diff)
Keep two virtual pages as mapping windows to access physical memory
PCI devices expose their memory etc. way beyond last_phys_addr.  Userland
drivers opening /dev/mem need to open those too, even if phystokv() will
not work for them.

* i386/intel/pmap.h (pmap_mapwindow_t): New type.
(pmap_get_mapwindow, pmap_put_mapwindow): New prototypes.
(PMAP_NMAPWINDOWS): New macro.
* i386/intel/pmap.c (mapwindows): New array.
(pmap_get_mapwindow, pmap_put_mapwindow): New functions.
(pmap_bootstrap, pmap_virtual_space): Reserve virtual pages for the
mapping windows.
* i386/i386/phys.c: Include <i386/model_dep.h>.
(INTEL_PTE_W, INTEL_PTE_R): New macros.
(pmap_zero_page, pmap_copy_page, copy_to_phys, copy_from_phys): Use
`pmap_get_mapwindow' to temporarily map physical pages beyond
last_phys_addr.
Diffstat (limited to 'i386')
-rw-r--r--  i386/i386/phys.c  | 79
-rw-r--r--  i386/intel/pmap.c | 43
-rw-r--r--  i386/intel/pmap.h | 10
3 files changed, 127 insertions(+), 5 deletions(-)
diff --git a/i386/i386/phys.c b/i386/i386/phys.c
index ed4a309..d4bd6c3 100644
--- a/i386/i386/phys.c
+++ b/i386/i386/phys.c
@@ -37,8 +37,12 @@
#include <vm/vm_page.h>
#include <i386/pmap.h>
+#include <i386/model_dep.h>
#include <mach/machine/vm_param.h>
+#define INTEL_PTE_W(p) (INTEL_PTE_VALID | INTEL_PTE_WRITE | INTEL_PTE_REF | INTEL_PTE_MOD | pa_to_pte(p))
+#define INTEL_PTE_R(p) (INTEL_PTE_VALID | INTEL_PTE_REF | pa_to_pte(p))
+
/*
* pmap_zero_page zeros the specified (machine independent) page.
*/
@@ -47,7 +51,21 @@ pmap_zero_page(p)
vm_offset_t p;
{
assert(p != vm_page_fictitious_addr);
- memset((void *)phystokv(p), 0, PAGE_SIZE);
+ vm_offset_t v;
+ pmap_mapwindow_t *map;
+
+ if (p >= phys_last_addr)
+ {
+ map = pmap_get_mapwindow(INTEL_PTE_W(p));
+ v = map->vaddr;
+ }
+ else
+ v = phystokv(p);
+
+ memset((void*) v, 0, PAGE_SIZE);
+
+ if (p >= phys_last_addr)
+ pmap_put_mapwindow(map);
}
/*
@@ -57,10 +75,33 @@ void
pmap_copy_page(src, dst)
vm_offset_t src, dst;
{
+ vm_offset_t src_addr_v, dst_addr_v;
+ pmap_mapwindow_t *src_map, *dst_map;
assert(src != vm_page_fictitious_addr);
assert(dst != vm_page_fictitious_addr);
- memcpy((void *)phystokv(dst), (void *)phystokv(src), PAGE_SIZE);
+ if (src >= phys_last_addr)
+ {
+ src_map = pmap_get_mapwindow(INTEL_PTE_R(src));
+ src_addr_v = src_map->vaddr;
+ }
+ else
+ src_addr_v = phystokv(src);
+
+ if (dst >= phys_last_addr)
+ {
+ dst_map = pmap_get_mapwindow(INTEL_PTE_W(dst));
+ dst_addr_v = dst_map->vaddr;
+ }
+ else
+ dst_addr_v = phystokv(dst);
+
+ memcpy((void *) dst_addr_v, (void *) src_addr_v, PAGE_SIZE);
+
+ if (src >= phys_last_addr)
+ pmap_put_mapwindow(src_map);
+ if (dst >= phys_last_addr)
+ pmap_put_mapwindow(dst_map);
}
/*
@@ -73,8 +114,23 @@ copy_to_phys(src_addr_v, dst_addr_p, count)
vm_offset_t src_addr_v, dst_addr_p;
int count;
{
+ vm_offset_t dst_addr_v;
+ pmap_mapwindow_t *dst_map;
assert(dst_addr_p != vm_page_fictitious_addr);
- memcpy((void *)phystokv(dst_addr_p), (void *)src_addr_v, count);
+ assert(pa_to_pte(dst_addr_p + count-1) == pa_to_pte(dst_addr_p));
+
+ if (dst_addr_p >= phys_last_addr)
+ {
+ dst_map = pmap_get_mapwindow(INTEL_PTE_W(dst_addr_p));
+ dst_addr_v = dst_map->vaddr;
+ }
+ else
+ dst_addr_v = phystokv(dst_addr_p);
+
+ memcpy((void *)dst_addr_v, (void *)src_addr_v, count);
+
+ if (dst_addr_p >= phys_last_addr)
+ pmap_put_mapwindow(dst_map);
}
/*
@@ -88,8 +144,23 @@ copy_from_phys(src_addr_p, dst_addr_v, count)
vm_offset_t src_addr_p, dst_addr_v;
int count;
{
+ vm_offset_t src_addr_v;
+ pmap_mapwindow_t *src_map;
assert(src_addr_p != vm_page_fictitious_addr);
- memcpy((void *)dst_addr_v, (void *)phystokv(src_addr_p), count);
+ assert(pa_to_pte(src_addr_p + count-1) == pa_to_pte(src_addr_p));
+
+ if (src_addr_p >= phys_last_addr)
+ {
+ src_map = pmap_get_mapwindow(INTEL_PTE_R(src_addr_p));
+ src_addr_v = src_map->vaddr;
+ }
+ else
+ src_addr_v = phystokv(src_addr_p);
+
+ memcpy((void *)dst_addr_v, (void *)src_addr_v, count);
+
+ if (src_addr_p >= phys_last_addr)
+ pmap_put_mapwindow(src_map);
}
/*
diff --git a/i386/intel/pmap.c b/i386/intel/pmap.c
index 2943d26..a3d9630 100644
--- a/i386/intel/pmap.c
+++ b/i386/intel/pmap.c
@@ -419,6 +419,12 @@ unsigned int inuse_ptepages_count = 0; /* debugging */
*/
pt_entry_t *kernel_page_dir;
+/*
+ * Two slots for temporary physical page mapping, to allow for
+ * physical-to-physical transfers.
+ */
+static pmap_mapwindow_t mapwindows[PMAP_NMAPWINDOWS];
+
static inline pt_entry_t *
pmap_pde(const pmap_t pmap, vm_offset_t addr)
{
@@ -774,6 +780,12 @@ void pmap_bootstrap(void)
}
for (; pte < ptable+NPTES; pte++)
{
+ if (va >= kernel_virtual_end - PMAP_NMAPWINDOWS * PAGE_SIZE)
+ {
+ pmap_mapwindow_t *win = &mapwindows[atop(va - (kernel_virtual_end - PMAP_NMAPWINDOWS * PAGE_SIZE))];
+ win->entry = pte;
+ win->vaddr = va;
+ }
WRITE_PTE(pte, 0);
va += INTEL_PGBYTES;
}
@@ -884,12 +896,41 @@ void pmap_clear_bootstrap_pagetable(pt_entry_t *base) {
}
#endif /* MACH_PV_PAGETABLES */
+/*
+ * Create a temporary mapping for a given physical entry
+ *
+ * This can be used to access physical pages which are not mapped 1:1 by
+ * phystokv().
+ */
+pmap_mapwindow_t *pmap_get_mapwindow(pt_entry_t entry)
+{
+ pmap_mapwindow_t *map;
+
+ /* Find an empty one. */
+ for (map = &mapwindows[0]; map < &mapwindows[sizeof (mapwindows) / sizeof (*mapwindows)]; map++)
+ if (!(*map->entry))
+ break;
+ assert(map < &mapwindows[sizeof (mapwindows) / sizeof (*mapwindows)]);
+
+ WRITE_PTE(map->entry, entry);
+ return map;
+}
+
+/*
+ * Destroy a temporary mapping for a physical entry
+ */
+void pmap_put_mapwindow(pmap_mapwindow_t *map)
+{
+ WRITE_PTE(map->entry, 0);
+ PMAP_UPDATE_TLBS(kernel_pmap, map->vaddr, map->vaddr + PAGE_SIZE);
+}
+
void pmap_virtual_space(startp, endp)
vm_offset_t *startp;
vm_offset_t *endp;
{
*startp = kernel_virtual_start;
- *endp = kernel_virtual_end;
+ *endp = kernel_virtual_end - PMAP_NMAPWINDOWS * PAGE_SIZE;
}
/*
diff --git a/i386/intel/pmap.h b/i386/intel/pmap.h
index 047a384..382cd5f 100644
--- a/i386/intel/pmap.h
+++ b/i386/intel/pmap.h
@@ -192,6 +192,16 @@ extern void pmap_clear_bootstrap_pagetable(pt_entry_t *addr);
#define set_pmap(pmap) set_cr3(kvtophys((vm_offset_t)(pmap)->dirbase))
#endif /* PAE */
+typedef struct {
+ pt_entry_t *entry;
+ vm_offset_t vaddr;
+} pmap_mapwindow_t;
+
+extern pmap_mapwindow_t *pmap_get_mapwindow(pt_entry_t entry);
+extern void pmap_put_mapwindow(pmap_mapwindow_t *map);
+
+#define PMAP_NMAPWINDOWS 2
+
#if NCPUS > 1
/*
* List of cpus that are actively using mapped memory. Any