summaryrefslogtreecommitdiff
path: root/i386
diff options
context:
space:
mode:
Diffstat (limited to 'i386')
-rw-r--r--i386/i386/pmap.h3
-rw-r--r--i386/intel/pmap.c143
-rw-r--r--i386/intel/pmap.h39
-rw-r--r--i386/intel/read_fault.c3
4 files changed, 5 insertions, 183 deletions
diff --git a/i386/i386/pmap.h b/i386/i386/pmap.h
index 28b8cea..a989923 100644
--- a/i386/i386/pmap.h
+++ b/i386/i386/pmap.h
@@ -23,8 +23,5 @@
* any improvements or extensions that they make and grant Carnegie Mellon
* the rights to redistribute these changes.
*/
-/*
- * Now using shared pmap module for i386 and i860.
- */
#include <intel/pmap.h>
diff --git a/i386/intel/pmap.c b/i386/intel/pmap.c
index 3b0f952..688406c 100644
--- a/i386/intel/pmap.c
+++ b/i386/intel/pmap.c
@@ -28,7 +28,7 @@
* Author: Avadis Tevanian, Jr., Michael Wayne Young
* (These guys wrote the Vax version)
*
- * Physical Map management code for Intel i386, i486, and i860.
+ * Physical Map management code for Intel i386 and i486.
*
* Manages physical address maps.
*
@@ -75,9 +75,6 @@
#include <mach/machine/vm_param.h>
#include <machine/thread.h>
#include <i386/cpu_number.h>
-#if i860
-#include <i860ipsc/nodehw.h>
-#endif
#ifdef ORC
#define OLIVETTICACHE 1
@@ -372,17 +369,9 @@ lock_data_t pmap_system_lock;
#define MAX_TBIS_SIZE 32 /* > this -> TBIA */ /* XXX */
-#if i860
-/* Do a data cache flush until we find the caching bug XXX prp */
-#define INVALIDATE_TLB(s, e) { \
- flush(); \
- flush_tlb(); \
-}
-#else /* i860 */
#define INVALIDATE_TLB(s, e) { \
flush_tlb(); \
}
-#endif /* i860 */
#if NCPUS > 1
@@ -450,13 +439,6 @@ void pmap_remove_range(); /* forward */
void signal_cpus(); /* forward */
#endif /* NCPUS > 1 */
-#if i860
-/*
- * Paging flag
- */
-int paging_enabled = 0;
-#endif
-
static inline pt_entry_t *
pmap_pde(pmap_t pmap, vm_offset_t addr)
{
@@ -549,9 +531,6 @@ vm_offset_t pmap_map(virt, start, end, prot)
* Useful for mapping memory outside the range
* [phys_first_addr, phys_last_addr) (i.e., devices).
* Otherwise like pmap_map.
-#if i860
- * Sets no-cache bit.
-#endif
*/
vm_offset_t pmap_map_bd(virt, start, end, prot)
register vm_offset_t virt;
@@ -562,11 +541,7 @@ vm_offset_t pmap_map_bd(virt, start, end, prot)
register pt_entry_t template;
register pt_entry_t *pte;
- template = pa_to_pte(start)
-#if i860
- | INTEL_PTE_NCACHE
-#endif
- | INTEL_PTE_VALID;
+ template = pa_to_pte(start) | INTEL_PTE_VALID;
if (prot & VM_PROT_WRITE)
template |= INTEL_PTE_WRITE;
@@ -703,89 +678,6 @@ void pmap_bootstrap()
}
}
-#if i860
-#error probably doesnt work anymore
- XXX move to architecture-specific code just after the pmap_bootstrap call.
-
- /* kvtophys should now work in phys range */
-
- /*
- * Mark page table pages non-cacheable
- */
-
- pt_pte = (pt_entry_t *)pte_to_pa(*(kpde + pdenum(sva))) + ptenum(sva);
-
- for (va = load_start; va < tva; va += INTEL_PGBYTES*NPTES) {
- /* Mark page table non-cacheable */
- *pt_pte |= INTEL_PTE_NCACHE;
- pt_pte++;
- }
-
- /*
- * Map I/O space
- */
-
- ppde = kpde;
- ppde += pdenum(IO_BASE);
-
- if (pte_to_pa(*ppde) == 0) {
- /* This pte has not been allocated */
- ppte = (pt_entry_t *)kvtophys(virtual_avail);
- ptend = ppte + NPTES;
- virtual_avail = phystokv((vm_offset_t)ptend);
- *ppde = pa_to_pte((vm_offset_t)ppte)
- | INTEL_PTE_VALID
- | INTEL_PTE_WRITE;
- pte = ptend;
-
- /* Mark page table non-cacheable */
- *pt_pte |= INTEL_PTE_NCACHE;
- pt_pte++;
-
- bzero((char *)ppte, INTEL_PGBYTES);
- } else {
- ppte = (pt_entry_t *)(*ppde); /* first pte of page */
- }
- *ppde |= INTEL_PTE_USER;
-
-
- WRITE_PTE(ppte + ptenum(FIFO_ADDR),
- pa_to_pte(FIFO_ADDR_PH)
- | INTEL_PTE_VALID | INTEL_PTE_WRITE | INTEL_PTE_NCACHE);
-
- WRITE_PTE(ppte + ptenum(FIFO_ADDR + XEOD_OFF),
- pa_to_pte(FIFO_ADDR_PH + XEOD_OFF_PH)
- | INTEL_PTE_VALID | INTEL_PTE_WRITE | INTEL_PTE_NCACHE);
-
-/* XXX Allowed user access to control reg - cfj */
- WRITE_PTE(ppte + ptenum(CSR_ADDR),
- pa_to_pte(CSR_ADDR_PH)
- | INTEL_PTE_VALID | INTEL_PTE_WRITE | INTEL_PTE_NCACHE | INTEL_PTE_USER);
-
-/* XXX Allowed user access to perf reg - cfj */
- WRITE_PTE(ppte + ptenum(PERFCNT_ADDR),
- pa_to_pte(PERFCNT_ADDR_PH)
- | INTEL_PTE_VALID | INTEL_PTE_USER | INTEL_PTE_NCACHE | INTEL_PTE_USER);
-
- WRITE_PTE(ppte + ptenum(UART_ADDR),
- pa_to_pte(UART_ADDR_PH)
- | INTEL_PTE_VALID | INTEL_PTE_WRITE | INTEL_PTE_NCACHE);
-
- WRITE_PTE(ppte + ptenum(0xFFFFF000),
- pa_to_pte(avail_end)
- | INTEL_PTE_VALID | INTEL_PTE_WRITE);
- avail_start = kvtophys(virtual_avail);
-
-/*
- * Turn on mapping
- */
-
- flush_and_ctxsw(kernel_pmap->dirbase);
- paging_enabled = 1;
-
- printf("Paging enabled.\n");
-#endif
-
/* Architecture-specific code will turn on paging
soon after we return from here. */
}
@@ -939,21 +831,6 @@ pmap_page_table_page_alloc()
*/
bzero(phystokv(pa), PAGE_SIZE);
-#if i860
- /*
- * Mark the page table page(s) non-cacheable.
- */
- {
- int i = ptes_per_vm_page;
- pt_entry_t *pdp;
-
- pdp = pmap_pte(kernel_pmap, pa);
- do {
- *pdp |= INTEL_PTE_NCACHE;
- pdp++;
- } while (--i > 0);
- }
-#endif
return pa;
}
@@ -1638,12 +1515,6 @@ Retry:
pdp++;
ptp += INTEL_PGBYTES;
} while (--i > 0);
-#if i860
- /*
- * Flush the data cache.
- */
- flush();
-#endif /* i860 */
/*
* Now, get the address of the page-table entry.
@@ -2541,16 +2412,6 @@ void pmap_update_interrupt()
}
#endif /* NCPUS > 1 */
-#if i860 /* akp */
-void set_dirbase(dirbase)
- register vm_offset_t dirbase;
-{
- /*flush();*/
- /*flush_tlb();*/
- flush_and_ctxsw(dirbase);
-}
-#endif /* i860 */
-
#ifdef i386
/* Unmap page 0 to trap NULL references. */
void
diff --git a/i386/intel/pmap.h b/i386/intel/pmap.h
index 1ea84bf..25ccc1a 100644
--- a/i386/intel/pmap.h
+++ b/i386/intel/pmap.h
@@ -58,20 +58,9 @@
#define round_intel_to_vm(x) round_i386_to_vm(x)
#define vm_to_intel(x) vm_to_i386(x)
#endif /* i386 */
-#if i860
-#define INTEL_PGBYTES I860_PGBYTES
-#define INTEL_PGSHIFT I860_PGSHIFT
-#define intel_btop(x) i860_btop(x)
-#define intel_ptob(x) i860_ptob(x)
-#define intel_round_page(x) i860_round_page(x)
-#define intel_trunc_page(x) i860_trunc_page(x)
-#define trunc_intel_to_vm(x) trunc_i860_to_vm(x)
-#define round_intel_to_vm(x) round_i860_to_vm(x)
-#define vm_to_intel(x) vm_to_i860(x)
-#endif /* i860 */
/*
- * i386/i486/i860 Page Table Entry
+ * i386/i486 Page Table Entry
*/
typedef unsigned int pt_entry_t;
@@ -108,11 +97,7 @@ typedef unsigned int pt_entry_t;
* without using the bit fields).
*/
-#if i860
-#define INTEL_PTE_valid 0x00000001
-#else
#define INTEL_PTE_VALID 0x00000001
-#endif
#define INTEL_PTE_WRITE 0x00000002
#define INTEL_PTE_USER 0x00000004
#define INTEL_PTE_WTHRU 0x00000008
@@ -122,22 +107,6 @@ typedef unsigned int pt_entry_t;
#define INTEL_PTE_WIRED 0x00000200
#define INTEL_PTE_PFN 0xfffff000
-#if i860
-#if NOCACHE
-#define INTEL_PTE_VALID (INTEL_PTE_valid \
- |INTEL_PTE_WTHRU \
- |INTEL_PTE_NCACHE \
- |INTEL_PTE_REF \
- |INTEL_PTE_MOD \
- )
-#else /* NOCACHE */
-#define INTEL_PTE_VALID (INTEL_PTE_valid \
- |INTEL_PTE_REF \
- |INTEL_PTE_MOD \
- )
-#endif /* NOCACHE */
-#endif /* i860 */
-
#define pa_to_pte(a) ((a) & INTEL_PTE_PFN)
#define pte_to_pa(p) ((p) & INTEL_PTE_PFN)
#define pte_increment_pa(p) ((p) += INTEL_OFFMASK+1)
@@ -164,11 +133,7 @@ typedef struct pmap *pmap_t;
#define PMAP_NULL ((pmap_t) 0)
-#if i860
-/*#define set_dirbase(dirbase) flush_and_ctxsw(dirbase)*//*akp*/
-#else
#define set_dirbase(dirbase) set_cr3(dirbase)
-#endif
#if NCPUS > 1
/*
@@ -202,7 +167,7 @@ extern pmap_t kernel_pmap;
#endif /* NCPUS > 1 */
/*
- * Machine dependent routines that are used only for i386/i486/i860.
+ * Machine dependent routines that are used only for i386/i486.
*/
pt_entry_t *pmap_pte(pmap_t pmap, vm_offset_t addr);
diff --git a/i386/intel/read_fault.c b/i386/intel/read_fault.c
index d0c03e8..eb640cc 100644
--- a/i386/intel/read_fault.c
+++ b/i386/intel/read_fault.c
@@ -36,8 +36,7 @@
/*
* Expansion of vm_fault for read fault in kernel mode.
* Must enter the mapping as writable, since the i386
- * (and i860 in i386 compatability mode) ignores write
- * protection in kernel mode.
+ * ignores write protection in kernel mode.
*/
kern_return_t
intel_read_fault(map, vaddr)