Bug Summary

File:obj-scan-build/../i386/intel/pmap.c
Location:line 569, column 6
Description:The left operand of '!=' is a garbage value

Annotated Source Code

1/*
2 * Mach Operating System
3 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
4 * All Rights Reserved.
5 *
6 * Permission to use, copy, modify and distribute this software and its
7 * documentation is hereby granted, provided that both the copyright
8 * notice and this permission notice appear in all copies of the
9 * software, derivative works or modified versions, and any portions
10 * thereof, and that both notices appear in supporting documentation.
11 *
12 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
13 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
14 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
15 *
16 * Carnegie Mellon requests users of this software to return to
17 *
18 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
19 * School of Computer Science
20 * Carnegie Mellon University
21 * Pittsburgh PA 15213-3890
22 *
23 * any improvements or extensions that they make and grant Carnegie Mellon
24 * the rights to redistribute these changes.
25 */
26/*
27 * File: pmap.c
28 * Author: Avadis Tevanian, Jr., Michael Wayne Young
29 * (These guys wrote the Vax version)
30 *
31 * Physical Map management code for Intel i386, and i486.
32 *
33 * Manages physical address maps.
34 *
35 * In addition to hardware address maps, this
36 * module is called upon to provide software-use-only
37 * maps which may or may not be stored in the same
38 * form as hardware maps. These pseudo-maps are
39 * used to store intermediate results from copy
40 * operations to and from address spaces.
41 *
42 * Since the information managed by this module is
43 * also stored by the logical address mapping module,
44 * this module may throw away valid virtual-to-physical
45 * mappings at almost any time. However, invalidations
46 * of virtual-to-physical mappings must be done as
47 * requested.
48 *
49 * In order to cope with hardware architectures which
50 * make virtual-to-physical map invalidates expensive,
51 * this module may delay invalidate or reduced protection
52 * operations until such time as they are actually
53 * necessary. This module is given full information as
54 * to which processors are currently using which maps,
55 * and to when physical maps must be made correct.
56 */
57
58#include <string.h>
59
60#include <mach/machine/vm_types.h>
61
62#include <mach/boolean.h>
63#include <kern/debug.h>
64#include <kern/printf.h>
65#include <kern/thread.h>
66#include <kern/slab.h>
67
68#include <kern/lock.h>
69
70#include <vm/pmap.h>
71#include <vm/vm_map.h>
72#include <vm/vm_kern.h>
73#include <i3861/vm_param.h>
74#include <mach/vm_prot.h>
75#include <vm/vm_object.h>
76#include <vm/vm_page.h>
77#include <vm/vm_user.h>
78
79#include <mach/machine/vm_param.h>
80#include <mach/xen.h>
81#include <machine/thread.h>
82#include <i3861/cpu_number.h>
83#include <i3861/proc_reg.h>
84#include <i3861/locore.h>
85#include <i3861/model_dep.h>
86
87#ifdef MACH_PSEUDO_PHYS
88#define WRITE_PTE(pte_p, pte_entry)*(pte_p) = pte_entry?({ vm_offset_t __a = (vm_offset_t) (pte_entry
); (((pt_entry_t) ((mfn_list[(((vm_size_t)(__a)) >> 12)
]))) << 12) | (__a & ((1 << 12)-1)); }):0;
*(pte_p) = pte_entry?pa_to_ma(pte_entry)({ vm_offset_t __a = (vm_offset_t) (pte_entry); (((pt_entry_t
) ((mfn_list[(((vm_size_t)(__a)) >> 12)]))) << 12
) | (__a & ((1 << 12)-1)); })
:0;
89#else /* MACH_PSEUDO_PHYS */
90#define WRITE_PTE(pte_p, pte_entry)*(pte_p) = pte_entry?({ vm_offset_t __a = (vm_offset_t) (pte_entry
); (((pt_entry_t) ((mfn_list[(((vm_size_t)(__a)) >> 12)
]))) << 12) | (__a & ((1 << 12)-1)); }):0;
*(pte_p) = (pte_entry);
91#endif /* MACH_PSEUDO_PHYS */
92
93/*
94 * Private data structures.
95 */
96
97/*
98 * For each vm_page_t, there is a list of all currently
99 * valid virtual mappings of that page. An entry is
100 * a pv_entry_t; the list is the pv_table.
101 */
102
103typedef struct pv_entry {
104 struct pv_entry *next; /* next pv_entry */
105 pmap_t pmap; /* pmap where mapping lies */
106 vm_offset_t va; /* virtual address for mapping */
107} *pv_entry_t;
108
109#define PV_ENTRY_NULL((pv_entry_t) 0) ((pv_entry_t) 0)
110
111pv_entry_t pv_head_table; /* array of entries, one per page */
112
113/*
114 * pv_list entries are kept on a list that can only be accessed
115 * with the pmap system locked (at SPLVM, not in the cpus_active set).
116 * The list is refilled from the pv_list_cache if it becomes empty.
117 */
118pv_entry_t pv_free_list; /* free list at SPLVM */
119decl_simple_lock_data(, pv_free_list_lock)struct simple_lock_data_empty pv_free_list_lock;
120
121#define PV_ALLOC(pv_e){ ; if ((pv_e = pv_free_list) != 0) { pv_free_list = pv_e->
next; } ((void)(&pv_free_list_lock)); }
{ \
122 simple_lock(&pv_free_list_lock); \
123 if ((pv_e = pv_free_list) != 0) { \
124 pv_free_list = pv_e->next; \
125 } \
126 simple_unlock(&pv_free_list_lock)((void)(&pv_free_list_lock)); \
127}
128
129#define PV_FREE(pv_e){ ; pv_e->next = pv_free_list; pv_free_list = pv_e; ((void
)(&pv_free_list_lock)); }
{ \
130 simple_lock(&pv_free_list_lock); \
131 pv_e->next = pv_free_list; \
132 pv_free_list = pv_e; \
133 simple_unlock(&pv_free_list_lock)((void)(&pv_free_list_lock)); \
134}
135
136struct kmem_cache pv_list_cache; /* cache of pv_entry structures */
137
138/*
139 * Each entry in the pv_head_table is locked by a bit in the
140 * pv_lock_table. The lock bits are accessed by the physical
141 * address of the page they lock.
142 */
143
144char *pv_lock_table; /* pointer to array of bits */
145#define pv_lock_table_size(n)(((n)+8 -1)/8) (((n)+BYTE_SIZE8-1)/BYTE_SIZE8)
146
147/* Has pmap_init completed? */
148boolean_t pmap_initialized = FALSE((boolean_t) 0);
149
150/*
151 * Range of kernel virtual addresses available for kernel memory mapping.
152 * Does not include the virtual addresses used to map physical memory 1-1.
153 * Initialized by pmap_bootstrap.
154 */
155vm_offset_t kernel_virtual_start;
156vm_offset_t kernel_virtual_end;
157
158/*
159 * Index into pv_head table, its lock bits, and the modify/reference
160 * bits starting at phys_first_addr.
161 */
162#define pa_index(pa)((((vm_size_t)(pa - phys_first_addr)) >> 12)) (atop(pa - phys_first_addr)(((vm_size_t)(pa - phys_first_addr)) >> 12))
163
164#define pai_to_pvh(pai)(&pv_head_table[pai]) (&pv_head_table[pai])
165#define lock_pvh_pai(pai)(bit_lock(pai, pv_lock_table)) (bit_lock(pai, pv_lock_table))
166#define unlock_pvh_pai(pai)(bit_unlock(pai, pv_lock_table)) (bit_unlock(pai, pv_lock_table))
167
168/*
169 * Array of physical page attribites for managed pages.
170 * One byte per physical page.
171 */
172char *pmap_phys_attributes;
173
174/*
175 * Physical page attributes. Copy bits from PTE definition.
176 */
177#define PHYS_MODIFIED0x00000040 INTEL_PTE_MOD0x00000040 /* page modified */
178#define PHYS_REFERENCED0x00000020 INTEL_PTE_REF0x00000020 /* page referenced */
179
180/*
181 * Amount of virtual memory mapped by one
182 * page-directory entry.
183 */
184#define PDE_MAPPED_SIZE(((vm_offset_t)(1) << 21)) (pdenum2lin(1)((vm_offset_t)(1) << 21))
185
186/*
187 * We allocate page table pages directly from the VM system
188 * through this object. It maps physical memory.
189 */
190vm_object_t pmap_object = VM_OBJECT_NULL((vm_object_t) 0);
191
192/*
193 * Locking and TLB invalidation
194 */
195
196/*
197 * Locking Protocols:
198 *
199 * There are two structures in the pmap module that need locking:
200 * the pmaps themselves, and the per-page pv_lists (which are locked
201 * by locking the pv_lock_table entry that corresponds to the pv_head
202 * for the list in question.) Most routines want to lock a pmap and
203 * then do operations in it that require pv_list locking -- however
204 * pmap_remove_all and pmap_copy_on_write operate on a physical page
205 * basis and want to do the locking in the reverse order, i.e. lock
206 * a pv_list and then go through all the pmaps referenced by that list.
207 * To protect against deadlock between these two cases, the pmap_lock
208 * is used. There are three different locking protocols as a result:
209 *
210 * 1. pmap operations only (pmap_extract, pmap_access, ...) Lock only
211 * the pmap.
212 *
213 * 2. pmap-based operations (pmap_enter, pmap_remove, ...) Get a read
214 * lock on the pmap_lock (shared read), then lock the pmap
215 * and finally the pv_lists as needed [i.e. pmap lock before
216 * pv_list lock.]
217 *
218 * 3. pv_list-based operations (pmap_remove_all, pmap_copy_on_write, ...)
219 * Get a write lock on the pmap_lock (exclusive write); this
220 * also guaranteees exclusive access to the pv_lists. Lock the
221 * pmaps as needed.
222 *
223 * At no time may any routine hold more than one pmap lock or more than
224 * one pv_list lock. Because interrupt level routines can allocate
225 * mbufs and cause pmap_enter's, the pmap_lock and the lock on the
226 * kernel_pmap can only be held at splvm.
227 */
228
229#if NCPUS1 > 1
230/*
231 * We raise the interrupt level to splvm, to block interprocessor
232 * interrupts during pmap operations. We must take the CPU out of
233 * the cpus_active set while interrupts are blocked.
234 */
235#define SPLVM(spl)((void)(spl)) { \
236 spl = splvm(); \
237 i_bit_clear(cpu_number()(0), &cpus_active); \
238}
239
240#define SPLX(spl)((void)(spl)) { \
241 i_bit_set(cpu_number()(0), &cpus_active); \
242 splx(spl); \
243}
244
245/*
246 * Lock on pmap system
247 */
248lock_data_t pmap_system_lock;
249
250#define PMAP_READ_LOCK(pmap, spl)((void)(spl)) { \
251 SPLVM(spl)((void)(spl)); \
252 lock_read(&pmap_system_lock); \
253 simple_lock(&(pmap)->lock); \
254}
255
256#define PMAP_WRITE_LOCK(spl)((void)(spl)) { \
257 SPLVM(spl)((void)(spl)); \
258 lock_write(&pmap_system_lock); \
259}
260
261#define PMAP_READ_UNLOCK(pmap, spl)((void)(spl)) { \
262 simple_unlock(&(pmap)->lock)((void)(&(pmap)->lock)); \
263 lock_read_done(&pmap_system_lock)lock_done(&pmap_system_lock); \
264 SPLX(spl)((void)(spl)); \
265}
266
267#define PMAP_WRITE_UNLOCK(spl)((void)(spl)) { \
268 lock_write_done(&pmap_system_lock)lock_done(&pmap_system_lock); \
269 SPLX(spl)((void)(spl)); \
270}
271
272#define PMAP_WRITE_TO_READ_LOCK(pmap) { \
273 simple_lock(&(pmap)->lock); \
274 lock_write_to_read(&pmap_system_lock); \
275}
276
277#define LOCK_PVH(index) (lock_pvh_pai(index)(bit_lock(index, pv_lock_table)))
278
279#define UNLOCK_PVH(index) (unlock_pvh_pai(index)(bit_unlock(index, pv_lock_table)))
280
281#define PMAP_UPDATE_TLBS(pmap, s, e){ if ((pmap)->cpus_using) { hyp_mmuext_op_void(6); } } \
282{ \
283 cpu_set cpu_mask = 1 << cpu_number()(0); \
284 cpu_set users; \
285 \
286 /* Since the pmap is locked, other updates are locked */ \
287 /* out, and any pmap_activate has finished. */ \
288 \
289 /* find other cpus using the pmap */ \
290 users = (pmap)->cpus_using & ~cpu_mask; \
291 if (users) { \
292 /* signal them, and wait for them to finish */ \
293 /* using the pmap */ \
294 signal_cpus(users, (pmap), (s), (e)); \
295 while ((pmap)->cpus_using & cpus_active & ~cpu_mask) \
296 continue; \
297 } \
298 \
299 /* invalidate our own TLB if pmap is in use */ \
300 if ((pmap)->cpus_using & cpu_mask) { \
301 INVALIDATE_TLB((pmap), (s), (e))hyp_mmuext_op_void(6); \
302 } \
303}
304
305#else /* NCPUS > 1 */
306
307#define SPLVM(spl)((void)(spl)) ((void)(spl))
308#define SPLX(spl)((void)(spl)) ((void)(spl))
309
310#define PMAP_READ_LOCK(pmap, spl)((void)(spl)) SPLVM(spl)((void)(spl))
311#define PMAP_WRITE_LOCK(spl)((void)(spl)) SPLVM(spl)((void)(spl))
312#define PMAP_READ_UNLOCK(pmap, spl)((void)(spl)) SPLX(spl)((void)(spl))
313#define PMAP_WRITE_UNLOCK(spl)((void)(spl)) SPLX(spl)((void)(spl))
314#define PMAP_WRITE_TO_READ_LOCK(pmap)
315
316#define LOCK_PVH(index)
317#define UNLOCK_PVH(index)
318
319#define PMAP_UPDATE_TLBS(pmap, s, e){ if ((pmap)->cpus_using) { hyp_mmuext_op_void(6); } } { \
320 /* invalidate our own TLB if pmap is in use */ \
321 if ((pmap)->cpus_using) { \
322 INVALIDATE_TLB((pmap), (s), (e))hyp_mmuext_op_void(6); \
323 } \
324}
325
326#endif /* NCPUS > 1 */
327
328#define MAX_TBIS_SIZE32 32 /* > this -> TBIA */ /* XXX */
329
330#ifdef MACH_PV_PAGETABLES
331#if 1
332#define INVALIDATE_TLB(pmap, s, e)hyp_mmuext_op_void(6) hyp_mmuext_op_void(MMUEXT_TLB_FLUSH_LOCAL6)
333#else
334#define INVALIDATE_TLB(pmap, s, e)hyp_mmuext_op_void(6) do { \
335 if (__builtin_constant_p((e) - (s)) \
336 && (e) - (s) == PAGE_SIZE(1 << 12)) \
337 hyp_invlpg((pmap) == kernel_pmap ? kvtolin(s)((vm_offset_t)(s) - 0xC0000000UL + ((0xc0000000UL))) : (s)); \
338 else \
339 hyp_mmuext_op_void(MMUEXT_TLB_FLUSH_LOCAL6); \
340} while(0)
341#endif
342#else /* MACH_PV_PAGETABLES */
343#if 0
344/* It is hard to know when a TLB flush becomes less expensive than a bunch of
345 * invlpgs. But it surely is more expensive than just one invlpg. */
346#define INVALIDATE_TLB(pmap, s, e)hyp_mmuext_op_void(6) { \
347 if (__builtin_constant_p((e) - (s)) \
348 && (e) - (s) == PAGE_SIZE(1 << 12)) \
349 invlpg_linear(s); \
350 else \
351 flush_tlb()({ cr3 = ((cr3)); if (!hyp_mmuext_op_mfn(5, ((mfn_list[(((vm_size_t
)((cr3))) >> 12)])))) panic("set_cr3"); })
; \
352}
353#else
354#define INVALIDATE_TLB(pmap, s, e)hyp_mmuext_op_void(6) { \
355 (void) (pmap); \
356 (void) (s); \
357 (void) (e); \
358 flush_tlb()({ cr3 = ((cr3)); if (!hyp_mmuext_op_mfn(5, ((mfn_list[(((vm_size_t
)((cr3))) >> 12)])))) panic("set_cr3"); })
; \
359}
360#endif
361#endif /* MACH_PV_PAGETABLES */
362
363
364#if NCPUS1 > 1
365/*
366 * Structures to keep track of pending TLB invalidations
367 */
368
369#define UPDATE_LIST_SIZE 4
370
371struct pmap_update_item {
372 pmap_t pmap; /* pmap to invalidate */
373 vm_offset_t start; /* start address to invalidate */
374 vm_offset_t end; /* end address to invalidate */
375} ;
376
377typedef struct pmap_update_item *pmap_update_item_t;
378
379/*
380 * List of pmap updates. If the list overflows,
381 * the last entry is changed to invalidate all.
382 */
383struct pmap_update_list {
384 decl_simple_lock_data(, lock)struct simple_lock_data_empty lock;
385 int count;
386 struct pmap_update_item item[UPDATE_LIST_SIZE];
387} ;
388typedef struct pmap_update_list *pmap_update_list_t;
389
390struct pmap_update_list cpu_update_list[NCPUS1];
391
392#endif /* NCPUS > 1 */
393
394/*
395 * Other useful macros.
396 */
397#define current_pmap()((((active_threads[(0)])->task->map)->pmap)) (vm_map_pmap(current_thread()->task->map)(((active_threads[(0)])->task->map)->pmap))
398#define pmap_in_use(pmap, cpu)(((pmap)->cpus_using & (1 << (cpu))) != 0) (((pmap)->cpus_using & (1 << (cpu))) != 0)
399
400struct pmap kernel_pmap_store;
401pmap_t kernel_pmap;
402
403struct kmem_cache pmap_cache; /* cache of pmap structures */
404
405boolean_t pmap_debug = FALSE((boolean_t) 0); /* flag for debugging prints */
406
407#if 0
408int ptes_per_vm_page1; /* number of hardware ptes needed
409 to map one VM page. */
410#else
411#define ptes_per_vm_page1 1
412#endif
413
414unsigned int inuse_ptepages_count = 0; /* debugging */
415
416/*
417 * Pointer to the basic page directory for the kernel.
418 * Initialized by pmap_bootstrap().
419 */
420pt_entry_t *kernel_page_dir;
421
422/*
423 * Two slots for temporary physical page mapping, to allow for
424 * physical-to-physical transfers.
425 */
426static pmap_mapwindow_t mapwindows[PMAP_NMAPWINDOWS2];
427
428static inline pt_entry_t *
429pmap_pde(const pmap_t pmap, vm_offset_t addr)
430{
431 if (pmap == kernel_pmap)
432 addr = kvtolin(addr)((vm_offset_t)(addr) - 0xC0000000UL + ((0xc0000000UL)));
433 return &pmap->dirbase[lin2pdenum(addr)(((addr) >> 21) & 0x7ff)];
434}
435
436/*
437 * Given an offset and a map, compute the address of the
438 * pte. If the address is invalid with respect to the map
439 * then PT_ENTRY_NULL is returned (and the map may need to grow).
440 *
441 * This is only used internally.
442 */
443pt_entry_t *
444pmap_pte(const pmap_t pmap, vm_offset_t addr)
445{
446 pt_entry_t *ptp;
447 pt_entry_t pte;
448
449 if (pmap->dirbase == 0)
450 return(PT_ENTRY_NULL((pt_entry_t *) 0));
451 pte = *pmap_pde(pmap, addr);
452 if ((pte & INTEL_PTE_VALID0x00000001) == 0)
453 return(PT_ENTRY_NULL((pt_entry_t *) 0));
454 ptp = (pt_entry_t *)ptetokv(pte)(((vm_offset_t)(({ pt_entry_t __a = (pt_entry_t) ((pte) &
0x00007ffffffff000ULL); ((((unsigned long *) 0xF5800000UL)[__a
>> 12]) << 12) | (__a & ((1 << 12)-1))
; })) + 0xC0000000UL))
;
455 return(&ptp[ptenum(addr)(((addr) >> 12) & 0x1ff)]);
456}
457
458#define DEBUG_PTE_PAGE0 0
459
460#if DEBUG_PTE_PAGE0
461void ptep_check(ptep)
462 ptep_t ptep;
463{
464 pt_entry_t *pte, *epte;
465 int ctu, ctw;
466
467 /* check the use and wired counts */
468 if (ptep == PTE_PAGE_NULL)
469 return;
470 pte = pmap_pte(ptep->pmap, ptep->va);
471 epte = pte + INTEL_PGBYTES4096/sizeof(pt_entry_t);
472 ctu = 0;
473 ctw = 0;
474 while (pte < epte) {
475 if (pte->pfn != 0) {
476 ctu++;
477 if (pte->wired)
478 ctw++;
479 }
480 pte += ptes_per_vm_page1;
481 }
482
483 if (ctu != ptep->use_count || ctw != ptep->wired_count) {
484 printf("use %d wired %d - actual use %d wired %d\n",
485 ptep->use_count, ptep->wired_count, ctu, ctw);
486 panic("pte count");
487 }
488}
489#endif /* DEBUG_PTE_PAGE */
490
491/*
492 * Map memory at initialization. The physical addresses being
493 * mapped are not managed and are never unmapped.
494 *
495 * For now, VM is already on, we only need to map the
496 * specified memory.
497 */
498vm_offset_t pmap_map(virt, start, end, prot)
499 vm_offset_t virt;
500 vm_offset_t start;
501 vm_offset_t end;
502 int prot;
503{
504 int ps;
505
506 ps = PAGE_SIZE(1 << 12);
507 while (start < end) {
508 pmap_enter(kernel_pmap, virt, start, prot, FALSE((boolean_t) 0));
509 virt += ps;
510 start += ps;
511 }
512 return(virt);
513}
514
515/*
516 * Back-door routine for mapping kernel VM at initialization.
517 * Useful for mapping memory outside the range
518 * [phys_first_addr, phys_last_addr) (i.e., devices).
519 * Otherwise like pmap_map.
520 */
521vm_offset_t pmap_map_bd(virt, start, end, prot)
522 vm_offset_t virt;
523 vm_offset_t start;
524 vm_offset_t end;
525 vm_prot_t prot;
526{
527 pt_entry_t template;
528 pt_entry_t *pte;
529 int spl;
530#ifdef MACH_PV_PAGETABLES
531 int n, i = 0;
1
Variable 'n' declared without an initial value
532 struct mmu_update update[HYP_BATCH_MMU_UPDATES256];
533#endif /* MACH_PV_PAGETABLES */
534
535 template = pa_to_pte(start)((start) & 0x00007ffffffff000ULL)
536 | INTEL_PTE_NCACHE0x00000010|INTEL_PTE_WTHRU0x00000008
537 | INTEL_PTE_VALID0x00000001;
538 if (CPU_HAS_FEATURE(CPU_FEATURE_PGE)(cpu_features[(13) / 32] & (1 << ((13) % 32))))
2
Taking false branch
539 template |= INTEL_PTE_GLOBAL0x00000000;
540 if (prot & VM_PROT_WRITE((vm_prot_t) 0x02))
3
Taking false branch
541 template |= INTEL_PTE_WRITE0x00000002;
542
543 PMAP_READ_LOCK(pmap, spl)((void)(spl));
544 while (start < end) {
4
Loop condition is false. Execution continues on line 566
545 pte = pmap_pte(kernel_pmap, virt);
546 if (pte == PT_ENTRY_NULL((pt_entry_t *) 0))
547 panic("pmap_map_bd: Invalid kernel address\n");
548#ifdef MACH_PV_PAGETABLES
549 update[i].ptr = kv_to_ma(pte)({ vm_offset_t __a = (vm_offset_t) (((vm_offset_t)(pte) - 0xC0000000UL
)); (((pt_entry_t) ((mfn_list[(((vm_size_t)(__a)) >> 12
)]))) << 12) | (__a & ((1 << 12)-1)); })
;
550 update[i].val = pa_to_ma(template)({ vm_offset_t __a = (vm_offset_t) (template); (((pt_entry_t)
((mfn_list[(((vm_size_t)(__a)) >> 12)]))) << 12)
| (__a & ((1 << 12)-1)); })
;
551 i++;
552 if (i == HYP_BATCH_MMU_UPDATES256) {
553 hyp_mmu_update(kvtolin(&update)((vm_offset_t)(&update) - 0xC0000000UL + ((0xc0000000UL))
)
, i, kvtolin(&n)((vm_offset_t)(&n) - 0xC0000000UL + ((0xc0000000UL))), DOMID_SELF(0x7FF0U));
554 if (n != i)
555 panic("couldn't pmap_map_bd\n");
556 i = 0;
557 }
558#else /* MACH_PV_PAGETABLES */
559 WRITE_PTE(pte, template)*(pte) = template?({ vm_offset_t __a = (vm_offset_t) (template
); (((pt_entry_t) ((mfn_list[(((vm_size_t)(__a)) >> 12)
]))) << 12) | (__a & ((1 << 12)-1)); }):0;
560#endif /* MACH_PV_PAGETABLES */
561 pte_increment_pa(template)((template) += 0xfff +1);
562 virt += PAGE_SIZE(1 << 12);
563 start += PAGE_SIZE(1 << 12);
564 }
565#ifdef MACH_PV_PAGETABLES
566 if (i > HYP_BATCH_MMU_UPDATES256)
5
Taking false branch
567 panic("overflowed array in pmap_map_bd");
568 hyp_mmu_update(kvtolin(&update)((vm_offset_t)(&update) - 0xC0000000UL + ((0xc0000000UL))
)
, i, kvtolin(&n)((vm_offset_t)(&n) - 0xC0000000UL + ((0xc0000000UL))), DOMID_SELF(0x7FF0U));
569 if (n != i)
6
The left operand of '!=' is a garbage value
570 panic("couldn't pmap_map_bd\n");
571#endif /* MACH_PV_PAGETABLES */
572 PMAP_READ_UNLOCK(pmap, spl)((void)(spl));
573 return(virt);
574}
575
576/*
577 * Bootstrap the system enough to run with virtual memory.
578 * Allocate the kernel page directory and page tables,
579 * and direct-map all physical memory.
580 * Called with mapping off.
581 */
582void pmap_bootstrap(void)
583{
584 /*
585 * Mapping is turned off; we must reference only physical addresses.
586 * The load image of the system is to be mapped 1-1 physical = virtual.
587 */
588
589 /*
590 * Set ptes_per_vm_page for general use.
591 */
592#if 0
593 ptes_per_vm_page1 = PAGE_SIZE(1 << 12) / INTEL_PGBYTES4096;
594#endif
595
596 /*
597 * The kernel's pmap is statically allocated so we don't
598 * have to use pmap_create, which is unlikely to work
599 * correctly at this part of the boot sequence.
600 */
601
602 kernel_pmap = &kernel_pmap_store;
603
604#if NCPUS1 > 1
605 lock_init(&pmap_system_lock, FALSE((boolean_t) 0)); /* NOT a sleep lock */
606#endif /* NCPUS > 1 */
607
608 simple_lock_init(&kernel_pmap->lock);
609
610 kernel_pmap->ref_count = 1;
611
612 /*
613 * Determine the kernel virtual address range.
614 * It starts at the end of the physical memory
615 * mapped into the kernel address space,
616 * and extends to a stupid arbitrary limit beyond that.
617 */
618 kernel_virtual_start = phystokv(phys_last_addr)((vm_offset_t)(phys_last_addr) + 0xC0000000UL);
619 kernel_virtual_end = phystokv(phys_last_addr)((vm_offset_t)(phys_last_addr) + 0xC0000000UL) + VM_KERNEL_MAP_SIZE(224 * 1024 * 1024);
620
621 if (kernel_virtual_end < kernel_virtual_start
622 || kernel_virtual_end > VM_MAX_KERNEL_ADDRESS(0xF5800000UL - ((0xc0000000UL)) + 0xC0000000UL))
623 kernel_virtual_end = VM_MAX_KERNEL_ADDRESS(0xF5800000UL - ((0xc0000000UL)) + 0xC0000000UL);
624
625 /*
626 * Allocate and clear a kernel page directory.
627 */
628 /* Note: initial Xen mapping holds at least 512kB free mapped page.
629 * We use that for directly building our linear mapping. */
630#if PAE1
631 {
632 vm_offset_t addr;
633 init_alloc_aligned(PDPNUM4 * INTEL_PGBYTES4096, &addr);
634 kernel_pmap->dirbase = kernel_page_dir = (pt_entry_t*)phystokv(addr)((vm_offset_t)(addr) + 0xC0000000UL);
635 }
636 kernel_pmap->pdpbase = (pt_entry_t*)phystokv(pmap_grab_page())((vm_offset_t)(pmap_grab_page()) + 0xC0000000UL);
637 {
638 int i;
639 for (i = 0; i < PDPNUM4; i++)
640 WRITE_PTE(&kernel_pmap->pdpbase[i], pa_to_pte(_kvtophys((void *) kernel_pmap->dirbase + i * INTEL_PGBYTES)) | INTEL_PTE_VALID)*(&kernel_pmap->pdpbase[i]) = ((((vm_offset_t)((void *
) kernel_pmap->dirbase + i * 4096) - 0xC0000000UL)) & 0x00007ffffffff000ULL
) | 0x00000001?({ vm_offset_t __a = (vm_offset_t) (((((vm_offset_t
)((void *) kernel_pmap->dirbase + i * 4096) - 0xC0000000UL
)) & 0x00007ffffffff000ULL) | 0x00000001); (((pt_entry_t)
((mfn_list[(((vm_size_t)(__a)) >> 12)]))) << 12)
| (__a & ((1 << 12)-1)); }):0;
;
641 }
642#else /* PAE */
643 kernel_pmap->dirbase = kernel_page_dir = (pt_entry_t*)phystokv(pmap_grab_page())((vm_offset_t)(pmap_grab_page()) + 0xC0000000UL);
644#endif /* PAE */
645 {
646 int i;
647 for (i = 0; i < NPDES(4 * ((((unsigned long)(1)) << 12)/sizeof(pt_entry_t))); i++)
648 kernel_pmap->dirbase[i] = 0;
649 }
650
651#ifdef MACH_PV_PAGETABLES
652 /* We don't actually deal with the CR3 register content at all */
653 hyp_vm_assist(VMASST_CMD_enable0, VMASST_TYPE_pae_extended_cr33);
654 /*
655 * Xen may only provide as few as 512KB extra bootstrap linear memory,
656 * which is far from enough to map all available memory, so we need to
657 * map more bootstrap linear memory. We here map 1 (resp. 4 for PAE)
658 * other L1 table(s), thus 4MiB extra memory (resp. 8MiB), which is
659 * enough for a pagetable mapping 4GiB.
660 */
661#ifdef PAE1
662#define NSUP_L14 4
663#else
664#define NSUP_L14 1
665#endif
666 pt_entry_t *l1_map[NSUP_L14];
667 {
668 pt_entry_t *base = (pt_entry_t*) boot_info.pt_base;
669 vm_offset_t la;
670 int n_l1map;
671 for (n_l1map = 0, la = VM_MIN_KERNEL_ADDRESS0xC0000000UL; la >= VM_MIN_KERNEL_ADDRESS0xC0000000UL; la += NPTES((((unsigned long)(1)) << 12)/sizeof(pt_entry_t)) * PAGE_SIZE(1 << 12)) {
672#ifdef PAE1
673 pt_entry_t *l2_map = (pt_entry_t*) ptetokv(base[lin2pdpnum(la)])(((vm_offset_t)(({ pt_entry_t __a = (pt_entry_t) ((base[(((la
) >> 30) & 3)]) & 0x00007ffffffff000ULL); ((((unsigned
long *) 0xF5800000UL)[__a >> 12]) << 12) | (__a &
((1 << 12)-1)); })) + 0xC0000000UL))
;
674#else /* PAE */
675 pt_entry_t *l2_map = base;
676#endif /* PAE */
677 /* Like lin2pdenum, but works with non-contiguous boot L3 */
678 l2_map += (la >> PDESHIFT21) & PDEMASK0x1ff;
679 if (!(*l2_map & INTEL_PTE_VALID0x00000001)) {
680 struct mmu_update update;
681 int j, n;
682
683 l1_map[n_l1map] = (pt_entry_t*) phystokv(pmap_grab_page())((vm_offset_t)(pmap_grab_page()) + 0xC0000000UL);
684 for (j = 0; j < NPTES((((unsigned long)(1)) << 12)/sizeof(pt_entry_t)); j++)
685 l1_map[n_l1map][j] = (((pt_entry_t)pfn_to_mfn(lin2pdenum(la - VM_MIN_KERNEL_ADDRESS) * NPTES + j)(mfn_list[(((la - 0xC0000000UL) >> 21) & 0x7ff) * (
(((unsigned long)(1)) << 12)/sizeof(pt_entry_t)) + j])
) << PAGE_SHIFT12) | INTEL_PTE_VALID0x00000001 | INTEL_PTE_WRITE0x00000002;
686 pmap_set_page_readonly_init(l1_map[n_l1map]);
687 if (!hyp_mmuext_op_mfn (MMUEXT_PIN_L1_TABLE0, kv_to_mfn (l1_map[n_l1map])((mfn_list[(((vm_size_t)(((vm_offset_t)(l1_map[n_l1map]) - 0xC0000000UL
))) >> 12)]))
))
688 panic("couldn't pin page %p(%p)", l1_map[n_l1map], (vm_offset_t) kv_to_ma (l1_map[n_l1map])({ vm_offset_t __a = (vm_offset_t) (((vm_offset_t)(l1_map[n_l1map
]) - 0xC0000000UL)); (((pt_entry_t) ((mfn_list[(((vm_size_t)(
__a)) >> 12)]))) << 12) | (__a & ((1 <<
12)-1)); })
);
689 update.ptr = kv_to_ma(l2_map)({ vm_offset_t __a = (vm_offset_t) (((vm_offset_t)(l2_map) - 0xC0000000UL
)); (((pt_entry_t) ((mfn_list[(((vm_size_t)(__a)) >> 12
)]))) << 12) | (__a & ((1 << 12)-1)); })
;
690 update.val = kv_to_ma(l1_map[n_l1map])({ vm_offset_t __a = (vm_offset_t) (((vm_offset_t)(l1_map[n_l1map
]) - 0xC0000000UL)); (((pt_entry_t) ((mfn_list[(((vm_size_t)(
__a)) >> 12)]))) << 12) | (__a & ((1 <<
12)-1)); })
| INTEL_PTE_VALID0x00000001 | INTEL_PTE_WRITE0x00000002;
691 hyp_mmu_update(kv_to_la(&update)((vm_offset_t)(((vm_offset_t)(((vm_offset_t)(&update) - 0xC0000000UL
))) + ((0xc0000000UL))))
, 1, kv_to_la(&n)((vm_offset_t)(((vm_offset_t)(((vm_offset_t)(&n) - 0xC0000000UL
))) + ((0xc0000000UL))))
, DOMID_SELF(0x7FF0U));
692 if (n != 1)
693 panic("couldn't complete bootstrap map");
694 /* added the last L1 table, can stop */
695 if (++n_l1map >= NSUP_L14)
696 break;
697 }
698 }
699 }
700#endif /* MACH_PV_PAGETABLES */
701
702 /*
703 * Allocate and set up the kernel page tables.
704 */
705 {
706 vm_offset_t va;
707 pt_entry_t global = CPU_HAS_FEATURE(CPU_FEATURE_PGE)(cpu_features[(13) / 32] & (1 << ((13) % 32))) ? INTEL_PTE_GLOBAL0x00000000 : 0;
708
709 /*
710 * Map virtual memory for all known physical memory, 1-1,
711 * from phys_first_addr to phys_last_addr.
712 * Make any mappings completely in the kernel's text segment read-only.
713 *
714 * Also allocate some additional all-null page tables afterwards
715 * for kernel virtual memory allocation,
716 * because this PMAP module is too stupid
717 * to allocate new kernel page tables later.
718 * XX fix this
719 */
720 for (va = phystokv(phys_first_addr)((vm_offset_t)(phys_first_addr) + 0xC0000000UL); va >= phystokv(phys_first_addr)((vm_offset_t)(phys_first_addr) + 0xC0000000UL) && va < kernel_virtual_end; )
721 {
722 pt_entry_t *pde = kernel_page_dir + lin2pdenum(kvtolin(va))(((((vm_offset_t)(va) - 0xC0000000UL + ((0xc0000000UL)))) >>
21) & 0x7ff)
;
723 pt_entry_t *ptable = (pt_entry_t*)phystokv(pmap_grab_page())((vm_offset_t)(pmap_grab_page()) + 0xC0000000UL);
724 pt_entry_t *pte;
725
726 /* Initialize the page directory entry. */
727 WRITE_PTE(pde, pa_to_pte((vm_offset_t)_kvtophys(ptable))*(pde) = (((vm_offset_t)((vm_offset_t)(ptable) - 0xC0000000UL
)) & 0x00007ffffffff000ULL) | 0x00000001 | 0x00000002?({ vm_offset_t
__a = (vm_offset_t) ((((vm_offset_t)((vm_offset_t)(ptable) -
0xC0000000UL)) & 0x00007ffffffff000ULL) | 0x00000001 | 0x00000002
); (((pt_entry_t) ((mfn_list[(((vm_size_t)(__a)) >> 12)
]))) << 12) | (__a & ((1 << 12)-1)); }):0;
728 | INTEL_PTE_VALID | INTEL_PTE_WRITE)*(pde) = (((vm_offset_t)((vm_offset_t)(ptable) - 0xC0000000UL
)) & 0x00007ffffffff000ULL) | 0x00000001 | 0x00000002?({ vm_offset_t
__a = (vm_offset_t) ((((vm_offset_t)((vm_offset_t)(ptable) -
0xC0000000UL)) & 0x00007ffffffff000ULL) | 0x00000001 | 0x00000002
); (((pt_entry_t) ((mfn_list[(((vm_size_t)(__a)) >> 12)
]))) << 12) | (__a & ((1 << 12)-1)); }):0;
;
729
730 /* Initialize the page table. */
731 for (pte = ptable; (va < phystokv(phys_last_addr)((vm_offset_t)(phys_last_addr) + 0xC0000000UL)) && (pte < ptable+NPTES((((unsigned long)(1)) << 12)/sizeof(pt_entry_t))); pte++)
732 {
733 if ((pte - ptable) < ptenum(va)(((va) >> 12) & 0x1ff))
734 {
735 WRITE_PTE(pte, 0)*(pte) = 0?({ vm_offset_t __a = (vm_offset_t) (0); (((pt_entry_t
) ((mfn_list[(((vm_size_t)(__a)) >> 12)]))) << 12
) | (__a & ((1 << 12)-1)); }):0;
;
736 }
737 else
738#ifdef MACH_PV_PAGETABLES
739 if (va == (vm_offset_t) &hyp_shared_info)
740 {
741 *pte = boot_info.shared_info | INTEL_PTE_VALID0x00000001 | INTEL_PTE_WRITE0x00000002;
742 va += INTEL_PGBYTES4096;
743 }
744 else
745#endif /* MACH_PV_PAGETABLES */
746 {
747 extern char _start[], etext[];
748
749 if (((va >= (vm_offset_t) _start)
750 && (va + INTEL_PGBYTES4096 <= (vm_offset_t)etext))
751#ifdef MACH_PV_PAGETABLES
752 || (va >= (vm_offset_t) boot_info.pt_base
753 && (va + INTEL_PGBYTES4096 <=
754 (vm_offset_t) ptable + INTEL_PGBYTES4096))
755#endif /* MACH_PV_PAGETABLES */
756 )
757 {
758 WRITE_PTE(pte, pa_to_pte(_kvtophys(va))*(pte) = ((((vm_offset_t)(va) - 0xC0000000UL)) & 0x00007ffffffff000ULL
) | 0x00000001 | global?({ vm_offset_t __a = (vm_offset_t) ((
(((vm_offset_t)(va) - 0xC0000000UL)) & 0x00007ffffffff000ULL
) | 0x00000001 | global); (((pt_entry_t) ((mfn_list[(((vm_size_t
)(__a)) >> 12)]))) << 12) | (__a & ((1 <<
12)-1)); }):0;
759 | INTEL_PTE_VALID | global)*(pte) = ((((vm_offset_t)(va) - 0xC0000000UL)) & 0x00007ffffffff000ULL
) | 0x00000001 | global?({ vm_offset_t __a = (vm_offset_t) ((
(((vm_offset_t)(va) - 0xC0000000UL)) & 0x00007ffffffff000ULL
) | 0x00000001 | global); (((pt_entry_t) ((mfn_list[(((vm_size_t
)(__a)) >> 12)]))) << 12) | (__a & ((1 <<
12)-1)); }):0;
;
760 }
761 else
762 {
763#ifdef MACH_PV_PAGETABLES
764 /* Keep supplementary L1 pages read-only */
765 int i;
766 for (i = 0; i < NSUP_L14; i++)
767 if (va == (vm_offset_t) l1_map[i]) {
768 WRITE_PTE(pte, pa_to_pte(_kvtophys(va))*(pte) = ((((vm_offset_t)(va) - 0xC0000000UL)) & 0x00007ffffffff000ULL
) | 0x00000001 | global?({ vm_offset_t __a = (vm_offset_t) ((
(((vm_offset_t)(va) - 0xC0000000UL)) & 0x00007ffffffff000ULL
) | 0x00000001 | global); (((pt_entry_t) ((mfn_list[(((vm_size_t
)(__a)) >> 12)]))) << 12) | (__a & ((1 <<
12)-1)); }):0;
769 | INTEL_PTE_VALID | global)*(pte) = ((((vm_offset_t)(va) - 0xC0000000UL)) & 0x00007ffffffff000ULL
) | 0x00000001 | global?({ vm_offset_t __a = (vm_offset_t) ((
(((vm_offset_t)(va) - 0xC0000000UL)) & 0x00007ffffffff000ULL
) | 0x00000001 | global); (((pt_entry_t) ((mfn_list[(((vm_size_t
)(__a)) >> 12)]))) << 12) | (__a & ((1 <<
12)-1)); }):0;
;
770 break;
771 }
772 if (i == NSUP_L14)
773#endif /* MACH_PV_PAGETABLES */
774 WRITE_PTE(pte, pa_to_pte(_kvtophys(va))*(pte) = ((((vm_offset_t)(va) - 0xC0000000UL)) & 0x00007ffffffff000ULL
) | 0x00000001 | 0x00000002 | global?({ vm_offset_t __a = (vm_offset_t
) (((((vm_offset_t)(va) - 0xC0000000UL)) & 0x00007ffffffff000ULL
) | 0x00000001 | 0x00000002 | global); (((pt_entry_t) ((mfn_list
[(((vm_size_t)(__a)) >> 12)]))) << 12) | (__a &
((1 << 12)-1)); }):0;
775 | INTEL_PTE_VALID | INTEL_PTE_WRITE | global)*(pte) = ((((vm_offset_t)(va) - 0xC0000000UL)) & 0x00007ffffffff000ULL
) | 0x00000001 | 0x00000002 | global?({ vm_offset_t __a = (vm_offset_t
) (((((vm_offset_t)(va) - 0xC0000000UL)) & 0x00007ffffffff000ULL
) | 0x00000001 | 0x00000002 | global); (((pt_entry_t) ((mfn_list
[(((vm_size_t)(__a)) >> 12)]))) << 12) | (__a &
((1 << 12)-1)); }):0;
776
777 }
778 va += INTEL_PGBYTES4096;
779 }
780 }
781 for (; pte < ptable+NPTES((((unsigned long)(1)) << 12)/sizeof(pt_entry_t)); pte++)
782 {
783 if (va >= kernel_virtual_end - PMAP_NMAPWINDOWS2 * PAGE_SIZE(1 << 12) && va < kernel_virtual_end)
784 {
785 pmap_mapwindow_t *win = &mapwindows[atop(va - (kernel_virtual_end - PMAP_NMAPWINDOWS * PAGE_SIZE))(((vm_size_t)(va - (kernel_virtual_end - 2 * (1 << 12))
)) >> 12)
];
786 win->entry = pte;
787 win->vaddr = va;
788 }
789 WRITE_PTE(pte, 0)*(pte) = 0?({ vm_offset_t __a = (vm_offset_t) (0); (((pt_entry_t
) ((mfn_list[(((vm_size_t)(__a)) >> 12)]))) << 12
) | (__a & ((1 << 12)-1)); }):0;
;
790 va += INTEL_PGBYTES4096;
791 }
792#ifdef MACH_PV_PAGETABLES
793 pmap_set_page_readonly_init(ptable);
794 if (!hyp_mmuext_op_mfn (MMUEXT_PIN_L1_TABLE0, kv_to_mfn (ptable)((mfn_list[(((vm_size_t)(((vm_offset_t)(ptable) - 0xC0000000UL
))) >> 12)]))
))
795 panic("couldn't pin page %p(%p)\n", ptable, (vm_offset_t) kv_to_ma (ptable)({ vm_offset_t __a = (vm_offset_t) (((vm_offset_t)(ptable) - 0xC0000000UL
)); (((pt_entry_t) ((mfn_list[(((vm_size_t)(__a)) >> 12
)]))) << 12) | (__a & ((1 << 12)-1)); })
);
796#endif /* MACH_PV_PAGETABLES */
797 }
798 }
799
800 /* Architecture-specific code will turn on paging
801 soon after we return from here. */
802}
803
804#ifdef MACH_PV_PAGETABLES
805/* These are only required because of Xen security policies */
806
807/* Set back a page read write */
808void pmap_set_page_readwrite(void *_vaddr) {
809 vm_offset_t vaddr = (vm_offset_t) _vaddr;
810 vm_offset_t paddr = kvtophys(vaddr);
811 vm_offset_t canon_vaddr = phystokv(paddr)((vm_offset_t)(paddr) + 0xC0000000UL);
812 if (hyp_do_update_va_mapping (kvtolin(vaddr), pa_to_pte (pa_to_ma(paddr)) | INTEL_PTE_VALID | INTEL_PTE_WRITE, UVMF_NONE)({ pt_entry_t __val = (((({ vm_offset_t __a = (vm_offset_t) (
paddr); (((pt_entry_t) ((mfn_list[(((vm_size_t)(__a)) >>
12)]))) << 12) | (__a & ((1 << 12)-1)); })) &
0x00007ffffffff000ULL) | 0x00000001 | 0x00000002); hyp_update_va_mapping
(((vm_offset_t)(vaddr) - 0xC0000000UL + ((0xc0000000UL))), __val
& 0xffffffffU, ((__val) >> 32), (0UL<<0)); }
)
)
813 panic("couldn't set hiMMU readwrite for addr %p(%p)\n", vaddr, (vm_offset_t) pa_to_ma (paddr)({ vm_offset_t __a = (vm_offset_t) (paddr); (((pt_entry_t) ((
mfn_list[(((vm_size_t)(__a)) >> 12)]))) << 12) | (
__a & ((1 << 12)-1)); })
);
814 if (canon_vaddr != vaddr)
815 if (hyp_do_update_va_mapping (kvtolin(canon_vaddr), pa_to_pte (pa_to_ma(paddr)) | INTEL_PTE_VALID | INTEL_PTE_WRITE, UVMF_NONE)({ pt_entry_t __val = (((({ vm_offset_t __a = (vm_offset_t) (
paddr); (((pt_entry_t) ((mfn_list[(((vm_size_t)(__a)) >>
12)]))) << 12) | (__a & ((1 << 12)-1)); })) &
0x00007ffffffff000ULL) | 0x00000001 | 0x00000002); hyp_update_va_mapping
(((vm_offset_t)(canon_vaddr) - 0xC0000000UL + ((0xc0000000UL)
)), __val & 0xffffffffU, ((__val) >> 32), (0UL<<
0)); })
)
816 panic("couldn't set hiMMU readwrite for paddr %p(%p)\n", canon_vaddr, (vm_offset_t) pa_to_ma (paddr)({ vm_offset_t __a = (vm_offset_t) (paddr); (((pt_entry_t) ((
mfn_list[(((vm_size_t)(__a)) >> 12)]))) << 12) | (
__a & ((1 << 12)-1)); })
);
817}
818
819/* Set a page read only (so as to pin it for instance) */
820void pmap_set_page_readonly(void *_vaddr) {
821 vm_offset_t vaddr = (vm_offset_t) _vaddr;
822 vm_offset_t paddr = kvtophys(vaddr);
823 vm_offset_t canon_vaddr = phystokv(paddr)((vm_offset_t)(paddr) + 0xC0000000UL);
824 if (*pmap_pde(kernel_pmap, vaddr) & INTEL_PTE_VALID0x00000001) {
825 if (hyp_do_update_va_mapping (kvtolin(vaddr), pa_to_pte (pa_to_ma(paddr)) | INTEL_PTE_VALID, UVMF_NONE)({ pt_entry_t __val = (((({ vm_offset_t __a = (vm_offset_t) (
paddr); (((pt_entry_t) ((mfn_list[(((vm_size_t)(__a)) >>
12)]))) << 12) | (__a & ((1 << 12)-1)); })) &
0x00007ffffffff000ULL) | 0x00000001); hyp_update_va_mapping(
((vm_offset_t)(vaddr) - 0xC0000000UL + ((0xc0000000UL))), __val
& 0xffffffffU, ((__val) >> 32), (0UL<<0)); }
)
)
826 panic("couldn't set hiMMU readonly for vaddr %p(%p)\n", vaddr, (vm_offset_t) pa_to_ma (paddr)({ vm_offset_t __a = (vm_offset_t) (paddr); (((pt_entry_t) ((
mfn_list[(((vm_size_t)(__a)) >> 12)]))) << 12) | (
__a & ((1 << 12)-1)); })
);
827 }
828 if (canon_vaddr != vaddr &&
829 *pmap_pde(kernel_pmap, canon_vaddr) & INTEL_PTE_VALID0x00000001) {
830 if (hyp_do_update_va_mapping (kvtolin(canon_vaddr), pa_to_pte (pa_to_ma(paddr)) | INTEL_PTE_VALID, UVMF_NONE)({ pt_entry_t __val = (((({ vm_offset_t __a = (vm_offset_t) (
paddr); (((pt_entry_t) ((mfn_list[(((vm_size_t)(__a)) >>
12)]))) << 12) | (__a & ((1 << 12)-1)); })) &
0x00007ffffffff000ULL) | 0x00000001); hyp_update_va_mapping(
((vm_offset_t)(canon_vaddr) - 0xC0000000UL + ((0xc0000000UL))
), __val & 0xffffffffU, ((__val) >> 32), (0UL<<
0)); })
)
831 panic("couldn't set hiMMU readonly for vaddr %p canon_vaddr %p paddr %p (%p)\n", vaddr, canon_vaddr, paddr, (vm_offset_t) pa_to_ma (paddr)({ vm_offset_t __a = (vm_offset_t) (paddr); (((pt_entry_t) ((
mfn_list[(((vm_size_t)(__a)) >> 12)]))) << 12) | (
__a & ((1 << 12)-1)); })
);
832 }
833}
834
835/* This needs to be called instead of pmap_set_page_readonly as long as RC3
836 * still points to the bootstrap dirbase, to also fix the bootstrap table. */
837void pmap_set_page_readonly_init(void *_vaddr) {
838 vm_offset_t vaddr = (vm_offset_t) _vaddr;
839#if PAE1
840 pt_entry_t *pdpbase = (void*) boot_info.pt_base;
841 /* The bootstrap table does not necessarily use contiguous pages for the pde tables */
842 pt_entry_t *dirbase = (void*) ptetokv(pdpbase[lin2pdpnum(vaddr)])(((vm_offset_t)(({ pt_entry_t __a = (pt_entry_t) ((pdpbase[((
(vaddr) >> 30) & 3)]) & 0x00007ffffffff000ULL);
((((unsigned long *) 0xF5800000UL)[__a >> 12]) <<
12) | (__a & ((1 << 12)-1)); })) + 0xC0000000UL))
;
843#else
844 pt_entry_t *dirbase = (void*) boot_info.pt_base;
845#endif
846 pt_entry_t *pte = &dirbase[lin2pdenum(vaddr)(((vaddr) >> 21) & 0x7ff) & PTEMASK0x1ff];
847 /* Modify our future kernel map (can't use update_va_mapping for this)... */
848 if (*pmap_pde(kernel_pmap, vaddr) & INTEL_PTE_VALID0x00000001) {
849 if (!hyp_mmu_update_la (kvtolin(vaddr), pa_to_pte (kv_to_ma(vaddr)) | INTEL_PTE_VALID)hyp_mmu_update_pte( (kernel_pmap->dirbase[((((vm_offset_t)
(((vm_offset_t)(vaddr) - 0xC0000000UL + ((0xc0000000UL))))) >>
21) & 0x7ff)] & 0x00007ffffffff000ULL) + ((((vm_offset_t
)(((vm_offset_t)(vaddr) - 0xC0000000UL + ((0xc0000000UL))))) >>
12) & 0x1ff) * sizeof(pt_entry_t), ((({ vm_offset_t __a =
(vm_offset_t) (((vm_offset_t)(vaddr) - 0xC0000000UL)); (((pt_entry_t
) ((mfn_list[(((vm_size_t)(__a)) >> 12)]))) << 12
) | (__a & ((1 << 12)-1)); })) & 0x00007ffffffff000ULL
) | 0x00000001)
)
850 panic("couldn't set hiMMU readonly for vaddr %p(%p)\n", vaddr, (vm_offset_t) kv_to_ma (vaddr)({ vm_offset_t __a = (vm_offset_t) (((vm_offset_t)(vaddr) - 0xC0000000UL
)); (((pt_entry_t) ((mfn_list[(((vm_size_t)(__a)) >> 12
)]))) << 12) | (__a & ((1 << 12)-1)); })
);
851 }
852 /* ... and the bootstrap map. */
853 if (*pte & INTEL_PTE_VALID0x00000001) {
854 if (hyp_do_update_va_mapping (vaddr, pa_to_pte (kv_to_ma(vaddr)) | INTEL_PTE_VALID, UVMF_NONE)({ pt_entry_t __val = (((({ vm_offset_t __a = (vm_offset_t) (
((vm_offset_t)(vaddr) - 0xC0000000UL)); (((pt_entry_t) ((mfn_list
[(((vm_size_t)(__a)) >> 12)]))) << 12) | (__a &
((1 << 12)-1)); })) & 0x00007ffffffff000ULL) | 0x00000001
); hyp_update_va_mapping(vaddr, __val & 0xffffffffU, ((__val
) >> 32), (0UL<<0)); })
)
855 panic("couldn't set MMU readonly for vaddr %p(%p)\n", vaddr, (vm_offset_t) kv_to_ma (vaddr)({ vm_offset_t __a = (vm_offset_t) (((vm_offset_t)(vaddr) - 0xC0000000UL
)); (((pt_entry_t) ((mfn_list[(((vm_size_t)(__a)) >> 12
)]))) << 12) | (__a & ((1 << 12)-1)); })
);
856 }
857}
858
859void pmap_clear_bootstrap_pagetable(pt_entry_t *base) {
860 int i;
861 pt_entry_t *dir;
862 vm_offset_t va = 0;
863#if PAE1
864 int j;
865#endif /* PAE */
866 if (!hyp_mmuext_op_mfn (MMUEXT_UNPIN_TABLE4, kv_to_mfn(base)((mfn_list[(((vm_size_t)(((vm_offset_t)(base) - 0xC0000000UL)
)) >> 12)]))
))
867 panic("pmap_clear_bootstrap_pagetable: couldn't unpin page %p(%p)\n", base, (vm_offset_t) kv_to_ma(base)({ vm_offset_t __a = (vm_offset_t) (((vm_offset_t)(base) - 0xC0000000UL
)); (((pt_entry_t) ((mfn_list[(((vm_size_t)(__a)) >> 12
)]))) << 12) | (__a & ((1 << 12)-1)); })
);
868#if PAE1
869 for (j = 0; j < PDPNUM4; j++)
870 {
871 pt_entry_t pdpe = base[j];
872 if (pdpe & INTEL_PTE_VALID0x00000001) {
873 dir = (pt_entry_t *) ptetokv(pdpe)(((vm_offset_t)(({ pt_entry_t __a = (pt_entry_t) ((pdpe) &
0x00007ffffffff000ULL); ((((unsigned long *) 0xF5800000UL)[__a
>> 12]) << 12) | (__a & ((1 << 12)-1))
; })) + 0xC0000000UL))
;
874#else /* PAE */
875 dir = base;
876#endif /* PAE */
877 for (i = 0; i < NPTES((((unsigned long)(1)) << 12)/sizeof(pt_entry_t)); i++) {
878 pt_entry_t pde = dir[i];
879 unsigned long pfn = atop(pte_to_pa(pde))(((vm_size_t)(({ pt_entry_t __a = (pt_entry_t) ((pde) & 0x00007ffffffff000ULL
); ((((unsigned long *) 0xF5800000UL)[__a >> 12]) <<
12) | (__a & ((1 << 12)-1)); }))) >> 12)
;
880 void *pgt = (void*) phystokv(ptoa(pfn))((vm_offset_t)(((vm_offset_t)((pfn) << 12))) + 0xC0000000UL
)
;
881 if (pde & INTEL_PTE_VALID0x00000001)
882 hyp_free_page(pfn, pgt);
883 va += NPTES((((unsigned long)(1)) << 12)/sizeof(pt_entry_t)) * INTEL_PGBYTES4096;
884 if (va >= HYP_VIRT_START0xF5800000UL)
885 break;
886 }
887#if PAE1
888 hyp_free_page(atop(_kvtophys(dir))(((vm_size_t)(((vm_offset_t)(dir) - 0xC0000000UL))) >> 12
)
, dir);
889 } else
890 va += NPTES((((unsigned long)(1)) << 12)/sizeof(pt_entry_t)) * NPTES((((unsigned long)(1)) << 12)/sizeof(pt_entry_t)) * INTEL_PGBYTES4096;
891 if (va >= HYP_VIRT_START0xF5800000UL)
892 break;
893 }
894#endif /* PAE */
895 hyp_free_page(atop(_kvtophys(base))(((vm_size_t)(((vm_offset_t)(base) - 0xC0000000UL))) >>
12)
, base);
896}
897#endif /* MACH_PV_PAGETABLES */
898
899/*
900 * Create a temporary mapping for a given physical entry
901 *
902 * This can be used to access physical pages which are not mapped 1:1 by
903 * phystokv().
904 */
905pmap_mapwindow_t *pmap_get_mapwindow(pt_entry_t entry)
906{
907 pmap_mapwindow_t *map;
908
909 /* Find an empty one. */
910 for (map = &mapwindows[0]; map < &mapwindows[sizeof (mapwindows) / sizeof (*mapwindows)]; map++)
911 if (!(*map->entry))
912 break;
913 assert(map < &mapwindows[sizeof (mapwindows) / sizeof (*mapwindows)])({ if (!(map < &mapwindows[sizeof (mapwindows) / sizeof
(*mapwindows)])) Assert("map < &mapwindows[sizeof (mapwindows) / sizeof (*mapwindows)]"
, "../i386/intel/pmap.c", 913); })
;
914
915 WRITE_PTE(map->entry, entry)*(map->entry) = entry?({ vm_offset_t __a = (vm_offset_t) (
entry); (((pt_entry_t) ((mfn_list[(((vm_size_t)(__a)) >>
12)]))) << 12) | (__a & ((1 << 12)-1)); }):0
;
;
916 return map;
917}
918
919/*
920 * Destroy a temporary mapping for a physical entry
921 */
922void pmap_put_mapwindow(pmap_mapwindow_t *map)
923{
924 WRITE_PTE(map->entry, 0)*(map->entry) = 0?({ vm_offset_t __a = (vm_offset_t) (0); (
((pt_entry_t) ((mfn_list[(((vm_size_t)(__a)) >> 12)])))
<< 12) | (__a & ((1 << 12)-1)); }):0;
;
925 PMAP_UPDATE_TLBS(kernel_pmap, map->vaddr, map->vaddr + PAGE_SIZE){ if ((kernel_pmap)->cpus_using) { hyp_mmuext_op_void(6); }
}
;
926}
927
928void pmap_virtual_space(startp, endp)
929 vm_offset_t *startp;
930 vm_offset_t *endp;
931{
932 *startp = kernel_virtual_start;
933 *endp = kernel_virtual_end - PMAP_NMAPWINDOWS2 * PAGE_SIZE(1 << 12);
934}
935
936/*
937 * Initialize the pmap module.
938 * Called by vm_init, to initialize any structures that the pmap
939 * system needs to map virtual memory.
940 */
941void pmap_init(void)
942{
943 long npages;
944 vm_offset_t addr;
945 vm_size_t s;
946#if NCPUS1 > 1
947 int i;
948#endif /* NCPUS > 1 */
949
950 /*
951 * Allocate memory for the pv_head_table and its lock bits,
952 * the modify bit array, and the pte_page table.
953 */
954
955 npages = atop(phys_last_addr - phys_first_addr)(((vm_size_t)(phys_last_addr - phys_first_addr)) >> 12);
956 s = (vm_size_t) (sizeof(struct pv_entry) * npages
957 + pv_lock_table_size(npages)(((npages)+8 -1)/8)
958 + npages);
959
960 s = round_page(s)((vm_offset_t)((((vm_offset_t)(s)) + ((1 << 12)-1)) &
~((1 << 12)-1)))
;
961 if (kmem_alloc_wired(kernel_map, &addr, s) != KERN_SUCCESS0)
962 panic("pmap_init");
963 memset((void *) addr, 0, s);
964
965 /*
966 * Allocate the structures first to preserve word-alignment.
967 */
968 pv_head_table = (pv_entry_t) addr;
969 addr = (vm_offset_t) (pv_head_table + npages);
970
971 pv_lock_table = (char *) addr;
972 addr = (vm_offset_t) (pv_lock_table + pv_lock_table_size(npages)(((npages)+8 -1)/8));
973
974 pmap_phys_attributes = (char *) addr;
975
976 /*
977 * Create the cache of physical maps,
978 * and of the physical-to-virtual entries.
979 */
980 s = (vm_size_t) sizeof(struct pmap);
981 kmem_cache_init(&pmap_cache, "pmap", s, 0, NULL((void *) 0), NULL((void *) 0), NULL((void *) 0), 0);
982 s = (vm_size_t) sizeof(struct pv_entry);
983 kmem_cache_init(&pv_list_cache, "pv_entry", s, 0, NULL((void *) 0), NULL((void *) 0), NULL((void *) 0), 0);
984
985#if NCPUS1 > 1
986 /*
987 * Set up the pmap request lists
988 */
989 for (i = 0; i < NCPUS1; i++) {
990 pmap_update_list_t up = &cpu_update_list[i];
991
992 simple_lock_init(&up->lock);
993 up->count = 0;
994 }
995#endif /* NCPUS > 1 */
996
997 /*
998 * Indicate that the PMAP module is now fully initialized.
999 */
1000 pmap_initialized = TRUE((boolean_t) 1);
1001}
1002
1003#define valid_page(x)(pmap_initialized && pmap_valid_page(x)) (pmap_initialized && pmap_valid_page(x))
1004
1005boolean_t pmap_verify_free(phys)
1006 vm_offset_t phys;
1007{
1008 pv_entry_t pv_h;
1009 int pai;
1010 int spl;
1011 boolean_t result;
1012
1013 assert(phys != vm_page_fictitious_addr)({ if (!(phys != vm_page_fictitious_addr)) Assert("phys != vm_page_fictitious_addr"
, "../i386/intel/pmap.c", 1013); })
;
1014 if (!pmap_initialized)
1015 return(TRUE((boolean_t) 1));
1016
1017 if (!pmap_valid_page(phys))
1018 return(FALSE((boolean_t) 0));
1019
1020 PMAP_WRITE_LOCK(spl)((void)(spl));
1021
1022 pai = pa_index(phys)((((vm_size_t)(phys - phys_first_addr)) >> 12));
1023 pv_h = pai_to_pvh(pai)(&pv_head_table[pai]);
1024
1025 result = (pv_h->pmap == PMAP_NULL((pmap_t) 0));
1026 PMAP_WRITE_UNLOCK(spl)((void)(spl));
1027
1028 return(result);
1029}
1030
1031/*
1032 * Routine: pmap_page_table_page_alloc
1033 *
1034 * Allocates a new physical page to be used as a page-table page.
1035 *
1036 * Must be called with the pmap system and the pmap unlocked,
1037 * since these must be unlocked to use vm_page_grab.
1038 */
1039vm_offset_t
1040pmap_page_table_page_alloc(void)
1041{
1042 vm_page_t m;
1043 vm_offset_t pa;
1044
1045 check_simple_locks();
1046
1047 /*
1048 * We cannot allocate the pmap_object in pmap_init,
1049 * because it is called before the cache package is up.
1050 * Allocate it now if it is missing.
1051 */
1052 if (pmap_object == VM_OBJECT_NULL((vm_object_t) 0))
1053 pmap_object = vm_object_allocate(phys_last_addr - phys_first_addr);
1054
1055 /*
1056 * Allocate a VM page for the level 2 page table entries.
1057 */
1058 while ((m = vm_page_grab(FALSE((boolean_t) 0))) == VM_PAGE_NULL((vm_page_t) 0))
1059 VM_PAGE_WAIT((void (*)()) 0)vm_page_wait((void (*)()) 0);
1060
1061 /*
1062 * Map the page to its physical address so that it
1063 * can be found later.
1064 */
1065 pa = m->phys_addr;
1066 vm_object_lock(pmap_object);
1067 vm_page_insert(m, pmap_object, pa);
1068 vm_page_lock_queues();
1069 vm_page_wire(m);
1070 inuse_ptepages_count++;
1071 vm_page_unlock_queues()((void)(&vm_page_queue_lock));
1072 vm_object_unlock(pmap_object)((void)(&(pmap_object)->Lock));
1073
1074 /*
1075 * Zero the page.
1076 */
1077 memset((void *)phystokv(pa)((vm_offset_t)(pa) + 0xC0000000UL), 0, PAGE_SIZE(1 << 12));
1078
1079 return pa;
1080}
1081
1082#ifdef MACH_XEN
1083void pmap_map_mfn(void *_addr, unsigned long mfn) {
1084 vm_offset_t addr = (vm_offset_t) _addr;
1085 pt_entry_t *pte, *pdp;
1086 vm_offset_t ptp;
1087 pt_entry_t ma = ((pt_entry_t) mfn) << PAGE_SHIFT12;
1088
1089 /* Add a ptp if none exist yet for this pte */
1090 if ((pte = pmap_pte(kernel_pmap, addr)) == PT_ENTRY_NULL((pt_entry_t *) 0)) {
1091 ptp = phystokv(pmap_page_table_page_alloc())((vm_offset_t)(pmap_page_table_page_alloc()) + 0xC0000000UL);
1092#ifdef MACH_PV_PAGETABLES
1093 pmap_set_page_readonly((void*) ptp);
1094 if (!hyp_mmuext_op_mfn (MMUEXT_PIN_L1_TABLE0, pa_to_mfn(ptp)((mfn_list[(((vm_size_t)(ptp)) >> 12)]))))
1095 panic("couldn't pin page %p(%p)\n",ptp,(vm_offset_t) kv_to_ma(ptp)({ vm_offset_t __a = (vm_offset_t) (((vm_offset_t)(ptp) - 0xC0000000UL
)); (((pt_entry_t) ((mfn_list[(((vm_size_t)(__a)) >> 12
)]))) << 12) | (__a & ((1 << 12)-1)); })
);
1096#endif /* MACH_PV_PAGETABLES */
1097 pdp = pmap_pde(kernel_pmap, addr);
1098
1099#ifdef MACH_PV_PAGETABLES
1100 if (!hyp_mmu_update_pte(kv_to_ma(pdp)({ vm_offset_t __a = (vm_offset_t) (((vm_offset_t)(pdp) - 0xC0000000UL
)); (((pt_entry_t) ((mfn_list[(((vm_size_t)(__a)) >> 12
)]))) << 12) | (__a & ((1 << 12)-1)); })
,
1101 pa_to_pte(kv_to_ma(ptp))((({ vm_offset_t __a = (vm_offset_t) (((vm_offset_t)(ptp) - 0xC0000000UL
)); (((pt_entry_t) ((mfn_list[(((vm_size_t)(__a)) >> 12
)]))) << 12) | (__a & ((1 << 12)-1)); })) &
0x00007ffffffff000ULL)
| INTEL_PTE_VALID0x00000001
1102 | INTEL_PTE_USER0x00000004
1103 | INTEL_PTE_WRITE0x00000002))
1104 panic("%s:%d could not set pde %p(%p) to %p(%p)\n",__FILE__"../i386/intel/pmap.c",__LINE__1104,kvtophys((vm_offset_t)pdp),(vm_offset_t) kv_to_ma(pdp)({ vm_offset_t __a = (vm_offset_t) (((vm_offset_t)(pdp) - 0xC0000000UL
)); (((pt_entry_t) ((mfn_list[(((vm_size_t)(__a)) >> 12
)]))) << 12) | (__a & ((1 << 12)-1)); })
, ptp, (vm_offset_t) pa_to_ma(ptp)({ vm_offset_t __a = (vm_offset_t) (ptp); (((pt_entry_t) ((mfn_list
[(((vm_size_t)(__a)) >> 12)]))) << 12) | (__a &
((1 << 12)-1)); })
);
1105#else /* MACH_PV_PAGETABLES */
1106 *pdp = pa_to_pte(kvtophys(ptp))((kvtophys(ptp)) & 0x00007ffffffff000ULL) | INTEL_PTE_VALID0x00000001
1107 | INTEL_PTE_USER0x00000004
1108 | INTEL_PTE_WRITE0x00000002;
1109#endif /* MACH_PV_PAGETABLES */
1110 pte = pmap_pte(kernel_pmap, addr);
1111 }
1112
1113#ifdef MACH_PV_PAGETABLES
1114 if (!hyp_mmu_update_pte(kv_to_ma(pte)({ vm_offset_t __a = (vm_offset_t) (((vm_offset_t)(pte) - 0xC0000000UL
)); (((pt_entry_t) ((mfn_list[(((vm_size_t)(__a)) >> 12
)]))) << 12) | (__a & ((1 << 12)-1)); })
, ma | INTEL_PTE_VALID0x00000001 | INTEL_PTE_WRITE0x00000002))
1115 panic("%s:%d could not set pte %p(%p) to %p(%p)\n",__FILE__"../i386/intel/pmap.c",__LINE__1115,pte,(vm_offset_t) kv_to_ma(pte)({ vm_offset_t __a = (vm_offset_t) (((vm_offset_t)(pte) - 0xC0000000UL
)); (((pt_entry_t) ((mfn_list[(((vm_size_t)(__a)) >> 12
)]))) << 12) | (__a & ((1 << 12)-1)); })
, ma, ma_to_pa(ma)({ pt_entry_t __a = (pt_entry_t) (ma); ((((unsigned long *) 0xF5800000UL
)[__a >> 12]) << 12) | (__a & ((1 << 12
)-1)); })
);
1116#else /* MACH_PV_PAGETABLES */
1117 /* Note: in this case, mfn is actually a pfn. */
1118 WRITE_PTE(pte, ma | INTEL_PTE_VALID | INTEL_PTE_WRITE)*(pte) = ma | 0x00000001 | 0x00000002?({ vm_offset_t __a = (vm_offset_t
) (ma | 0x00000001 | 0x00000002); (((pt_entry_t) ((mfn_list[(
((vm_size_t)(__a)) >> 12)]))) << 12) | (__a &
((1 << 12)-1)); }):0;
;
1119#endif /* MACH_PV_PAGETABLES */
1120}
1121#endif /* MACH_XEN */
1122
1123/*
1124 * Deallocate a page-table page.
1125 * The page-table page must have all mappings removed,
1126 * and be removed from its page directory.
1127 */
1128void
1129pmap_page_table_page_dealloc(pa)
1130 vm_offset_t pa;
1131{
1132 vm_page_t m;
1133
1134 vm_object_lock(pmap_object);
1135 m = vm_page_lookup(pmap_object, pa);
1136 vm_page_lock_queues();
1137 vm_page_free(m);
1138 inuse_ptepages_count--;
1139 vm_page_unlock_queues()((void)(&vm_page_queue_lock));
1140 vm_object_unlock(pmap_object)((void)(&(pmap_object)->Lock));
1141}
1142
1143/*
1144 * Create and return a physical map.
1145 *
1146 * If the size specified for the map
1147 * is zero, the map is an actual physical
1148 * map, and may be referenced by the
1149 * hardware.
1150 *
1151 * If the size specified is non-zero,
1152 * the map will be used in software only, and
1153 * is bounded by that size.
1154 */
1155pmap_t pmap_create(size)
1156 vm_size_t size;
1157{
1158 pmap_t p;
1159 pmap_statistics_t stats;
1160
1161 /*
1162 * A software use-only map doesn't even need a map.
1163 */
1164
1165 if (size != 0) {
1166 return(PMAP_NULL((pmap_t) 0));
1167 }
1168
1169/*
1170 * Allocate a pmap struct from the pmap_cache. Then allocate
1171 * the page descriptor table.
1172 */
1173
1174 p = (pmap_t) kmem_cache_alloc(&pmap_cache);
1175 if (p == PMAP_NULL((pmap_t) 0))
1176 panic("pmap_create");
1177
1178 if (kmem_alloc_wired(kernel_map,
1179 (vm_offset_t *)&p->dirbase, PDPNUM4 * INTEL_PGBYTES4096)
1180 != KERN_SUCCESS0)
1181 panic("pmap_create");
1182
1183 memcpy(p->dirbase, kernel_page_dir, PDPNUM4 * INTEL_PGBYTES4096);
1184#ifdef LINUX_DEV
1185#if VM_MIN_KERNEL_ADDRESS0xC0000000UL != 0
1186 /* Do not map BIOS in user tasks */
1187 p->dirbase[lin2pdenum(LINEAR_MIN_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS)(((((0xc0000000UL)) - 0xC0000000UL) >> 21) & 0x7ff)] = 0;
1188#endif
1189#endif
1190#ifdef MACH_PV_PAGETABLES
1191 {
1192 int i;
1193 for (i = 0; i < PDPNUM4; i++)
1194 pmap_set_page_readonly((void*) p->dirbase + i * INTEL_PGBYTES4096);
1195 }
1196#endif /* MACH_PV_PAGETABLES */
1197
1198#if PAE1
1199 if (kmem_alloc_wired(kernel_map,
1200 (vm_offset_t *)&p->pdpbase, INTEL_PGBYTES4096)
1201 != KERN_SUCCESS0)
1202 panic("pmap_create");
1203 {
1204 int i;
1205 for (i = 0; i < PDPNUM4; i++)
1206 WRITE_PTE(&p->pdpbase[i], pa_to_pte(kvtophys((vm_offset_t) p->dirbase + i * INTEL_PGBYTES)) | INTEL_PTE_VALID)*(&p->pdpbase[i]) = ((kvtophys((vm_offset_t) p->dirbase
+ i * 4096)) & 0x00007ffffffff000ULL) | 0x00000001?({ vm_offset_t
__a = (vm_offset_t) (((kvtophys((vm_offset_t) p->dirbase +
i * 4096)) & 0x00007ffffffff000ULL) | 0x00000001); (((pt_entry_t
) ((mfn_list[(((vm_size_t)(__a)) >> 12)]))) << 12
) | (__a & ((1 << 12)-1)); }):0;
;
1207 }
1208#ifdef MACH_PV_PAGETABLES
1209 pmap_set_page_readonly(p->pdpbase);
1210#endif /* MACH_PV_PAGETABLES */
1211#endif /* PAE */
1212
1213 p->ref_count = 1;
1214
1215 simple_lock_init(&p->lock);
1216 p->cpus_using = 0;
1217
1218 /*
1219 * Initialize statistics.
1220 */
1221
1222 stats = &p->stats;
1223 stats->resident_count = 0;
1224 stats->wired_count = 0;
1225
1226 return(p);
1227}
1228
1229/*
1230 * Retire the given physical map from service.
1231 * Should only be called if the map contains
1232 * no valid mappings.
1233 */
1234
1235void pmap_destroy(p)
1236 pmap_t p;
1237{
1238 pt_entry_t *pdep;
1239 vm_offset_t pa;
1240 int c, s;
1241 vm_page_t m;
1242
1243 if (p == PMAP_NULL((pmap_t) 0))
1244 return;
1245
1246 SPLVM(s)((void)(s));
1247 simple_lock(&p->lock);
1248 c = --p->ref_count;
1249 simple_unlock(&p->lock)((void)(&p->lock));
1250 SPLX(s)((void)(s));
1251
1252 if (c != 0) {
1253 return; /* still in use */
1254 }
1255
1256 /*
1257 * Free the memory maps, then the
1258 * pmap structure.
1259 */
1260 for (pdep = p->dirbase;
1261 pdep < &p->dirbase[lin2pdenum(LINEAR_MIN_KERNEL_ADDRESS)(((((0xc0000000UL))) >> 21) & 0x7ff)];
1262 pdep += ptes_per_vm_page1) {
1263 if (*pdep & INTEL_PTE_VALID0x00000001) {
1264 pa = pte_to_pa(*pdep)({ pt_entry_t __a = (pt_entry_t) ((*pdep) & 0x00007ffffffff000ULL
); ((((unsigned long *) 0xF5800000UL)[__a >> 12]) <<
12) | (__a & ((1 << 12)-1)); })
;
1265 vm_object_lock(pmap_object);
1266 m = vm_page_lookup(pmap_object, pa);
1267 if (m == VM_PAGE_NULL((vm_page_t) 0))
1268 panic("pmap_destroy: pte page not in object");
1269 vm_page_lock_queues();
1270#ifdef MACH_PV_PAGETABLES
1271 if (!hyp_mmuext_op_mfn (MMUEXT_UNPIN_TABLE4, pa_to_mfn(pa)((mfn_list[(((vm_size_t)(pa)) >> 12)]))))
1272 panic("pmap_destroy: couldn't unpin page %p(%p)\n", pa, (vm_offset_t) kv_to_ma(pa)({ vm_offset_t __a = (vm_offset_t) (((vm_offset_t)(pa) - 0xC0000000UL
)); (((pt_entry_t) ((mfn_list[(((vm_size_t)(__a)) >> 12
)]))) << 12) | (__a & ((1 << 12)-1)); })
);
1273 pmap_set_page_readwrite((void*) phystokv(pa)((vm_offset_t)(pa) + 0xC0000000UL));
1274#endif /* MACH_PV_PAGETABLES */
1275 vm_page_free(m);
1276 inuse_ptepages_count--;
1277 vm_page_unlock_queues()((void)(&vm_page_queue_lock));
1278 vm_object_unlock(pmap_object)((void)(&(pmap_object)->Lock));
1279 }
1280 }
1281#ifdef MACH_PV_PAGETABLES
1282 {
1283 int i;
1284 for (i = 0; i < PDPNUM4; i++)
1285 pmap_set_page_readwrite((void*) p->dirbase + i * INTEL_PGBYTES4096);
1286 }
1287#endif /* MACH_PV_PAGETABLES */
1288 kmem_free(kernel_map, (vm_offset_t)p->dirbase, PDPNUM4 * INTEL_PGBYTES4096);
1289#if PAE1
1290#ifdef MACH_PV_PAGETABLES
1291 pmap_set_page_readwrite(p->pdpbase);
1292#endif /* MACH_PV_PAGETABLES */
1293 kmem_free(kernel_map, (vm_offset_t)p->pdpbase, INTEL_PGBYTES4096);
1294#endif /* PAE */
1295 kmem_cache_free(&pmap_cache, (vm_offset_t) p);
1296}
1297
1298/*
1299 * Add a reference to the specified pmap.
1300 */
1301
1302void pmap_reference(p)
1303 pmap_t p;
1304{
1305 int s;
1306 if (p != PMAP_NULL((pmap_t) 0)) {
1307 SPLVM(s)((void)(s));
1308 simple_lock(&p->lock);
1309 p->ref_count++;
1310 simple_unlock(&p->lock)((void)(&p->lock));
1311 SPLX(s)((void)(s));
1312 }
1313}
1314
1315/*
1316 * Remove a range of hardware page-table entries.
1317 * The entries given are the first (inclusive)
1318 * and last (exclusive) entries for the VM pages.
1319 * The virtual address is the va for the first pte.
1320 *
1321 * The pmap must be locked.
1322 * If the pmap is not the kernel pmap, the range must lie
1323 * entirely within one pte-page. This is NOT checked.
1324 * Assumes that the pte-page exists.
1325 */
1326
1327/* static */
1328void pmap_remove_range(pmap, va, spte, epte)
1329 pmap_t pmap;
1330 vm_offset_t va;
1331 pt_entry_t *spte;
1332 pt_entry_t *epte;
1333{
1334 pt_entry_t *cpte;
1335 int num_removed, num_unwired;
1336 int pai;
1337 vm_offset_t pa;
1338#ifdef MACH_PV_PAGETABLES
1339 int n, ii = 0;
1340 struct mmu_update update[HYP_BATCH_MMU_UPDATES256];
1341#endif /* MACH_PV_PAGETABLES */
1342
1343#if DEBUG_PTE_PAGE0
1344 if (pmap != kernel_pmap)
1345 ptep_check(get_pte_page(spte));
1346#endif /* DEBUG_PTE_PAGE */
1347 num_removed = 0;
1348 num_unwired = 0;
1349
1350 for (cpte = spte; cpte < epte;
1351 cpte += ptes_per_vm_page1, va += PAGE_SIZE(1 << 12)) {
1352
1353 if (*cpte == 0)
1354 continue;
1355 pa = pte_to_pa(*cpte)({ pt_entry_t __a = (pt_entry_t) ((*cpte) & 0x00007ffffffff000ULL
); ((((unsigned long *) 0xF5800000UL)[__a >> 12]) <<
12) | (__a & ((1 << 12)-1)); })
;
1356
1357 num_removed++;
1358 if (*cpte & INTEL_PTE_WIRED0x00000200)
1359 num_unwired++;
1360
1361 if (!valid_page(pa)(pmap_initialized && pmap_valid_page(pa))) {
1362
1363 /*
1364 * Outside range of managed physical memory.
1365 * Just remove the mappings.
1366 */
1367 int i = ptes_per_vm_page1;
1368 pt_entry_t *lpte = cpte;
1369 do {
1370#ifdef MACH_PV_PAGETABLES
1371 update[ii].ptr = kv_to_ma(lpte)({ vm_offset_t __a = (vm_offset_t) (((vm_offset_t)(lpte) - 0xC0000000UL
)); (((pt_entry_t) ((mfn_list[(((vm_size_t)(__a)) >> 12
)]))) << 12) | (__a & ((1 << 12)-1)); })
;
1372 update[ii].val = 0;
1373 ii++;
1374 if (ii == HYP_BATCH_MMU_UPDATES256) {
1375 hyp_mmu_update(kvtolin(&update)((vm_offset_t)(&update) - 0xC0000000UL + ((0xc0000000UL))
)
, ii, kvtolin(&n)((vm_offset_t)(&n) - 0xC0000000UL + ((0xc0000000UL))), DOMID_SELF(0x7FF0U));
1376 if (n != ii)
1377 panic("couldn't pmap_remove_range\n");
1378 ii = 0;
1379 }
1380#else /* MACH_PV_PAGETABLES */
1381 *lpte = 0;
1382#endif /* MACH_PV_PAGETABLES */
1383 lpte++;
1384 } while (--i > 0);
1385 continue;
1386 }
1387
1388 pai = pa_index(pa)((((vm_size_t)(pa - phys_first_addr)) >> 12));
1389 LOCK_PVH(pai);
1390
1391 /*
1392 * Get the modify and reference bits.
1393 */
1394 {
1395 int i;
1396 pt_entry_t *lpte;
1397
1398 i = ptes_per_vm_page1;
1399 lpte = cpte;
1400 do {
1401 pmap_phys_attributes[pai] |=
1402 *lpte & (PHYS_MODIFIED0x00000040|PHYS_REFERENCED0x00000020);
1403#ifdef MACH_PV_PAGETABLES
1404 update[ii].ptr = kv_to_ma(lpte)({ vm_offset_t __a = (vm_offset_t) (((vm_offset_t)(lpte) - 0xC0000000UL
)); (((pt_entry_t) ((mfn_list[(((vm_size_t)(__a)) >> 12
)]))) << 12) | (__a & ((1 << 12)-1)); })
;
1405 update[ii].val = 0;
1406 ii++;
1407 if (ii == HYP_BATCH_MMU_UPDATES256) {
1408 hyp_mmu_update(kvtolin(&update)((vm_offset_t)(&update) - 0xC0000000UL + ((0xc0000000UL))
)
, ii, kvtolin(&n)((vm_offset_t)(&n) - 0xC0000000UL + ((0xc0000000UL))), DOMID_SELF(0x7FF0U));
1409 if (n != ii)
1410 panic("couldn't pmap_remove_range\n");
1411 ii = 0;
1412 }
1413#else /* MACH_PV_PAGETABLES */
1414 *lpte = 0;
1415#endif /* MACH_PV_PAGETABLES */
1416 lpte++;
1417 } while (--i > 0);
1418 }
1419
1420 /*
1421 * Remove the mapping from the pvlist for
1422 * this physical page.
1423 */
1424 {
1425 pv_entry_t pv_h, prev, cur;
1426
1427 pv_h = pai_to_pvh(pai)(&pv_head_table[pai]);
1428 if (pv_h->pmap == PMAP_NULL((pmap_t) 0)) {
1429 panic("pmap_remove: null pv_list!");
1430 }
1431 if (pv_h->va == va && pv_h->pmap == pmap) {
1432 /*
1433 * Header is the pv_entry. Copy the next one
1434 * to header and free the next one (we cannot
1435 * free the header)
1436 */
1437 cur = pv_h->next;
1438 if (cur != PV_ENTRY_NULL((pv_entry_t) 0)) {
1439 *pv_h = *cur;
1440 PV_FREE(cur){ ; cur->next = pv_free_list; pv_free_list = cur; ((void)(
&pv_free_list_lock)); }
;
1441 }
1442 else {
1443 pv_h->pmap = PMAP_NULL((pmap_t) 0);
1444 }
1445 }
1446 else {
1447 cur = pv_h;
1448 do {
1449 prev = cur;
1450 if ((cur = prev->next) == PV_ENTRY_NULL((pv_entry_t) 0)) {
1451 panic("pmap-remove: mapping not in pv_list!");
1452 }
1453 } while (cur->va != va || cur->pmap != pmap);
1454 prev->next = cur->next;
1455 PV_FREE(cur){ ; cur->next = pv_free_list; pv_free_list = cur; ((void)(
&pv_free_list_lock)); }
;
1456 }
1457 UNLOCK_PVH(pai);
1458 }
1459 }
1460
1461#ifdef MACH_PV_PAGETABLES
1462 if (ii > HYP_BATCH_MMU_UPDATES256)
1463 panic("overflowed array in pmap_remove_range");
1464 hyp_mmu_update(kvtolin(&update)((vm_offset_t)(&update) - 0xC0000000UL + ((0xc0000000UL))
)
, ii, kvtolin(&n)((vm_offset_t)(&n) - 0xC0000000UL + ((0xc0000000UL))), DOMID_SELF(0x7FF0U));
1465 if (n != ii)
1466 panic("couldn't pmap_remove_range\n");
1467#endif /* MACH_PV_PAGETABLES */
1468
1469 /*
1470 * Update the counts
1471 */
1472 pmap->stats.resident_count -= num_removed;
1473 pmap->stats.wired_count -= num_unwired;
1474}
1475
1476/*
1477 * Remove the given range of addresses
1478 * from the specified map.
1479 *
1480 * It is assumed that the start and end are properly
1481 * rounded to the hardware page size.
1482 */
1483
/*
 * pmap_remove: drop every mapping in the virtual range [s, e) from `map`.
 * Walks one page-directory entry at a time: `l` is the end of the current
 * PDE window (2MB per the expanded PDE_MAPPED_SIZE), clamped to `e`; for
 * each valid PDE the covered PTE span is handed to pmap_remove_range().
 * TLBs are flushed once at the end over the original range [_s, e).
 */
1484 void pmap_remove(map, s, e)
1485 pmap_t map;
1486 vm_offset_t s, e;
1487 {
1488 int spl;
1489 pt_entry_t *pde;
1490 pt_entry_t *spte, *epte;
1491 vm_offset_t l;
	/* Keep the original start so the final TLB flush covers the whole range. */
1492 vm_offset_t _s = s;
1493
1494 if (map == PMAP_NULL((pmap_t) 0))
1495 return;
1496
1497 PMAP_READ_LOCK(map, spl)((void)(spl));
1498
1499 pde = pmap_pde(map, s);
1500 while (s < e) {
	/* Round s up to the next PDE boundary, but never past e. */
1501 l = (s + PDE_MAPPED_SIZE(((vm_offset_t)(1) << 21))) & ~(PDE_MAPPED_SIZE(((vm_offset_t)(1) << 21))-1);
1502 if (l > e)
1503 l = e;
1504 if (*pde & INTEL_PTE_VALID0x00000001) {
1505 spte = (pt_entry_t *)ptetokv(*pde)(((vm_offset_t)(({ pt_entry_t __a = (pt_entry_t) ((*pde) &
0x00007ffffffff000ULL); ((((unsigned long *) 0xF5800000UL)[__a
>> 12]) << 12) | (__a & ((1 << 12)-1))
; })) + 0xC0000000UL))
;
1506 spte = &spte[ptenum(s)(((s) >> 12) & 0x1ff)];
1507 epte = &spte[intel_btop(l-s)(((unsigned long)(l-s)) >> 12)];
1508 pmap_remove_range(map, s, spte, epte);
1509 }
1510 s = l;
1511 pde++;
1512 }
1513 PMAP_UPDATE_TLBS(map, _s, e){ if ((map)->cpus_using) { hyp_mmuext_op_void(6); } };
1514
1515 PMAP_READ_UNLOCK(map, spl)((void)(spl));
1516 }
1517
1518/*
1519 * Routine: pmap_page_protect
1520 *
1521 * Function:
1522 * Lower the permission for all mappings to a given
1523 * page.
1524 */
/*
 * pmap_page_protect: lower the permission of every existing mapping of
 * physical page `phys` to at most `prot`.  READ / READ|EXECUTE requests
 * write-protect each mapping; VM_PROT_ALL is a no-op; anything else
 * removes the mappings entirely.  Mappings in the kernel pmap are always
 * removed rather than write-protected, since i386 ignores the write bit
 * in kernel mode (see the `remove || pmap == kernel_pmap` test below).
 * Runs with the pmap system write-locked, so the pv list needs no lock.
 */
1525void pmap_page_protect(phys, prot)
1526 vm_offset_t phys;
1527 vm_prot_t prot;
1528{
1529 pv_entry_t pv_h, prev;
1530 pv_entry_t pv_e;
1531 pt_entry_t *pte;
1532 int pai;
1533 pmap_t pmap;
1534 int spl;
	/* TRUE => remove mappings outright, FALSE => only write-protect them. */
1535 boolean_t remove;
1536
1537 assert(phys != vm_page_fictitious_addr)({ if (!(phys != vm_page_fictitious_addr)) Assert("phys != vm_page_fictitious_addr"
, "../i386/intel/pmap.c", 1537); })
;
1538 if (!valid_page(phys)(pmap_initialized && pmap_valid_page(phys))) {
1539 /*
1540 * Not a managed page.
1541 */
1542 return;
1543 }
1544
1545 /*
1546 * Determine the new protection.
1547 */
1548 switch (prot) {
1549 case VM_PROT_READ((vm_prot_t) 0x01):
1550 case VM_PROT_READ((vm_prot_t) 0x01)|VM_PROT_EXECUTE((vm_prot_t) 0x04):
1551 remove = FALSE((boolean_t) 0);
1552 break;
1553 case VM_PROT_ALL(((vm_prot_t) 0x01)|((vm_prot_t) 0x02)|((vm_prot_t) 0x04)):
1554 return; /* nothing to do */
1555 default:
1556 remove = TRUE((boolean_t) 1);
1557 break;
1558 }
1559
1560 /*
1561 * Lock the pmap system first, since we will be changing
1562 * several pmaps.
1563 */
1564
1565 PMAP_WRITE_LOCK(spl)((void)(spl));
1566
1567 pai = pa_index(phys)((((vm_size_t)(phys - phys_first_addr)) >> 12));
1568 pv_h = pai_to_pvh(pai)(&pv_head_table[pai]);
1569
1570 /*
1571 * Walk down PV list, changing or removing all mappings.
1572 * We do not have to lock the pv_list because we have
1573 * the entire pmap system locked.
1574 */
1575 if (pv_h->pmap != PMAP_NULL((pmap_t) 0)) {
1576
1577 prev = pv_e = pv_h;
1578 do {
1579 vm_offset_t va;
1580
1581 pmap = pv_e->pmap;
1582 /*
1583 * Lock the pmap to block pmap_extract and similar routines.
1584 */
1585 simple_lock(&pmap->lock);
1586
1587 va = pv_e->va;
1588 pte = pmap_pte(pmap, va);
1589
1590 /*
1591 * Consistency checks.
1592 */
1593 /* assert(*pte & INTEL_PTE_VALID); XXX */
1594 /* assert(pte_to_phys(*pte) == phys); */
1595
1596 /*
1597 * Remove the mapping if new protection is NONE
1598 * or if write-protecting a kernel mapping.
1599 */
1600 if (remove || pmap == kernel_pmap) {
1601 /*
1602 * Remove the mapping, collecting any modify bits.
1603 */
1604 if (*pte & INTEL_PTE_WIRED0x00000200)
1605 panic("pmap_remove_all removing a wired page");
1606
1607 {
1608 int i = ptes_per_vm_page1;
1609
	/* Fold hardware dirty/referenced bits into the software
	 * attribute array before zeroing the PTE(s). */
1610 do {
1611 pmap_phys_attributes[pai] |=
1612 *pte & (PHYS_MODIFIED0x00000040|PHYS_REFERENCED0x00000020);
1613#ifdef MACH_PV_PAGETABLES
1614 if (!hyp_mmu_update_pte(kv_to_ma(pte++)({ vm_offset_t __a = (vm_offset_t) (((vm_offset_t)(pte++) - 0xC0000000UL
)); (((pt_entry_t) ((mfn_list[(((vm_size_t)(__a)) >> 12
)]))) << 12) | (__a & ((1 << 12)-1)); })
, 0))
1615 panic("%s:%d could not clear pte %p\n",__FILE__"../i386/intel/pmap.c",__LINE__1615,pte-1);
1616#else /* MACH_PV_PAGETABLES */
1617 *pte++ = 0;
1618#endif /* MACH_PV_PAGETABLES */
1619 } while (--i > 0);
1620 }
1621
1622 pmap->stats.resident_count--;
1623
1624 /*
1625 * Remove the pv_entry.
1626 */
1627 if (pv_e == pv_h) {
1628 /*
1629 * Fix up head later.
1630 */
	/* The head entry is embedded in pv_head_table and cannot be
	 * freed; mark it empty now and splice in a successor after
	 * the walk (see the pv_h->pmap == PMAP_NULL block below). */
1631 pv_h->pmap = PMAP_NULL((pmap_t) 0);
1632 }
1633 else {
1634 /*
1635 * Delete this entry.
1636 */
1637 prev->next = pv_e->next;
1638 PV_FREE(pv_e){ ; pv_e->next = pv_free_list; pv_free_list = pv_e; ((void
)(&pv_free_list_lock)); }
;
1639 }
1640 }
1641 else {
1642 /*
1643 * Write-protect.
1644 */
1645 int i = ptes_per_vm_page1;
1646
1647 do {
1648#ifdef MACH_PV_PAGETABLES
1649 if (!hyp_mmu_update_pte(kv_to_ma(pte)({ vm_offset_t __a = (vm_offset_t) (((vm_offset_t)(pte) - 0xC0000000UL
)); (((pt_entry_t) ((mfn_list[(((vm_size_t)(__a)) >> 12
)]))) << 12) | (__a & ((1 << 12)-1)); })
, *pte & ~INTEL_PTE_WRITE0x00000002))
1650 panic("%s:%d could not disable write on pte %p\n",__FILE__"../i386/intel/pmap.c",__LINE__1650,pte);
1651#else /* MACH_PV_PAGETABLES */
1652 *pte &= ~INTEL_PTE_WRITE0x00000002;
1653#endif /* MACH_PV_PAGETABLES */
1654 pte++;
1655 } while (--i > 0);
1656
1657 /*
1658 * Advance prev.
1659 */
1660 prev = pv_e;
1661 }
1662 PMAP_UPDATE_TLBS(pmap, va, va + PAGE_SIZE){ if ((pmap)->cpus_using) { hyp_mmuext_op_void(6); } };
1663
1664 simple_unlock(&pmap->lock)((void)(&pmap->lock));
1665
1666 } while ((pv_e = prev->next) != PV_ENTRY_NULL((pv_entry_t) 0));
1667
1668 /*
1669 * If pv_head mapping was removed, fix it up.
1670 */
1671 if (pv_h->pmap == PMAP_NULL((pmap_t) 0)) {
1672 pv_e = pv_h->next;
1673 if (pv_e != PV_ENTRY_NULL((pv_entry_t) 0)) {
	/* Copy the first chained entry into the embedded head and
	 * free the now-duplicate chained entry. */
1674 *pv_h = *pv_e;
1675 PV_FREE(pv_e){ ; pv_e->next = pv_free_list; pv_free_list = pv_e; ((void
)(&pv_free_list_lock)); }
;
1676 }
1677 }
1678 }
1679
1680 PMAP_WRITE_UNLOCK(spl)((void)(spl));
1681}
1682
1683/*
1684 * Set the physical protection on the
1685 * specified range of this map as requested.
1686 * Will not increase permissions.
1687 */
/*
 * pmap_protect: reduce the protection on the virtual range [s, e) of
 * `map` to `prot`.  Read / read-execute requests write-protect in place;
 * read-write and all-access requests are no-ops (this routine never
 * increases permissions); anything weaker removes the range.  The whole
 * kernel pmap case is also handled by removal, because i386 ignores the
 * write bit in kernel mode.  Under Xen (MACH_PV_PAGETABLES) PTE writes
 * are batched into `update[]` and flushed via hyp_mmu_update.
 */
1688void pmap_protect(map, s, e, prot)
1689 pmap_t map;
1690 vm_offset_t s, e;
1691 vm_prot_t prot;
1692{
1693 pt_entry_t *pde;
1694 pt_entry_t *spte, *epte;
1695 vm_offset_t l;
1696 int spl;
	/* Keep the original start so the final TLB flush covers the whole range. */
1697 vm_offset_t _s = s;
1698
1699 if (map == PMAP_NULL((pmap_t) 0))
1700 return;
1701
1702 /*
1703 * Determine the new protection.
1704 */
1705 switch (prot) {
1706 case VM_PROT_READ((vm_prot_t) 0x01):
1707 case VM_PROT_READ((vm_prot_t) 0x01)|VM_PROT_EXECUTE((vm_prot_t) 0x04):
1708 break;
1709 case VM_PROT_READ((vm_prot_t) 0x01)|VM_PROT_WRITE((vm_prot_t) 0x02):
1710 case VM_PROT_ALL(((vm_prot_t) 0x01)|((vm_prot_t) 0x02)|((vm_prot_t) 0x04)):
1711 return; /* nothing to do */
1712 default:
1713 pmap_remove(map, s, e);
1714 return;
1715 }
1716
1717 /*
1718 * If write-protecting in the kernel pmap,
1719 * remove the mappings; the i386 ignores
1720 * the write-permission bit in kernel mode.
1721 *
1722 * XXX should be #if'd for i386
1723 */
1724 if (map == kernel_pmap) {
1725 pmap_remove(map, s, e);
1726 return;
1727 }
1728
1729 SPLVM(spl)((void)(spl));
1730 simple_lock(&map->lock);
1731
1732 pde = pmap_pde(map, s);
1733 while (s < e) {
	/* Round s up to the next PDE boundary, but never past e. */
1734 l = (s + PDE_MAPPED_SIZE(((vm_offset_t)(1) << 21))) & ~(PDE_MAPPED_SIZE(((vm_offset_t)(1) << 21))-1);
1735 if (l > e)
1736 l = e;
1737 if (*pde & INTEL_PTE_VALID0x00000001) {
1738 spte = (pt_entry_t *)ptetokv(*pde)(((vm_offset_t)(({ pt_entry_t __a = (pt_entry_t) ((*pde) &
0x00007ffffffff000ULL); ((((unsigned long *) 0xF5800000UL)[__a
>> 12]) << 12) | (__a & ((1 << 12)-1))
; })) + 0xC0000000UL))
;
1739 spte = &spte[ptenum(s)(((s) >> 12) & 0x1ff)];
1740 epte = &spte[intel_btop(l-s)(((unsigned long)(l-s)) >> 12)];
1741
1742#ifdef MACH_PV_PAGETABLES
1743 int n, i = 0;
1744 struct mmu_update update[HYP_BATCH_MMU_UPDATES256];
1745#endif /* MACH_PV_PAGETABLES */
1746
1747 while (spte < epte) {
1748 if (*spte & INTEL_PTE_VALID0x00000001) {
1749#ifdef MACH_PV_PAGETABLES
1750 update[i].ptr = kv_to_ma(spte)({ vm_offset_t __a = (vm_offset_t) (((vm_offset_t)(spte) - 0xC0000000UL
)); (((pt_entry_t) ((mfn_list[(((vm_size_t)(__a)) >> 12
)]))) << 12) | (__a & ((1 << 12)-1)); })
;
1751 update[i].val = *spte & ~INTEL_PTE_WRITE0x00000002;
1752 i++;
	/* Batch is full: flush to the hypervisor and restart it. */
1753 if (i == HYP_BATCH_MMU_UPDATES256) {
1754 hyp_mmu_update(kvtolin(&update)((vm_offset_t)(&update) - 0xC0000000UL + ((0xc0000000UL))
)
, i, kvtolin(&n)((vm_offset_t)(&n) - 0xC0000000UL + ((0xc0000000UL))), DOMID_SELF(0x7FF0U));
1755 if (n != i)
1756 panic("couldn't pmap_protect\n");
1757 i = 0;
1758 }
1759#else /* MACH_PV_PAGETABLES */
1760 *spte &= ~INTEL_PTE_WRITE0x00000002;
1761#endif /* MACH_PV_PAGETABLES */
1762 }
1763 spte++;
1764 }
1765#ifdef MACH_PV_PAGETABLES
	/* Flush whatever remains in the (never-overfull) batch. */
1766 if (i > HYP_BATCH_MMU_UPDATES256)
1767 panic("overflowed array in pmap_protect");
1768 hyp_mmu_update(kvtolin(&update)((vm_offset_t)(&update) - 0xC0000000UL + ((0xc0000000UL))
)
, i, kvtolin(&n)((vm_offset_t)(&n) - 0xC0000000UL + ((0xc0000000UL))), DOMID_SELF(0x7FF0U));
1769 if (n != i)
1770 panic("couldn't pmap_protect\n");
1771#endif /* MACH_PV_PAGETABLES */
1772 }
1773 s = l;
1774 pde++;
1775 }
1776 PMAP_UPDATE_TLBS(map, _s, e){ if ((map)->cpus_using) { hyp_mmuext_op_void(6); } };
1777
1778 simple_unlock(&map->lock)((void)(&map->lock));
1779 SPLX(spl)((void)(spl));
1780}
1781
1782/*
1783 * Insert the given physical page (p) at
1784 * the specified virtual address (v) in the
1785 * target physical map with the protection requested.
1786 *
1787 * If specified, the page will be wired down, meaning
1788 * that the related pte can not be reclaimed.
1789 *
1790 * NB: This is the only routine which MAY NOT lazy-evaluate
1791 * or lose information. That is, this routine must actually
1792 * insert this page into the given map NOW.
1793 */
/*
 * pmap_enter: establish the mapping v -> pa in `pmap` with protection
 * `prot`, optionally wired.  Must not lazy-evaluate (see the header
 * comment above this routine in the file).  May unlock and re-lock the
 * pmap system to allocate a page-table page or a pv_entry; the `Retry`
 * label re-validates all state after any such unlock.  Read-only,
 * non-wired kernel mappings are refused up front (i386 ignores kernel
 * write protection) and any existing mapping at v is removed instead.
 */
1794void pmap_enter(pmap, v, pa, prot, wired)
1795 pmap_t pmap;
1796 vm_offset_t v;
1797 vm_offset_t pa;
1798 vm_prot_t prot;
1799 boolean_t wired;
1800{
1801 pt_entry_t *pte;
1802 pv_entry_t pv_h;
1803 int i, pai;
1804 pv_entry_t pv_e;
1805 pt_entry_t template;
1806 int spl;
1807 vm_offset_t old_pa;
1808
1809 assert(pa != vm_page_fictitious_addr)({ if (!(pa != vm_page_fictitious_addr)) Assert("pa != vm_page_fictitious_addr"
, "../i386/intel/pmap.c", 1809); })
;
1810if (pmap_debug) printf("pmap(%lx, %lx)\n", v, pa);
1811 if (pmap == PMAP_NULL((pmap_t) 0))
1812 return;
1813
1814#if !MACH_KDB1
1815 if (pmap == kernel_pmap && (v < kernel_virtual_start || v >= kernel_virtual_end))
1816 panic("pmap_enter(%p, %p) falls in physical memory area!\n", v, pa);
1817#endif
1818 if (pmap == kernel_pmap && (prot & VM_PROT_WRITE((vm_prot_t) 0x02)) == 0
1819 && !wired /* hack for io_wire */ ) {
1820 /*
1821 * Because the 386 ignores write protection in kernel mode,
1822 * we cannot enter a read-only kernel mapping, and must
1823 * remove an existing mapping if changing it.
1824 *
1825 * XXX should be #if'd for i386
1826 */
1827 PMAP_READ_LOCK(pmap, spl)((void)(spl));
1828
1829 pte = pmap_pte(pmap, v);
1830 if (pte != PT_ENTRY_NULL((pt_entry_t *) 0) && *pte != 0) {
1831 /*
1832 * Invalidate the translation buffer,
1833 * then remove the mapping.
1834 */
1835 pmap_remove_range(pmap, v, pte,
1836 pte + ptes_per_vm_page1);
1837 PMAP_UPDATE_TLBS(pmap, v, v + PAGE_SIZE){ if ((pmap)->cpus_using) { hyp_mmuext_op_void(6); } };
1838 }
1839 PMAP_READ_UNLOCK(pmap, spl)((void)(spl));
1840 return;
1841 }
1842
1843 /*
1844 * Must allocate a new pvlist entry while we're unlocked;
1845 * Allocating may cause pageout (which will lock the pmap system).
1846 * If we determine we need a pvlist entry, we will unlock
1847 * and allocate one. Then we will retry, throughing away
1848 * the allocated entry later (if we no longer need it).
1849 */
1850 pv_e = PV_ENTRY_NULL((pv_entry_t) 0);
1851Retry:
1852 PMAP_READ_LOCK(pmap, spl)((void)(spl));
1853
1854 /*
1855 * Expand pmap to include this pte. Assume that
1856 * pmap is always expanded to include enough hardware
1857 * pages to map one VM page.
1858 */
1859
1860 while ((pte = pmap_pte(pmap, v)) == PT_ENTRY_NULL((pt_entry_t *) 0)) {
1861 /*
1862 * Need to allocate a new page-table page.
1863 */
1864 vm_offset_t ptp;
1865 pt_entry_t *pdp;
1866 int i;
1867
1868 if (pmap == kernel_pmap) {
1869 /*
1870 * Would have to enter the new page-table page in
1871 * EVERY pmap.
1872 */
1873 panic("pmap_expand kernel pmap to %#x", v);
1874 }
1875
1876 /*
1877 * Unlock the pmap and allocate a new page-table page.
1878 */
1879 PMAP_READ_UNLOCK(pmap, spl)((void)(spl));
1880
1881 ptp = phystokv(pmap_page_table_page_alloc())((vm_offset_t)(pmap_page_table_page_alloc()) + 0xC0000000UL);
1882
1883 /*
1884 * Re-lock the pmap and check that another thread has
1885 * not already allocated the page-table page. If it
1886 * has, discard the new page-table page (and try
1887 * again to make sure).
1888 */
1889 PMAP_READ_LOCK(pmap, spl)((void)(spl));
1890
1891 if (pmap_pte(pmap, v) != PT_ENTRY_NULL((pt_entry_t *) 0)) {
1892 /*
1893 * Oops...
1894 */
1895 PMAP_READ_UNLOCK(pmap, spl)((void)(spl));
1896 pmap_page_table_page_dealloc(kvtophys(ptp));
1897 PMAP_READ_LOCK(pmap, spl)((void)(spl));
1898 continue;
1899 }
1900
1901 /*
1902 * Enter the new page table page in the page directory.
1903 */
1904 i = ptes_per_vm_page1;
1905 /*XX pdp = &pmap->dirbase[pdenum(v) & ~(i-1)];*/
1906 pdp = pmap_pde(pmap, v);
1907 do {
1908#ifdef MACH_PV_PAGETABLES
	/* Xen requires page-table pages to be read-only and pinned
	 * before they may be referenced from a page directory. */
1909 pmap_set_page_readonly((void *) ptp);
1910 if (!hyp_mmuext_op_mfn (MMUEXT_PIN_L1_TABLE0, kv_to_mfn(ptp)((mfn_list[(((vm_size_t)(((vm_offset_t)(ptp) - 0xC0000000UL))
) >> 12)]))
))
1911 panic("couldn't pin page %p(%p)\n",ptp,(vm_offset_t) kv_to_ma(ptp)({ vm_offset_t __a = (vm_offset_t) (((vm_offset_t)(ptp) - 0xC0000000UL
)); (((pt_entry_t) ((mfn_list[(((vm_size_t)(__a)) >> 12
)]))) << 12) | (__a & ((1 << 12)-1)); })
);
1912 if (!hyp_mmu_update_pte(pa_to_ma(kvtophys((vm_offset_t)pdp))({ vm_offset_t __a = (vm_offset_t) (kvtophys((vm_offset_t)pdp
)); (((pt_entry_t) ((mfn_list[(((vm_size_t)(__a)) >> 12
)]))) << 12) | (__a & ((1 << 12)-1)); })
,
1913 pa_to_pte(pa_to_ma(kvtophys(ptp)))((({ vm_offset_t __a = (vm_offset_t) (kvtophys(ptp)); (((pt_entry_t
) ((mfn_list[(((vm_size_t)(__a)) >> 12)]))) << 12
) | (__a & ((1 << 12)-1)); })) & 0x00007ffffffff000ULL
)
| INTEL_PTE_VALID0x00000001
1914 | INTEL_PTE_USER0x00000004
1915 | INTEL_PTE_WRITE0x00000002))
1916 panic("%s:%d could not set pde %p(%p,%p) to %p(%p,%p) %p\n",__FILE__"../i386/intel/pmap.c",__LINE__1916, pdp, kvtophys((vm_offset_t)pdp), (vm_offset_t) pa_to_ma(kvtophys((vm_offset_t)pdp))({ vm_offset_t __a = (vm_offset_t) (kvtophys((vm_offset_t)pdp
)); (((pt_entry_t) ((mfn_list[(((vm_size_t)(__a)) >> 12
)]))) << 12) | (__a & ((1 << 12)-1)); })
, ptp, kvtophys(ptp), (vm_offset_t) pa_to_ma(kvtophys(ptp))({ vm_offset_t __a = (vm_offset_t) (kvtophys(ptp)); (((pt_entry_t
) ((mfn_list[(((vm_size_t)(__a)) >> 12)]))) << 12
) | (__a & ((1 << 12)-1)); })
, (vm_offset_t) pa_to_pte(kv_to_ma(ptp))((({ vm_offset_t __a = (vm_offset_t) (((vm_offset_t)(ptp) - 0xC0000000UL
)); (((pt_entry_t) ((mfn_list[(((vm_size_t)(__a)) >> 12
)]))) << 12) | (__a & ((1 << 12)-1)); })) &
0x00007ffffffff000ULL)
);
1917#else /* MACH_PV_PAGETABLES */
1918 *pdp = pa_to_pte(kvtophys(ptp))((kvtophys(ptp)) & 0x00007ffffffff000ULL) | INTEL_PTE_VALID0x00000001
1919 | INTEL_PTE_USER0x00000004
1920 | INTEL_PTE_WRITE0x00000002;
1921#endif /* MACH_PV_PAGETABLES */
1922 pdp++;
1923 ptp += INTEL_PGBYTES4096;
1924 } while (--i > 0);
1925
1926 /*
1927 * Now, get the address of the page-table entry.
1928 */
1929 continue;
1930 }
1931
1932 /*
1933 * Special case if the physical page is already mapped
1934 * at this address.
1935 */
1936 old_pa = pte_to_pa(*pte)({ pt_entry_t __a = (pt_entry_t) ((*pte) & 0x00007ffffffff000ULL
); ((((unsigned long *) 0xF5800000UL)[__a >> 12]) <<
12) | (__a & ((1 << 12)-1)); })
;
	/* Fast path: same physical page already mapped at v — just
	 * rewrite the PTE attributes (wiring/protection), no pv work. */
1937 if (*pte && old_pa == pa) {
1938 /*
1939 * May be changing its wired attribute or protection
1940 */
1941
1942 if (wired && !(*pte & INTEL_PTE_WIRED0x00000200))
1943 pmap->stats.wired_count++;
1944 else if (!wired && (*pte & INTEL_PTE_WIRED0x00000200))
1945 pmap->stats.wired_count--;
1946
1947 template = pa_to_pte(pa)((pa) & 0x00007ffffffff000ULL) | INTEL_PTE_VALID0x00000001;
1948 if (pmap != kernel_pmap)
1949 template |= INTEL_PTE_USER0x00000004;
1950 if (prot & VM_PROT_WRITE((vm_prot_t) 0x02))
1951 template |= INTEL_PTE_WRITE0x00000002;
1952 if (machine_slot[cpu_number()(0)].cpu_type >= CPU_TYPE_I486((cpu_type_t) 17)
1953 && pa >= phys_last_addr)
1954 template |= INTEL_PTE_NCACHE0x00000010|INTEL_PTE_WTHRU0x00000008;
1955 if (wired)
1956 template |= INTEL_PTE_WIRED0x00000200;
1957 i = ptes_per_vm_page1;
1958 do {
	/* Preserve the hardware modify bit across the rewrite. */
1959 if (*pte & INTEL_PTE_MOD0x00000040)
1960 template |= INTEL_PTE_MOD0x00000040;
1961#ifdef MACH_PV_PAGETABLES
1962 if (!hyp_mmu_update_pte(kv_to_ma(pte)({ vm_offset_t __a = (vm_offset_t) (((vm_offset_t)(pte) - 0xC0000000UL
)); (((pt_entry_t) ((mfn_list[(((vm_size_t)(__a)) >> 12
)]))) << 12) | (__a & ((1 << 12)-1)); })
, pa_to_ma(template)({ vm_offset_t __a = (vm_offset_t) (template); (((pt_entry_t)
((mfn_list[(((vm_size_t)(__a)) >> 12)]))) << 12)
| (__a & ((1 << 12)-1)); })
))
1963 panic("%s:%d could not set pte %p to %p\n",__FILE__"../i386/intel/pmap.c",__LINE__1963,pte,template);
1964#else /* MACH_PV_PAGETABLES */
1965 WRITE_PTE(pte, template)*(pte) = template?({ vm_offset_t __a = (vm_offset_t) (template
); (((pt_entry_t) ((mfn_list[(((vm_size_t)(__a)) >> 12)
]))) << 12) | (__a & ((1 << 12)-1)); }):0;
1966#endif /* MACH_PV_PAGETABLES */
1967 pte++;
1968 pte_increment_pa(template)((template) += 0xfff +1);
1969 } while (--i > 0);
1970 PMAP_UPDATE_TLBS(pmap, v, v + PAGE_SIZE){ if ((pmap)->cpus_using) { hyp_mmuext_op_void(6); } };
1971 }
	/* Slow path: v is unmapped or maps a different page — tear down
	 * any old mapping, link the new one into the pv list, then write
	 * the PTE(s) from the template. */
1972 else {
1973
1974 /*
1975 * Remove old mapping from the PV list if necessary.
1976 */
1977 if (*pte) {
1978 /*
1979 * Don't free the pte page if removing last
1980 * mapping - we will immediately replace it.
1981 */
1982 pmap_remove_range(pmap, v, pte,
1983 pte + ptes_per_vm_page1);
1984 PMAP_UPDATE_TLBS(pmap, v, v + PAGE_SIZE){ if ((pmap)->cpus_using) { hyp_mmuext_op_void(6); } };
1985 }
1986
1987 if (valid_page(pa)(pmap_initialized && pmap_valid_page(pa))) {
1988
1989 /*
1990 * Enter the mapping in the PV list for this
1991 * physical page.
1992 */
1993
1994 pai = pa_index(pa)((((vm_size_t)(pa - phys_first_addr)) >> 12));
1995 LOCK_PVH(pai);
1996 pv_h = pai_to_pvh(pai)(&pv_head_table[pai]);
1997
1998 if (pv_h->pmap == PMAP_NULL((pmap_t) 0)) {
1999 /*
2000 * No mappings yet
2001 */
2002 pv_h->va = v;
2003 pv_h->pmap = pmap;
2004 pv_h->next = PV_ENTRY_NULL((pv_entry_t) 0);
2005 }
2006 else {
2007#if DEBUG
2008 {
2009 /* check that this mapping is not already there */
2010 pv_entry_t e = pv_h;
2011 while (e != PV_ENTRY_NULL((pv_entry_t) 0)) {
2012 if (e->pmap == pmap && e->va == v)
2013 panic("pmap_enter: already in pv_list");
2014 e = e->next;
2015 }
2016 }
2017#endif /* DEBUG */
2018
2019 /*
2020 * Add new pv_entry after header.
2021 */
2022 if (pv_e == PV_ENTRY_NULL((pv_entry_t) 0)) {
2023 PV_ALLOC(pv_e){ ; if ((pv_e = pv_free_list) != 0) { pv_free_list = pv_e->
next; } ((void)(&pv_free_list_lock)); }
;
2024 if (pv_e == PV_ENTRY_NULL((pv_entry_t) 0)) {
2025 UNLOCK_PVH(pai);
2026 PMAP_READ_UNLOCK(pmap, spl)((void)(spl));
2027
2028 /*
2029 * Refill from cache.
2030 */
	/* kmem_cache_alloc may sleep; all locks were dropped above,
	 * so re-validate everything from Retry. */
2031 pv_e = (pv_entry_t) kmem_cache_alloc(&pv_list_cache);
2032 goto Retry;
2033 }
2034 }
2035 pv_e->va = v;
2036 pv_e->pmap = pmap;
2037 pv_e->next = pv_h->next;
2038 pv_h->next = pv_e;
2039 /*
2040 * Remember that we used the pvlist entry.
2041 */
2042 pv_e = PV_ENTRY_NULL((pv_entry_t) 0);
2043 }
2044 UNLOCK_PVH(pai);
2045 }
2046
2047 /*
2048 * And count the mapping.
2049 */
2050
2051 pmap->stats.resident_count++;
2052 if (wired)
2053 pmap->stats.wired_count++;
2054
2055 /*
2056 * Build a template to speed up entering -
2057 * only the pfn changes.
2058 */
2059 template = pa_to_pte(pa)((pa) & 0x00007ffffffff000ULL) | INTEL_PTE_VALID0x00000001;
2060 if (pmap != kernel_pmap)
2061 template |= INTEL_PTE_USER0x00000004;
2062 if (prot & VM_PROT_WRITE((vm_prot_t) 0x02))
2063 template |= INTEL_PTE_WRITE0x00000002;
2064 if (machine_slot[cpu_number()(0)].cpu_type >= CPU_TYPE_I486((cpu_type_t) 17)
2065 && pa >= phys_last_addr)
2066 template |= INTEL_PTE_NCACHE0x00000010|INTEL_PTE_WTHRU0x00000008;
2067 if (wired)
2068 template |= INTEL_PTE_WIRED0x00000200;
2069 i = ptes_per_vm_page1;
2070 do {
2071#ifdef MACH_PV_PAGETABLES
2072 if (!(hyp_mmu_update_pte(kv_to_ma(pte)({ vm_offset_t __a = (vm_offset_t) (((vm_offset_t)(pte) - 0xC0000000UL
)); (((pt_entry_t) ((mfn_list[(((vm_size_t)(__a)) >> 12
)]))) << 12) | (__a & ((1 << 12)-1)); })
, pa_to_ma(template)({ vm_offset_t __a = (vm_offset_t) (template); (((pt_entry_t)
((mfn_list[(((vm_size_t)(__a)) >> 12)]))) << 12)
| (__a & ((1 << 12)-1)); })
)))
2073 panic("%s:%d could not set pte %p to %p\n",__FILE__"../i386/intel/pmap.c",__LINE__2073,pte,template);
2074#else /* MACH_PV_PAGETABLES */
2075 WRITE_PTE(pte, template)*(pte) = template?({ vm_offset_t __a = (vm_offset_t) (template
); (((pt_entry_t) ((mfn_list[(((vm_size_t)(__a)) >> 12)
]))) << 12) | (__a & ((1 << 12)-1)); }):0;
2076#endif /* MACH_PV_PAGETABLES */
2077 pte++;
2078 pte_increment_pa(template)((template) += 0xfff +1);
2079 } while (--i > 0);
2080 }
2081
	/* A pre-allocated pv_entry that ended up unused goes back to the
	 * free list. */
2082 if (pv_e != PV_ENTRY_NULL((pv_entry_t) 0)) {
2083 PV_FREE(pv_e){ ; pv_e->next = pv_free_list; pv_free_list = pv_e; ((void
)(&pv_free_list_lock)); }
;
2084 }
2085
2086 PMAP_READ_UNLOCK(pmap, spl)((void)(spl));
2087}
2088
2089/*
2090 * Routine: pmap_change_wiring
2091 * Function: Change the wiring attribute for a map/virtual-address
2092 * pair.
2093 * In/out conditions:
2094 * The mapping must already exist in the pmap.
2095 */
/*
 * pmap_change_wiring: set or clear the wired attribute of the existing
 * mapping at `v` in `map`, keeping stats.wired_count in sync.  Panics if
 * no PTE exists for v (the mapping must already be present).  A no-op
 * when the requested state already matches the PTE's wired bit.
 */
2096void pmap_change_wiring(map, v, wired)
2097 pmap_t map;
2098 vm_offset_t v;
2099 boolean_t wired;
2100{
2101 pt_entry_t *pte;
2102 int i;
2103 int spl;
2104
2105 /*
2106 * We must grab the pmap system lock because we may
2107 * change a pte_page queue.
2108 */
2109 PMAP_READ_LOCK(map, spl)((void)(spl));
2110
2111 if ((pte = pmap_pte(map, v)) == PT_ENTRY_NULL((pt_entry_t *) 0))
2112 panic("pmap_change_wiring: pte missing");
2113
2114 if (wired && !(*pte & INTEL_PTE_WIRED0x00000200)) {
2115 /*
2116 * wiring down mapping
2117 */
2118 map->stats.wired_count++;
2119 i = ptes_per_vm_page1;
2120 do {
2121 *pte++ |= INTEL_PTE_WIRED0x00000200;
2122 } while (--i > 0);
2123 }
2124 else if (!wired && (*pte & INTEL_PTE_WIRED0x00000200)) {
2125 /*
2126 * unwiring mapping
2127 */
2128 map->stats.wired_count--;
2129 i = ptes_per_vm_page1;
2130 do {
2131#ifdef MACH_PV_PAGETABLES
	/* Under Xen the PTE must be rewritten via hypercall, even though
	 * the wired bit is software-only. */
2132 if (!(hyp_mmu_update_pte(kv_to_ma(pte)({ vm_offset_t __a = (vm_offset_t) (((vm_offset_t)(pte) - 0xC0000000UL
)); (((pt_entry_t) ((mfn_list[(((vm_size_t)(__a)) >> 12
)]))) << 12) | (__a & ((1 << 12)-1)); })
, *pte & ~INTEL_PTE_WIRED0x00000200)))
2133 panic("%s:%d could not wire down pte %p\n",__FILE__"../i386/intel/pmap.c",__LINE__2133,pte);
2134#else /* MACH_PV_PAGETABLES */
2135 *pte &= ~INTEL_PTE_WIRED0x00000200;
2136#endif /* MACH_PV_PAGETABLES */
2137 pte++;
2138 } while (--i > 0);
2139 }
2140
2141 PMAP_READ_UNLOCK(map, spl)((void)(spl));
2142}
2143
2144/*
2145 * Routine: pmap_extract
2146 * Function:
2147 * Extract the physical page address associated
2148 * with the given map/virtual_address pair.
2149 */
2150
/*
 * pmap_extract: return the physical address mapped at `va` in `pmap`,
 * including the page-offset bits of va, or 0 when no PTE exists or the
 * PTE is not valid.  NOTE(review): 0 doubles as the "no mapping"
 * sentinel here — callers must not rely on it to distinguish an actual
 * mapping of physical page 0.
 */
2151vm_offset_t pmap_extract(pmap, va)
2152 pmap_t pmap;
2153 vm_offset_t va;
2154{
2155 pt_entry_t *pte;
2156 vm_offset_t pa;
2157 int spl;
2158
2159 SPLVM(spl)((void)(spl));
2160 simple_lock(&pmap->lock);
2161 if ((pte = pmap_pte(pmap, va)) == PT_ENTRY_NULL((pt_entry_t *) 0))
2162 pa = (vm_offset_t) 0;
2163 else if (!(*pte & INTEL_PTE_VALID0x00000001))
2164 pa = (vm_offset_t) 0;
2165 else
2166 pa = pte_to_pa(*pte)({ pt_entry_t __a = (pt_entry_t) ((*pte) & 0x00007ffffffff000ULL
); ((((unsigned long *) 0xF5800000UL)[__a >> 12]) <<
12) | (__a & ((1 << 12)-1)); })
+ (va & INTEL_OFFMASK0xfff);
2167 simple_unlock(&pmap->lock)((void)(&pmap->lock));
2168 SPLX(spl)((void)(spl));
2169 return(pa);
2170}
2171
2172/*
2173 * Copy the range specified by src_addr/len
2174 * from the source map to the range dst_addr/len
2175 * in the destination map.
2176 *
2177 * This routine is only advisory and need not do anything.
2178 */
/* NOTE: compiled out (#if 0).  pmap_copy is advisory (see the comment
 * above); this empty definition documents the interface only. */
2179#if 0
2180void pmap_copy(dst_pmap, src_pmap, dst_addr, len, src_addr)
2181 pmap_t dst_pmap;
2182 pmap_t src_pmap;
2183 vm_offset_t dst_addr;
2184 vm_size_t len;
2185 vm_offset_t src_addr;
2186{
2187}
2188#endif /* 0 */
2189
2190/*
2191 * Routine: pmap_collect
2192 * Function:
2193 * Garbage collects the physical map system for
2194 * pages which are no longer used.
2195 * Success need not be guaranteed -- that is, there
2196 * may well be pages which are not referenced, but
2197 * others may be collected.
2198 * Usage:
2199 * Called by the pageout daemon when pages are scarce.
2200 */
/*
 * pmap_collect: reclaim page-table pages of user pmap `p` that contain
 * no wired mappings.  For each valid user-range PDE: scan its PTE page
 * for wired entries; if none, remove the covered mappings, clear the
 * PDE(s), and free the PTE page back to the VM system.  The pmap lock
 * is dropped around the page free and re-taken.  Kernel pmap and null
 * pmap are rejected up front.
 */
2201void pmap_collect(p)
2202 pmap_t p;
2203{
2204 pt_entry_t *pdp, *ptp;
2205 pt_entry_t *eptp;
2206 vm_offset_t pa;
2207 int spl, wired;
2208
2209 if (p == PMAP_NULL((pmap_t) 0))
2210 return;
2211
2212 if (p == kernel_pmap)
2213 return;
2214
2215 /*
2216 * Garbage collect map.
2217 */
2218 PMAP_READ_LOCK(p, spl)((void)(spl));
	/* Only the user part of the directory, below the kernel's linear base. */
2219 for (pdp = p->dirbase;
2220 pdp < &p->dirbase[lin2pdenum(LINEAR_MIN_KERNEL_ADDRESS)(((((0xc0000000UL))) >> 21) & 0x7ff)];
2221 pdp += ptes_per_vm_page1)
2222 {
2223 if (*pdp & INTEL_PTE_VALID0x00000001) {
2224
2225 pa = pte_to_pa(*pdp)({ pt_entry_t __a = (pt_entry_t) ((*pdp) & 0x00007ffffffff000ULL
); ((((unsigned long *) 0xF5800000UL)[__a >> 12]) <<
12) | (__a & ((1 << 12)-1)); })
;
2226 ptp = (pt_entry_t *)phystokv(pa)((vm_offset_t)(pa) + 0xC0000000UL);
2227 eptp = ptp + NPTES((((unsigned long)(1)) << 12)/sizeof(pt_entry_t))*ptes_per_vm_page1;
2228
2229 /*
2230 * If the pte page has any wired mappings, we cannot
2231 * free it.
2232 */
2233 wired = 0;
2234 {
2235 pt_entry_t *ptep;
2236 for (ptep = ptp; ptep < eptp; ptep++) {
2237 if (*ptep & INTEL_PTE_WIRED0x00000200) {
2238 wired = 1;
2239 break;
2240 }
2241 }
2242 }
2243 if (!wired) {
2244 /*
2245 * Remove the virtual addresses mapped by this pte page.
2246 */
2247 { /*XXX big hack*/
2248 vm_offset_t va = pdenum2lin(pdp - p->dirbase)((vm_offset_t)(pdp - p->dirbase) << 21);
	/* NOTE(review): p == kernel_pmap was rejected above, so this
	 * lintokv() adjustment looks unreachable here. */
2249 if (p == kernel_pmap)
2250 va = lintokv(va)((vm_offset_t)(va) - ((0xc0000000UL)) + 0xC0000000UL);
2251 pmap_remove_range(p,
2252 va,
2253 ptp,
2254 eptp);
2255 }
2256
2257 /*
2258 * Invalidate the page directory pointer.
2259 */
2260 {
2261 int i = ptes_per_vm_page1;
2262 pt_entry_t *pdep = pdp;
2263 do {
2264#ifdef MACH_PV_PAGETABLES
2265 unsigned long pte = *pdep;
2266 void *ptable = (void*) ptetokv(pte)(((vm_offset_t)(({ pt_entry_t __a = (pt_entry_t) ((pte) &
0x00007ffffffff000ULL); ((((unsigned long *) 0xF5800000UL)[__a
>> 12]) << 12) | (__a & ((1 << 12)-1))
; })) + 0xC0000000UL))
;
2267 if (!(hyp_mmu_update_pte(pa_to_ma(kvtophys((vm_offset_t)pdep++))({ vm_offset_t __a = (vm_offset_t) (kvtophys((vm_offset_t)pdep
++)); (((pt_entry_t) ((mfn_list[(((vm_size_t)(__a)) >> 12
)]))) << 12) | (__a & ((1 << 12)-1)); })
, 0)))
2268 panic("%s:%d could not clear pde %p\n",__FILE__"../i386/intel/pmap.c",__LINE__2268,pdep-1);
	/* Unpin and restore write access before the page is freed. */
2269 if (!hyp_mmuext_op_mfn (MMUEXT_UNPIN_TABLE4, kv_to_mfn(ptable)((mfn_list[(((vm_size_t)(((vm_offset_t)(ptable) - 0xC0000000UL
))) >> 12)]))
))
2270 panic("couldn't unpin page %p(%p)\n", ptable, (vm_offset_t) pa_to_ma(kvtophys((vm_offset_t)ptable))({ vm_offset_t __a = (vm_offset_t) (kvtophys((vm_offset_t)ptable
)); (((pt_entry_t) ((mfn_list[(((vm_size_t)(__a)) >> 12
)]))) << 12) | (__a & ((1 << 12)-1)); })
);
2271 pmap_set_page_readwrite(ptable);
2272#else /* MACH_PV_PAGETABLES */
2273 *pdep++ = 0;
2274#endif /* MACH_PV_PAGETABLES */
2275 } while (--i > 0);
2276 }
2277
	/* Drop the pmap lock around the VM-object work below. */
2278 PMAP_READ_UNLOCK(p, spl)((void)(spl));
2279
2280 /*
2281 * And free the pte page itself.
2282 */
2283 {
2284 vm_page_t m;
2285
2286 vm_object_lock(pmap_object);
2287 m = vm_page_lookup(pmap_object, pa);
2288 if (m == VM_PAGE_NULL((vm_page_t) 0))
2289 panic("pmap_collect: pte page not in object");
2290 vm_page_lock_queues();
2291 vm_page_free(m);
2292 inuse_ptepages_count--;
2293 vm_page_unlock_queues()((void)(&vm_page_queue_lock));
2294 vm_object_unlock(pmap_object)((void)(&(pmap_object)->Lock));
2295 }
2296
2297 PMAP_READ_LOCK(p, spl)((void)(spl));
2298 }
2299 }
2300 }
2301 PMAP_UPDATE_TLBS(p, VM_MIN_ADDRESS, VM_MAX_ADDRESS){ if ((p)->cpus_using) { hyp_mmuext_op_void(6); } };
2302
2303 PMAP_READ_UNLOCK(p, spl)((void)(spl));
2304 return;
2305
2306}
2307
2308/*
2309 * Routine: pmap_activate
2310 * Function:
2311 * Binds the given physical map to the given
2312 * processor, and returns a hardware map description.
2313 */
/* NOTE: compiled out (#if 0) — pmap_activate is provided as a macro
 * (PMAP_ACTIVATE) per the comment above; kept for documentation. */
2314#if 0
2315void pmap_activate(my_pmap, th, my_cpu)
2316 register pmap_t my_pmap;
2317 thread_t th;
2318 int my_cpu;
2319{
2320 PMAP_ACTIVATE(my_pmap, th, my_cpu);
2321}
2322#endif /* 0 */
2323
2324/*
2325 * Routine: pmap_deactivate
2326 * Function:
2327 * Indicates that the given physical map is no longer
2328 * in use on the specified processor. (This is a macro
2329 * in pmap.h)
2330 */
/* NOTE: compiled out (#if 0) — pmap_deactivate is a macro in pmap.h
 * per the comment above; kept for documentation. */
2331#if 0
2332void pmap_deactivate(pmap, th, which_cpu)
2333 pmap_t pmap;
2334 thread_t th;
2335 int which_cpu;
2336{
2337 PMAP_DEACTIVATE(pmap, th, which_cpu);
2338}
2339#endif /* 0 */
2340
2341/*
2342 * Routine: pmap_kernel
2343 * Function:
2344 * Returns the physical map handle for the kernel.
2345 */
/* NOTE: compiled out (#if 0) — pmap_kernel() is the macro expansion
 * shown fused with the declaration below; kept for documentation. */
2346#if 0
2347pmap_t pmap_kernel()(kernel_pmap)
2348{
2349 return (kernel_pmap);
2350}
2351#endif /* 0 */
2352
2353/*
2354 * pmap_zero_page zeros the specified (machine independent) page.
2355 * See machine/phys.c or machine/phys.s for implementation.
2356 */
/* NOTE: compiled out (#if 0) — the live implementation is in
 * machine/phys.c or machine/phys.s per the comment above. */
2357#if 0
2358pmap_zero_page(phys)
2359 register vm_offset_t phys;
2360{
2361 register int i;
2362
2363 assert(phys != vm_page_fictitious_addr)({ if (!(phys != vm_page_fictitious_addr)) Assert("phys != vm_page_fictitious_addr"
, "../i386/intel/pmap.c", 2363); })
;
2364 i = PAGE_SIZE(1 << 12) / INTEL_PGBYTES4096;
2365 phys = intel_pfn(phys);
2366
2367 while (i--)
2368 zero_phys(phys++);
2369}
2370#endif /* 0 */
2371
2372/*
2373 * pmap_copy_page copies the specified (machine independent) page.
2374 * See machine/phys.c or machine/phys.s for implementation.
2375 */
/* NOTE: compiled out (#if 0) — the live implementation is in
 * machine/phys.c or machine/phys.s per the comment above. */
2376#if 0
2377pmap_copy_page(src, dst)
2378 vm_offset_t src, dst;
2379{
2380 int i;
2381
2382 assert(src != vm_page_fictitious_addr)({ if (!(src != vm_page_fictitious_addr)) Assert("src != vm_page_fictitious_addr"
, "../i386/intel/pmap.c", 2382); })
;
2383 assert(dst != vm_page_fictitious_addr)({ if (!(dst != vm_page_fictitious_addr)) Assert("dst != vm_page_fictitious_addr"
, "../i386/intel/pmap.c", 2383); })
;
2384 i = PAGE_SIZE(1 << 12) / INTEL_PGBYTES4096;
2385
2386 while (i--) {
2387 copy_phys(intel_pfn(src), intel_pfn(dst));
2388 src += INTEL_PGBYTES4096;
2389 dst += INTEL_PGBYTES4096;
2390 }
2391}
2392#endif /* 0 */
2393
2394/*
2395 * Routine: pmap_pageable
2396 * Function:
2397 * Make the specified pages (by pmap, offset)
2398 * pageable (or not) as requested.
2399 *
2400 * A page which is not pageable may not take
2401 * a fault; therefore, its page table entry
2402 * must remain valid for the duration.
2403 *
2404 * This routine is merely advisory; pmap_enter
2405 * will specify that these pages are to be wired
2406 * down (or not) as appropriate.
2407 */
/* Intentionally empty: pmap_pageable is advisory per the routine header
 * above — pmap_enter carries the actual wiring decision. */
2408void
2409pmap_pageable(pmap, start, end, pageable)
2410 pmap_t pmap;
2411 vm_offset_t start;
2412 vm_offset_t end;
2413 boolean_t pageable;
2414{
2415}
2416
2417/*
2418 * Clear specified attribute bits.
2419 */
/*
 * phys_attribute_clear: clear the attribute bits in `bits` (e.g. the
 * modify/reference bits) both from every PTE currently mapping the
 * managed physical page `phys` and from the software attribute array
 * pmap_phys_attributes[].  Runs with the pmap system write-locked, so
 * the pv list may be walked without its own lock.
 */
2420void
2421phys_attribute_clear(phys, bits)
2422 vm_offset_t phys;
2423 int bits;
2424{
2425 pv_entry_t pv_h;
2426 pv_entry_t pv_e;
2427 pt_entry_t *pte;
2428 int pai;
2429 pmap_t pmap;
2430 int spl;
2431
2432 assert(phys != vm_page_fictitious_addr)({ if (!(phys != vm_page_fictitious_addr)) Assert("phys != vm_page_fictitious_addr"
, "../i386/intel/pmap.c", 2432); })
;
2433 if (!valid_page(phys)(pmap_initialized && pmap_valid_page(phys))) {
2434 /*
2435 * Not a managed page.
2436 */
2437 return;
2438 }
2439
2440 /*
2441 * Lock the pmap system first, since we will be changing
2442 * several pmaps.
2443 */
2444
2445 PMAP_WRITE_LOCK(spl)((void)(spl));
2446
2447 pai = pa_index(phys)((((vm_size_t)(phys - phys_first_addr)) >> 12));
2448 pv_h = pai_to_pvh(pai)(&pv_head_table[pai]);
2449
2450 /*
2451 * Walk down PV list, clearing all modify or reference bits.
2452 * We do not have to lock the pv_list because we have
2453 * the entire pmap system locked.
2454 */
2455 if (pv_h->pmap != PMAP_NULL((pmap_t) 0)) {
2456 /*
2457 * There are some mappings.
2458 */
2459 for (pv_e = pv_h; pv_e != PV_ENTRY_NULL((pv_entry_t) 0); pv_e = pv_e->next) {
2460 vm_offset_t va;
2461
2462 pmap = pv_e->pmap;
2463 /*
2464 * Lock the pmap to block pmap_extract and similar routines.
2465 */
2466 simple_lock(&pmap->lock);
2467
2468 va = pv_e->va;
2469 pte = pmap_pte(pmap, va);
2470
2471#if 0
2472 /*
2473 * Consistency checks.
2474 */
2475 assert(*pte & INTEL_PTE_VALID)({ if (!(*pte & 0x00000001)) Assert("*pte & INTEL_PTE_VALID"
, "../i386/intel/pmap.c", 2475); })
;
2476 /* assert(pte_to_phys(*pte) == phys); */
2477#endif
2478
2479 /*
2480 * Clear modify or reference bits.
2481 */
2482 {
2483 int i = ptes_per_vm_page1;
2484 do {
2485#ifdef MACH_PV_PAGETABLES
2486 if (!(hyp_mmu_update_pte(kv_to_ma(pte)({ vm_offset_t __a = (vm_offset_t) (((vm_offset_t)(pte) - 0xC0000000UL
)); (((pt_entry_t) ((mfn_list[(((vm_size_t)(__a)) >> 12
)]))) << 12) | (__a & ((1 << 12)-1)); })
, *pte & ~bits)))
2487 panic("%s:%d could not clear bits %lx from pte %p\n",__FILE__"../i386/intel/pmap.c",__LINE__2487,bits,pte);
2488#else /* MACH_PV_PAGETABLES */
2489 *pte &= ~bits;
2490#endif /* MACH_PV_PAGETABLES */
2491 } while (--i > 0);
2492 }
	/* Flush so stale attribute bits are not re-set from the TLB. */
2493 PMAP_UPDATE_TLBS(pmap, va, va + PAGE_SIZE){ if ((pmap)->cpus_using) { hyp_mmuext_op_void(6); } };
2494 simple_unlock(&pmap->lock)((void)(&pmap->lock));
2495 }
2496 }
2497
	/* Also clear the software-maintained copy of the attributes. */
2498 pmap_phys_attributes[pai] &= ~bits;
2499
2500 PMAP_WRITE_UNLOCK(spl)((void)(spl));
2501}
2502
2503/*
2504 * Check specified attribute bits.
2505 */
/*
 * phys_attribute_test:
 *
 * Return TRUE if any of the attribute bits in `bits` (PHYS_MODIFIED /
 * PHYS_REFERENCED from the callers below) are set for the managed
 * physical page `phys` — either already cached in
 * pmap_phys_attributes[] or live in a pte that maps the page.
 * Returns FALSE otherwise, and always FALSE for unmanaged pages.
 */
2506 boolean_t
2507 phys_attribute_test(phys, bits)
2508 vm_offset_t phys;
2509 int bits;
2510 {
2511 pv_entry_t pv_h;
2512 pv_entry_t pv_e;
2513 pt_entry_t *pte;
2514 int pai;
2515 pmap_t pmap;
2516 int spl;
2517
2518 assert(phys != vm_page_fictitious_addr)({ if (!(phys != vm_page_fictitious_addr)) Assert("phys != vm_page_fictitious_addr"
, "../i386/intel/pmap.c", 2518); })
;
2519 if (!valid_page(phys)(pmap_initialized && pmap_valid_page(phys))) {
2520 /*
2521 * Not a managed page.
2522 */
2523 return (FALSE((boolean_t) 0));
2524 }
2525
2526 /*
2527 * Lock the pmap system first, since we will be checking
2528 * several pmaps.
2529 */
2530
2531 PMAP_WRITE_LOCK(spl)((void)(spl));
2532
2533 pai = pa_index(phys)((((vm_size_t)(phys - phys_first_addr)) >> 12));
2534 pv_h = pai_to_pvh(pai)(&pv_head_table[pai]);
2535
/* Fast path: the attribute was already recorded in software, so the
 * (expensive) pv-list walk below can be skipped entirely. */
2536 if (pmap_phys_attributes[pai] & bits) {
2537 PMAP_WRITE_UNLOCK(spl)((void)(spl));
2538 return (TRUE((boolean_t) 1));
2539 }
2540
2541 /*
2542 * Walk down PV list, checking all mappings.
2543 * We do not have to lock the pv_list because we have
2544 * the entire pmap system locked.
2545 */
2546 if (pv_h->pmap != PMAP_NULL((pmap_t) 0)) {
2547 /*
2548 * There are some mappings.
2549 */
2550 for (pv_e = pv_h; pv_e != PV_ENTRY_NULL((pv_entry_t) 0); pv_e = pv_e->next) {
2551
2552 pmap = pv_e->pmap;
2553 /*
2554 * Lock the pmap to block pmap_extract and similar routines.
2555 */
2556 simple_lock(&pmap->lock);
2557
2558 {
2559 vm_offset_t va;
2560
2561 va = pv_e->va;
2562 pte = pmap_pte(pmap, va);
2563
2564#if 0
2565 /*
2566 * Consistency checks.
2567 */
2568 assert(*pte & INTEL_PTE_VALID)({ if (!(*pte & 0x00000001)) Assert("*pte & INTEL_PTE_VALID"
, "../i386/intel/pmap.c", 2568); })
;
2569 /* assert(pte_to_phys(*pte) == phys); */
2570#endif
2571 }
2572
2573 /*
2574 * Check modify or reference bits.
2575 */
2576 {
2577 int i = ptes_per_vm_page1;
2578
/*
 * NOTE(review): as in phys_attribute_clear, `pte` is not advanced in
 * this loop — harmless only while ptes_per_vm_page == 1 (its expansion
 * here); confirm for configurations with several hardware ptes per page.
 */
2579 do {
2580 if (*pte & bits) {
/* Found a live mapping with the bit set: unlock in reverse
 * acquisition order and report it. */
2581 simple_unlock(&pmap->lock)((void)(&pmap->lock));
2582 PMAP_WRITE_UNLOCK(spl)((void)(spl));
2583 return (TRUE((boolean_t) 1));
2584 }
2585 } while (--i > 0);
2586 }
2587 simple_unlock(&pmap->lock)((void)(&pmap->lock));
2588 }
2589 }
2590 PMAP_WRITE_UNLOCK(spl)((void)(spl));
2591 return (FALSE((boolean_t) 0));
2592}
2593
2594/*
2595 * Clear the modify bits on the specified physical page.
2596 */
2597
2598void pmap_clear_modify(phys)
2599 vm_offset_t phys;
2600{
2601 phys_attribute_clear(phys, PHYS_MODIFIED0x00000040);
2602}
2603
2604/*
2605 * pmap_is_modified:
2606 *
2607 * Return whether or not the specified physical page is modified
2608 * by any physical maps.
2609 */
2610
2611boolean_t pmap_is_modified(phys)
2612 vm_offset_t phys;
2613{
2614 return (phys_attribute_test(phys, PHYS_MODIFIED0x00000040));
2615}
2616
2617/*
2618 * pmap_clear_reference:
2619 *
2620 * Clear the reference bit on the specified physical page.
2621 */
2622
2623void pmap_clear_reference(phys)
2624 vm_offset_t phys;
2625{
2626 phys_attribute_clear(phys, PHYS_REFERENCED0x00000020);
2627}
2628
2629/*
2630 * pmap_is_referenced:
2631 *
2632 * Return whether or not the specified physical page is referenced
2633 * by any physical maps.
2634 */
2635
2636boolean_t pmap_is_referenced(phys)
2637 vm_offset_t phys;
2638{
2639 return (phys_attribute_test(phys, PHYS_REFERENCED0x00000020));
2640}
2641
2642#if NCPUS1 > 1
2643/*
2644* TLB Coherence Code (TLB "shootdown" code)
2645*
2646* Threads that belong to the same task share the same address space and
2647* hence share a pmap. However, they may run on distinct cpus and thus
2648* have distinct TLBs that cache page table entries. In order to guarantee
2649* the TLBs are consistent, whenever a pmap is changed, all threads that
2650* are active in that pmap must have their TLB updated. To keep track of
2651* this information, the set of cpus that are currently using a pmap is
2652* maintained within each pmap structure (cpus_using). Pmap_activate() and
2653* pmap_deactivate add and remove, respectively, a cpu from this set.
2654* Since the TLBs are not addressable over the bus, each processor must
2655* flush its own TLB; a processor that needs to invalidate another TLB
2656* needs to interrupt the processor that owns that TLB to signal the
2657* update.
2658*
2659* Whenever a pmap is updated, the lock on that pmap is locked, and all
2660* cpus using the pmap are signaled to invalidate. All threads that need
2661* to activate a pmap must wait for the lock to clear to await any updates
2662* in progress before using the pmap. They must ACQUIRE the lock to add
2663* their cpu to the cpus_using set. An implicit assumption made
2664* throughout the TLB code is that all kernel code that runs at or higher
2665* than splvm blocks out update interrupts, and that such code does not
2666* touch pageable pages.
2667*
2668* A shootdown interrupt serves another function besides signaling a
2669* processor to invalidate. The interrupt routine (pmap_update_interrupt)
2670* waits for both the pmap lock (and the kernel pmap lock) to clear,
2671* preventing user code from making implicit pmap updates while the
2672* sending processor is performing its update. (This could happen via a
2673* user data write reference that turns on the modify bit in the page
2674* table). It must wait for any kernel updates that may have started
2675* concurrently with a user pmap update because the IPC code
2676* changes mappings.
2677* Spinning on the VALUES of the locks is sufficient (rather than
2678* having to acquire the locks) because any updates that occur subsequent
2679* to finding the lock unlocked will be signaled via another interrupt.
2680* (This assumes the interrupt is cleared before the low level interrupt code
2681* calls pmap_update_interrupt()).
2682*
2683* The signaling processor must wait for any implicit updates in progress
2684* to terminate before continuing with its update. Thus it must wait for an
2685* acknowledgement of the interrupt from each processor for which such
2686* references could be made. For maintaining this information, a set
2687* cpus_active is used. A cpu is in this set if and only if it can
2688* use a pmap. When pmap_update_interrupt() is entered, a cpu is removed from
2689* this set; when all such cpus are removed, it is safe to update.
2690*
2691* Before attempting to acquire the update lock on a pmap, a cpu (A) must
2692* be at least at the priority of the interprocessor interrupt
2693* (splip<=splvm). Otherwise, A could grab a lock and be interrupted by a
2694* kernel update; it would spin forever in pmap_update_interrupt() trying
2695* to acquire the user pmap lock it had already acquired. Furthermore A
2696* must remove itself from cpus_active. Otherwise, another cpu holding
2697* the lock (B) could be in the process of sending an update signal to A,
2698* and thus be waiting for A to remove itself from cpus_active. If A is
2699* spinning on the lock at priority this will never happen and a deadlock
2700* will result.
2701*/
2702
2703/*
2704 * Signal another CPU that it must flush its TLB
2705 */
/*
 * signal_cpus:
 *
 * For each cpu whose bit is set in `use_list`, append the range
 * [start, end) of `pmap` to that cpu's pending TLB-update list and
 * mark it as needing an update; cpus that are not idle are also
 * interrupted so they process the list promptly (idle cpus pick the
 * request up when they go active — see pmap_update_interrupt above).
 */
2706 void signal_cpus(use_list, pmap, start, end)
2707 cpu_set use_list;
2708 pmap_t pmap;
2709 vm_offset_t start, end;
2710 {
2711 int which_cpu, j;
2712 pmap_update_list_t update_list_p;
2713
/* ffs() returns the lowest set bit, 1-origin; 0 means no cpus left. */
2714 while ((which_cpu = ffs(use_list)) != 0) {
2715 which_cpu -= 1; /* convert to 0 origin */
2716
2717 update_list_p = &cpu_update_list[which_cpu];
2718 simple_lock(&update_list_p->lock);
2719
2720 j = update_list_p->count;
2721 if (j >= UPDATE_LIST_SIZE) {
2722 /*
2723 * list overflowed. Change last item to
2724 * indicate overflow.
2725 */
/* Collapse into one conservative entry that flushes the kernel pmap
 * over the whole address range rather than growing the list. */
2726 update_list_p->item[UPDATE_LIST_SIZE-1].pmap = kernel_pmap;
2727 update_list_p->item[UPDATE_LIST_SIZE-1].start = VM_MIN_ADDRESS(0);
2728 update_list_p->item[UPDATE_LIST_SIZE-1].end = VM_MAX_KERNEL_ADDRESS(0xF5800000UL - ((0xc0000000UL)) + 0xC0000000UL);
2729 }
2730 else {
2731 update_list_p->item[j].pmap = pmap;
2732 update_list_p->item[j].start = start;
2733 update_list_p->item[j].end = end;
2734 update_list_p->count = j+1;
2735 }
2736 cpu_update_needed[which_cpu] = TRUE((boolean_t) 1);
2737 simple_unlock(&update_list_p->lock)((void)(&update_list_p->lock));
2738
/* Only interrupt cpus that are awake; idle cpus must not be poked
 * (see the idle-cpu discussion in pmap_update_interrupt). */
2739 if ((cpus_idle & (1 << which_cpu)) == 0)
2740 interrupt_processor(which_cpu);
2741 use_list &= ~(1 << which_cpu);
2742 }
2743}
2744
/*
 * process_pmap_updates:
 *
 * Drain this cpu's pending TLB-update list: invalidate the TLB for
 * every queued range whose pmap is `my_pmap` or the kernel pmap, then
 * empty the list and clear the cpu's update-needed flag.
 */
2745 void process_pmap_updates(my_pmap)
2746 pmap_t my_pmap;
2747 {
2748 int my_cpu = cpu_number()(0);
2749 pmap_update_list_t update_list_p;
2750 int j;
2751 pmap_t pmap;
2752
2753 update_list_p = &cpu_update_list[my_cpu];
2754 simple_lock(&update_list_p->lock);
2755
2756 for (j = 0; j < update_list_p->count; j++) {
2757 pmap = update_list_p->item[j].pmap;
2758 if (pmap == my_pmap ||
2759 pmap == kernel_pmap) {
2760
2761 INVALIDATE_TLB(pmap,hyp_mmuext_op_void(6)
2762 update_list_p->item[j].start,hyp_mmuext_op_void(6)
2763 update_list_p->item[j].end)hyp_mmuext_op_void(6);
2764 }
2765 }
/*
 * NOTE(review): resetting count discards entries naming other user
 * pmaps without flushing them — presumably safe because this cpu is
 * not using those pmaps and will be re-signaled on activation;
 * confirm against the TLB coherence protocol described above.
 */
2766 update_list_p->count = 0;
2767 cpu_update_needed[my_cpu] = FALSE((boolean_t) 0);
2768 simple_unlock(&update_list_p->lock)((void)(&update_list_p->lock));
2769}
2770
2771/*
2772 * Interrupt routine for TBIA requested from other processor.
2773 */
/*
 * pmap_update_interrupt:
 *
 * Handler for the TLB-shootdown interprocessor interrupt (NCPUS > 1
 * build).  Determines the pmap this cpu is currently using, leaves
 * the active set, waits for in-progress updates on that pmap and the
 * kernel pmap to finish, then drains its update list via
 * process_pmap_updates().  See the TLB coherence commentary above.
 */
2774 void pmap_update_interrupt(void)
2775 {
2776 int my_cpu;
2777 pmap_t my_pmap;
2778 int s;
2779
2780 my_cpu = cpu_number()(0);
2781
2782 /*
2783 * Exit now if we're idle. We'll pick up the update request
2784 * when we go active, and we must not put ourselves back in
2785 * the active set because we'll never process the interrupt
2786 * while we're idle (thus hanging the system).
2787 */
2788 if (cpus_idle & (1 << my_cpu))
2789 return;
2790
/* Use the current thread's pmap if it is actually in use on this
 * cpu; otherwise fall back to the kernel pmap. */
2791 if (current_thread()(active_threads[(0)]) == THREAD_NULL((thread_t) 0))
2792 my_pmap = kernel_pmap;
2793 else {
2794 my_pmap = current_pmap()((((active_threads[(0)])->task->map)->pmap));
2795 if (!pmap_in_use(my_pmap, my_cpu)(((my_pmap)->cpus_using & (1 << (my_cpu))) != 0))
2796 my_pmap = kernel_pmap;
2797 }
2798
2799 /*
2800 * Raise spl to splvm (above splip) to block out pmap_extract
2801 * from IO code (which would put this cpu back in the active
2802 * set).
2803 */
2804 s = splvm();
2805
2806 do {
2807
2808 /*
2809 * Indicate that we're not using either user or kernel
2810 * pmap.
2811 */
2812 i_bit_clear(my_cpu, &cpus_active);
2813
2814 /*
2815 * Wait for any pmap updates in progress, on either user
2816 * or kernel pmap.
2817 */
/* Spinning on the lock VALUES (not acquiring them) is sufficient:
 * any update started after we see them clear re-signals us via
 * another interrupt, per the TLB coherence notes above. */
2818 while (*(volatile int *)&my_pmap->lock.lock_data ||
2819 *(volatile int *)&kernel_pmap->lock.lock_data)
2820 continue;
2821
2822 process_pmap_updates(my_pmap);
2823
2824 i_bit_set(my_cpu, &cpus_active);
2825
/* Loop in case another update was queued while we were draining. */
2826 } while (cpu_update_needed[my_cpu]);
2827
2828 splx(s);
2829}
2830#else /* NCPUS > 1 */
2831/*
2832 * Dummy routine to satisfy external reference.
2833 */
void pmap_update_interrupt(void)
{
	/*
	 * Uniprocessor build (NCPUS == 1): there is no other cpu to
	 * request a TLB shootdown, so this stub exists only to satisfy
	 * the external reference and should never actually run.
	 */
}
2838#endif /* NCPUS > 1 */
2839
2840#if defined(__i386__1)
2841/* Unmap page 0 to trap NULL references. */
2842void
2843pmap_unmap_page_zero (void)
2844{
2845 int *pte;
2846
2847 pte = (int *) pmap_pte (kernel_pmap, 0);
2848 if (!pte)
2849 return;
2850 assert (pte)({ if (!(pte)) Assert("pte", "../i386/intel/pmap.c", 2850); }
)
;
2851#ifdef MACH_PV_PAGETABLES
2852 if (!hyp_mmu_update_pte(kv_to_ma(pte)({ vm_offset_t __a = (vm_offset_t) (((vm_offset_t)(pte) - 0xC0000000UL
)); (((pt_entry_t) ((mfn_list[(((vm_size_t)(__a)) >> 12
)]))) << 12) | (__a & ((1 << 12)-1)); })
, 0))
2853 printf("couldn't unmap page 0\n");
2854#else /* MACH_PV_PAGETABLES */
2855 *pte = 0;
2856 INVALIDATE_TLB(kernel_pmap, 0, PAGE_SIZE)hyp_mmuext_op_void(6);
2857#endif /* MACH_PV_PAGETABLES */
2858}
2859#endif /* __i386__ */