Bug Summary

File: obj-scan-build/../i386/intel/pmap.c
Location: line 1459, column 6
Description: The left operand of '!=' is a garbage value
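
What the report is saying: in pmap_remove_range (annotated below, events 16 and 17), `n` is declared without an initializer and is only ever written back by the Xen MMU-update hypercall, whose output pointer is passed as a linear address via kvtolin(&n). On the path where the PTE loop body never executes, the analyzer sees the post-loop comparison at line 1459 reading an indeterminate `n`. The C program below is a minimal, self-contained sketch of that pattern; hyp_mmu_update_stub and remove_range_demo are hypothetical stand-ins, and the exact post-loop code at line 1459 is outside this excerpt, so its shape here is an assumption. Whether the finding is a real uninitialized read or an analyzer modeling limitation hinges on whether the hypercall's write through that linear address can be proven.

/* demo.c - illustration only; the stub names are hypothetical stand-ins
 * for the Xen hypercall path in pmap_remove_range. */
#include <stdio.h>

#define HYP_BATCH_MMU_UPDATES 256

/* Stand-in for hyp_mmu_update(): reports how many updates were applied. */
static void hyp_mmu_update_stub(int count, int *done)
{
	*done = count;
}

static void remove_range_demo(int nentries)
{
	int n, ii = 0;		/* 'n' stays indeterminate until written */

	for (int i = 0; i < nentries; i++) {
		ii++;
		if (ii == HYP_BATCH_MMU_UPDATES) {
			hyp_mmu_update_stub(ii, &n);
			if (n != ii)
				return;
			ii = 0;
		}
	}

	/* Post-loop flush and check, the shape reported at line 1459.  In
	 * pmap.c the address of 'n' reaches the hypercall only as an integer
	 * (kvtolin(&n)), so the analyzer cannot see the write and treats the
	 * left operand of '!=' as garbage on the nentries == 0 path. */
	hyp_mmu_update_stub(ii, &n);
	if (n != ii)
		puts("mismatch");

	/* Silencing options: initialize 'n' at its declaration, or only
	 * perform the flush and check when ii > 0. */
}

int main(void)
{
	remove_range_demo(0);	/* annotation 17: loop condition is false */
	return 0;
}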

Annotated Source Code

1/*
2 * Mach Operating System
3 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
4 * All Rights Reserved.
5 *
6 * Permission to use, copy, modify and distribute this software and its
7 * documentation is hereby granted, provided that both the copyright
8 * notice and this permission notice appear in all copies of the
9 * software, derivative works or modified versions, and any portions
10 * thereof, and that both notices appear in supporting documentation.
11 *
12 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
13 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
14 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
15 *
16 * Carnegie Mellon requests users of this software to return to
17 *
18 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
19 * School of Computer Science
20 * Carnegie Mellon University
21 * Pittsburgh PA 15213-3890
22 *
23 * any improvements or extensions that they make and grant Carnegie Mellon
24 * the rights to redistribute these changes.
25 */
26/*
27 * File: pmap.c
28 * Author: Avadis Tevanian, Jr., Michael Wayne Young
29 * (These guys wrote the Vax version)
30 *
31 * Physical Map management code for Intel i386, and i486.
32 *
33 * Manages physical address maps.
34 *
35 * In addition to hardware address maps, this
36 * module is called upon to provide software-use-only
37 * maps which may or may not be stored in the same
38 * form as hardware maps. These pseudo-maps are
39 * used to store intermediate results from copy
40 * operations to and from address spaces.
41 *
42 * Since the information managed by this module is
43 * also stored by the logical address mapping module,
44 * this module may throw away valid virtual-to-physical
45 * mappings at almost any time. However, invalidations
46 * of virtual-to-physical mappings must be done as
47 * requested.
48 *
49 * In order to cope with hardware architectures which
50 * make virtual-to-physical map invalidates expensive,
51 * this module may delay invalidate or reduced protection
52 * operations until such time as they are actually
53 * necessary. This module is given full information as
54 * to which processors are currently using which maps,
55 * and to when physical maps must be made correct.
56 */
57
58#include <string.h>
59
60#include <mach/machine/vm_types.h>
61
62#include <mach/boolean.h>
63#include <kern/debug.h>
64#include <kern/printf.h>
65#include <kern/thread.h>
66#include <kern/slab.h>
67
68#include <kern/lock.h>
69
70#include <vm/pmap.h>
71#include <vm/vm_map.h>
72#include <vm/vm_kern.h>
73#include <i386/vm_param.h>
74#include <mach/vm_prot.h>
75#include <vm/vm_object.h>
76#include <vm/vm_page.h>
77#include <vm/vm_user.h>
78
79#include <mach/machine/vm_param.h>
80#include <mach/xen.h>
81#include <machine/thread.h>
82#include <i386/cpu_number.h>
83#include <i386/proc_reg.h>
84#include <i386/locore.h>
85#include <i386/model_dep.h>
86
87#ifdef MACH_PSEUDO_PHYS
88#define WRITE_PTE(pte_p, pte_entry) *(pte_p) = pte_entry?pa_to_ma(pte_entry):0;
89#else /* MACH_PSEUDO_PHYS */
90#define WRITE_PTE(pte_p, pte_entry) *(pte_p) = (pte_entry);
91#endif /* MACH_PSEUDO_PHYS */
92
93/*
94 * Private data structures.
95 */
96
97/*
98 * For each vm_page_t, there is a list of all currently
99 * valid virtual mappings of that page. An entry is
100 * a pv_entry_t; the list is the pv_table.
101 */
102
103typedef struct pv_entry {
104 struct pv_entry *next; /* next pv_entry */
105 pmap_t pmap; /* pmap where mapping lies */
106 vm_offset_t va; /* virtual address for mapping */
107} *pv_entry_t;
108
109#define PV_ENTRY_NULL((pv_entry_t) 0) ((pv_entry_t) 0)
110
111pv_entry_t pv_head_table; /* array of entries, one per page */
112
113/*
114 * pv_list entries are kept on a list that can only be accessed
115 * with the pmap system locked (at SPLVM, not in the cpus_active set).
116 * The list is refilled from the pv_list_cache if it becomes empty.
117 */
118pv_entry_t pv_free_list; /* free list at SPLVM */
119decl_simple_lock_data(, pv_free_list_lock)
120
121#define PV_ALLOC(pv_e) { \
122 simple_lock(&pv_free_list_lock); \
123 if ((pv_e = pv_free_list) != 0) { \
124 pv_free_list = pv_e->next; \
125 } \
126 simple_unlock(&pv_free_list_lock); \
127}
128
129#define PV_FREE(pv_e) { \
130 simple_lock(&pv_free_list_lock); \
131 pv_e->next = pv_free_list; \
132 pv_free_list = pv_e; \
133 simple_unlock(&pv_free_list_lock); \
134}
135
136struct kmem_cache pv_list_cache; /* cache of pv_entry structures */
137
138/*
139 * Each entry in the pv_head_table is locked by a bit in the
140 * pv_lock_table. The lock bits are accessed by the physical
141 * address of the page they lock.
142 */
143
144char *pv_lock_table; /* pointer to array of bits */
145#define pv_lock_table_size(n) (((n)+BYTE_SIZE-1)/BYTE_SIZE)
146
147/* Has pmap_init completed? */
148boolean_t pmap_initialized = FALSE((boolean_t) 0);
149
150/*
151 * Range of kernel virtual addresses available for kernel memory mapping.
152 * Does not include the virtual addresses used to map physical memory 1-1.
153 * Initialized by pmap_bootstrap.
154 */
155vm_offset_t kernel_virtual_start;
156vm_offset_t kernel_virtual_end;
157
158/*
159 * Index into pv_head table, its lock bits, and the modify/reference
160 * bits starting at phys_first_addr.
161 */
162#define pa_index(pa) (atop(pa - phys_first_addr))
163
164#define pai_to_pvh(pai) (&pv_head_table[pai])
165#define lock_pvh_pai(pai) (bit_lock(pai, pv_lock_table))
166#define unlock_pvh_pai(pai) (bit_unlock(pai, pv_lock_table))
167
168/*
169 * Array of physical page attributes for managed pages.
170 * One byte per physical page.
171 */
172char *pmap_phys_attributes;
173
174/*
175 * Physical page attributes. Copy bits from PTE definition.
176 */
177#define PHYS_MODIFIED INTEL_PTE_MOD /* page modified */
178#define PHYS_REFERENCED INTEL_PTE_REF /* page referenced */
179
180/*
181 * Amount of virtual memory mapped by one
182 * page-directory entry.
183 */
184#define PDE_MAPPED_SIZE (pdenum2lin(1))
185
186/*
187 * We allocate page table pages directly from the VM system
188 * through this object. It maps physical memory.
189 */
190vm_object_t pmap_object = VM_OBJECT_NULL((vm_object_t) 0);
191
192/*
193 * Locking and TLB invalidation
194 */
195
196/*
197 * Locking Protocols:
198 *
199 * There are two structures in the pmap module that need locking:
200 * the pmaps themselves, and the per-page pv_lists (which are locked
201 * by locking the pv_lock_table entry that corresponds to the pv_head
202 * for the list in question.) Most routines want to lock a pmap and
203 * then do operations in it that require pv_list locking -- however
204 * pmap_remove_all and pmap_copy_on_write operate on a physical page
205 * basis and want to do the locking in the reverse order, i.e. lock
206 * a pv_list and then go through all the pmaps referenced by that list.
207 * To protect against deadlock between these two cases, the pmap_lock
208 * is used. There are three different locking protocols as a result:
209 *
210 * 1. pmap operations only (pmap_extract, pmap_access, ...) Lock only
211 * the pmap.
212 *
213 * 2. pmap-based operations (pmap_enter, pmap_remove, ...) Get a read
214 * lock on the pmap_lock (shared read), then lock the pmap
215 * and finally the pv_lists as needed [i.e. pmap lock before
216 * pv_list lock.]
217 *
218 * 3. pv_list-based operations (pmap_remove_all, pmap_copy_on_write, ...)
219 * Get a write lock on the pmap_lock (exclusive write); this
220 * also guarantees exclusive access to the pv_lists. Lock the
221 * pmaps as needed.
222 *
223 * At no time may any routine hold more than one pmap lock or more than
224 * one pv_list lock. Because interrupt level routines can allocate
225 * mbufs and cause pmap_enter's, the pmap_lock and the lock on the
226 * kernel_pmap can only be held at splvm.
227 */
228
229#if NCPUS1 > 1
230/*
231 * We raise the interrupt level to splvm, to block interprocessor
232 * interrupts during pmap operations. We must take the CPU out of
233 * the cpus_active set while interrupts are blocked.
234 */
235#define SPLVM(spl)((void)(spl)) { \
236 spl = splvm(); \
237 i_bit_clear(cpu_number()(0), &cpus_active); \
238}
239
240#define SPLX(spl)((void)(spl)) { \
241 i_bit_set(cpu_number()(0), &cpus_active); \
242 splx(spl); \
243}
244
245/*
246 * Lock on pmap system
247 */
248lock_data_t pmap_system_lock;
249
250#define PMAP_READ_LOCK(pmap, spl)((void)(spl)) { \
251 SPLVM(spl)((void)(spl)); \
252 lock_read(&pmap_system_lock); \
253 simple_lock(&(pmap)->lock); \
254}
255
256#define PMAP_WRITE_LOCK(spl)((void)(spl)) { \
257 SPLVM(spl)((void)(spl)); \
258 lock_write(&pmap_system_lock); \
259}
260
261#define PMAP_READ_UNLOCK(pmap, spl)((void)(spl)) { \
262 simple_unlock(&(pmap)->lock)((void)(&(pmap)->lock)); \
263 lock_read_done(&pmap_system_lock)lock_done(&pmap_system_lock); \
264 SPLX(spl)((void)(spl)); \
265}
266
267#define PMAP_WRITE_UNLOCK(spl)((void)(spl)) { \
268 lock_write_done(&pmap_system_lock)lock_done(&pmap_system_lock); \
269 SPLX(spl)((void)(spl)); \
270}
271
272#define PMAP_WRITE_TO_READ_LOCK(pmap) { \
273 simple_lock(&(pmap)->lock); \
274 lock_write_to_read(&pmap_system_lock); \
275}
276
277#define LOCK_PVH(index) (lock_pvh_pai(index)(bit_lock(index, pv_lock_table)))
278
279#define UNLOCK_PVH(index) (unlock_pvh_pai(index)(bit_unlock(index, pv_lock_table)))
280
281#define PMAP_UPDATE_TLBS(pmap, s, e){ if ((pmap)->cpus_using) { hyp_mmuext_op_void(6); } } \
282{ \
283 cpu_set cpu_mask = 1 << cpu_number()(0); \
284 cpu_set users; \
285 \
286 /* Since the pmap is locked, other updates are locked */ \
287 /* out, and any pmap_activate has finished. */ \
288 \
289 /* find other cpus using the pmap */ \
290 users = (pmap)->cpus_using & ~cpu_mask; \
291 if (users) { \
292 /* signal them, and wait for them to finish */ \
293 /* using the pmap */ \
294 signal_cpus(users, (pmap), (s), (e)); \
295 while ((pmap)->cpus_using & cpus_active & ~cpu_mask) \
296 continue; \
297 } \
298 \
299 /* invalidate our own TLB if pmap is in use */ \
300 if ((pmap)->cpus_using & cpu_mask) { \
301 INVALIDATE_TLB((pmap), (s), (e))hyp_mmuext_op_void(6); \
302 } \
303}
304
305#else /* NCPUS > 1 */
306
307#define SPLVM(spl)((void)(spl)) ((void)(spl))
308#define SPLX(spl)((void)(spl)) ((void)(spl))
309
310#define PMAP_READ_LOCK(pmap, spl)((void)(spl)) SPLVM(spl)((void)(spl))
311#define PMAP_WRITE_LOCK(spl)((void)(spl)) SPLVM(spl)((void)(spl))
312#define PMAP_READ_UNLOCK(pmap, spl)((void)(spl)) SPLX(spl)((void)(spl))
313#define PMAP_WRITE_UNLOCK(spl)((void)(spl)) SPLX(spl)((void)(spl))
314#define PMAP_WRITE_TO_READ_LOCK(pmap)
315
316#define LOCK_PVH(index)
317#define UNLOCK_PVH(index)
318
319#define PMAP_UPDATE_TLBS(pmap, s, e){ if ((pmap)->cpus_using) { hyp_mmuext_op_void(6); } } { \
320 /* invalidate our own TLB if pmap is in use */ \
321 if ((pmap)->cpus_using) { \
322 INVALIDATE_TLB((pmap), (s), (e))hyp_mmuext_op_void(6); \
323 } \
324}
325
326#endif /* NCPUS > 1 */
327
328#define MAX_TBIS_SIZE32 32 /* > this -> TBIA */ /* XXX */
329
330#ifdef MACH_PV_PAGETABLES
331#if 1
332#define INVALIDATE_TLB(pmap, s, e)hyp_mmuext_op_void(6) hyp_mmuext_op_void(MMUEXT_TLB_FLUSH_LOCAL6)
333#else
334#define INVALIDATE_TLB(pmap, s, e)hyp_mmuext_op_void(6) do { \
335 if (__builtin_constant_p((e) - (s)) \
336 && (e) - (s) == PAGE_SIZE(1 << 12)) \
337 hyp_invlpg((pmap) == kernel_pmap ? kvtolin(s)((vm_offset_t)(s) - 0xC0000000UL + ((0xc0000000UL))) : (s)); \
338 else \
339 hyp_mmuext_op_void(MMUEXT_TLB_FLUSH_LOCAL6); \
340} while(0)
341#endif
342#else /* MACH_PV_PAGETABLES */
343#if 0
344/* It is hard to know when a TLB flush becomes less expensive than a bunch of
345 * invlpgs. But it surely is more expensive than just one invlpg. */
346#define INVALIDATE_TLB(pmap, s, e)hyp_mmuext_op_void(6) { \
347 if (__builtin_constant_p((e) - (s)) \
348 && (e) - (s) == PAGE_SIZE(1 << 12)) \
349 invlpg_linear(s); \
350 else \
351 flush_tlb()({ cr3 = ((cr3)); if (!hyp_mmuext_op_mfn(5, ((mfn_list[(((vm_size_t
)((cr3))) >> 12)])))) panic("set_cr3"); })
; \
352}
353#else
354#define INVALIDATE_TLB(pmap, s, e)hyp_mmuext_op_void(6) { \
355 (void) (pmap); \
356 (void) (s); \
357 (void) (e); \
358 flush_tlb()({ cr3 = ((cr3)); if (!hyp_mmuext_op_mfn(5, ((mfn_list[(((vm_size_t
)((cr3))) >> 12)])))) panic("set_cr3"); })
; \
359}
360#endif
361#endif /* MACH_PV_PAGETABLES */
362
363
364#if NCPUS1 > 1
365/*
366 * Structures to keep track of pending TLB invalidations
367 */
368
369#define UPDATE_LIST_SIZE 4
370
371struct pmap_update_item {
372 pmap_t pmap; /* pmap to invalidate */
373 vm_offset_t start; /* start address to invalidate */
374 vm_offset_t end; /* end address to invalidate */
375} ;
376
377typedef struct pmap_update_item *pmap_update_item_t;
378
379/*
380 * List of pmap updates. If the list overflows,
381 * the last entry is changed to invalidate all.
382 */
383struct pmap_update_list {
384 decl_simple_lock_data(, lock)struct simple_lock_data_empty lock;
385 int count;
386 struct pmap_update_item item[UPDATE_LIST_SIZE];
387} ;
388typedef struct pmap_update_list *pmap_update_list_t;
389
390struct pmap_update_list cpu_update_list[NCPUS1];
391
392#endif /* NCPUS > 1 */
393
394/*
395 * Other useful macros.
396 */
397#define current_pmap()((((active_threads[(0)])->task->map)->pmap)) (vm_map_pmap(current_thread()->task->map)(((active_threads[(0)])->task->map)->pmap))
398#define pmap_in_use(pmap, cpu)(((pmap)->cpus_using & (1 << (cpu))) != 0) (((pmap)->cpus_using & (1 << (cpu))) != 0)
399
400struct pmap kernel_pmap_store;
401pmap_t kernel_pmap;
402
403struct kmem_cache pmap_cache; /* cache of pmap structures */
404
405boolean_t pmap_debug = FALSE((boolean_t) 0); /* flag for debugging prints */
406
407#if 0
408int ptes_per_vm_page1; /* number of hardware ptes needed
409 to map one VM page. */
410#else
411#define ptes_per_vm_page1 1
412#endif
413
414unsigned int inuse_ptepages_count = 0; /* debugging */
415
416/*
417 * Pointer to the basic page directory for the kernel.
418 * Initialized by pmap_bootstrap().
419 */
420pt_entry_t *kernel_page_dir;
421
422/*
423 * Two slots for temporary physical page mapping, to allow for
424 * physical-to-physical transfers.
425 */
426static pmap_mapwindow_t mapwindows[PMAP_NMAPWINDOWS2];
427
428static inline pt_entry_t *
429pmap_pde(const pmap_t pmap, vm_offset_t addr)
430{
431 if (pmap == kernel_pmap)
432 addr = kvtolin(addr)((vm_offset_t)(addr) - 0xC0000000UL + ((0xc0000000UL)));
433 return &pmap->dirbase[lin2pdenum(addr)(((addr) >> 21) & 0x7ff)];
434}
435
436/*
437 * Given an offset and a map, compute the address of the
438 * pte. If the address is invalid with respect to the map
439 * then PT_ENTRY_NULL is returned (and the map may need to grow).
440 *
441 * This is only used internally.
442 */
443pt_entry_t *
444pmap_pte(const pmap_t pmap, vm_offset_t addr)
445{
446 pt_entry_t *ptp;
447 pt_entry_t pte;
448
449 if (pmap->dirbase == 0)
450 return(PT_ENTRY_NULL((pt_entry_t *) 0));
451 pte = *pmap_pde(pmap, addr);
452 if ((pte & INTEL_PTE_VALID0x00000001) == 0)
453 return(PT_ENTRY_NULL((pt_entry_t *) 0));
454 ptp = (pt_entry_t *)ptetokv(pte);
455 return(&ptp[ptenum(addr)]);
456}
457
458#define DEBUG_PTE_PAGE0 0
459
460#if DEBUG_PTE_PAGE0
461void ptep_check(ptep_t ptep)
462{
463 pt_entry_t *pte, *epte;
464 int ctu, ctw;
465
466 /* check the use and wired counts */
467 if (ptep == PTE_PAGE_NULL)
468 return;
469 pte = pmap_pte(ptep->pmap, ptep->va);
470 epte = pte + INTEL_PGBYTES4096/sizeof(pt_entry_t);
471 ctu = 0;
472 ctw = 0;
473 while (pte < epte) {
474 if (pte->pfn != 0) {
475 ctu++;
476 if (pte->wired)
477 ctw++;
478 }
479 pte += ptes_per_vm_page1;
480 }
481
482 if (ctu != ptep->use_count || ctw != ptep->wired_count) {
483 printf("use %d wired %d - actual use %d wired %d\n",
484 ptep->use_count, ptep->wired_count, ctu, ctw);
485 panic("pte count");
486 }
487}
488#endif /* DEBUG_PTE_PAGE */
489
490/*
491 * Map memory at initialization. The physical addresses being
492 * mapped are not managed and are never unmapped.
493 *
494 * For now, VM is already on, we only need to map the
495 * specified memory.
496 */
497vm_offset_t pmap_map(
498 vm_offset_t virt,
499 vm_offset_t start,
500 vm_offset_t end,
501 int prot)
502{
503 int ps;
504
505 ps = PAGE_SIZE(1 << 12);
506 while (start < end) {
507 pmap_enter(kernel_pmap, virt, start, prot, FALSE((boolean_t) 0));
508 virt += ps;
509 start += ps;
510 }
511 return(virt);
512}
513
514/*
515 * Back-door routine for mapping kernel VM at initialization.
516 * Useful for mapping memory outside the range
517 * [phys_first_addr, phys_last_addr) (i.e., devices).
518 * Otherwise like pmap_map.
519 */
520vm_offset_t pmap_map_bd(
521 vm_offset_t virt,
522 vm_offset_t start,
523 vm_offset_t end,
524 vm_prot_t prot)
525{
526 pt_entry_t template;
527 pt_entry_t *pte;
528 int spl;
529#ifdef MACH_PV_PAGETABLES
530 int n, i = 0;
531 struct mmu_update update[HYP_BATCH_MMU_UPDATES256];
532#endif /* MACH_PV_PAGETABLES */
533
534 template = pa_to_pte(start)((start) & 0x00007ffffffff000ULL)
535 | INTEL_PTE_NCACHE0x00000010|INTEL_PTE_WTHRU0x00000008
536 | INTEL_PTE_VALID0x00000001;
537 if (CPU_HAS_FEATURE(CPU_FEATURE_PGE)(cpu_features[(13) / 32] & (1 << ((13) % 32))))
538 template |= INTEL_PTE_GLOBAL0x00000000;
539 if (prot & VM_PROT_WRITE((vm_prot_t) 0x02))
540 template |= INTEL_PTE_WRITE0x00000002;
541
542 PMAP_READ_LOCK(pmap, spl)((void)(spl));
543 while (start < end) {
544 pte = pmap_pte(kernel_pmap, virt);
545 if (pte == PT_ENTRY_NULL((pt_entry_t *) 0))
546 panic("pmap_map_bd: Invalid kernel address\n");
547#ifdef MACH_PV_PAGETABLES
548 update[i].ptr = kv_to_ma(pte);
549 update[i].val = pa_to_ma(template);
550 i++;
551 if (i == HYP_BATCH_MMU_UPDATES) {
552 hyp_mmu_update(kvtolin(&update), i, kvtolin(&n), DOMID_SELF);
553 if (n != i)
554 panic("couldn't pmap_map_bd\n");
555 i = 0;
556 }
557#else /* MACH_PV_PAGETABLES */
558 WRITE_PTE(pte, template);
559#endif /* MACH_PV_PAGETABLES */
560 pte_increment_pa(template)((template) += 0xfff +1);
561 virt += PAGE_SIZE(1 << 12);
562 start += PAGE_SIZE(1 << 12);
563 }
564#ifdef MACH_PV_PAGETABLES
565 if (i > HYP_BATCH_MMU_UPDATES)
566 panic("overflowed array in pmap_map_bd");
567 hyp_mmu_update(kvtolin(&update), i, kvtolin(&n), DOMID_SELF);
568 if (n != i)
569 panic("couldn't pmap_map_bd\n");
570#endif /* MACH_PV_PAGETABLES */
571 PMAP_READ_UNLOCK(pmap, spl)((void)(spl));
572 return(virt);
573}
574
575/*
576 * Bootstrap the system enough to run with virtual memory.
577 * Allocate the kernel page directory and page tables,
578 * and direct-map all physical memory.
579 * Called with mapping off.
580 */
581void pmap_bootstrap(void)
582{
583 /*
584 * Mapping is turned off; we must reference only physical addresses.
585 * The load image of the system is to be mapped 1-1 physical = virtual.
586 */
587
588 /*
589 * Set ptes_per_vm_page for general use.
590 */
591#if 0
592 ptes_per_vm_page1 = PAGE_SIZE(1 << 12) / INTEL_PGBYTES4096;
593#endif
594
595 /*
596 * The kernel's pmap is statically allocated so we don't
597 * have to use pmap_create, which is unlikely to work
598 * correctly at this part of the boot sequence.
599 */
600
601 kernel_pmap = &kernel_pmap_store;
602
603#if NCPUS1 > 1
604 lock_init(&pmap_system_lock, FALSE((boolean_t) 0)); /* NOT a sleep lock */
605#endif /* NCPUS > 1 */
606
607 simple_lock_init(&kernel_pmap->lock);
608
609 kernel_pmap->ref_count = 1;
610
611 /*
612 * Determine the kernel virtual address range.
613 * It starts at the end of the physical memory
614 * mapped into the kernel address space,
615 * and extends to a stupid arbitrary limit beyond that.
616 */
617 kernel_virtual_start = phystokv(phys_last_addr)((vm_offset_t)(phys_last_addr) + 0xC0000000UL);
618 kernel_virtual_end = phystokv(phys_last_addr)((vm_offset_t)(phys_last_addr) + 0xC0000000UL) + VM_KERNEL_MAP_SIZE(224 * 1024 * 1024);
619
620 if (kernel_virtual_end < kernel_virtual_start
621 || kernel_virtual_end > VM_MAX_KERNEL_ADDRESS(0xF5800000UL - ((0xc0000000UL)) + 0xC0000000UL))
622 kernel_virtual_end = VM_MAX_KERNEL_ADDRESS(0xF5800000UL - ((0xc0000000UL)) + 0xC0000000UL);
623
624 /*
625 * Allocate and clear a kernel page directory.
626 */
627 /* Note: initial Xen mapping holds at least 512kB free mapped page.
628 * We use that for directly building our linear mapping. */
629#if PAE1
630 {
631 vm_offset_t addr;
632 init_alloc_aligned(PDPNUM4 * INTEL_PGBYTES4096, &addr);
633 kernel_pmap->dirbase = kernel_page_dir = (pt_entry_t*)phystokv(addr)((vm_offset_t)(addr) + 0xC0000000UL);
634 }
635 kernel_pmap->pdpbase = (pt_entry_t*)phystokv(pmap_grab_page())((vm_offset_t)(pmap_grab_page()) + 0xC0000000UL);
636 {
637 int i;
638 for (i = 0; i < PDPNUM4; i++)
639 WRITE_PTE(&kernel_pmap->pdpbase[i], pa_to_pte(_kvtophys((void *) kernel_pmap->dirbase + i * INTEL_PGBYTES)) | INTEL_PTE_VALID);
640 }
641#else /* PAE */
642 kernel_pmap->dirbase = kernel_page_dir = (pt_entry_t*)phystokv(pmap_grab_page())((vm_offset_t)(pmap_grab_page()) + 0xC0000000UL);
643#endif /* PAE */
644 {
645 int i;
646 for (i = 0; i < NPDES(4 * ((((unsigned long)(1)) << 12)/sizeof(pt_entry_t))); i++)
647 kernel_pmap->dirbase[i] = 0;
648 }
649
650#ifdef MACH_PV_PAGETABLES
651 /* We don't actually deal with the CR3 register content at all */
652 hyp_vm_assist(VMASST_CMD_enable0, VMASST_TYPE_pae_extended_cr33);
653 /*
654 * Xen may only provide as few as 512KB extra bootstrap linear memory,
655 * which is far from enough to map all available memory, so we need to
656 * map more bootstrap linear memory. We here map 1 (resp. 4 for PAE)
657 * other L1 table(s), thus 4MiB extra memory (resp. 8MiB), which is
658 * enough for a pagetable mapping 4GiB.
659 */
660#ifdef PAE1
661#define NSUP_L14 4
662#else
663#define NSUP_L14 1
664#endif
665 pt_entry_t *l1_map[NSUP_L14];
666 {
667 pt_entry_t *base = (pt_entry_t*) boot_info.pt_base;
668 vm_offset_t la;
669 int n_l1map;
670 for (n_l1map = 0, la = VM_MIN_KERNEL_ADDRESS0xC0000000UL; la >= VM_MIN_KERNEL_ADDRESS0xC0000000UL; la += NPTES((((unsigned long)(1)) << 12)/sizeof(pt_entry_t)) * PAGE_SIZE(1 << 12)) {
671#ifdef PAE1
672 pt_entry_t *l2_map = (pt_entry_t*) ptetokv(base[lin2pdpnum(la)]);
673#else /* PAE */
674 pt_entry_t *l2_map = base;
675#endif /* PAE */
676 /* Like lin2pdenum, but works with non-contiguous boot L3 */
677 l2_map += (la >> PDESHIFT21) & PDEMASK0x1ff;
678 if (!(*l2_map & INTEL_PTE_VALID0x00000001)) {
679 struct mmu_update update;
680 int j, n;
681
682 l1_map[n_l1map] = (pt_entry_t*) phystokv(pmap_grab_page())((vm_offset_t)(pmap_grab_page()) + 0xC0000000UL);
683 for (j = 0; j < NPTES((((unsigned long)(1)) << 12)/sizeof(pt_entry_t)); j++)
684 l1_map[n_l1map][j] = (((pt_entry_t)pfn_to_mfn(lin2pdenum(la - VM_MIN_KERNEL_ADDRESS) * NPTES + j)) << PAGE_SHIFT) | INTEL_PTE_VALID | INTEL_PTE_WRITE;
685 pmap_set_page_readonly_init(l1_map[n_l1map]);
686 if (!hyp_mmuext_op_mfn (MMUEXT_PIN_L1_TABLE, kv_to_mfn (l1_map[n_l1map])))
687 panic("couldn't pin page %p(%p)", l1_map[n_l1map], (vm_offset_t) kv_to_ma (l1_map[n_l1map]));
688 update.ptr = kv_to_ma(l2_map);
689 update.val = kv_to_ma(l1_map[n_l1map]) | INTEL_PTE_VALID | INTEL_PTE_WRITE;
690 hyp_mmu_update(kv_to_la(&update), 1, kv_to_la(&n), DOMID_SELF);
691 if (n != 1)
692 panic("couldn't complete bootstrap map");
693 /* added the last L1 table, can stop */
694 if (++n_l1map >= NSUP_L14)
695 break;
696 }
697 }
698 }
699#endif /* MACH_PV_PAGETABLES */
700
701 /*
702 * Allocate and set up the kernel page tables.
703 */
704 {
705 vm_offset_t va;
706 pt_entry_t global = CPU_HAS_FEATURE(CPU_FEATURE_PGE)(cpu_features[(13) / 32] & (1 << ((13) % 32))) ? INTEL_PTE_GLOBAL0x00000000 : 0;
707
708 /*
709 * Map virtual memory for all known physical memory, 1-1,
710 * from phys_first_addr to phys_last_addr.
711 * Make any mappings completely in the kernel's text segment read-only.
712 *
713 * Also allocate some additional all-null page tables afterwards
714 * for kernel virtual memory allocation,
715 * because this PMAP module is too stupid
716 * to allocate new kernel page tables later.
717 * XX fix this
718 */
719 for (va = phystokv(phys_first_addr)((vm_offset_t)(phys_first_addr) + 0xC0000000UL); va >= phystokv(phys_first_addr)((vm_offset_t)(phys_first_addr) + 0xC0000000UL) && va < kernel_virtual_end; )
720 {
721 pt_entry_t *pde = kernel_page_dir + lin2pdenum(kvtolin(va))(((((vm_offset_t)(va) - 0xC0000000UL + ((0xc0000000UL)))) >>
21) & 0x7ff)
;
722 pt_entry_t *ptable = (pt_entry_t*)phystokv(pmap_grab_page())((vm_offset_t)(pmap_grab_page()) + 0xC0000000UL);
723 pt_entry_t *pte;
724
725 /* Initialize the page directory entry. */
726 WRITE_PTE(pde, pa_to_pte((vm_offset_t)_kvtophys(ptable))
727 | INTEL_PTE_VALID | INTEL_PTE_WRITE);
728
729 /* Initialize the page table. */
730 for (pte = ptable; (va < phystokv(phys_last_addr)((vm_offset_t)(phys_last_addr) + 0xC0000000UL)) && (pte < ptable+NPTES((((unsigned long)(1)) << 12)/sizeof(pt_entry_t))); pte++)
731 {
732 if ((pte - ptable) < ptenum(va))
733 {
734 WRITE_PTE(pte, 0);
735 }
736 else
737#ifdef MACH_PV_PAGETABLES
738 if (va == (vm_offset_t) &hyp_shared_info)
739 {
740 *pte = boot_info.shared_info | INTEL_PTE_VALID0x00000001 | INTEL_PTE_WRITE0x00000002;
741 va += INTEL_PGBYTES4096;
742 }
743 else
744#endif /* MACH_PV_PAGETABLES */
745 {
746 extern char _start[], etext[];
747
748 if (((va >= (vm_offset_t) _start)
749 && (va + INTEL_PGBYTES4096 <= (vm_offset_t)etext))
750#ifdef MACH_PV_PAGETABLES
751 || (va >= (vm_offset_t) boot_info.pt_base
752 && (va + INTEL_PGBYTES4096 <=
753 (vm_offset_t) ptable + INTEL_PGBYTES4096))
754#endif /* MACH_PV_PAGETABLES */
755 )
756 {
757 WRITE_PTE(pte, pa_to_pte(_kvtophys(va))
758 | INTEL_PTE_VALID | global);
759 }
760 else
761 {
762#ifdef MACH_PV_PAGETABLES
763 /* Keep supplementary L1 pages read-only */
764 int i;
765 for (i = 0; i < NSUP_L14; i++)
766 if (va == (vm_offset_t) l1_map[i]) {
767 WRITE_PTE(pte, pa_to_pte(_kvtophys(va))
768 | INTEL_PTE_VALID | global);
769 break;
770 }
771 if (i == NSUP_L14)
772#endif /* MACH_PV_PAGETABLES */
773 WRITE_PTE(pte, pa_to_pte(_kvtophys(va))
774 | INTEL_PTE_VALID | INTEL_PTE_WRITE | global)
775
776 }
777 va += INTEL_PGBYTES4096;
778 }
779 }
780 for (; pte < ptable+NPTES((((unsigned long)(1)) << 12)/sizeof(pt_entry_t)); pte++)
781 {
782 if (va >= kernel_virtual_end - PMAP_NMAPWINDOWS2 * PAGE_SIZE(1 << 12) && va < kernel_virtual_end)
783 {
784 pmap_mapwindow_t *win = &mapwindows[atop(va - (kernel_virtual_end - PMAP_NMAPWINDOWS * PAGE_SIZE))];
785 win->entry = pte;
786 win->vaddr = va;
787 }
788 WRITE_PTE(pte, 0);
789 va += INTEL_PGBYTES4096;
790 }
791#ifdef MACH_PV_PAGETABLES
792 pmap_set_page_readonly_init(ptable);
793 if (!hyp_mmuext_op_mfn (MMUEXT_PIN_L1_TABLE, kv_to_mfn (ptable)))
794 panic("couldn't pin page %p(%p)\n", ptable, (vm_offset_t) kv_to_ma (ptable));
795#endif /* MACH_PV_PAGETABLES */
796 }
797 }
798
799 /* Architecture-specific code will turn on paging
800 soon after we return from here. */
801}
802
803#ifdef MACH_PV_PAGETABLES
804/* These are only required because of Xen security policies */
805
806/* Set back a page read write */
807void pmap_set_page_readwrite(void *_vaddr) {
808 vm_offset_t vaddr = (vm_offset_t) _vaddr;
809 vm_offset_t paddr = kvtophys(vaddr);
810 vm_offset_t canon_vaddr = phystokv(paddr)((vm_offset_t)(paddr) + 0xC0000000UL);
811 if (hyp_do_update_va_mapping (kvtolin(vaddr), pa_to_pte (pa_to_ma(paddr)) | INTEL_PTE_VALID | INTEL_PTE_WRITE, UVMF_NONE))
812 panic("couldn't set hiMMU readwrite for addr %p(%p)\n", vaddr, (vm_offset_t) pa_to_ma (paddr));
813 if (canon_vaddr != vaddr)
814 if (hyp_do_update_va_mapping (kvtolin(canon_vaddr), pa_to_pte (pa_to_ma(paddr)) | INTEL_PTE_VALID | INTEL_PTE_WRITE, UVMF_NONE))
815 panic("couldn't set hiMMU readwrite for paddr %p(%p)\n", canon_vaddr, (vm_offset_t) pa_to_ma (paddr));
816}
817
818/* Set a page read only (so as to pin it for instance) */
819void pmap_set_page_readonly(void *_vaddr) {
820 vm_offset_t vaddr = (vm_offset_t) _vaddr;
821 vm_offset_t paddr = kvtophys(vaddr);
822 vm_offset_t canon_vaddr = phystokv(paddr)((vm_offset_t)(paddr) + 0xC0000000UL);
823 if (*pmap_pde(kernel_pmap, vaddr) & INTEL_PTE_VALID) {
824 if (hyp_do_update_va_mapping (kvtolin(vaddr), pa_to_pte (pa_to_ma(paddr)) | INTEL_PTE_VALID, UVMF_NONE))
825 panic("couldn't set hiMMU readonly for vaddr %p(%p)\n", vaddr, (vm_offset_t) pa_to_ma (paddr));
826 }
827 if (canon_vaddr != vaddr &&
828 *pmap_pde(kernel_pmap, canon_vaddr) & INTEL_PTE_VALID) {
829 if (hyp_do_update_va_mapping (kvtolin(canon_vaddr), pa_to_pte (pa_to_ma(paddr)) | INTEL_PTE_VALID, UVMF_NONE))
830 panic("couldn't set hiMMU readonly for vaddr %p canon_vaddr %p paddr %p (%p)\n", vaddr, canon_vaddr, paddr, (vm_offset_t) pa_to_ma (paddr));
831 }
832}
833
834/* This needs to be called instead of pmap_set_page_readonly as long as CR3
835 * still points to the bootstrap dirbase, to also fix the bootstrap table. */
836void pmap_set_page_readonly_init(void *_vaddr) {
837 vm_offset_t vaddr = (vm_offset_t) _vaddr;
838#if PAE1
839 pt_entry_t *pdpbase = (void*) boot_info.pt_base;
840 /* The bootstrap table does not necessarily use contiguous pages for the pde tables */
841 pt_entry_t *dirbase = (void*) ptetokv(pdpbase[lin2pdpnum(vaddr)])(((vm_offset_t)(({ pt_entry_t __a = (pt_entry_t) ((pdpbase[((
(vaddr) >> 30) & 3)]) & 0x00007ffffffff000ULL);
((((unsigned long *) 0xF5800000UL)[__a >> 12]) <<
12) | (__a & ((1 << 12)-1)); })) + 0xC0000000UL))
;
842#else
843 pt_entry_t *dirbase = (void*) boot_info.pt_base;
844#endif
845 pt_entry_t *pte = &dirbase[lin2pdenum(vaddr)(((vaddr) >> 21) & 0x7ff) & PTEMASK0x1ff];
846 /* Modify our future kernel map (can't use update_va_mapping for this)... */
847 if (*pmap_pde(kernel_pmap, vaddr) & INTEL_PTE_VALID0x00000001) {
848 if (!hyp_mmu_update_la (kvtolin(vaddr), pa_to_pte (kv_to_ma(vaddr)) | INTEL_PTE_VALID)hyp_mmu_update_pte( (kernel_pmap->dirbase[((((vm_offset_t)
(((vm_offset_t)(vaddr) - 0xC0000000UL + ((0xc0000000UL))))) >>
21) & 0x7ff)] & 0x00007ffffffff000ULL) + ((((vm_offset_t
)(((vm_offset_t)(vaddr) - 0xC0000000UL + ((0xc0000000UL))))) >>
12) & 0x1ff) * sizeof(pt_entry_t), ((({ vm_offset_t __a =
(vm_offset_t) (((vm_offset_t)(vaddr) - 0xC0000000UL)); (((pt_entry_t
) ((mfn_list[(((vm_size_t)(__a)) >> 12)]))) << 12
) | (__a & ((1 << 12)-1)); })) & 0x00007ffffffff000ULL
) | 0x00000001)
)
849 panic("couldn't set hiMMU readonly for vaddr %p(%p)\n", vaddr, (vm_offset_t) kv_to_ma (vaddr)({ vm_offset_t __a = (vm_offset_t) (((vm_offset_t)(vaddr) - 0xC0000000UL
)); (((pt_entry_t) ((mfn_list[(((vm_size_t)(__a)) >> 12
)]))) << 12) | (__a & ((1 << 12)-1)); })
);
850 }
851 /* ... and the bootstrap map. */
852 if (*pte & INTEL_PTE_VALID0x00000001) {
853 if (hyp_do_update_va_mapping (vaddr, pa_to_pte (kv_to_ma(vaddr)) | INTEL_PTE_VALID, UVMF_NONE)({ pt_entry_t __val = (((({ vm_offset_t __a = (vm_offset_t) (
((vm_offset_t)(vaddr) - 0xC0000000UL)); (((pt_entry_t) ((mfn_list
[(((vm_size_t)(__a)) >> 12)]))) << 12) | (__a &
((1 << 12)-1)); })) & 0x00007ffffffff000ULL) | 0x00000001
); hyp_update_va_mapping(vaddr, __val & 0xffffffffU, ((__val
) >> 32), (0UL<<0)); })
)
854 panic("couldn't set MMU readonly for vaddr %p(%p)\n", vaddr, (vm_offset_t) kv_to_ma (vaddr)({ vm_offset_t __a = (vm_offset_t) (((vm_offset_t)(vaddr) - 0xC0000000UL
)); (((pt_entry_t) ((mfn_list[(((vm_size_t)(__a)) >> 12
)]))) << 12) | (__a & ((1 << 12)-1)); })
);
855 }
856}
857
858void pmap_clear_bootstrap_pagetable(pt_entry_t *base) {
859 int i;
860 pt_entry_t *dir;
861 vm_offset_t va = 0;
862#if PAE1
863 int j;
864#endif /* PAE */
865 if (!hyp_mmuext_op_mfn (MMUEXT_UNPIN_TABLE4, kv_to_mfn(base)((mfn_list[(((vm_size_t)(((vm_offset_t)(base) - 0xC0000000UL)
)) >> 12)]))
))
866 panic("pmap_clear_bootstrap_pagetable: couldn't unpin page %p(%p)\n", base, (vm_offset_t) kv_to_ma(base)({ vm_offset_t __a = (vm_offset_t) (((vm_offset_t)(base) - 0xC0000000UL
)); (((pt_entry_t) ((mfn_list[(((vm_size_t)(__a)) >> 12
)]))) << 12) | (__a & ((1 << 12)-1)); })
);
867#if PAE1
868 for (j = 0; j < PDPNUM4; j++)
869 {
870 pt_entry_t pdpe = base[j];
871 if (pdpe & INTEL_PTE_VALID0x00000001) {
872 dir = (pt_entry_t *) ptetokv(pdpe)(((vm_offset_t)(({ pt_entry_t __a = (pt_entry_t) ((pdpe) &
0x00007ffffffff000ULL); ((((unsigned long *) 0xF5800000UL)[__a
>> 12]) << 12) | (__a & ((1 << 12)-1))
; })) + 0xC0000000UL))
;
873#else /* PAE */
874 dir = base;
875#endif /* PAE */
876 for (i = 0; i < NPTES((((unsigned long)(1)) << 12)/sizeof(pt_entry_t)); i++) {
877 pt_entry_t pde = dir[i];
878 unsigned long pfn = atop(pte_to_pa(pde))(((vm_size_t)(({ pt_entry_t __a = (pt_entry_t) ((pde) & 0x00007ffffffff000ULL
); ((((unsigned long *) 0xF5800000UL)[__a >> 12]) <<
12) | (__a & ((1 << 12)-1)); }))) >> 12)
;
879 void *pgt = (void*) phystokv(ptoa(pfn))((vm_offset_t)(((vm_offset_t)((pfn) << 12))) + 0xC0000000UL
)
;
880 if (pde & INTEL_PTE_VALID0x00000001)
881 hyp_free_page(pfn, pgt);
882 va += NPTES((((unsigned long)(1)) << 12)/sizeof(pt_entry_t)) * INTEL_PGBYTES4096;
883 if (va >= HYP_VIRT_START0xF5800000UL)
884 break;
885 }
886#if PAE1
887 hyp_free_page(atop(_kvtophys(dir))(((vm_size_t)(((vm_offset_t)(dir) - 0xC0000000UL))) >> 12
)
, dir);
888 } else
889 va += NPTES((((unsigned long)(1)) << 12)/sizeof(pt_entry_t)) * NPTES((((unsigned long)(1)) << 12)/sizeof(pt_entry_t)) * INTEL_PGBYTES4096;
890 if (va >= HYP_VIRT_START0xF5800000UL)
891 break;
892 }
893#endif /* PAE */
894 hyp_free_page(atop(_kvtophys(base))(((vm_size_t)(((vm_offset_t)(base) - 0xC0000000UL))) >>
12)
, base);
895}
896#endif /* MACH_PV_PAGETABLES */
897
898/*
899 * Create a temporary mapping for a given physical entry
900 *
901 * This can be used to access physical pages which are not mapped 1:1 by
902 * phystokv().
903 */
904pmap_mapwindow_t *pmap_get_mapwindow(pt_entry_t entry)
905{
906 pmap_mapwindow_t *map;
907
908 /* Find an empty one. */
909 for (map = &mapwindows[0]; map < &mapwindows[sizeof (mapwindows) / sizeof (*mapwindows)]; map++)
910 if (!(*map->entry))
911 break;
912 assert(map < &mapwindows[sizeof (mapwindows) / sizeof (*mapwindows)]);
913
914 WRITE_PTE(map->entry, entry);
915 return map;
916}
917
918/*
919 * Destroy a temporary mapping for a physical entry
920 */
921void pmap_put_mapwindow(pmap_mapwindow_t *map)
922{
923 WRITE_PTE(map->entry, 0);
924 PMAP_UPDATE_TLBS(kernel_pmap, map->vaddr, map->vaddr + PAGE_SIZE);
925}
926
927void pmap_virtual_space(
928 vm_offset_t *startp,
929 vm_offset_t *endp)
930{
931 *startp = kernel_virtual_start;
932 *endp = kernel_virtual_end - PMAP_NMAPWINDOWS2 * PAGE_SIZE(1 << 12);
933}
934
935/*
936 * Initialize the pmap module.
937 * Called by vm_init, to initialize any structures that the pmap
938 * system needs to map virtual memory.
939 */
940void pmap_init(void)
941{
942 long npages;
943 vm_offset_t addr;
944 vm_size_t s;
945#if NCPUS1 > 1
946 int i;
947#endif /* NCPUS > 1 */
948
949 /*
950 * Allocate memory for the pv_head_table and its lock bits,
951 * the modify bit array, and the pte_page table.
952 */
953
954 npages = atop(phys_last_addr - phys_first_addr)(((vm_size_t)(phys_last_addr - phys_first_addr)) >> 12);
955 s = (vm_size_t) (sizeof(struct pv_entry) * npages
956 + pv_lock_table_size(npages)(((npages)+8 -1)/8)
957 + npages);
958
959 s = round_page(s);
960 if (kmem_alloc_wired(kernel_map, &addr, s) != KERN_SUCCESS0)
961 panic("pmap_init");
962 memset((void *) addr, 0, s);
963
964 /*
965 * Allocate the structures first to preserve word-alignment.
966 */
967 pv_head_table = (pv_entry_t) addr;
968 addr = (vm_offset_t) (pv_head_table + npages);
969
970 pv_lock_table = (char *) addr;
971 addr = (vm_offset_t) (pv_lock_table + pv_lock_table_size(npages)(((npages)+8 -1)/8));
972
973 pmap_phys_attributes = (char *) addr;
974
975 /*
976 * Create the cache of physical maps,
977 * and of the physical-to-virtual entries.
978 */
979 s = (vm_size_t) sizeof(struct pmap);
980 kmem_cache_init(&pmap_cache, "pmap", s, 0, NULL((void *) 0), NULL((void *) 0), NULL((void *) 0), 0);
981 s = (vm_size_t) sizeof(struct pv_entry);
982 kmem_cache_init(&pv_list_cache, "pv_entry", s, 0, NULL((void *) 0), NULL((void *) 0), NULL((void *) 0), 0);
983
984#if NCPUS1 > 1
985 /*
986 * Set up the pmap request lists
987 */
988 for (i = 0; i < NCPUS1; i++) {
989 pmap_update_list_t up = &cpu_update_list[i];
990
991 simple_lock_init(&up->lock);
992 up->count = 0;
993 }
994#endif /* NCPUS > 1 */
995
996 /*
997 * Indicate that the PMAP module is now fully initialized.
998 */
999 pmap_initialized = TRUE((boolean_t) 1);
1000}
1001
1002#define valid_page(x)(pmap_initialized && pmap_valid_page(x)) (pmap_initialized && pmap_valid_page(x))
1003
1004boolean_t pmap_verify_free(vm_offset_t phys)
1005{
1006 pv_entry_t pv_h;
1007 int pai;
1008 int spl;
1009 boolean_t result;
1010
1011 assert(phys != vm_page_fictitious_addr);
1012 if (!pmap_initialized)
1013 return(TRUE((boolean_t) 1));
1014
1015 if (!pmap_valid_page(phys))
1016 return(FALSE((boolean_t) 0));
1017
1018 PMAP_WRITE_LOCK(spl)((void)(spl));
1019
1020 pai = pa_index(phys)((((vm_size_t)(phys - phys_first_addr)) >> 12));
1021 pv_h = pai_to_pvh(pai)(&pv_head_table[pai]);
1022
1023 result = (pv_h->pmap == PMAP_NULL((pmap_t) 0));
1024 PMAP_WRITE_UNLOCK(spl)((void)(spl));
1025
1026 return(result);
1027}
1028
1029/*
1030 * Routine: pmap_page_table_page_alloc
1031 *
1032 * Allocates a new physical page to be used as a page-table page.
1033 *
1034 * Must be called with the pmap system and the pmap unlocked,
1035 * since these must be unlocked to use vm_page_grab.
1036 */
1037vm_offset_t
1038pmap_page_table_page_alloc(void)
1039{
1040 vm_page_t m;
1041 vm_offset_t pa;
1042
1043 check_simple_locks();
1044
1045 /*
1046 * We cannot allocate the pmap_object in pmap_init,
1047 * because it is called before the cache package is up.
1048 * Allocate it now if it is missing.
1049 */
1050 if (pmap_object == VM_OBJECT_NULL((vm_object_t) 0))
1051 pmap_object = vm_object_allocate(phys_last_addr - phys_first_addr);
1052
1053 /*
1054 * Allocate a VM page for the level 2 page table entries.
1055 */
1056 while ((m = vm_page_grab(FALSE((boolean_t) 0))) == VM_PAGE_NULL((vm_page_t) 0))
1057 VM_PAGE_WAIT((void (*)()) 0)vm_page_wait((void (*)()) 0);
1058
1059 /*
1060 * Map the page to its physical address so that it
1061 * can be found later.
1062 */
1063 pa = m->phys_addr;
1064 vm_object_lock(pmap_object);
1065 vm_page_insert(m, pmap_object, pa);
1066 vm_page_lock_queues();
1067 vm_page_wire(m);
1068 inuse_ptepages_count++;
1069 vm_page_unlock_queues()((void)(&vm_page_queue_lock));
1070 vm_object_unlock(pmap_object)((void)(&(pmap_object)->Lock));
1071
1072 /*
1073 * Zero the page.
1074 */
1075 memset((void *)phystokv(pa)((vm_offset_t)(pa) + 0xC0000000UL), 0, PAGE_SIZE(1 << 12));
1076
1077 return pa;
1078}
1079
1080#ifdef MACH_XEN
1081void pmap_map_mfn(void *_addr, unsigned long mfn) {
1082 vm_offset_t addr = (vm_offset_t) _addr;
1083 pt_entry_t *pte, *pdp;
1084 vm_offset_t ptp;
1085 pt_entry_t ma = ((pt_entry_t) mfn) << PAGE_SHIFT12;
1086
1087 /* Add a ptp if none exist yet for this pte */
1088 if ((pte = pmap_pte(kernel_pmap, addr)) == PT_ENTRY_NULL((pt_entry_t *) 0)) {
1089 ptp = phystokv(pmap_page_table_page_alloc())((vm_offset_t)(pmap_page_table_page_alloc()) + 0xC0000000UL);
1090#ifdef MACH_PV_PAGETABLES
1091 pmap_set_page_readonly((void*) ptp);
1092 if (!hyp_mmuext_op_mfn (MMUEXT_PIN_L1_TABLE0, pa_to_mfn(ptp)((mfn_list[(((vm_size_t)(ptp)) >> 12)]))))
1093 panic("couldn't pin page %p(%p)\n",ptp,(vm_offset_t) kv_to_ma(ptp)({ vm_offset_t __a = (vm_offset_t) (((vm_offset_t)(ptp) - 0xC0000000UL
)); (((pt_entry_t) ((mfn_list[(((vm_size_t)(__a)) >> 12
)]))) << 12) | (__a & ((1 << 12)-1)); })
);
1094#endif /* MACH_PV_PAGETABLES */
1095 pdp = pmap_pde(kernel_pmap, addr);
1096
1097#ifdef MACH_PV_PAGETABLES
1098 if (!hyp_mmu_update_pte(kv_to_ma(pdp)({ vm_offset_t __a = (vm_offset_t) (((vm_offset_t)(pdp) - 0xC0000000UL
)); (((pt_entry_t) ((mfn_list[(((vm_size_t)(__a)) >> 12
)]))) << 12) | (__a & ((1 << 12)-1)); })
,
1099 pa_to_pte(kv_to_ma(ptp))((({ vm_offset_t __a = (vm_offset_t) (((vm_offset_t)(ptp) - 0xC0000000UL
)); (((pt_entry_t) ((mfn_list[(((vm_size_t)(__a)) >> 12
)]))) << 12) | (__a & ((1 << 12)-1)); })) &
0x00007ffffffff000ULL)
| INTEL_PTE_VALID0x00000001
1100 | INTEL_PTE_USER0x00000004
1101 | INTEL_PTE_WRITE0x00000002))
1102 panic("%s:%d could not set pde %p(%p) to %p(%p)\n",__FILE__"../i386/intel/pmap.c",__LINE__1102,kvtophys((vm_offset_t)pdp),(vm_offset_t) kv_to_ma(pdp)({ vm_offset_t __a = (vm_offset_t) (((vm_offset_t)(pdp) - 0xC0000000UL
)); (((pt_entry_t) ((mfn_list[(((vm_size_t)(__a)) >> 12
)]))) << 12) | (__a & ((1 << 12)-1)); })
, ptp, (vm_offset_t) pa_to_ma(ptp)({ vm_offset_t __a = (vm_offset_t) (ptp); (((pt_entry_t) ((mfn_list
[(((vm_size_t)(__a)) >> 12)]))) << 12) | (__a &
((1 << 12)-1)); })
);
1103#else /* MACH_PV_PAGETABLES */
1104 *pdp = pa_to_pte(kvtophys(ptp))((kvtophys(ptp)) & 0x00007ffffffff000ULL) | INTEL_PTE_VALID0x00000001
1105 | INTEL_PTE_USER0x00000004
1106 | INTEL_PTE_WRITE0x00000002;
1107#endif /* MACH_PV_PAGETABLES */
1108 pte = pmap_pte(kernel_pmap, addr);
1109 }
1110
1111#ifdef MACH_PV_PAGETABLES
1112 if (!hyp_mmu_update_pte(kv_to_ma(pte)({ vm_offset_t __a = (vm_offset_t) (((vm_offset_t)(pte) - 0xC0000000UL
)); (((pt_entry_t) ((mfn_list[(((vm_size_t)(__a)) >> 12
)]))) << 12) | (__a & ((1 << 12)-1)); })
, ma | INTEL_PTE_VALID0x00000001 | INTEL_PTE_WRITE0x00000002))
1113 panic("%s:%d could not set pte %p(%p) to %p(%p)\n",__FILE__"../i386/intel/pmap.c",__LINE__1113,pte,(vm_offset_t) kv_to_ma(pte)({ vm_offset_t __a = (vm_offset_t) (((vm_offset_t)(pte) - 0xC0000000UL
)); (((pt_entry_t) ((mfn_list[(((vm_size_t)(__a)) >> 12
)]))) << 12) | (__a & ((1 << 12)-1)); })
, ma, ma_to_pa(ma)({ pt_entry_t __a = (pt_entry_t) (ma); ((((unsigned long *) 0xF5800000UL
)[__a >> 12]) << 12) | (__a & ((1 << 12
)-1)); })
);
1114#else /* MACH_PV_PAGETABLES */
1115 /* Note: in this case, mfn is actually a pfn. */
1116 WRITE_PTE(pte, ma | INTEL_PTE_VALID | INTEL_PTE_WRITE)*(pte) = ma | 0x00000001 | 0x00000002?({ vm_offset_t __a = (vm_offset_t
) (ma | 0x00000001 | 0x00000002); (((pt_entry_t) ((mfn_list[(
((vm_size_t)(__a)) >> 12)]))) << 12) | (__a &
((1 << 12)-1)); }):0;
;
1117#endif /* MACH_PV_PAGETABLES */
1118}
1119#endif /* MACH_XEN */
1120
1121/*
1122 * Deallocate a page-table page.
1123 * The page-table page must have all mappings removed,
1124 * and be removed from its page directory.
1125 */
1126void
1127pmap_page_table_page_dealloc(vm_offset_t pa)
1128{
1129 vm_page_t m;
1130
1131 vm_object_lock(pmap_object);
1132 m = vm_page_lookup(pmap_object, pa);
1133 vm_page_lock_queues();
1134 vm_page_free(m);
1135 inuse_ptepages_count--;
1136 vm_page_unlock_queues()((void)(&vm_page_queue_lock));
1137 vm_object_unlock(pmap_object)((void)(&(pmap_object)->Lock));
1138}
1139
1140/*
1141 * Create and return a physical map.
1142 *
1143 * If the size specified for the map
1144 * is zero, the map is an actual physical
1145 * map, and may be referenced by the
1146 * hardware.
1147 *
1148 * If the size specified is non-zero,
1149 * the map will be used in software only, and
1150 * is bounded by that size.
1151 */
1152pmap_t pmap_create(vm_size_t size)
1153{
1154 pmap_t p;
1155 pmap_statistics_t stats;
1156
1157 /*
1158 * A software use-only map doesn't even need a map.
1159 */
1160
1161 if (size != 0) {
1162 return(PMAP_NULL((pmap_t) 0));
1163 }
1164
1165/*
1166 * Allocate a pmap struct from the pmap_cache. Then allocate
1167 * the page descriptor table.
1168 */
1169
1170 p = (pmap_t) kmem_cache_alloc(&pmap_cache);
1171 if (p == PMAP_NULL((pmap_t) 0))
1172 panic("pmap_create");
1173
1174 if (kmem_alloc_wired(kernel_map,
1175 (vm_offset_t *)&p->dirbase, PDPNUM4 * INTEL_PGBYTES4096)
1176 != KERN_SUCCESS0)
1177 panic("pmap_create");
1178
1179 memcpy(p->dirbase, kernel_page_dir, PDPNUM4 * INTEL_PGBYTES4096);
1180#ifdef LINUX_DEV
1181#if VM_MIN_KERNEL_ADDRESS0xC0000000UL != 0
1182 /* Do not map BIOS in user tasks */
1183 p->dirbase[lin2pdenum(LINEAR_MIN_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS)(((((0xc0000000UL)) - 0xC0000000UL) >> 21) & 0x7ff)] = 0;
1184#endif
1185#endif
1186#ifdef MACH_PV_PAGETABLES
1187 {
1188 int i;
1189 for (i = 0; i < PDPNUM4; i++)
1190 pmap_set_page_readonly((void*) p->dirbase + i * INTEL_PGBYTES4096);
1191 }
1192#endif /* MACH_PV_PAGETABLES */
1193
1194#if PAE1
1195 if (kmem_alloc_wired(kernel_map,
1196 (vm_offset_t *)&p->pdpbase, INTEL_PGBYTES4096)
1197 != KERN_SUCCESS0)
1198 panic("pmap_create");
1199 {
1200 int i;
1201 for (i = 0; i < PDPNUM4; i++)
1202 WRITE_PTE(&p->pdpbase[i], pa_to_pte(kvtophys((vm_offset_t) p->dirbase + i * INTEL_PGBYTES)) | INTEL_PTE_VALID);
1204#ifdef MACH_PV_PAGETABLES
1205 pmap_set_page_readonly(p->pdpbase);
1206#endif /* MACH_PV_PAGETABLES */
1207#endif /* PAE */
1208
1209 p->ref_count = 1;
1210
1211 simple_lock_init(&p->lock);
1212 p->cpus_using = 0;
1213
1214 /*
1215 * Initialize statistics.
1216 */
1217
1218 stats = &p->stats;
1219 stats->resident_count = 0;
1220 stats->wired_count = 0;
1221
1222 return(p);
1223}
1224
1225/*
1226 * Retire the given physical map from service.
1227 * Should only be called if the map contains
1228 * no valid mappings.
1229 */
1230
1231void pmap_destroy(pmap_t p)
1232{
1233 pt_entry_t *pdep;
1234 vm_offset_t pa;
1235 int c, s;
1236 vm_page_t m;
1237
1238 if (p == PMAP_NULL((pmap_t) 0))
1239 return;
1240
1241 SPLVM(s)((void)(s));
1242 simple_lock(&p->lock);
1243 c = --p->ref_count;
1244 simple_unlock(&p->lock)((void)(&p->lock));
1245 SPLX(s)((void)(s));
1246
1247 if (c != 0) {
1248 return; /* still in use */
1249 }
1250
1251 /*
1252 * Free the memory maps, then the
1253 * pmap structure.
1254 */
1255 for (pdep = p->dirbase;
1256 pdep < &p->dirbase[lin2pdenum(LINEAR_MIN_KERNEL_ADDRESS)(((((0xc0000000UL))) >> 21) & 0x7ff)];
1257 pdep += ptes_per_vm_page1) {
1258 if (*pdep & INTEL_PTE_VALID0x00000001) {
1259 pa = pte_to_pa(*pdep);
1260 vm_object_lock(pmap_object);
1261 m = vm_page_lookup(pmap_object, pa);
1262 if (m == VM_PAGE_NULL((vm_page_t) 0))
1263 panic("pmap_destroy: pte page not in object");
1264 vm_page_lock_queues();
1265#ifdef MACH_PV_PAGETABLES
1266 if (!hyp_mmuext_op_mfn (MMUEXT_UNPIN_TABLE, pa_to_mfn(pa)))
1267 panic("pmap_destroy: couldn't unpin page %p(%p)\n", pa, (vm_offset_t) kv_to_ma(pa));
1268 pmap_set_page_readwrite((void*) phystokv(pa)((vm_offset_t)(pa) + 0xC0000000UL));
1269#endif /* MACH_PV_PAGETABLES */
1270 vm_page_free(m);
1271 inuse_ptepages_count--;
1272 vm_page_unlock_queues()((void)(&vm_page_queue_lock));
1273 vm_object_unlock(pmap_object)((void)(&(pmap_object)->Lock));
1274 }
1275 }
1276#ifdef MACH_PV_PAGETABLES
1277 {
1278 int i;
1279 for (i = 0; i < PDPNUM4; i++)
1280 pmap_set_page_readwrite((void*) p->dirbase + i * INTEL_PGBYTES4096);
1281 }
1282#endif /* MACH_PV_PAGETABLES */
1283 kmem_free(kernel_map, (vm_offset_t)p->dirbase, PDPNUM4 * INTEL_PGBYTES4096);
1284#if PAE1
1285#ifdef MACH_PV_PAGETABLES
1286 pmap_set_page_readwrite(p->pdpbase);
1287#endif /* MACH_PV_PAGETABLES */
1288 kmem_free(kernel_map, (vm_offset_t)p->pdpbase, INTEL_PGBYTES4096);
1289#endif /* PAE */
1290 kmem_cache_free(&pmap_cache, (vm_offset_t) p);
1291}
1292
1293/*
1294 * Add a reference to the specified pmap.
1295 */
1296
1297void pmap_reference(pmap_t p)
1298{
1299 int s;
1300 if (p != PMAP_NULL((pmap_t) 0)) {
1301 SPLVM(s)((void)(s));
1302 simple_lock(&p->lock);
1303 p->ref_count++;
1304 simple_unlock(&p->lock)((void)(&p->lock));
1305 SPLX(s)((void)(s));
1306 }
1307}
1308
1309/*
1310 * Remove a range of hardware page-table entries.
1311 * The entries given are the first (inclusive)
1312 * and last (exclusive) entries for the VM pages.
1313 * The virtual address is the va for the first pte.
1314 *
1315 * The pmap must be locked.
1316 * If the pmap is not the kernel pmap, the range must lie
1317 * entirely within one pte-page. This is NOT checked.
1318 * Assumes that the pte-page exists.
1319 */
1320
1321/* static */
1322void pmap_remove_range(
1323 pmap_t pmap,
1324 vm_offset_t va,
1325 pt_entry_t *spte,
1326 pt_entry_t *epte)
1327{
1328 pt_entry_t *cpte;
1329 int num_removed, num_unwired;
1330 int pai;
1331 vm_offset_t pa;
1332#ifdef MACH_PV_PAGETABLES
1333 int n, ii = 0;
16. Variable 'n' declared without an initial value
1334 struct mmu_update update[HYP_BATCH_MMU_UPDATES256];
1335#endif /* MACH_PV_PAGETABLES */
1336
1337#if DEBUG_PTE_PAGE0
1338 if (pmap != kernel_pmap)
1339 ptep_check(get_pte_page(spte));
1340#endif /* DEBUG_PTE_PAGE */
1341 num_removed = 0;
1342 num_unwired = 0;
1343
1344 for (cpte = spte; cpte < epte;
17. Loop condition is false. Execution continues on line 1456
1345 cpte += ptes_per_vm_page1, va += PAGE_SIZE(1 << 12)) {
1346
1347 if (*cpte == 0)
1348 continue;
1349 pa = pte_to_pa(*cpte)({ pt_entry_t __a = (pt_entry_t) ((*cpte) & 0x00007ffffffff000ULL
); ((((unsigned long *) 0xF5800000UL)[__a >> 12]) <<
12) | (__a & ((1 << 12)-1)); })
;
1350
1351 num_removed++;
1352 if (*cpte & INTEL_PTE_WIRED0x00000200)
1353 num_unwired++;
1354
1355 if (!valid_page(pa)(pmap_initialized && pmap_valid_page(pa))) {
1356
1357 /*
1358 * Outside range of managed physical memory.
1359 * Just remove the mappings.
1360 */
1361 int i = ptes_per_vm_page1;
1362 pt_entry_t *lpte = cpte;
1363 do {
1364#ifdef MACH_PV_PAGETABLES
1365 update[ii].ptr = kv_to_ma(lpte)({ vm_offset_t __a = (vm_offset_t) (((vm_offset_t)(lpte) - 0xC0000000UL
)); (((pt_entry_t) ((mfn_list[(((vm_size_t)(__a)) >> 12
)]))) << 12) | (__a & ((1 << 12)-1)); })
;
1366 update[ii].val = 0;
1367 ii++;
1368 if (ii == HYP_BATCH_MMU_UPDATES256) {
1369 hyp_mmu_update(kvtolin(&update)((vm_offset_t)(&update) - 0xC0000000UL + ((0xc0000000UL))
)
, ii, kvtolin(&n)((vm_offset_t)(&n) - 0xC0000000UL + ((0xc0000000UL))), DOMID_SELF(0x7FF0U));
1370 if (n != ii)
1371 panic("couldn't pmap_remove_range\n");
1372 ii = 0;
1373 }
1374#else /* MACH_PV_PAGETABLES */
1375 *lpte = 0;
1376#endif /* MACH_PV_PAGETABLES */
1377 lpte++;
1378 } while (--i > 0);
1379 continue;
1380 }
1381
1382 pai = pa_index(pa)((((vm_size_t)(pa - phys_first_addr)) >> 12));
1383 LOCK_PVH(pai);
1384
1385 /*
1386 * Get the modify and reference bits.
1387 */
1388 {
1389 int i;
1390 pt_entry_t *lpte;
1391
1392 i = ptes_per_vm_page1;
1393 lpte = cpte;
1394 do {
1395 pmap_phys_attributes[pai] |=
1396 *lpte & (PHYS_MODIFIED0x00000040|PHYS_REFERENCED0x00000020);
1397#ifdef MACH_PV_PAGETABLES
1398 update[ii].ptr = kv_to_ma(lpte)({ vm_offset_t __a = (vm_offset_t) (((vm_offset_t)(lpte) - 0xC0000000UL
)); (((pt_entry_t) ((mfn_list[(((vm_size_t)(__a)) >> 12
)]))) << 12) | (__a & ((1 << 12)-1)); })
;
1399 update[ii].val = 0;
1400 ii++;
1401 if (ii == HYP_BATCH_MMU_UPDATES256) {
1402 hyp_mmu_update(kvtolin(&update)((vm_offset_t)(&update) - 0xC0000000UL + ((0xc0000000UL))
)
, ii, kvtolin(&n)((vm_offset_t)(&n) - 0xC0000000UL + ((0xc0000000UL))), DOMID_SELF(0x7FF0U));
1403 if (n != ii)
1404 panic("couldn't pmap_remove_range\n");
1405 ii = 0;
1406 }
1407#else /* MACH_PV_PAGETABLES */
1408 *lpte = 0;
1409#endif /* MACH_PV_PAGETABLES */
1410 lpte++;
1411 } while (--i > 0);
1412 }
1413
1414 /*
1415 * Remove the mapping from the pvlist for
1416 * this physical page.
1417 */
1418 {
1419 pv_entry_t pv_h, prev, cur;
1420
1421 pv_h = pai_to_pvh(pai)(&pv_head_table[pai]);
1422 if (pv_h->pmap == PMAP_NULL((pmap_t) 0)) {
1423 panic("pmap_remove: null pv_list!");
1424 }
1425 if (pv_h->va == va && pv_h->pmap == pmap) {
1426 /*
1427 * Header is the pv_entry. Copy the next one
1428 * to header and free the next one (we cannot
1429 * free the header)
1430 */
1431 cur = pv_h->next;
1432 if (cur != PV_ENTRY_NULL((pv_entry_t) 0)) {
1433 *pv_h = *cur;
1434 PV_FREE(cur){ ; cur->next = pv_free_list; pv_free_list = cur; ((void)(
&pv_free_list_lock)); }
;
1435 }
1436 else {
1437 pv_h->pmap = PMAP_NULL((pmap_t) 0);
1438 }
1439 }
1440 else {
1441 cur = pv_h;
1442 do {
1443 prev = cur;
1444 if ((cur = prev->next) == PV_ENTRY_NULL((pv_entry_t) 0)) {
1445 panic("pmap-remove: mapping not in pv_list!");
1446 }
1447 } while (cur->va != va || cur->pmap != pmap);
1448 prev->next = cur->next;
1449 PV_FREE(cur){ ; cur->next = pv_free_list; pv_free_list = cur; ((void)(
&pv_free_list_lock)); }
;
1450 }
1451 UNLOCK_PVH(pai);
1452 }
1453 }
1454
1455#ifdef MACH_PV_PAGETABLES
1456 if (ii > HYP_BATCH_MMU_UPDATES256)
18. Taking false branch
1457 panic("overflowed array in pmap_remove_range");
1458 hyp_mmu_update(kvtolin(&update)((vm_offset_t)(&update) - 0xC0000000UL + ((0xc0000000UL))
)
, ii, kvtolin(&n)((vm_offset_t)(&n) - 0xC0000000UL + ((0xc0000000UL))), DOMID_SELF(0x7FF0U));
1459 if (n != ii)
19. The left operand of '!=' is a garbage value
1460 panic("couldn't pmap_remove_range\n");
1461#endif /* MACH_PV_PAGETABLES */
1462
1463 /*
1464 * Update the counts
1465 */
1466 pmap->stats.resident_count -= num_removed;
1467 pmap->stats.wired_count -= num_unwired;
1468}
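Editor's note on the reported defect: on the path above (steps 16-19), the for loop over cpte never executes, so ii stays 0 and n is never assigned before the MACH_PV_PAGETABLES epilogue calls hyp_mmu_update() and evaluates the comparison at line 1459; that is the garbage-value read being flagged. At run time the hypercall presumably stores the completed-update count through kvtolin(&n), but the analyzer cannot model that write-back. A minimal sketch of one way to make the comparison read defined data, assuming an empty batch is legitimate on this path (a hypothetical fix, not an upstream patch):

	int n = 0, ii = 0;	/* editor's sketch: give n a defined value up front */
	...
	if (ii > 0) {		/* skip the flush entirely when nothing was batched */
		hyp_mmu_update(kvtolin(&update), ii, kvtolin(&n), DOMID_SELF);
		if (n != ii)
			panic("couldn't pmap_remove_range\n");
	}

Either half of the sketch (initializing n at its declaration, or guarding the empty batch) is enough to remove the uninitialized read the checker reports.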
1469
1470/*
1471 * Remove the given range of addresses
1472 * from the specified map.
1473 *
1474 * It is assumed that the start and end are properly
1475 * rounded to the hardware page size.
1476 */
1477
1478void pmap_remove(
1479 pmap_t map,
1480 vm_offset_t s,
1481 vm_offset_t e)
1482{
1483 int spl;
1484 pt_entry_t *pde;
1485 pt_entry_t *spte, *epte;
1486 vm_offset_t l;
1487 vm_offset_t _s = s;
1488
1489 if (map == PMAP_NULL((pmap_t) 0))
1490 return;
1491
1492 PMAP_READ_LOCK(map, spl)((void)(spl));
1493
1494 pde = pmap_pde(map, s);
1495 while (s < e) {
1496 l = (s + PDE_MAPPED_SIZE(((vm_offset_t)(1) << 21))) & ~(PDE_MAPPED_SIZE(((vm_offset_t)(1) << 21))-1);
1497 if (l > e)
1498 l = e;
1499 if (*pde & INTEL_PTE_VALID0x00000001) {
1500 spte = (pt_entry_t *)ptetokv(*pde)(((vm_offset_t)(({ pt_entry_t __a = (pt_entry_t) ((*pde) &
0x00007ffffffff000ULL); ((((unsigned long *) 0xF5800000UL)[__a
>> 12]) << 12) | (__a & ((1 << 12)-1))
; })) + 0xC0000000UL))
;
1501 spte = &spte[ptenum(s)(((s) >> 12) & 0x1ff)];
1502 epte = &spte[intel_btop(l-s)(((unsigned long)(l-s)) >> 12)];
1503 pmap_remove_range(map, s, spte, epte);
1504 }
1505 s = l;
1506 pde++;
1507 }
1508 PMAP_UPDATE_TLBS(map, _s, e){ if ((map)->cpus_using) { hyp_mmuext_op_void(6); } };
1509
1510 PMAP_READ_UNLOCK(map, spl)((void)(spl));
1511}
1512
1513/*
1514 * Routine: pmap_page_protect
1515 *
1516 * Function:
1517 * Lower the permission for all mappings to a given
1518 * page.
1519 */
1520void pmap_page_protect(
1521 vm_offset_t phys,
1522 vm_prot_t prot)
1523{
1524 pv_entry_t pv_h, prev;
1525 pv_entry_t pv_e;
1526 pt_entry_t *pte;
1527 int pai;
1528 pmap_t pmap;
1529 int spl;
1530 boolean_t remove;
1531
1532 assert(phys != vm_page_fictitious_addr)({ if (!(phys != vm_page_fictitious_addr)) Assert("phys != vm_page_fictitious_addr"
, "../i386/intel/pmap.c", 1532); })
;
1533 if (!valid_page(phys)(pmap_initialized && pmap_valid_page(phys))) {
1534 /*
1535 * Not a managed page.
1536 */
1537 return;
1538 }
1539
1540 /*
1541 * Determine the new protection.
1542 */
1543 switch (prot) {
1544 case VM_PROT_READ((vm_prot_t) 0x01):
1545 case VM_PROT_READ((vm_prot_t) 0x01)|VM_PROT_EXECUTE((vm_prot_t) 0x04):
1546 remove = FALSE((boolean_t) 0);
1547 break;
1548 case VM_PROT_ALL(((vm_prot_t) 0x01)|((vm_prot_t) 0x02)|((vm_prot_t) 0x04)):
1549 return; /* nothing to do */
1550 default:
1551 remove = TRUE((boolean_t) 1);
1552 break;
1553 }
1554
1555 /*
1556 * Lock the pmap system first, since we will be changing
1557 * several pmaps.
1558 */
1559
1560 PMAP_WRITE_LOCK(spl)((void)(spl));
1561
1562 pai = pa_index(phys)((((vm_size_t)(phys - phys_first_addr)) >> 12));
1563 pv_h = pai_to_pvh(pai)(&pv_head_table[pai]);
1564
1565 /*
1566 * Walk down PV list, changing or removing all mappings.
1567 * We do not have to lock the pv_list because we have
1568 * the entire pmap system locked.
1569 */
1570 if (pv_h->pmap != PMAP_NULL((pmap_t) 0)) {
1571
1572 prev = pv_e = pv_h;
1573 do {
1574 vm_offset_t va;
1575
1576 pmap = pv_e->pmap;
1577 /*
1578 * Lock the pmap to block pmap_extract and similar routines.
1579 */
1580 simple_lock(&pmap->lock);
1581
1582 va = pv_e->va;
1583 pte = pmap_pte(pmap, va);
1584
1585 /*
1586 * Consistency checks.
1587 */
1588 /* assert(*pte & INTEL_PTE_VALID); XXX */
1589 /* assert(pte_to_phys(*pte) == phys); */
1590
1591 /*
1592 * Remove the mapping if new protection is NONE
1593 * or if write-protecting a kernel mapping.
1594 */
1595 if (remove || pmap == kernel_pmap) {
1596 /*
1597 * Remove the mapping, collecting any modify bits.
1598 */
1599 if (*pte & INTEL_PTE_WIRED0x00000200)
1600 panic("pmap_remove_all removing a wired page");
1601
1602 {
1603 int i = ptes_per_vm_page1;
1604
1605 do {
1606 pmap_phys_attributes[pai] |=
1607 *pte & (PHYS_MODIFIED0x00000040|PHYS_REFERENCED0x00000020);
1608#ifdef MACH_PV_PAGETABLES
1609 if (!hyp_mmu_update_pte(kv_to_ma(pte++)({ vm_offset_t __a = (vm_offset_t) (((vm_offset_t)(pte++) - 0xC0000000UL
)); (((pt_entry_t) ((mfn_list[(((vm_size_t)(__a)) >> 12
)]))) << 12) | (__a & ((1 << 12)-1)); })
, 0))
1610 panic("%s:%d could not clear pte %p\n",__FILE__"../i386/intel/pmap.c",__LINE__1610,pte-1);
1611#else /* MACH_PV_PAGETABLES */
1612 *pte++ = 0;
1613#endif /* MACH_PV_PAGETABLES */
1614 } while (--i > 0);
1615 }
1616
1617 pmap->stats.resident_count--;
1618
1619 /*
1620 * Remove the pv_entry.
1621 */
1622 if (pv_e == pv_h) {
1623 /*
1624 * Fix up head later.
1625 */
1626 pv_h->pmap = PMAP_NULL((pmap_t) 0);
1627 }
1628 else {
1629 /*
1630 * Delete this entry.
1631 */
1632 prev->next = pv_e->next;
1633 PV_FREE(pv_e){ ; pv_e->next = pv_free_list; pv_free_list = pv_e; ((void
)(&pv_free_list_lock)); }
;
1634 }
1635 }
1636 else {
1637 /*
1638 * Write-protect.
1639 */
1640 int i = ptes_per_vm_page1;
1641
1642 do {
1643#ifdef MACH_PV_PAGETABLES
1644 if (!hyp_mmu_update_pte(kv_to_ma(pte)({ vm_offset_t __a = (vm_offset_t) (((vm_offset_t)(pte) - 0xC0000000UL
)); (((pt_entry_t) ((mfn_list[(((vm_size_t)(__a)) >> 12
)]))) << 12) | (__a & ((1 << 12)-1)); })
, *pte & ~INTEL_PTE_WRITE0x00000002))
1645 panic("%s:%d could not disable write on pte %p\n",__FILE__"../i386/intel/pmap.c",__LINE__1645,pte);
1646#else /* MACH_PV_PAGETABLES */
1647 *pte &= ~INTEL_PTE_WRITE0x00000002;
1648#endif /* MACH_PV_PAGETABLES */
1649 pte++;
1650 } while (--i > 0);
1651
1652 /*
1653 * Advance prev.
1654 */
1655 prev = pv_e;
1656 }
1657 PMAP_UPDATE_TLBS(pmap, va, va + PAGE_SIZE){ if ((pmap)->cpus_using) { hyp_mmuext_op_void(6); } };
1658
1659 simple_unlock(&pmap->lock)((void)(&pmap->lock));
1660
1661 } while ((pv_e = prev->next) != PV_ENTRY_NULL((pv_entry_t) 0));
1662
1663 /*
1664 * If pv_head mapping was removed, fix it up.
1665 */
1666 if (pv_h->pmap == PMAP_NULL((pmap_t) 0)) {
1667 pv_e = pv_h->next;
1668 if (pv_e != PV_ENTRY_NULL((pv_entry_t) 0)) {
1669 *pv_h = *pv_e;
1670 PV_FREE(pv_e){ ; pv_e->next = pv_free_list; pv_free_list = pv_e; ((void
)(&pv_free_list_lock)); }
;
1671 }
1672 }
1673 }
1674
1675 PMAP_WRITE_UNLOCK(spl)((void)(spl));
1676}
1677
1678/*
1679 * Set the physical protection on the
1680 * specified range of this map as requested.
1681 * Will not increase permissions.
1682 */
1683void pmap_protect(
1684 pmap_t map,
1685 vm_offset_t s,
1686 vm_offset_t e,
1687 vm_prot_t prot)
1688{
1689 pt_entry_t *pde;
1690 pt_entry_t *spte, *epte;
1691 vm_offset_t l;
1692 int spl;
1693 vm_offset_t _s = s;
1694
1695 if (map == PMAP_NULL((pmap_t) 0))
1696 return;
1697
1698 /*
1699 * Determine the new protection.
1700 */
1701 switch (prot) {
1702 case VM_PROT_READ((vm_prot_t) 0x01):
1703 case VM_PROT_READ((vm_prot_t) 0x01)|VM_PROT_EXECUTE((vm_prot_t) 0x04):
1704 break;
1705 case VM_PROT_READ((vm_prot_t) 0x01)|VM_PROT_WRITE((vm_prot_t) 0x02):
1706 case VM_PROT_ALL(((vm_prot_t) 0x01)|((vm_prot_t) 0x02)|((vm_prot_t) 0x04)):
1707 return; /* nothing to do */
1708 default:
1709 pmap_remove(map, s, e);
1710 return;
1711 }
1712
1713 /*
1714 * If write-protecting in the kernel pmap,
1715 * remove the mappings; the i386 ignores
1716 * the write-permission bit in kernel mode.
1717 *
1718 * XXX should be #if'd for i386
1719 */
1720 if (map == kernel_pmap) {
1721 pmap_remove(map, s, e);
1722 return;
1723 }
1724
1725 SPLVM(spl)((void)(spl));
1726 simple_lock(&map->lock);
1727
1728 pde = pmap_pde(map, s);
1729 while (s < e) {
1730 l = (s + PDE_MAPPED_SIZE(((vm_offset_t)(1) << 21))) & ~(PDE_MAPPED_SIZE(((vm_offset_t)(1) << 21))-1);
1731 if (l > e)
1732 l = e;
1733 if (*pde & INTEL_PTE_VALID0x00000001) {
1734 spte = (pt_entry_t *)ptetokv(*pde)(((vm_offset_t)(({ pt_entry_t __a = (pt_entry_t) ((*pde) &
0x00007ffffffff000ULL); ((((unsigned long *) 0xF5800000UL)[__a
>> 12]) << 12) | (__a & ((1 << 12)-1))
; })) + 0xC0000000UL))
;
1735 spte = &spte[ptenum(s)(((s) >> 12) & 0x1ff)];
1736 epte = &spte[intel_btop(l-s)(((unsigned long)(l-s)) >> 12)];
1737
1738#ifdef MACH_PV_PAGETABLES
1739 int n, i = 0;
1740 struct mmu_update update[HYP_BATCH_MMU_UPDATES256];
1741#endif /* MACH_PV_PAGETABLES */
1742
1743 while (spte < epte) {
1744 if (*spte & INTEL_PTE_VALID0x00000001) {
1745#ifdef MACH_PV_PAGETABLES
1746 update[i].ptr = kv_to_ma(spte)({ vm_offset_t __a = (vm_offset_t) (((vm_offset_t)(spte) - 0xC0000000UL
)); (((pt_entry_t) ((mfn_list[(((vm_size_t)(__a)) >> 12
)]))) << 12) | (__a & ((1 << 12)-1)); })
;
1747 update[i].val = *spte & ~INTEL_PTE_WRITE0x00000002;
1748 i++;
1749 if (i == HYP_BATCH_MMU_UPDATES256) {
1750 hyp_mmu_update(kvtolin(&update)((vm_offset_t)(&update) - 0xC0000000UL + ((0xc0000000UL))
)
, i, kvtolin(&n)((vm_offset_t)(&n) - 0xC0000000UL + ((0xc0000000UL))), DOMID_SELF(0x7FF0U));
1751 if (n != i)
1752 panic("couldn't pmap_protect\n");
1753 i = 0;
1754 }
1755#else /* MACH_PV_PAGETABLES */
1756 *spte &= ~INTEL_PTE_WRITE0x00000002;
1757#endif /* MACH_PV_PAGETABLES */
1758 }
1759 spte++;
1760 }
1761#ifdef MACH_PV_PAGETABLES
1762 if (i > HYP_BATCH_MMU_UPDATES256)
1763 panic("overflowed array in pmap_protect");
1764 hyp_mmu_update(kvtolin(&update)((vm_offset_t)(&update) - 0xC0000000UL + ((0xc0000000UL))
)
, i, kvtolin(&n)((vm_offset_t)(&n) - 0xC0000000UL + ((0xc0000000UL))), DOMID_SELF(0x7FF0U));
1765 if (n != i)
1766 panic("couldn't pmap_protect\n");
1767#endif /* MACH_PV_PAGETABLES */
1768 }
1769 s = l;
1770 pde++;
1771 }
1772 PMAP_UPDATE_TLBS(map, _s, e){ if ((map)->cpus_using) { hyp_mmuext_op_void(6); } };
1773
1774 simple_unlock(&map->lock)((void)(&map->lock));
1775 SPLX(spl)((void)(spl));
1776}
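Editor's note: pmap_protect (lines 1739-1766) uses the same batching idiom: "int n, i = 0;", an unconditional hyp_mmu_update() after the scan, then "if (n != i)". On a path where a page directory entry is valid but none of the PTEs under it are, i stays 0 and n is never assigned by the C code before the comparison at line 1765. The checker flags only the instance in pmap_remove_range in this run, but the defensive initialization sketched above would apply here as well:

	int n = 0, i = 0;	/* editor's sketch, mirroring the note after pmap_remove_range */

This is an observation from the listing, not an additional finding reported by the checker.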
1777
1778/*
1779 * Insert the given physical page (p) at
1780 * the specified virtual address (v) in the
1781 * target physical map with the protection requested.
1782 *
1783 * If specified, the page will be wired down, meaning
1784 * that the related pte can not be reclaimed.
1785 *
1786 * NB: This is the only routine which MAY NOT lazy-evaluate
1787 * or lose information. That is, this routine must actually
1788 * insert this page into the given map NOW.
1789 */
1790void pmap_enter(
1791 pmap_t pmap,
1792 vm_offset_t v,
1793 vm_offset_t pa,
1794 vm_prot_t prot,
1795 boolean_t wired)
1796{
1797 pt_entry_t *pte;
1798 pv_entry_t pv_h;
1799 int i, pai;
1800 pv_entry_t pv_e;
1801 pt_entry_t template;
1802 int spl;
1803 vm_offset_t old_pa;
1804
1805 assert(pa != vm_page_fictitious_addr)({ if (!(pa != vm_page_fictitious_addr)) Assert("pa != vm_page_fictitious_addr"
, "../i386/intel/pmap.c", 1805); })
;
1806if (pmap_debug) printf("pmap(%lx, %lx)\n", v, pa);
1807 if (pmap == PMAP_NULL((pmap_t) 0))
1808 return;
1809
1810#if !MACH_KDB1
1811 if (pmap == kernel_pmap && (v < kernel_virtual_start || v >= kernel_virtual_end))
1812 panic("pmap_enter(%p, %p) falls in physical memory area!\n", v, pa);
1813#endif
1814 if (pmap == kernel_pmap && (prot & VM_PROT_WRITE((vm_prot_t) 0x02)) == 0
1815 && !wired /* hack for io_wire */ ) {
1816 /*
1817 * Because the 386 ignores write protection in kernel mode,
1818 * we cannot enter a read-only kernel mapping, and must
1819 * remove an existing mapping if changing it.
1820 *
1821 * XXX should be #if'd for i386
1822 */
1823 PMAP_READ_LOCK(pmap, spl)((void)(spl));
1824
1825 pte = pmap_pte(pmap, v);
1826 if (pte != PT_ENTRY_NULL((pt_entry_t *) 0) && *pte != 0) {
1827 /*
1828 * Invalidate the translation buffer,
1829 * then remove the mapping.
1830 */
1831 pmap_remove_range(pmap, v, pte,
1832 pte + ptes_per_vm_page1);
1833 PMAP_UPDATE_TLBS(pmap, v, v + PAGE_SIZE){ if ((pmap)->cpus_using) { hyp_mmuext_op_void(6); } };
1834 }
1835 PMAP_READ_UNLOCK(pmap, spl)((void)(spl));
1836 return;
1837 }
1838
1839 /*
1840 * Must allocate a new pvlist entry while we're unlocked;
1841 * Allocating may cause pageout (which will lock the pmap system).
1842 * If we determine we need a pvlist entry, we will unlock
1843 * and allocate one. Then we will retry, throwing away
1844 * the allocated entry later (if we no longer need it).
1845 */
1846 pv_e = PV_ENTRY_NULL((pv_entry_t) 0);
1847Retry:
1848 PMAP_READ_LOCK(pmap, spl)((void)(spl));
1849
1850 /*
1851 * Expand pmap to include this pte. Assume that
1852 * pmap is always expanded to include enough hardware
1853 * pages to map one VM page.
1854 */
1855
1856 while ((pte = pmap_pte(pmap, v)) == PT_ENTRY_NULL((pt_entry_t *) 0)) {
1857 /*
1858 * Need to allocate a new page-table page.
1859 */
1860 vm_offset_t ptp;
1861 pt_entry_t *pdp;
1862 int i;
1863
1864 if (pmap == kernel_pmap) {
1865 /*
1866 * Would have to enter the new page-table page in
1867 * EVERY pmap.
1868 */
1869 panic("pmap_expand kernel pmap to %#x", v);
1870 }
1871
1872 /*
1873 * Unlock the pmap and allocate a new page-table page.
1874 */
1875 PMAP_READ_UNLOCK(pmap, spl)((void)(spl));
1876
1877 ptp = phystokv(pmap_page_table_page_alloc())((vm_offset_t)(pmap_page_table_page_alloc()) + 0xC0000000UL);
1878
1879 /*
1880 * Re-lock the pmap and check that another thread has
1881 * not already allocated the page-table page. If it
1882 * has, discard the new page-table page (and try
1883 * again to make sure).
1884 */
1885 PMAP_READ_LOCK(pmap, spl)((void)(spl));
1886
1887 if (pmap_pte(pmap, v) != PT_ENTRY_NULL((pt_entry_t *) 0)) {
1888 /*
1889 * Oops...
1890 */
1891 PMAP_READ_UNLOCK(pmap, spl)((void)(spl));
1892 pmap_page_table_page_dealloc(kvtophys(ptp));
1893 PMAP_READ_LOCK(pmap, spl)((void)(spl));
1894 continue;
1895 }
1896
1897 /*
1898 * Enter the new page table page in the page directory.
1899 */
1900 i = ptes_per_vm_page1;
1901 /*XX pdp = &pmap->dirbase[pdenum(v) & ~(i-1)];*/
1902 pdp = pmap_pde(pmap, v);
1903 do {
1904#ifdef MACH_PV_PAGETABLES
1905 pmap_set_page_readonly((void *) ptp);
1906 if (!hyp_mmuext_op_mfn (MMUEXT_PIN_L1_TABLE0, kv_to_mfn(ptp)((mfn_list[(((vm_size_t)(((vm_offset_t)(ptp) - 0xC0000000UL))
) >> 12)]))
))
1907 panic("couldn't pin page %p(%p)\n",ptp,(vm_offset_t) kv_to_ma(ptp)({ vm_offset_t __a = (vm_offset_t) (((vm_offset_t)(ptp) - 0xC0000000UL
)); (((pt_entry_t) ((mfn_list[(((vm_size_t)(__a)) >> 12
)]))) << 12) | (__a & ((1 << 12)-1)); })
);
1908 if (!hyp_mmu_update_pte(pa_to_ma(kvtophys((vm_offset_t)pdp))({ vm_offset_t __a = (vm_offset_t) (kvtophys((vm_offset_t)pdp
)); (((pt_entry_t) ((mfn_list[(((vm_size_t)(__a)) >> 12
)]))) << 12) | (__a & ((1 << 12)-1)); })
,
1909 pa_to_pte(pa_to_ma(kvtophys(ptp)))((({ vm_offset_t __a = (vm_offset_t) (kvtophys(ptp)); (((pt_entry_t
) ((mfn_list[(((vm_size_t)(__a)) >> 12)]))) << 12
) | (__a & ((1 << 12)-1)); })) & 0x00007ffffffff000ULL
)
| INTEL_PTE_VALID0x00000001
1910 | INTEL_PTE_USER0x00000004
1911 | INTEL_PTE_WRITE0x00000002))
1912 panic("%s:%d could not set pde %p(%p,%p) to %p(%p,%p) %p\n",__FILE__"../i386/intel/pmap.c",__LINE__1912, pdp, kvtophys((vm_offset_t)pdp), (vm_offset_t) pa_to_ma(kvtophys((vm_offset_t)pdp))({ vm_offset_t __a = (vm_offset_t) (kvtophys((vm_offset_t)pdp
)); (((pt_entry_t) ((mfn_list[(((vm_size_t)(__a)) >> 12
)]))) << 12) | (__a & ((1 << 12)-1)); })
, ptp, kvtophys(ptp), (vm_offset_t) pa_to_ma(kvtophys(ptp))({ vm_offset_t __a = (vm_offset_t) (kvtophys(ptp)); (((pt_entry_t
) ((mfn_list[(((vm_size_t)(__a)) >> 12)]))) << 12
) | (__a & ((1 << 12)-1)); })
, (vm_offset_t) pa_to_pte(kv_to_ma(ptp))((({ vm_offset_t __a = (vm_offset_t) (((vm_offset_t)(ptp) - 0xC0000000UL
)); (((pt_entry_t) ((mfn_list[(((vm_size_t)(__a)) >> 12
)]))) << 12) | (__a & ((1 << 12)-1)); })) &
0x00007ffffffff000ULL)
);
1913#else /* MACH_PV_PAGETABLES */
1914 *pdp = pa_to_pte(kvtophys(ptp))((kvtophys(ptp)) & 0x00007ffffffff000ULL) | INTEL_PTE_VALID0x00000001
1915 | INTEL_PTE_USER0x00000004
1916 | INTEL_PTE_WRITE0x00000002;
1917#endif /* MACH_PV_PAGETABLES */
1918 pdp++;
1919 ptp += INTEL_PGBYTES4096;
1920 } while (--i > 0);
1921
1922 /*
1923 * Now, get the address of the page-table entry.
1924 */
1925 continue;
1926 }
1927
1928 /*
1929 * Special case if the physical page is already mapped
1930 * at this address.
1931 */
1932 old_pa = pte_to_pa(*pte)({ pt_entry_t __a = (pt_entry_t) ((*pte) & 0x00007ffffffff000ULL
); ((((unsigned long *) 0xF5800000UL)[__a >> 12]) <<
12) | (__a & ((1 << 12)-1)); })
;
1933 if (*pte && old_pa == pa) {
1934 /*
1935 * May be changing its wired attribute or protection
1936 */
1937
1938 if (wired && !(*pte & INTEL_PTE_WIRED0x00000200))
1939 pmap->stats.wired_count++;
1940 else if (!wired && (*pte & INTEL_PTE_WIRED0x00000200))
1941 pmap->stats.wired_count--;
1942
1943 template = pa_to_pte(pa)((pa) & 0x00007ffffffff000ULL) | INTEL_PTE_VALID0x00000001;
1944 if (pmap != kernel_pmap)
1945 template |= INTEL_PTE_USER0x00000004;
1946 if (prot & VM_PROT_WRITE((vm_prot_t) 0x02))
1947 template |= INTEL_PTE_WRITE0x00000002;
1948 if (machine_slot[cpu_number()(0)].cpu_type >= CPU_TYPE_I486((cpu_type_t) 17)
1949 && pa >= phys_last_addr)
1950 template |= INTEL_PTE_NCACHE0x00000010|INTEL_PTE_WTHRU0x00000008;
1951 if (wired)
1952 template |= INTEL_PTE_WIRED0x00000200;
1953 i = ptes_per_vm_page1;
1954 do {
1955 if (*pte & INTEL_PTE_MOD0x00000040)
1956 template |= INTEL_PTE_MOD0x00000040;
1957#ifdef MACH_PV_PAGETABLES
1958 if (!hyp_mmu_update_pte(kv_to_ma(pte)({ vm_offset_t __a = (vm_offset_t) (((vm_offset_t)(pte) - 0xC0000000UL
)); (((pt_entry_t) ((mfn_list[(((vm_size_t)(__a)) >> 12
)]))) << 12) | (__a & ((1 << 12)-1)); })
, pa_to_ma(template)({ vm_offset_t __a = (vm_offset_t) (template); (((pt_entry_t)
((mfn_list[(((vm_size_t)(__a)) >> 12)]))) << 12)
| (__a & ((1 << 12)-1)); })
))
1959 panic("%s:%d could not set pte %p to %p\n",__FILE__"../i386/intel/pmap.c",__LINE__1959,pte,template);
1960#else /* MACH_PV_PAGETABLES */
1961 WRITE_PTE(pte, template)*(pte) = template?({ vm_offset_t __a = (vm_offset_t) (template
); (((pt_entry_t) ((mfn_list[(((vm_size_t)(__a)) >> 12)
]))) << 12) | (__a & ((1 << 12)-1)); }):0;
1962#endif /* MACH_PV_PAGETABLES */
1963 pte++;
1964 pte_increment_pa(template)((template) += 0xfff +1);
1965 } while (--i > 0);
1966 PMAP_UPDATE_TLBS(pmap, v, v + PAGE_SIZE){ if ((pmap)->cpus_using) { hyp_mmuext_op_void(6); } };
1967 }
1968 else {
1969
1970 /*
1971 * Remove old mapping from the PV list if necessary.
1972 */
1973 if (*pte) {
1974 /*
1975 * Don't free the pte page if removing last
1976 * mapping - we will immediately replace it.
1977 */
1978 pmap_remove_range(pmap, v, pte,
1979 pte + ptes_per_vm_page1);
1980 PMAP_UPDATE_TLBS(pmap, v, v + PAGE_SIZE){ if ((pmap)->cpus_using) { hyp_mmuext_op_void(6); } };
1981 }
1982
1983 if (valid_page(pa)(pmap_initialized && pmap_valid_page(pa))) {
1984
1985 /*
1986 * Enter the mapping in the PV list for this
1987 * physical page.
1988 */
1989
1990 pai = pa_index(pa)((((vm_size_t)(pa - phys_first_addr)) >> 12));
1991 LOCK_PVH(pai);
1992 pv_h = pai_to_pvh(pai)(&pv_head_table[pai]);
1993
1994 if (pv_h->pmap == PMAP_NULL((pmap_t) 0)) {
1995 /*
1996 * No mappings yet
1997 */
1998 pv_h->va = v;
1999 pv_h->pmap = pmap;
2000 pv_h->next = PV_ENTRY_NULL((pv_entry_t) 0);
2001 }
2002 else {
2003#if DEBUG
2004 {
2005 /* check that this mapping is not already there */
2006 pv_entry_t e = pv_h;
2007 while (e != PV_ENTRY_NULL((pv_entry_t) 0)) {
2008 if (e->pmap == pmap && e->va == v)
2009 panic("pmap_enter: already in pv_list");
2010 e = e->next;
2011 }
2012 }
2013#endif /* DEBUG */
2014
2015 /*
2016 * Add new pv_entry after header.
2017 */
2018 if (pv_e == PV_ENTRY_NULL((pv_entry_t) 0)) {
2019 PV_ALLOC(pv_e){ ; if ((pv_e = pv_free_list) != 0) { pv_free_list = pv_e->
next; } ((void)(&pv_free_list_lock)); }
;
2020 if (pv_e == PV_ENTRY_NULL((pv_entry_t) 0)) {
2021 UNLOCK_PVH(pai);
2022 PMAP_READ_UNLOCK(pmap, spl)((void)(spl));
2023
2024 /*
2025 * Refill from cache.
2026 */
2027 pv_e = (pv_entry_t) kmem_cache_alloc(&pv_list_cache);
2028 goto Retry;
2029 }
2030 }
2031 pv_e->va = v;
2032 pv_e->pmap = pmap;
2033 pv_e->next = pv_h->next;
2034 pv_h->next = pv_e;
2035 /*
2036 * Remember that we used the pvlist entry.
2037 */
2038 pv_e = PV_ENTRY_NULL((pv_entry_t) 0);
2039 }
2040 UNLOCK_PVH(pai);
2041 }
2042
2043 /*
2044 * And count the mapping.
2045 */
2046
2047 pmap->stats.resident_count++;
2048 if (wired)
2049 pmap->stats.wired_count++;
2050
2051 /*
2052 * Build a template to speed up entering -
2053 * only the pfn changes.
2054 */
2055 template = pa_to_pte(pa)((pa) & 0x00007ffffffff000ULL) | INTEL_PTE_VALID0x00000001;
2056 if (pmap != kernel_pmap)
2057 template |= INTEL_PTE_USER0x00000004;
2058 if (prot & VM_PROT_WRITE((vm_prot_t) 0x02))
2059 template |= INTEL_PTE_WRITE0x00000002;
2060 if (machine_slot[cpu_number()(0)].cpu_type >= CPU_TYPE_I486((cpu_type_t) 17)
2061 && pa >= phys_last_addr)
2062 template |= INTEL_PTE_NCACHE0x00000010|INTEL_PTE_WTHRU0x00000008;
2063 if (wired)
2064 template |= INTEL_PTE_WIRED0x00000200;
2065 i = ptes_per_vm_page1;
2066 do {
2067#ifdef MACH_PV_PAGETABLES
2068 if (!(hyp_mmu_update_pte(kv_to_ma(pte)({ vm_offset_t __a = (vm_offset_t) (((vm_offset_t)(pte) - 0xC0000000UL
)); (((pt_entry_t) ((mfn_list[(((vm_size_t)(__a)) >> 12
)]))) << 12) | (__a & ((1 << 12)-1)); })
, pa_to_ma(template)({ vm_offset_t __a = (vm_offset_t) (template); (((pt_entry_t)
((mfn_list[(((vm_size_t)(__a)) >> 12)]))) << 12)
| (__a & ((1 << 12)-1)); })
)))
2069 panic("%s:%d could not set pte %p to %p\n",__FILE__"../i386/intel/pmap.c",__LINE__2069,pte,template);
2070#else /* MACH_PV_PAGETABLES */
2071 WRITE_PTE(pte, template)*(pte) = template?({ vm_offset_t __a = (vm_offset_t) (template
); (((pt_entry_t) ((mfn_list[(((vm_size_t)(__a)) >> 12)
]))) << 12) | (__a & ((1 << 12)-1)); }):0;
2072#endif /* MACH_PV_PAGETABLES */
2073 pte++;
2074 pte_increment_pa(template)((template) += 0xfff +1);
2075 } while (--i > 0);
2076 }
2077
2078 if (pv_e != PV_ENTRY_NULL((pv_entry_t) 0)) {
2079 PV_FREE(pv_e){ ; pv_e->next = pv_free_list; pv_free_list = pv_e; ((void
)(&pv_free_list_lock)); }
;
2080 }
2081
2082 PMAP_READ_UNLOCK(pmap, spl)((void)(spl));
2083}
2084
2085/*
2086 * Routine: pmap_change_wiring
2087 * Function: Change the wiring attribute for a map/virtual-address
2088 * pair.
2089 * In/out conditions:
2090 * The mapping must already exist in the pmap.
2091 */
2092void pmap_change_wiring(
2093 pmap_t map,
2094 vm_offset_t v,
2095 boolean_t wired)
2096{
2097 pt_entry_t *pte;
2098 int i;
2099 int spl;
2100
2101 /*
2102 * We must grab the pmap system lock because we may
2103 * change a pte_page queue.
2104 */
2105 PMAP_READ_LOCK(map, spl)((void)(spl));
2106
2107 if ((pte = pmap_pte(map, v)) == PT_ENTRY_NULL((pt_entry_t *) 0))
2108 panic("pmap_change_wiring: pte missing");
2109
2110 if (wired && !(*pte & INTEL_PTE_WIRED0x00000200)) {
2111 /*
2112 * wiring down mapping
2113 */
2114 map->stats.wired_count++;
2115 i = ptes_per_vm_page1;
2116 do {
2117 *pte++ |= INTEL_PTE_WIRED0x00000200;
2118 } while (--i > 0);
2119 }
2120 else if (!wired && (*pte & INTEL_PTE_WIRED0x00000200)) {
2121 /*
2122 * unwiring mapping
2123 */
2124 map->stats.wired_count--;
2125 i = ptes_per_vm_page1;
2126 do {
2127#ifdef MACH_PV_PAGETABLES
2128 if (!(hyp_mmu_update_pte(kv_to_ma(pte)({ vm_offset_t __a = (vm_offset_t) (((vm_offset_t)(pte) - 0xC0000000UL
)); (((pt_entry_t) ((mfn_list[(((vm_size_t)(__a)) >> 12
)]))) << 12) | (__a & ((1 << 12)-1)); })
, *pte & ~INTEL_PTE_WIRED0x00000200)))
2129 panic("%s:%d could not wire down pte %p\n",__FILE__"../i386/intel/pmap.c",__LINE__2129,pte);
2130#else /* MACH_PV_PAGETABLES */
2131 *pte &= ~INTEL_PTE_WIRED0x00000200;
2132#endif /* MACH_PV_PAGETABLES */
2133 pte++;
2134 } while (--i > 0);
2135 }
2136
2137 PMAP_READ_UNLOCK(map, spl)((void)(spl));
2138}
2139
2140/*
2141 * Routine: pmap_extract
2142 * Function:
2143 * Extract the physical page address associated
2144 * with the given map/virtual_address pair.
2145 */
2146
2147vm_offset_t pmap_extract(
2148 pmap_t pmap,
2149 vm_offset_t va)
2150{
2151 pt_entry_t *pte;
2152 vm_offset_t pa;
2153 int spl;
2154
2155 SPLVM(spl)((void)(spl));
2156 simple_lock(&pmap->lock);
2157 if ((pte = pmap_pte(pmap, va)) == PT_ENTRY_NULL((pt_entry_t *) 0))
2158 pa = (vm_offset_t) 0;
2159 else if (!(*pte & INTEL_PTE_VALID0x00000001))
2160 pa = (vm_offset_t) 0;
2161 else
2162 pa = pte_to_pa(*pte)({ pt_entry_t __a = (pt_entry_t) ((*pte) & 0x00007ffffffff000ULL
); ((((unsigned long *) 0xF5800000UL)[__a >> 12]) <<
12) | (__a & ((1 << 12)-1)); })
+ (va & INTEL_OFFMASK0xfff);
2163 simple_unlock(&pmap->lock)((void)(&pmap->lock));
2164 SPLX(spl)((void)(spl));
2165 return(pa);
2166}
2167
2168/*
2169 * Copy the range specified by src_addr/len
2170 * from the source map to the range dst_addr/len
2171 * in the destination map.
2172 *
2173 * This routine is only advisory and need not do anything.
2174 */
2175#if 0
2176void pmap_copy(dst_pmap, src_pmap, dst_addr, len, src_addr)
2177 pmap_t dst_pmap;
2178 pmap_t src_pmap;
2179 vm_offset_t dst_addr;
2180 vm_size_t len;
2181 vm_offset_t src_addr;
2182{
2183}
2184#endif /* 0 */
2185
2186/*
2187 * Routine: pmap_collect
2188 * Function:
2189 * Garbage collects the physical map system for
2190 * pages which are no longer used.
2191 * Success need not be guaranteed -- that is, there
2192 * may well be pages which are not referenced, but
2193 * others may be collected.
2194 * Usage:
2195 * Called by the pageout daemon when pages are scarce.
2196 */
2197void pmap_collect(pmap_t p)
2198{
2199 pt_entry_t *pdp, *ptp;
2200 pt_entry_t *eptp;
2201 vm_offset_t pa;
2202 int spl, wired;
2203
2204 if (p == PMAP_NULL((pmap_t) 0))
1. Assuming 'p' is not equal to null
2. Taking false branch
2205 return;
2206
2207 if (p == kernel_pmap)
3. Taking false branch
2208 return;
2209
2210 /*
2211 * Garbage collect map.
2212 */
2213 PMAP_READ_LOCK(p, spl)((void)(spl));
2214 for (pdp = p->dirbase;
4. Loop condition is true. Entering loop body
6. Loop condition is true. Entering loop body
8. Loop condition is true. Entering loop body
10. Loop condition is true. Entering loop body
2215 pdp < &p->dirbase[lin2pdenum(LINEAR_MIN_KERNEL_ADDRESS)(((((0xc0000000UL))) >> 21) & 0x7ff)];
2216 pdp += ptes_per_vm_page1)
2217 {
2218 if (*pdp & INTEL_PTE_VALID0x00000001) {
5. Taking false branch
7. Taking false branch
9. Taking false branch
11. Taking true branch
2219
2220 pa = pte_to_pa(*pdp)({ pt_entry_t __a = (pt_entry_t) ((*pdp) & 0x00007ffffffff000ULL
); ((((unsigned long *) 0xF5800000UL)[__a >> 12]) <<
12) | (__a & ((1 << 12)-1)); })
;
2221 ptp = (pt_entry_t *)phystokv(pa)((vm_offset_t)(pa) + 0xC0000000UL);
2222 eptp = ptp + NPTES((((unsigned long)(1)) << 12)/sizeof(pt_entry_t))*ptes_per_vm_page1;
2223
2224 /*
2225 * If the pte page has any wired mappings, we cannot
2226 * free it.
2227 */
2228 wired = 0;
2229 {
2230 pt_entry_t *ptep;
2231 for (ptep = ptp; ptep < eptp; ptep++) {
12. Loop condition is false. Execution continues on line 2238
2232 if (*ptep & INTEL_PTE_WIRED0x00000200) {
2233 wired = 1;
2234 break;
2235 }
2236 }
2237 }
2238 if (!wired) {
13. Taking true branch
2239 /*
2240 * Remove the virtual addresses mapped by this pte page.
2241 */
2242 { /*XXX big hack*/
2243 vm_offset_t va = pdenum2lin(pdp - p->dirbase)((vm_offset_t)(pdp - p->dirbase) << 21);
2244 if (p == kernel_pmap)
14. Taking false branch
2245 va = lintokv(va)((vm_offset_t)(va) - ((0xc0000000UL)) + 0xC0000000UL);
2246 pmap_remove_range(p,
15. Calling 'pmap_remove_range'
2247 va,
2248 ptp,
2249 eptp);
2250 }
2251
2252 /*
2253 * Invalidate the page directory pointer.
2254 */
2255 {
2256 int i = ptes_per_vm_page1;
2257 pt_entry_t *pdep = pdp;
2258 do {
2259#ifdef MACH_PV_PAGETABLES
2260 unsigned long pte = *pdep;
2261 void *ptable = (void*) ptetokv(pte)(((vm_offset_t)(({ pt_entry_t __a = (pt_entry_t) ((pte) &
0x00007ffffffff000ULL); ((((unsigned long *) 0xF5800000UL)[__a
>> 12]) << 12) | (__a & ((1 << 12)-1))
; })) + 0xC0000000UL))
;
2262 if (!(hyp_mmu_update_pte(pa_to_ma(kvtophys((vm_offset_t)pdep++))({ vm_offset_t __a = (vm_offset_t) (kvtophys((vm_offset_t)pdep
++)); (((pt_entry_t) ((mfn_list[(((vm_size_t)(__a)) >> 12
)]))) << 12) | (__a & ((1 << 12)-1)); })
, 0)))
2263 panic("%s:%d could not clear pde %p\n",__FILE__"../i386/intel/pmap.c",__LINE__2263,pdep-1);
2264 if (!hyp_mmuext_op_mfn (MMUEXT_UNPIN_TABLE4, kv_to_mfn(ptable)((mfn_list[(((vm_size_t)(((vm_offset_t)(ptable) - 0xC0000000UL
))) >> 12)]))
))
2265 panic("couldn't unpin page %p(%p)\n", ptable, (vm_offset_t) pa_to_ma(kvtophys((vm_offset_t)ptable))({ vm_offset_t __a = (vm_offset_t) (kvtophys((vm_offset_t)ptable
)); (((pt_entry_t) ((mfn_list[(((vm_size_t)(__a)) >> 12
)]))) << 12) | (__a & ((1 << 12)-1)); })
);
2266 pmap_set_page_readwrite(ptable);
2267#else /* MACH_PV_PAGETABLES */
2268 *pdep++ = 0;
2269#endif /* MACH_PV_PAGETABLES */
2270 } while (--i > 0);
2271 }
2272
2273 PMAP_READ_UNLOCK(p, spl)((void)(spl));
2274
2275 /*
2276 * And free the pte page itself.
2277 */
2278 {
2279 vm_page_t m;
2280
2281 vm_object_lock(pmap_object);
2282 m = vm_page_lookup(pmap_object, pa);
2283 if (m == VM_PAGE_NULL((vm_page_t) 0))
2284 panic("pmap_collect: pte page not in object");
2285 vm_page_lock_queues();
2286 vm_page_free(m);
2287 inuse_ptepages_count--;
2288 vm_page_unlock_queues()((void)(&vm_page_queue_lock));
2289 vm_object_unlock(pmap_object)((void)(&(pmap_object)->Lock));
2290 }
2291
2292 PMAP_READ_LOCK(p, spl)((void)(spl));
2293 }
2294 }
2295 }
2296 PMAP_UPDATE_TLBS(p, VM_MIN_ADDRESS, VM_MAX_ADDRESS){ if ((p)->cpus_using) { hyp_mmuext_op_void(6); } };
2297
2298 PMAP_READ_UNLOCK(p, spl)((void)(spl));
2299 return;
2300
2301}
2302
2303/*
2304 * Routine: pmap_activate
2305 * Function:
2306 * Binds the given physical map to the given
2307 * processor, and returns a hardware map description.
2308 */
2309#if 0
2310void pmap_activate(my_pmap, th, my_cpu)
2311 register pmap_t my_pmap;
2312 thread_t th;
2313 int my_cpu;
2314{
2315 PMAP_ACTIVATE(my_pmap, th, my_cpu);
2316}
2317#endif /* 0 */
2318
2319/*
2320 * Routine: pmap_deactivate
2321 * Function:
2322 * Indicates that the given physical map is no longer
2323 * in use on the specified processor. (This is a macro
2324 * in pmap.h)
2325 */
2326#if 0
2327void pmap_deactivate(pmap, th, which_cpu)
2328 pmap_t pmap;
2329 thread_t th;
2330 int which_cpu;
2331{
2332 PMAP_DEACTIVATE(pmap, th, which_cpu);
2333}
2334#endif /* 0 */
2335
2336/*
2337 * Routine: pmap_kernel
2338 * Function:
2339 * Returns the physical map handle for the kernel.
2340 */
2341#if 0
2342pmap_t pmap_kernel()(kernel_pmap)
2343{
2344 return (kernel_pmap);
2345}
2346#endif /* 0 */
2347
2348/*
2349 * pmap_zero_page zeros the specified (machine independent) page.
2350 * See machine/phys.c or machine/phys.s for implementation.
2351 */
2352#if 0
2353pmap_zero_page(phys)
2354 register vm_offset_t phys;
2355{
2356 register int i;
2357
2358 assert(phys != vm_page_fictitious_addr)({ if (!(phys != vm_page_fictitious_addr)) Assert("phys != vm_page_fictitious_addr"
, "../i386/intel/pmap.c", 2358); })
;
2359 i = PAGE_SIZE(1 << 12) / INTEL_PGBYTES4096;
2360 phys = intel_pfn(phys);
2361
2362 while (i--)
2363 zero_phys(phys++);
2364}
2365#endif /* 0 */
2366
2367/*
2368 * pmap_copy_page copies the specified (machine independent) page.
2369 * See machine/phys.c or machine/phys.s for implementation.
2370 */
2371#if 0
2372pmap_copy_page(src, dst)
2373 vm_offset_t src, dst;
2374{
2375 int i;
2376
2377 assert(src != vm_page_fictitious_addr)({ if (!(src != vm_page_fictitious_addr)) Assert("src != vm_page_fictitious_addr"
, "../i386/intel/pmap.c", 2377); })
;
2378 assert(dst != vm_page_fictitious_addr)({ if (!(dst != vm_page_fictitious_addr)) Assert("dst != vm_page_fictitious_addr"
, "../i386/intel/pmap.c", 2378); })
;
2379 i = PAGE_SIZE(1 << 12) / INTEL_PGBYTES4096;
2380
2381 while (i--) {
2382 copy_phys(intel_pfn(src), intel_pfn(dst));
2383 src += INTEL_PGBYTES4096;
2384 dst += INTEL_PGBYTES4096;
2385 }
2386}
2387#endif /* 0 */
2388
2389/*
2390 * Routine: pmap_pageable
2391 * Function:
2392 * Make the specified pages (by pmap, offset)
2393 * pageable (or not) as requested.
2394 *
2395 * A page which is not pageable may not take
2396 * a fault; therefore, its page table entry
2397 * must remain valid for the duration.
2398 *
2399 * This routine is merely advisory; pmap_enter
2400 * will specify that these pages are to be wired
2401 * down (or not) as appropriate.
2402 */
2403void
2404pmap_pageable(
2405 pmap_t pmap,
2406 vm_offset_t start,
2407 vm_offset_t end,
2408 boolean_t pageable)
2409{
2410}
2411
2412/*
2413 * Clear specified attribute bits.
2414 */
2415void
2416phys_attribute_clear(
2417 vm_offset_t phys,
2418 int bits)
2419{
2420 pv_entry_t pv_h;
2421 pv_entry_t pv_e;
2422 pt_entry_t *pte;
2423 int pai;
2424 pmap_t pmap;
2425 int spl;
2426
2427 assert(phys != vm_page_fictitious_addr)({ if (!(phys != vm_page_fictitious_addr)) Assert("phys != vm_page_fictitious_addr"
, "../i386/intel/pmap.c", 2427); })
;
2428 if (!valid_page(phys)(pmap_initialized && pmap_valid_page(phys))) {
2429 /*
2430 * Not a managed page.
2431 */
2432 return;
2433 }
2434
2435 /*
2436 * Lock the pmap system first, since we will be changing
2437 * several pmaps.
2438 */
2439
2440 PMAP_WRITE_LOCK(spl)((void)(spl));
2441
2442 pai = pa_index(phys)((((vm_size_t)(phys - phys_first_addr)) >> 12));
2443 pv_h = pai_to_pvh(pai)(&pv_head_table[pai]);
2444
2445 /*
2446 * Walk down PV list, clearing all modify or reference bits.
2447 * We do not have to lock the pv_list because we have
2448 * the entire pmap system locked.
2449 */
2450 if (pv_h->pmap != PMAP_NULL((pmap_t) 0)) {
2451 /*
2452 * There are some mappings.
2453 */
2454 for (pv_e = pv_h; pv_e != PV_ENTRY_NULL((pv_entry_t) 0); pv_e = pv_e->next) {
2455 vm_offset_t va;
2456
2457 pmap = pv_e->pmap;
2458 /*
2459 * Lock the pmap to block pmap_extract and similar routines.
2460 */
2461 simple_lock(&pmap->lock);
2462
2463 va = pv_e->va;
2464 pte = pmap_pte(pmap, va);
2465
2466#if 0
2467 /*
2468 * Consistency checks.
2469 */
2470 assert(*pte & INTEL_PTE_VALID)({ if (!(*pte & 0x00000001)) Assert("*pte & INTEL_PTE_VALID"
, "../i386/intel/pmap.c", 2470); })
;
2471 /* assert(pte_to_phys(*pte) == phys); */
2472#endif
2473
2474 /*
2475 * Clear modify or reference bits.
2476 */
2477 {
2478 int i = ptes_per_vm_page1;
2479 do {
2480#ifdef MACH_PV_PAGETABLES
2481 if (!(hyp_mmu_update_pte(kv_to_ma(pte)({ vm_offset_t __a = (vm_offset_t) (((vm_offset_t)(pte) - 0xC0000000UL
)); (((pt_entry_t) ((mfn_list[(((vm_size_t)(__a)) >> 12
)]))) << 12) | (__a & ((1 << 12)-1)); })
, *pte & ~bits)))
2482 panic("%s:%d could not clear bits %lx from pte %p\n",__FILE__"../i386/intel/pmap.c",__LINE__2482,bits,pte);
2483#else /* MACH_PV_PAGETABLES */
2484 *pte &= ~bits;
2485#endif /* MACH_PV_PAGETABLES */
2486 } while (--i > 0);
2487 }
2488 PMAP_UPDATE_TLBS(pmap, va, va + PAGE_SIZE){ if ((pmap)->cpus_using) { hyp_mmuext_op_void(6); } };
2489 simple_unlock(&pmap->lock)((void)(&pmap->lock));
2490 }
2491 }
2492
2493 pmap_phys_attributes[pai] &= ~bits;
2494
2495 PMAP_WRITE_UNLOCK(spl)((void)(spl));
2496}
2497
2498/*
2499 * Check specified attribute bits.
2500 */
2501boolean_t
2502phys_attribute_test(
2503 vm_offset_t phys,
2504 int bits)
2505{
2506 pv_entry_t pv_h;
2507 pv_entry_t pv_e;
2508 pt_entry_t *pte;
2509 int pai;
2510 pmap_t pmap;
2511 int spl;
2512
2513 assert(phys != vm_page_fictitious_addr)({ if (!(phys != vm_page_fictitious_addr)) Assert("phys != vm_page_fictitious_addr"
, "../i386/intel/pmap.c", 2513); })
;
2514 if (!valid_page(phys)(pmap_initialized && pmap_valid_page(phys))) {
2515 /*
2516 * Not a managed page.
2517 */
2518 return (FALSE((boolean_t) 0));
2519 }
2520
2521 /*
2522 * Lock the pmap system first, since we will be checking
2523 * several pmaps.
2524 */
2525
2526 PMAP_WRITE_LOCK(spl)((void)(spl));
2527
2528 pai = pa_index(phys)((((vm_size_t)(phys - phys_first_addr)) >> 12));
2529 pv_h = pai_to_pvh(pai)(&pv_head_table[pai]);
2530
2531 if (pmap_phys_attributes[pai] & bits) {
2532 PMAP_WRITE_UNLOCK(spl)((void)(spl));
2533 return (TRUE((boolean_t) 1));
2534 }
2535
2536 /*
2537 * Walk down PV list, checking all mappings.
2538 * We do not have to lock the pv_list because we have
2539 * the entire pmap system locked.
2540 */
2541 if (pv_h->pmap != PMAP_NULL((pmap_t) 0)) {
2542 /*
2543 * There are some mappings.
2544 */
2545 for (pv_e = pv_h; pv_e != PV_ENTRY_NULL((pv_entry_t) 0); pv_e = pv_e->next) {
2546
2547 pmap = pv_e->pmap;
2548 /*
2549 * Lock the pmap to block pmap_extract and similar routines.
2550 */
2551 simple_lock(&pmap->lock);
2552
2553 {
2554 vm_offset_t va;
2555
2556 va = pv_e->va;
2557 pte = pmap_pte(pmap, va);
2558
2559#if 0
2560 /*
2561 * Consistency checks.
2562 */
2563 assert(*pte & INTEL_PTE_VALID)({ if (!(*pte & 0x00000001)) Assert("*pte & INTEL_PTE_VALID"
, "../i386/intel/pmap.c", 2563); })
;
2564 /* assert(pte_to_phys(*pte) == phys); */
2565#endif
2566 }
2567
2568 /*
2569 * Check modify or reference bits.
2570 */
2571 {
2572 int i = ptes_per_vm_page1;
2573
2574 do {
2575 if (*pte & bits) {
2576 simple_unlock(&pmap->lock)((void)(&pmap->lock));
2577 PMAP_WRITE_UNLOCK(spl)((void)(spl));
2578 return (TRUE((boolean_t) 1));
2579 }
2580 } while (--i > 0);
2581 }
2582 simple_unlock(&pmap->lock)((void)(&pmap->lock));
2583 }
2584 }
2585 PMAP_WRITE_UNLOCK(spl)((void)(spl));
2586 return (FALSE((boolean_t) 0));
2587}
2588
2589/*
2590 * Clear the modify bits on the specified physical page.
2591 */
2592
2593void pmap_clear_modify(vm_offset_t phys)
2594{
2595 phys_attribute_clear(phys, PHYS_MODIFIED0x00000040);
2596}
2597
2598/*
2599 * pmap_is_modified:
2600 *
2601 * Return whether or not the specified physical page is modified
2602 * by any physical maps.
2603 */
2604
2605boolean_t pmap_is_modified(vm_offset_t phys)
2606{
2607 return (phys_attribute_test(phys, PHYS_MODIFIED0x00000040));
2608}
2609
2610/*
2611 * pmap_clear_reference:
2612 *
2613 * Clear the reference bit on the specified physical page.
2614 */
2615
2616void pmap_clear_reference(vm_offset_t phys)
2617{
2618 phys_attribute_clear(phys, PHYS_REFERENCED0x00000020);
2619}
2620
2621/*
2622 * pmap_is_referenced:
2623 *
2624 * Return whether or not the specified physical page is referenced
2625 * by any physical maps.
2626 */
2627
2628boolean_t pmap_is_referenced(vm_offset_t phys)
2629{
2630 return (phys_attribute_test(phys, PHYS_REFERENCED0x00000020));
2631}
2632
2633#if NCPUS1 > 1
2634/*
2635* TLB Coherence Code (TLB "shootdown" code)
2636*
2637* Threads that belong to the same task share the same address space and
2638* hence share a pmap. However, they may run on distinct cpus and thus
2639* have distinct TLBs that cache page table entries. In order to guarantee
2640* the TLBs are consistent, whenever a pmap is changed, all threads that
2641* are active in that pmap must have their TLB updated. To keep track of
2642* this information, the set of cpus that are currently using a pmap is
2643* maintained within each pmap structure (cpus_using). Pmap_activate() and
2644* pmap_deactivate() add and remove, respectively, a cpu from this set.
2645* Since the TLBs are not addressable over the bus, each processor must
2646* flush its own TLB; a processor that needs to invalidate another TLB
2647* needs to interrupt the processor that owns that TLB to signal the
2648* update.
2649*
2650* Whenever a pmap is updated, the lock on that pmap is locked, and all
2651* cpus using the pmap are signaled to invalidate. All threads that need
2652* to activate a pmap must wait for the lock to clear to await any updates
2653* in progress before using the pmap. They must ACQUIRE the lock to add
2654* their cpu to the cpus_using set. An implicit assumption made
2655* throughout the TLB code is that all kernel code that runs at or higher
2656* than splvm blocks out update interrupts, and that such code does not
2657* touch pageable pages.
2658*
2659* A shootdown interrupt serves another function besides signaling a
2660* processor to invalidate. The interrupt routine (pmap_update_interrupt)
2661* waits for both the pmap lock (and the kernel pmap lock) to clear,
2662* preventing user code from making implicit pmap updates while the
2663* sending processor is performing its update. (This could happen via a
2664* user data write reference that turns on the modify bit in the page
2665* table). It must wait for any kernel updates that may have started
2666* concurrently with a user pmap update because the IPC code
2667* changes mappings.
2668* Spinning on the VALUES of the locks is sufficient (rather than
2669* having to acquire the locks) because any updates that occur subsequent
2670* to finding the lock unlocked will be signaled via another interrupt.
2671* (This assumes the interrupt is cleared before the low level interrupt code
2672* calls pmap_update_interrupt()).
2673*
2674* The signaling processor must wait for any implicit updates in progress
2675* to terminate before continuing with its update. Thus it must wait for an
2676* acknowledgement of the interrupt from each processor for which such
2677* references could be made. For maintaining this information, a set
2678* cpus_active is used. A cpu is in this set if and only if it can
2679* use a pmap. When pmap_update_interrupt() is entered, a cpu is removed from
2680* this set; when all such cpus are removed, it is safe to update.
2681*
2682* Before attempting to acquire the update lock on a pmap, a cpu (A) must
2683* be at least at the priority of the interprocessor interrupt
2684* (splip<=splvm). Otherwise, A could grab a lock and be interrupted by a
2685* kernel update; it would spin forever in pmap_update_interrupt() trying
2686* to acquire the user pmap lock it had already acquired. Furthermore A
2687* must remove itself from cpus_active. Otherwise, another cpu holding
2688* the lock (B) could be in the process of sending an update signal to A,
2689* and thus be waiting for A to remove itself from cpus_active. If A is
2690* spinning on the lock at priority this will never happen and a deadlock
2691* will result.
2692*/
2693
2694/*
2695 * Signal another CPU that it must flush its TLB
2696 */
2697void signal_cpus(
2698 cpu_set use_list,
2699 pmap_t pmap,
2700 vm_offset_t start,
2701 vm_offset_t end)
2702{
2703 int which_cpu, j;
2704 pmap_update_list_t update_list_p;
2705
2706 while ((which_cpu = ffs(use_list)) != 0) {
2707 which_cpu -= 1; /* convert to 0 origin */
2708
2709 update_list_p = &cpu_update_list[which_cpu];
2710 simple_lock(&update_list_p->lock);
2711
2712 j = update_list_p->count;
2713 if (j >= UPDATE_LIST_SIZE) {
2714 /*
2715 * list overflowed. Change last item to
2716 * indicate overflow.
2717 */
2718 update_list_p->item[UPDATE_LIST_SIZE-1].pmap = kernel_pmap;
2719 update_list_p->item[UPDATE_LIST_SIZE-1].start = VM_MIN_ADDRESS(0);
2720 update_list_p->item[UPDATE_LIST_SIZE-1].end = VM_MAX_KERNEL_ADDRESS(0xF5800000UL - ((0xc0000000UL)) + 0xC0000000UL);
2721 }
2722 else {
2723 update_list_p->item[j].pmap = pmap;
2724 update_list_p->item[j].start = start;
2725 update_list_p->item[j].end = end;
2726 update_list_p->count = j+1;
2727 }
2728 cpu_update_needed[which_cpu] = TRUE((boolean_t) 1);
2729 simple_unlock(&update_list_p->lock)((void)(&update_list_p->lock));
2730
2731 if ((cpus_idle & (1 << which_cpu)) == 0)
2732 interrupt_processor(which_cpu);
2733 use_list &= ~(1 << which_cpu);
2734 }
2735}
2736
2737void process_pmap_updates(pmap_t my_pmap)
2738{
2739 int my_cpu = cpu_number()(0);
2740 pmap_update_list_t update_list_p;
2741 int j;
2742 pmap_t pmap;
2743
2744 update_list_p = &cpu_update_list[my_cpu];
2745 simple_lock(&update_list_p->lock);
2746
2747 for (j = 0; j < update_list_p->count; j++) {
2748 pmap = update_list_p->item[j].pmap;
2749 if (pmap == my_pmap ||
2750 pmap == kernel_pmap) {
2751
2752 INVALIDATE_TLB(pmap,hyp_mmuext_op_void(6)
2753 update_list_p->item[j].start,hyp_mmuext_op_void(6)
2754 update_list_p->item[j].end)hyp_mmuext_op_void(6);
2755 }
2756 }
2757 update_list_p->count = 0;
2758 cpu_update_needed[my_cpu] = FALSE((boolean_t) 0);
2759 simple_unlock(&update_list_p->lock)((void)(&update_list_p->lock));
2760}
2761
2762/*
2763 * Interrupt routine for TBIA requested from other processor.
2764 */
2765void pmap_update_interrupt(void)
2766{
2767 int my_cpu;
2768 pmap_t my_pmap;
2769 int s;
2770
2771 my_cpu = cpu_number()(0);
2772
2773 /*
2774 * Exit now if we're idle. We'll pick up the update request
2775 * when we go active, and we must not put ourselves back in
2776 * the active set because we'll never process the interrupt
2777 * while we're idle (thus hanging the system).
2778 */
2779 if (cpus_idle & (1 << my_cpu))
2780 return;
2781
2782 if (current_thread()(active_threads[(0)]) == THREAD_NULL((thread_t) 0))
2783 my_pmap = kernel_pmap;
2784 else {
2785 my_pmap = current_pmap()((((active_threads[(0)])->task->map)->pmap));
2786 if (!pmap_in_use(my_pmap, my_cpu)(((my_pmap)->cpus_using & (1 << (my_cpu))) != 0))
2787 my_pmap = kernel_pmap;
2788 }
2789
2790 /*
2791 * Raise spl to splvm (above splip) to block out pmap_extract
2792 * from IO code (which would put this cpu back in the active
2793 * set).
2794 */
2795 s = splvm();
2796
2797 do {
2798
2799 /*
2800 * Indicate that we're not using either user or kernel
2801 * pmap.
2802 */
2803 i_bit_clear(my_cpu, &cpus_active);
2804
2805 /*
2806 * Wait for any pmap updates in progress, on either user
2807 * or kernel pmap.
2808 */
2809 while (*(volatile int *)&my_pmap->lock.lock_data ||
2810 *(volatile int *)&kernel_pmap->lock.lock_data)
2811 continue;
2812
2813 process_pmap_updates(my_pmap);
2814
2815 i_bit_set(my_cpu, &cpus_active);
2816
2817 } while (cpu_update_needed[my_cpu]);
2818
2819 splx(s);
2820}
2821#else /* NCPUS > 1 */
2822/*
2823 * Dummy routine to satisfy external reference.
2824 */
2825void pmap_update_interrupt(void)
2826{
2827 /* should never be called. */
2828}
2829#endif /* NCPUS > 1 */
2830
2831#if defined(__i386__1)
2832/* Unmap page 0 to trap NULL references. */
2833void
2834pmap_unmap_page_zero (void)
2835{
2836 int *pte;
2837
2838 pte = (int *) pmap_pte (kernel_pmap, 0);
2839 if (!pte)
2840 return;
2841 assert (pte)({ if (!(pte)) Assert("pte", "../i386/intel/pmap.c", 2841); }
)
;
2842#ifdef MACH_PV_PAGETABLES
2843 if (!hyp_mmu_update_pte(kv_to_ma(pte)({ vm_offset_t __a = (vm_offset_t) (((vm_offset_t)(pte) - 0xC0000000UL
)); (((pt_entry_t) ((mfn_list[(((vm_size_t)(__a)) >> 12
)]))) << 12) | (__a & ((1 << 12)-1)); })
, 0))
2844 printf("couldn't unmap page 0\n");
2845#else /* MACH_PV_PAGETABLES */
2846 *pte = 0;
2847 INVALIDATE_TLB(kernel_pmap, 0, PAGE_SIZE)hyp_mmuext_op_void(6);
2848#endif /* MACH_PV_PAGETABLES */
2849}
2850#endif /* __i386__ */