Bug Summary

File: obj/../vm/vm_fault.c
Location: line 557, column 6
Description: Access to field 'task' results in a dereference of a null pointer
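
The reported path runs through the fault statistics code in vm_fault_page(): the vm_stat_sample() macro guards its own dereference of current_thread() (i.e. active_threads[0]) with a THREAD_NULL check, but the statement that follows on line 557, current_task()->reactivations++, expands to (active_threads[0])->task and dereferences the same pointer with no check. The comment above the macro notes that faults can occur before threads are running, which is exactly the situation the analyzer assumes. If this path is judged reachable rather than a false positive, one minimal fix is to repeat the guard around the per-task counter. The sketch below is illustrative only, written against the macros shown in this listing (current_thread, current_task, THREAD_NULL); it is not a patch taken from the tree:

    if (m->inactive) {
            vm_stat_sample(SAMPLED_PC_VM_REACTIVATION_FAULTS);
            vm_stat.reactivations++;
            /* Hypothetical guard: mirror the THREAD_NULL check that
               vm_stat_sample() already performs before touching the
               per-task counter. */
            if (current_thread() != THREAD_NULL)
                    current_task()->reactivations++;
    }

The same current_task()->counter++ pattern appears at lines 257, 475, 657, 747, and 863 of the listing, so any real fix would likely be applied there as well.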

Annotated Source Code

1/*
2 * Mach Operating System
3 * Copyright (c) 1994,1990,1989,1988,1987 Carnegie Mellon University.
4 * Copyright (c) 1993,1994 The University of Utah and
5 * the Computer Systems Laboratory (CSL).
6 * All rights reserved.
7 *
8 * Permission to use, copy, modify and distribute this software and its
9 * documentation is hereby granted, provided that both the copyright
10 * notice and this permission notice appear in all copies of the
11 * software, derivative works or modified versions, and any portions
12 * thereof, and that both notices appear in supporting documentation.
13 *
14 * CARNEGIE MELLON, THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF
15 * THIS SOFTWARE IN ITS "AS IS" CONDITION, AND DISCLAIM ANY LIABILITY
16 * OF ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF
17 * THIS SOFTWARE.
18 *
19 * Carnegie Mellon requests users of this software to return to
20 *
21 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
22 * School of Computer Science
23 * Carnegie Mellon University
24 * Pittsburgh PA 15213-3890
25 *
26 * any improvements or extensions that they make and grant Carnegie Mellon
27 * the rights to redistribute these changes.
28 */
29/*
30 * File: vm_fault.c
31 * Author: Avadis Tevanian, Jr., Michael Wayne Young
32 *
33 * Page fault handling module.
34 */
35
36#include <kern/printf.h>
37#include <vm/vm_fault.h>
38#include <mach/kern_return.h>
39#include <mach/message.h> /* for error codes */
40#include <kern/counters.h>
41#include <kern/debug.h>
42#include <kern/thread.h>
43#include <kern/sched_prim.h>
44#include <vm/vm_map.h>
45#include <vm/vm_object.h>
46#include <vm/vm_page.h>
47#include <vm/pmap.h>
48#include <mach/vm_statistics.h>
49#include <vm/vm_pageout.h>
50#include <mach/vm_param.h>
51#include <mach/memory_object.h>
52#include <vm/memory_object_user.user.h>
53 /* For memory_object_data_{request,unlock} */
54#include <kern/macro_help.h>
55#include <kern/slab.h>
56
57#if MACH_PCSAMPLE1
58#include <kern/pc_sample.h>
59#endif
60
61
62
63/*
64 * State needed by vm_fault_continue.
65 * This is a little hefty to drop directly
66 * into the thread structure.
67 */
68typedef struct vm_fault_state {
69 struct vm_map *vmf_map;
70 vm_offset_t vmf_vaddr;
71 vm_prot_t vmf_fault_type;
72 boolean_t vmf_change_wiring;
73 void (*vmf_continuation)();
74 vm_map_version_t vmf_version;
75 boolean_t vmf_wired;
76 struct vm_object *vmf_object;
77 vm_offset_t vmf_offset;
78 vm_prot_t vmf_prot;
79
80 boolean_t vmfp_backoff;
81 struct vm_object *vmfp_object;
82 vm_offset_t vmfp_offset;
83 struct vm_page *vmfp_first_m;
84 vm_prot_t vmfp_access;
85} vm_fault_state_t;
86
87struct kmem_cache vm_fault_state_cache;
88
89int vm_object_absent_max = 50;
90
91int vm_fault_debug = 0;
92
93boolean_t vm_fault_dirty_handling = FALSE((boolean_t) 0);
94boolean_t vm_fault_interruptible = TRUE((boolean_t) 1);
95
96boolean_t software_reference_bits = TRUE((boolean_t) 1);
97
98#if MACH_KDB0
99extern struct db_watchpoint *db_watchpoint_list;
100#endif /* MACH_KDB */
101
102/*
103 * Routine: vm_fault_init
104 * Purpose:
105 * Initialize our private data structures.
106 */
107void vm_fault_init(void)
108{
109 kmem_cache_init(&vm_fault_state_cache, "vm_fault_state",
110 sizeof(vm_fault_state_t), 0, NULL((void *) 0), NULL((void *) 0), NULL((void *) 0), 0);
111}
112
113/*
114 * Routine: vm_fault_cleanup
115 * Purpose:
116 * Clean up the result of vm_fault_page.
117 * Results:
118 * The paging reference for "object" is released.
119 * "object" is unlocked.
120 * If "top_page" is not null, "top_page" is
121 * freed and the paging reference for the object
122 * containing it is released.
123 *
124 * In/out conditions:
125 * "object" must be locked.
126 */
127void
128vm_fault_cleanup(object, top_page)
129 register vm_object_t object;
130 register vm_page_t top_page;
131{
132 vm_object_paging_end(object);
133 vm_object_unlock(object);
134
135 if (top_page != VM_PAGE_NULL((vm_page_t) 0)) {
136 object = top_page->object;
137 vm_object_lock(object);
138 VM_PAGE_FREE(top_page)({ ; vm_page_free(top_page); ; });
139 vm_object_paging_end(object);
140 vm_object_unlock(object);
141 }
142}
143
144
145#if MACH_PCSAMPLE1
146/*
147 * Do PC sampling on current thread, assuming
148 * that it is the thread taking this page fault.
149 *
150 * Must check for THREAD_NULL, since faults
151 * can occur before threads are running.
152 */
153
154#define vm_stat_sample(flavor) \
155 MACRO_BEGIN \
156 thread_t _thread_ = current_thread(); \
157 \
158 if (_thread_ != THREAD_NULL) \
159 take_pc_sample_macro(_thread_, (flavor)); \
160 MACRO_END
161
162#else
163#define vm_stat_sample(x)
164#endif /* MACH_PCSAMPLE */
165
166
167
168/*
169 * Routine: vm_fault_page
170 * Purpose:
171 * Find the resident page for the virtual memory
172 * specified by the given virtual memory object
173 * and offset.
174 * Additional arguments:
175 * The required permissions for the page is given
176 * in "fault_type". Desired permissions are included
177 * in "protection".
178 *
179 * If the desired page is known to be resident (for
180 * example, because it was previously wired down), asserting
181 * the "unwiring" parameter will speed the search.
182 *
183 * If the operation can be interrupted (by thread_abort
184 * or thread_terminate), then the "interruptible"
185 * parameter should be asserted.
186 *
187 * Results:
188 * The page containing the proper data is returned
189 * in "result_page".
190 *
191 * In/out conditions:
192 * The source object must be locked and referenced,
193 * and must donate one paging reference. The reference
194 * is not affected. The paging reference and lock are
195 * consumed.
196 *
197 * If the call succeeds, the object in which "result_page"
198 * resides is left locked and holding a paging reference.
199 * If this is not the original object, a busy page in the
200 * original object is returned in "top_page", to prevent other
201 * callers from pursuing this same data, along with a paging
202 * reference for the original object. The "top_page" should
203 * be destroyed when this guarantee is no longer required.
204 * The "result_page" is also left busy. It is not removed
205 * from the pageout queues.
206 */
207vm_fault_return_t vm_fault_page(first_object, first_offset,
208 fault_type, must_be_resident, interruptible,
209 protection,
210 result_page, top_page,
211 resume, continuation)
212 /* Arguments: */
213 vm_object_t first_object; /* Object to begin search */
214 vm_offset_t first_offset; /* Offset into object */
215 vm_prot_t fault_type; /* What access is requested */
216 boolean_t must_be_resident;/* Must page be resident? */
217 boolean_t interruptible; /* May fault be interrupted? */
218 /* Modifies in place: */
219 vm_prot_t *protection; /* Protection for mapping */
220 /* Returns: */
221 vm_page_t *result_page; /* Page found, if successful */
222 vm_page_t *top_page; /* Page in top object, if
223 * not result_page.
224 */
225 /* More arguments: */
226 boolean_t resume; /* We are restarting. */
227 void (*continuation)(); /* Continuation for blocking. */
228{
229 register
230 vm_page_t m;
231 register
232 vm_object_t object;
233 register
234 vm_offset_t offset;
235 vm_page_t first_m;
236 vm_object_t next_object;
237 vm_object_t copy_object;
238 boolean_t look_for_page;
239 vm_prot_t access_required;
240
241 if (resume) {
[1] Assuming 'resume' is 0
[2] Taking false branch
242 register vm_fault_state_t *state =
243 (vm_fault_state_t *) current_thread()(active_threads[(0)])->ith_othersaved.other;
244
245 if (state->vmfp_backoff)
246 goto after_block_and_backoff;
247
248 object = state->vmfp_object;
249 offset = state->vmfp_offset;
250 first_m = state->vmfp_first_m;
251 access_required = state->vmfp_access;
252 goto after_thread_block;
253 }
254
255 vm_stat_sample(SAMPLED_PC_VM_FAULTS_ANY);
256 vm_stat.faults++; /* needs lock XXX */
257 current_task()((active_threads[(0)])->task)->faults++;
258
259/*
260 * Recovery actions
261 */
262#define RELEASE_PAGE(m) \
263 MACRO_BEGIN \
264 PAGE_WAKEUP_DONE(m); \
265 vm_page_lock_queues(); \
266 if (!m->active && !m->inactive) \
267 vm_page_activate(m); \
268 vm_page_unlock_queues(); \
269 MACRO_END
270
271 if (vm_fault_dirty_handling
[3] Assuming 'vm_fault_dirty_handling' is 0
[4] Taking false branch
272#if MACH_KDB0
273 /*
274 * If there are watchpoints set, then
275 * we don't want to give away write permission
276 * on a read fault. Make the task write fault,
277 * so that the watchpoint code notices the access.
278 */
279 || db_watchpoint_list
280#endif /* MACH_KDB */
281 ) {
282 /*
283 * If we aren't asking for write permission,
284 * then don't give it away. We're using write
285 * faults to set the dirty bit.
286 */
287 if (!(fault_type & VM_PROT_WRITE((vm_prot_t) 0x02)))
288 *protection &= ~VM_PROT_WRITE((vm_prot_t) 0x02);
289 }
290
291 if (!vm_fault_interruptible)
[5] Assuming 'vm_fault_interruptible' is not equal to 0
[6] Taking false branch
292 interruptible = FALSE((boolean_t) 0);
293
294 /*
295 * INVARIANTS (through entire routine):
296 *
297 * 1) At all times, we must either have the object
298 * lock or a busy page in some object to prevent
299 * some other thread from trying to bring in
300 * the same page.
301 *
302 * Note that we cannot hold any locks during the
303 * pager access or when waiting for memory, so
304 * we use a busy page then.
305 *
306 * Note also that we aren't as concerned about more than
307 * one thread attempting to memory_object_data_unlock
308 * the same page at once, so we don't hold the page
309 * as busy then, but do record the highest unlock
310 * value so far. [Unlock requests may also be delivered
311 * out of order.]
312 *
313 * 2) To prevent another thread from racing us down the
314 * shadow chain and entering a new page in the top
315 * object before we do, we must keep a busy page in
316 * the top object while following the shadow chain.
317 *
318 * 3) We must increment paging_in_progress on any object
319 * for which we have a busy page, to prevent
320 * vm_object_collapse from removing the busy page
321 * without our noticing.
322 *
323 * 4) We leave busy pages on the pageout queues.
324 * If the pageout daemon comes across a busy page,
325 * it will remove the page from the pageout queues.
326 */
327
328 /*
329 * Search for the page at object/offset.
330 */
331
332 object = first_object;
333 offset = first_offset;
334 first_m = VM_PAGE_NULL((vm_page_t) 0);
335 access_required = fault_type;
336
337 /*
338 * See whether this page is resident
339 */
340
341 while (TRUE((boolean_t) 1)) {
[7] Loop condition is true. Entering loop body
[16] Loop condition is true. Entering loop body
[23] Loop condition is true. Entering loop body
[29] Loop condition is true. Entering loop body
342 m = vm_page_lookup(object, offset);
343 if (m != VM_PAGE_NULL((vm_page_t) 0)) {
[8] Assuming 'm' is equal to null
[9] Taking false branch
[17] Assuming 'm' is equal to null
[18] Taking false branch
[24] Assuming 'm' is equal to null
[25] Taking false branch
[30] Assuming 'm' is not equal to null
[31] Taking true branch
344 /*
345 * If the page is being brought in,
346 * wait for it and then retry.
347 *
348 * A possible optimization: if the page
349 * is known to be resident, we can ignore
350 * pages that are absent (regardless of
351 * whether they're busy).
352 */
353
354 if (m->busy) {
[32] Taking false branch
355 kern_return_t wait_result;
356
357 PAGE_ASSERT_WAIT(m, interruptible);
358 vm_object_unlock(object);
359 if (continuation != (void (*)()) 0) {
360 register vm_fault_state_t *state =
361 (vm_fault_state_t *) current_thread()(active_threads[(0)])->ith_othersaved.other;
362
363 /*
364 * Save variables in case
365 * thread_block discards
366 * our kernel stack.
367 */
368
369 state->vmfp_backoff = FALSE((boolean_t) 0);
370 state->vmfp_object = object;
371 state->vmfp_offset = offset;
372 state->vmfp_first_m = first_m;
373 state->vmfp_access =
374 access_required;
375 state->vmf_prot = *protection;
376
377 counter(c_vm_fault_page_block_busy_user++);
378 thread_block(continuation);
379 } else
380 {
381 counter(c_vm_fault_page_block_busy_kernel++);
382 thread_block((void (*)()) 0);
383 }
384 after_thread_block:
385 wait_result = current_thread()(active_threads[(0)])->wait_result;
386 vm_object_lock(object);
387 if (wait_result != THREAD_AWAKENED0) {
388 vm_fault_cleanup(object, first_m);
389 if (wait_result == THREAD_RESTART3)
390 return(VM_FAULT_RETRY1);
391 else
392 return(VM_FAULT_INTERRUPTED2);
393 }
394 continue;
395 }
396
397 /*
398 * If the page is in error, give up now.
399 */
400
401 if (m->error) {
[33] Taking false branch
402 VM_PAGE_FREE(m)({ ; vm_page_free(m); ; });
403 vm_fault_cleanup(object, first_m);
404 return(VM_FAULT_MEMORY_ERROR5);
405 }
406
407 /*
408 * If the page isn't busy, but is absent,
409 * then it was deemed "unavailable".
410 */
411
412 if (m->absent) {
[34] Taking false branch
413 /*
414 * Remove the non-existent page (unless it's
415 * in the top object) and move on down to the
416 * next object (if there is one).
417 */
418
419 offset += object->shadow_offset;
420 access_required = VM_PROT_READ((vm_prot_t) 0x01);
421 next_object = object->shadow;
422 if (next_object == VM_OBJECT_NULL((vm_object_t) 0)) {
423 vm_page_t real_m;
424
425 assert(!must_be_resident);
426
427 /*
428 * Absent page at bottom of shadow
429 * chain; zero fill the page we left
430 * busy in the first object, and flush
431 * the absent page. But first we
432 * need to allocate a real page.
433 */
434
435 real_m = vm_page_grab(!object->internal);
436 if (real_m == VM_PAGE_NULL((vm_page_t) 0)) {
437 vm_fault_cleanup(object, first_m);
438 return(VM_FAULT_MEMORY_SHORTAGE3);
439 }
440
441 if (object != first_object) {
442 VM_PAGE_FREE(m)({ ; vm_page_free(m); ; });
443 vm_object_paging_end(object);
444 vm_object_unlock(object);
445 object = first_object;
446 offset = first_offset;
447 m = first_m;
448 first_m = VM_PAGE_NULL((vm_page_t) 0);
449 vm_object_lock(object);
450 }
451
452 VM_PAGE_FREE(m)({ ; vm_page_free(m); ; });
453 assert(real_m->busy);
454 vm_page_lock_queues();
455 vm_page_insert(real_m, object, offset);
456 vm_page_unlock_queues();
457 m = real_m;
458
459 /*
460 * Drop the lock while zero filling
461 * page. Then break because this
462 * is the page we wanted. Checking
463 * the page lock is a waste of time;
464 * this page was either absent or
465 * newly allocated -- in both cases
466 * it can't be page locked by a pager.
467 */
468 vm_object_unlock(object);
469
470 vm_page_zero_fill(m);
471
472 vm_stat_sample(SAMPLED_PC_VM_ZFILL_FAULTS);
473
474 vm_stat.zero_fill_count++;
475 current_task()((active_threads[(0)])->task)->zero_fills++;
476 vm_object_lock(object);
477 pmap_clear_modify(m->phys_addr);
478 break;
479 } else {
480 if (must_be_resident) {
481 vm_object_paging_end(object);
482 } else if (object != first_object) {
483 vm_object_paging_end(object);
484 VM_PAGE_FREE(m)({ ; vm_page_free(m); ; });
485 } else {
486 first_m = m;
487 m->absent = FALSE((boolean_t) 0);
488 vm_object_absent_release(object);
489 m->busy = TRUE((boolean_t) 1);
490
491 vm_page_lock_queues();
492 VM_PAGE_QUEUES_REMOVE(m);
493 vm_page_unlock_queues();
494 }
495 vm_object_lock(next_object);
496 vm_object_unlock(object);
497 object = next_object;
498 vm_object_paging_begin(object)((object)->paging_in_progress++);
499 continue;
500 }
501 }
502
503 /*
504 * If the desired access to this page has
505 * been locked out, request that it be unlocked.
506 */
507
508 if (access_required & m->page_lock) {
[35] Taking false branch
509 if ((access_required & m->unlock_request) != access_required) {
510 vm_prot_t new_unlock_request;
511 kern_return_t rc;
512
513 if (!object->pager_ready) {
514 vm_object_assert_wait(object,
515 VM_OBJECT_EVENT_PAGER_READY,
516 interruptible);
517 goto block_and_backoff;
518 }
519
520 new_unlock_request = m->unlock_request =
521 (access_required | m->unlock_request);
522 vm_object_unlock(object);
523 if ((rc = memory_object_data_unlock(
524 object->pager,
525 object->pager_request,
526 offset + object->paging_offset,
527 PAGE_SIZE(1 << 12),
528 new_unlock_request))
529 != KERN_SUCCESS0) {
530 printf("vm_fault: memory_object_data_unlock failed\n");
531 vm_object_lock(object);
532 vm_fault_cleanup(object, first_m);
533 return((rc == MACH_SEND_INTERRUPTED0x10000007) ?
534 VM_FAULT_INTERRUPTED2 :
535 VM_FAULT_MEMORY_ERROR5);
536 }
537 vm_object_lock(object);
538 continue;
539 }
540
541 PAGE_ASSERT_WAIT(m, interruptible);
542 goto block_and_backoff;
543 }
544
545 /*
546 * We mark the page busy and leave it on
547 * the pageout queues. If the pageout
548 * daemon comes across it, then it will
549 * remove the page.
550 */
551
552 if (!software_reference_bits) {
[36] Assuming 'software_reference_bits' is 0
[37] Taking true branch
553 vm_page_lock_queues();
554 if (m->inactive) {
[38] Taking true branch
555 vm_stat_sample(SAMPLED_PC_VM_REACTIVATION_FAULTS);
[39] Within the expansion of the macro 'vm_stat_sample':
     [a] Assuming '_thread_' is equal to null
556 vm_stat.reactivations++;
557 current_task()((active_threads[(0)])->task)->reactivations++;
[40] Within the expansion of the macro 'current_task':
     [a] Access to field 'task' results in a dereference of a null pointer
558 }
559
560 VM_PAGE_QUEUES_REMOVE(m);
561 vm_page_unlock_queues();
562 }
563
564 assert(!m->busy);
565 m->busy = TRUE((boolean_t) 1);
566 assert(!m->absent);
567 break;
568 }
569
570 look_for_page =
571 (object->pager_created)
572#if MACH_PAGEMAP1
573 && (vm_external_state_get(object->existence_info, offset + object->paging_offset) !=
574 VM_EXTERNAL_STATE_ABSENT3)
575#endif /* MACH_PAGEMAP */
576 ;
577
578 if ((look_for_page || (object == first_object))
[11] Taking false branch
579 && !must_be_resident) {
[10] Assuming 'must_be_resident' is not equal to 0
580 /*
581 * Allocate a new page for this object/offset
582 * pair.
583 */
584
585 m = vm_page_grab_fictitious();
586 if (m == VM_PAGE_NULL((vm_page_t) 0)) {
587 vm_fault_cleanup(object, first_m);
588 return(VM_FAULT_FICTITIOUS_SHORTAGE4);
589 }
590
591 vm_page_lock_queues();
592 vm_page_insert(m, object, offset);
593 vm_page_unlock_queues();
594 }
595
596 if (look_for_page && !must_be_resident) {
597 kern_return_t rc;
598
599 /*
600 * If the memory manager is not ready, we
601 * cannot make requests.
602 */
603 if (!object->pager_ready) {
604 vm_object_assert_wait(object,
605 VM_OBJECT_EVENT_PAGER_READY,
606 interruptible);
607 VM_PAGE_FREE(m)({ ; vm_page_free(m); ; });
608 goto block_and_backoff;
609 }
610
611 if (object->internal) {
612 /*
613 * Requests to the default pager
614 * must reserve a real page in advance,
615 * because the pager's data-provided
616 * won't block for pages.
617 */
618
619 if (m->fictitious && !vm_page_convert(m, FALSE((boolean_t) 0))) {
620 VM_PAGE_FREE(m)({ ; vm_page_free(m); ; });
621 vm_fault_cleanup(object, first_m);
622 return(VM_FAULT_MEMORY_SHORTAGE3);
623 }
624 } else if (object->absent_count >
625 vm_object_absent_max) {
626 /*
627 * If there are too many outstanding page
628 * requests pending on this object, we
629 * wait for them to be resolved now.
630 */
631
632 vm_object_absent_assert_wait(object, interruptible);
633 VM_PAGE_FREE(m)({ ; vm_page_free(m); ; });
634 goto block_and_backoff;
635 }
636
637 /*
638 * Indicate that the page is waiting for data
639 * from the memory manager.
640 */
641
642 m->absent = TRUE((boolean_t) 1);
643 object->absent_count++;
644
645 /*
646 * We have a busy page, so we can
647 * release the object lock.
648 */
649 vm_object_unlock(object);
650
651 /*
652 * Call the memory manager to retrieve the data.
653 */
654
655 vm_stat.pageins++;
656 vm_stat_sample(SAMPLED_PC_VM_PAGEIN_FAULTS);
657 current_task()((active_threads[(0)])->task)->pageins++;
658
659 if ((rc = memory_object_data_request(object->pager,
660 object->pager_request,
661 m->offset + object->paging_offset,
662 PAGE_SIZE(1 << 12), access_required)) != KERN_SUCCESS0) {
663 if (rc != MACH_SEND_INTERRUPTED0x10000007)
664 printf("%s(0x%p, 0x%p, 0x%lx, 0x%x, 0x%x) failed, %x\n",
665 "memory_object_data_request",
666 object->pager,
667 object->pager_request,
668 m->offset + object->paging_offset,
669 PAGE_SIZE(1 << 12), access_required, rc);
670 /*
671 * Don't want to leave a busy page around,
672 * but the data request may have blocked,
673 * so check if it's still there and busy.
674 */
675 vm_object_lock(object);
676 if (m == vm_page_lookup(object,offset) &&
677 m->absent && m->busy)
678 VM_PAGE_FREE(m)({ ; vm_page_free(m); ; });
679 vm_fault_cleanup(object, first_m);
680 return((rc == MACH_SEND_INTERRUPTED0x10000007) ?
681 VM_FAULT_INTERRUPTED2 :
682 VM_FAULT_MEMORY_ERROR5);
683 }
684
685 /*
686 * Retry with same object/offset, since new data may
687 * be in a different page (i.e., m is meaningless at
688 * this point).
689 */
690 vm_object_lock(object);
691 continue;
692 }
693
694 /*
695 * For the XP system, the only case in which we get here is if
696 * object has no pager (or unwiring). If the pager doesn't
697 * have the page this is handled in the m->absent case above
698 * (and if you change things here you should look above).
699 */
700 if (object == first_object)
[12] Taking true branch
[19] Taking false branch
[26] Taking false branch
701 first_m = m;
702 else
703 {
704 assert(m == VM_PAGE_NULL);
705 }
706
707 /*
708 * Move on to the next object. Lock the next
709 * object before unlocking the current one.
710 */
711 access_required = VM_PROT_READ((vm_prot_t) 0x01);
712
713 offset += object->shadow_offset;
714 next_object = object->shadow;
715 if (next_object == VM_OBJECT_NULL((vm_object_t) 0)) {
[13] Assuming 'next_object' is not equal to null
[14] Taking false branch
[20] Assuming 'next_object' is not equal to null
[21] Taking false branch
[27] Assuming 'next_object' is not equal to null
[28] Taking false branch
716 assert(!must_be_resident);
717
718 /*
719 * If there's no object left, fill the page
720 * in the top object with zeros. But first we
721 * need to allocate a real page.
722 */
723
724 if (object != first_object) {
725 vm_object_paging_end(object);
726 vm_object_unlock(object);
727
728 object = first_object;
729 offset = first_offset;
730 vm_object_lock(object);
731 }
732
733 m = first_m;
734 assert(m->object == object);
735 first_m = VM_PAGE_NULL((vm_page_t) 0);
736
737 if (m->fictitious && !vm_page_convert(m, !object->internal)) {
738 VM_PAGE_FREE(m)({ ; vm_page_free(m); ; });
739 vm_fault_cleanup(object, VM_PAGE_NULL((vm_page_t) 0));
740 return(VM_FAULT_MEMORY_SHORTAGE3);
741 }
742
743 vm_object_unlock(object);
744 vm_page_zero_fill(m);
745 vm_stat_sample(SAMPLED_PC_VM_ZFILL_FAULTS);
746 vm_stat.zero_fill_count++;
747 current_task()((active_threads[(0)])->task)->zero_fills++;
748 vm_object_lock(object);
749 pmap_clear_modify(m->phys_addr);
750 break;
751 }
752 else {
753 vm_object_lock(next_object);
754 if ((object != first_object) || must_be_resident)
[15] Taking true branch
[22] Taking true branch
755 vm_object_paging_end(object);
756 vm_object_unlock(object);
757 object = next_object;
758 vm_object_paging_begin(object)((object)->paging_in_progress++);
759 }
760 }
761
762 /*
763 * PAGE HAS BEEN FOUND.
764 *
765 * This page (m) is:
766 * busy, so that we can play with it;
767 * not absent, so that nobody else will fill it;
768 * possibly eligible for pageout;
769 *
770 * The top-level page (first_m) is:
771 * VM_PAGE_NULL if the page was found in the
772 * top-level object;
773 * busy, not absent, and ineligible for pageout.
774 *
775 * The current object (object) is locked. A paging
776 * reference is held for the current and top-level
777 * objects.
778 */
779
780#if EXTRA_ASSERTIONS
781 assert(m->busy && !m->absent);
782 assert((first_m == VM_PAGE_NULL) ||
783 (first_m->busy && !first_m->absent &&
784 !first_m->active && !first_m->inactive));
785#endif /* EXTRA_ASSERTIONS */
786
787 /*
788 * If the page is being written, but isn't
789 * already owned by the top-level object,
790 * we have to copy it into a new page owned
791 * by the top-level object.
792 */
793
794 if (object != first_object) {
795 /*
796 * We only really need to copy if we
797 * want to write it.
798 */
799
800 if (fault_type & VM_PROT_WRITE((vm_prot_t) 0x02)) {
801 vm_page_t copy_m;
802
803 assert(!must_be_resident);
804
805 /*
806 * If we try to collapse first_object at this
807 * point, we may deadlock when we try to get
808 * the lock on an intermediate object (since we
809 * have the bottom object locked). We can't
810 * unlock the bottom object, because the page
811 * we found may move (by collapse) if we do.
812 *
813 * Instead, we first copy the page. Then, when
814 * we have no more use for the bottom object,
815 * we unlock it and try to collapse.
816 *
817 * Note that we copy the page even if we didn't
818 * need to... that's the breaks.
819 */
820
821 /*
822 * Allocate a page for the copy
823 */
824 copy_m = vm_page_grab(!first_object->internal);
825 if (copy_m == VM_PAGE_NULL((vm_page_t) 0)) {
826 RELEASE_PAGE(m);
827 vm_fault_cleanup(object, first_m);
828 return(VM_FAULT_MEMORY_SHORTAGE3);
829 }
830
831 vm_object_unlock(object);
832 vm_page_copy(m, copy_m);
833 vm_object_lock(object);
834
835 /*
836 * If another map is truly sharing this
837 * page with us, we have to flush all
838 * uses of the original page, since we
839 * can't distinguish those which want the
840 * original from those which need the
841 * new copy.
842 *
843 * XXXO If we know that only one map has
844 * access to this page, then we could
845 * avoid the pmap_page_protect() call.
846 */
847
848 vm_page_lock_queues();
849 vm_page_deactivate(m);
850 pmap_page_protect(m->phys_addr, VM_PROT_NONE((vm_prot_t) 0x00));
851 vm_page_unlock_queues();
852
853 /*
854 * We no longer need the old page or object.
855 */
856
857 PAGE_WAKEUP_DONE(m);
858 vm_object_paging_end(object);
859 vm_object_unlock(object);
860
861 vm_stat.cow_faults++;
862 vm_stat_sample(SAMPLED_PC_VM_COW_FAULTS);
863 current_task()((active_threads[(0)])->task)->cow_faults++;
864 object = first_object;
865 offset = first_offset;
866
867 vm_object_lock(object);
868 VM_PAGE_FREE(first_m)({ ; vm_page_free(first_m); ; });
869 first_m = VM_PAGE_NULL((vm_page_t) 0);
870 assert(copy_m->busy);
871 vm_page_lock_queues();
872 vm_page_insert(copy_m, object, offset);
873 vm_page_unlock_queues();
874 m = copy_m;
875
876 /*
877 * Now that we've gotten the copy out of the
878 * way, let's try to collapse the top object.
879 * But we have to play ugly games with
880 * paging_in_progress to do that...
881 */
882
883 vm_object_paging_end(object);
884 vm_object_collapse(object);
885 vm_object_paging_begin(object)((object)->paging_in_progress++);
886 }
887 else {
888 *protection &= (~VM_PROT_WRITE((vm_prot_t) 0x02));
889 }
890 }
891
892 /*
893 * Now check whether the page needs to be pushed into the
894 * copy object. The use of asymmetric copy on write for
895 * shared temporary objects means that we may do two copies to
896 * satisfy the fault; one above to get the page from a
897 * shadowed object, and one here to push it into the copy.
898 */
899
900 while ((copy_object = first_object->copy) != VM_OBJECT_NULL((vm_object_t) 0)) {
901 vm_offset_t copy_offset;
902 vm_page_t copy_m;
903
904 /*
905 * If the page is being written, but hasn't been
906 * copied to the copy-object, we have to copy it there.
907 */
908
909 if ((fault_type & VM_PROT_WRITE((vm_prot_t) 0x02)) == 0) {
910 *protection &= ~VM_PROT_WRITE((vm_prot_t) 0x02);
911 break;
912 }
913
914 /*
915 * If the page was guaranteed to be resident,
916 * we must have already performed the copy.
917 */
918
919 if (must_be_resident)
920 break;
921
922 /*
923 * Try to get the lock on the copy_object.
924 */
925 if (!vm_object_lock_try(copy_object)(((boolean_t) 1))) {
926 vm_object_unlock(object);
927
928 simple_lock_pause(); /* wait a bit */
929
930 vm_object_lock(object);
931 continue;
932 }
933
934 /*
935 * Make another reference to the copy-object,
936 * to keep it from disappearing during the
937 * copy.
938 */
939 assert(copy_object->ref_count > 0);
940 copy_object->ref_count++;
941
942 /*
943 * Does the page exist in the copy?
944 */
945 copy_offset = first_offset - copy_object->shadow_offset;
946 copy_m = vm_page_lookup(copy_object, copy_offset);
947 if (copy_m != VM_PAGE_NULL((vm_page_t) 0)) {
948 if (copy_m->busy) {
949 /*
950 * If the page is being brought
951 * in, wait for it and then retry.
952 */
953 PAGE_ASSERT_WAIT(copy_m, interruptible);
954 RELEASE_PAGE(m);
955 copy_object->ref_count--;
956 assert(copy_object->ref_count > 0);
957 vm_object_unlock(copy_object);
958 goto block_and_backoff;
959 }
960 }
961 else {
962 /*
963 * Allocate a page for the copy
964 */
965 copy_m = vm_page_alloc(copy_object, copy_offset);
966 if (copy_m == VM_PAGE_NULL((vm_page_t) 0)) {
967 RELEASE_PAGE(m);
968 copy_object->ref_count--;
969 assert(copy_object->ref_count > 0);
970 vm_object_unlock(copy_object);
971 vm_fault_cleanup(object, first_m);
972 return(VM_FAULT_MEMORY_SHORTAGE3);
973 }
974
975 /*
976 * Must copy page into copy-object.
977 */
978
979 vm_page_copy(m, copy_m);
980
981 /*
982 * If the old page was in use by any users
983 * of the copy-object, it must be removed
984 * from all pmaps. (We can't know which
985 * pmaps use it.)
986 */
987
988 vm_page_lock_queues();
989 pmap_page_protect(m->phys_addr, VM_PROT_NONE((vm_prot_t) 0x00));
990 copy_m->dirty = TRUE((boolean_t) 1);
991 vm_page_unlock_queues();
992
993 /*
994 * If there's a pager, then immediately
995 * page out this page, using the "initialize"
996 * option. Else, we use the copy.
997 */
998
999 if (!copy_object->pager_created) {
1000 vm_page_lock_queues();
1001 vm_page_activate(copy_m);
1002 vm_page_unlock_queues();
1003 PAGE_WAKEUP_DONE(copy_m);
1004 } else {
1005 /*
1006 * The page is already ready for pageout:
1007 * not on pageout queues and busy.
1008 * Unlock everything except the
1009 * copy_object itself.
1010 */
1011
1012 vm_object_unlock(object);
1013
1014 /*
1015 * Write the page to the copy-object,
1016 * flushing it from the kernel.
1017 */
1018
1019 vm_pageout_page(copy_m, TRUE((boolean_t) 1), TRUE((boolean_t) 1));
1020
1021 /*
1022 * Since the pageout may have
1023 * temporarily dropped the
1024 * copy_object's lock, we
1025 * check whether we'll have
1026 * to deallocate the hard way.
1027 */
1028
1029 if ((copy_object->shadow != object) ||
1030 (copy_object->ref_count == 1)) {
1031 vm_object_unlock(copy_object);
1032 vm_object_deallocate(copy_object);
1033 vm_object_lock(object);
1034 continue;
1035 }
1036
1037 /*
1038 * Pick back up the old object's
1039 * lock. [It is safe to do so,
1040 * since it must be deeper in the
1041 * object tree.]
1042 */
1043
1044 vm_object_lock(object);
1045 }
1046
1047 /*
1048 * Because we're pushing a page upward
1049 * in the object tree, we must restart
1050 * any faults that are waiting here.
1051 * [Note that this is an expansion of
1052 * PAGE_WAKEUP that uses the THREAD_RESTART
1053 * wait result]. Can't turn off the page's
1054 * busy bit because we're not done with it.
1055 */
1056
1057 if (m->wanted) {
1058 m->wanted = FALSE((boolean_t) 0);
1059 thread_wakeup_with_result((event_t) m,
1060 THREAD_RESTART);
1061 }
1062 }
1063
1064 /*
1065 * The reference count on copy_object must be
1066 * at least 2: one for our extra reference,
1067 * and at least one from the outside world
1068 * (we checked that when we last locked
1069 * copy_object).
1070 */
1071 copy_object->ref_count--;
1072 assert(copy_object->ref_count > 0);
1073 vm_object_unlock(copy_object);
1074
1075 break;
1076 }
1077
1078 *result_page = m;
1079 *top_page = first_m;
1080
1081 /*
1082 * If the page can be written, assume that it will be.
1083 * [Earlier, we restrict the permission to allow write
1084 * access only if the fault so required, so we don't
1085 * mark read-only data as dirty.]
1086 */
1087
1088 if (vm_fault_dirty_handling && (*protection & VM_PROT_WRITE((vm_prot_t) 0x02)))
1089 m->dirty = TRUE((boolean_t) 1);
1090
1091 return(VM_FAULT_SUCCESS0);
1092
1093 block_and_backoff:
1094 vm_fault_cleanup(object, first_m);
1095
1096 if (continuation != (void (*)()) 0) {
1097 register vm_fault_state_t *state =
1098 (vm_fault_state_t *) current_thread()(active_threads[(0)])->ith_othersaved.other;
1099
1100 /*
1101 * Save variables in case we must restart.
1102 */
1103
1104 state->vmfp_backoff = TRUE((boolean_t) 1);
1105 state->vmf_prot = *protection;
1106
1107 counter(c_vm_fault_page_block_backoff_user++);
1108 thread_block(continuation);
1109 } else
1110 {
1111 counter(c_vm_fault_page_block_backoff_kernel++);
1112 thread_block((void (*)()) 0);
1113 }
1114 after_block_and_backoff:
1115 if (current_thread()(active_threads[(0)])->wait_result == THREAD_AWAKENED0)
1116 return VM_FAULT_RETRY1;
1117 else
1118 return VM_FAULT_INTERRUPTED2;
1119
1120#undef RELEASE_PAGE
1121}
1122
1123/*
1124 * Routine: vm_fault
1125 * Purpose:
1126 * Handle page faults, including pseudo-faults
1127 * used to change the wiring status of pages.
1128 * Returns:
1129 * If an explicit (expression) continuation is supplied,
1130 * then we call the continuation instead of returning.
1131 * Implementation:
1132 * Explicit continuations make this a little icky,
1133 * because it hasn't been rewritten to embrace CPS.
1134 * Instead, we have resume arguments for vm_fault and
1135 * vm_fault_page, to let continue the fault computation.
1136 *
1137 * vm_fault and vm_fault_page save mucho state
1138 * in the moral equivalent of a closure. The state
1139 * structure is allocated when first entering vm_fault
1140 * and deallocated when leaving vm_fault.
1141 */
1142
1143void
1144vm_fault_continue()
1145{
1146 register vm_fault_state_t *state =
1147 (vm_fault_state_t *) current_thread()(active_threads[(0)])->ith_othersaved.other;
1148
1149 (void) vm_fault(state->vmf_map,
1150 state->vmf_vaddr,
1151 state->vmf_fault_type,
1152 state->vmf_change_wiring,
1153 TRUE((boolean_t) 1), state->vmf_continuation);
1154 /*NOTREACHED*/
1155}
1156
1157kern_return_t vm_fault(map, vaddr, fault_type, change_wiring,
1158 resume, continuation)
1159 vm_map_t map;
1160 vm_offset_t vaddr;
1161 vm_prot_t fault_type;
1162 boolean_t change_wiring;
1163 boolean_t resume;
1164 void (*continuation)();
1165{
1166 vm_map_version_t version; /* Map version for verification */
1167 boolean_t wired; /* Should mapping be wired down? */
1168 vm_object_t object; /* Top-level object */
1169 vm_offset_t offset; /* Top-level offset */
1170 vm_prot_t prot; /* Protection for mapping */
1171 vm_object_t old_copy_object; /* Saved copy object */
1172 vm_page_t result_page; /* Result of vm_fault_page */
1173 vm_page_t top_page; /* Placeholder page */
1174 kern_return_t kr;
1175
1176 register
1177 vm_page_t m; /* Fast access to result_page */
1178
1179 if (resume) {
1180 register vm_fault_state_t *state =
1181 (vm_fault_state_t *) current_thread()(active_threads[(0)])->ith_othersaved.other;
1182
1183 /*
1184 * Retrieve cached variables and
1185 * continue vm_fault_page.
1186 */
1187
1188 object = state->vmf_object;
1189 if (object == VM_OBJECT_NULL((vm_object_t) 0))
1190 goto RetryFault;
1191 version = state->vmf_version;
1192 wired = state->vmf_wired;
1193 offset = state->vmf_offset;
1194 prot = state->vmf_prot;
1195
1196 kr = vm_fault_page(object, offset, fault_type,
1197 (change_wiring && !wired), !change_wiring,
1198 &prot, &result_page, &top_page,
1199 TRUE((boolean_t) 1), vm_fault_continue);
1200 goto after_vm_fault_page;
1201 }
1202
1203 if (continuation != (void (*)()) 0) {
1204 /*
1205 * We will probably need to save state.
1206 */
1207
1208 char * state;
1209
1210 /*
1211 * if this assignment stmt is written as
1212 * 'active_threads[cpu_number()] = kmem_cache_alloc()',
1213 * cpu_number may be evaluated before kmem_cache_alloc;
1214 * if kmem_cache_alloc blocks, cpu_number will be wrong
1215 */
1216
1217 state = (char *) kmem_cache_alloc(&vm_fault_state_cache);
1218 current_thread()(active_threads[(0)])->ith_othersaved.other = state;
1219
1220 }
1221
1222 RetryFault: ;
1223
1224 /*
1225 * Find the backing store object and offset into
1226 * it to begin the search.
1227 */
1228
1229 if ((kr = vm_map_lookup(&map, vaddr, fault_type, &version,
1230 &object, &offset,
1231 &prot, &wired)) != KERN_SUCCESS0) {
1232 goto done;
1233 }
1234
1235 /*
1236 * If the page is wired, we must fault for the current protection
1237 * value, to avoid further faults.
1238 */
1239
1240 if (wired)
1241 fault_type = prot;
1242
1243 /*
1244 * Make a reference to this object to
1245 * prevent its disposal while we are messing with
1246 * it. Once we have the reference, the map is free
1247 * to be diddled. Since objects reference their
1248 * shadows (and copies), they will stay around as well.
1249 */
1250
1251 assert(object->ref_count > 0);
1252 object->ref_count++;
1253 vm_object_paging_begin(object)((object)->paging_in_progress++);
1254
1255 if (continuation != (void (*)()) 0) {
1256 register vm_fault_state_t *state =
1257 (vm_fault_state_t *) current_thread()(active_threads[(0)])->ith_othersaved.other;
1258
1259 /*
1260 * Save variables, in case vm_fault_page discards
1261 * our kernel stack and we have to restart.
1262 */
1263
1264 state->vmf_map = map;
1265 state->vmf_vaddr = vaddr;
1266 state->vmf_fault_type = fault_type;
1267 state->vmf_change_wiring = change_wiring;
1268 state->vmf_continuation = continuation;
1269
1270 state->vmf_version = version;
1271 state->vmf_wired = wired;
1272 state->vmf_object = object;
1273 state->vmf_offset = offset;
1274 state->vmf_prot = prot;
1275
1276 kr = vm_fault_page(object, offset, fault_type,
1277 (change_wiring && !wired), !change_wiring,
1278 &prot, &result_page, &top_page,
1279 FALSE((boolean_t) 0), vm_fault_continue);
1280 } else
1281 {
1282 kr = vm_fault_page(object, offset, fault_type,
1283 (change_wiring && !wired), !change_wiring,
1284 &prot, &result_page, &top_page,
1285 FALSE((boolean_t) 0), (void (*)()) 0);
1286 }
1287 after_vm_fault_page:
1288
1289 /*
1290 * If we didn't succeed, lose the object reference immediately.
1291 */
1292
1293 if (kr != VM_FAULT_SUCCESS0)
1294 vm_object_deallocate(object);
1295
1296 /*
1297 * See why we failed, and take corrective action.
1298 */
1299
1300 switch (kr) {
1301 case VM_FAULT_SUCCESS0:
1302 break;
1303 case VM_FAULT_RETRY1:
1304 goto RetryFault;
1305 case VM_FAULT_INTERRUPTED2:
1306 kr = KERN_SUCCESS0;
1307 goto done;
1308 case VM_FAULT_MEMORY_SHORTAGE3:
1309 if (continuation != (void (*)()) 0) {
1310 register vm_fault_state_t *state =
1311 (vm_fault_state_t *) current_thread()(active_threads[(0)])->ith_othersaved.other;
1312
1313 /*
1314 * Save variables in case VM_PAGE_WAIT
1315 * discards our kernel stack.
1316 */
1317
1318 state->vmf_map = map;
1319 state->vmf_vaddr = vaddr;
1320 state->vmf_fault_type = fault_type;
1321 state->vmf_change_wiring = change_wiring;
1322 state->vmf_continuation = continuation;
1323 state->vmf_object = VM_OBJECT_NULL((vm_object_t) 0);
1324
1325 VM_PAGE_WAIT(vm_fault_continue)vm_page_wait(vm_fault_continue);
1326 } else
1327 VM_PAGE_WAIT((void (*)()) 0)vm_page_wait((void (*)()) 0);
1328 goto RetryFault;
1329 case VM_FAULT_FICTITIOUS_SHORTAGE4:
1330 vm_page_more_fictitious();
1331 goto RetryFault;
1332 case VM_FAULT_MEMORY_ERROR5:
1333 kr = KERN_MEMORY_ERROR10;
1334 goto done;
1335 }
1336
1337 m = result_page;
1338
1339 assert((change_wiring && !wired) ?
1340 (top_page == VM_PAGE_NULL) :
1341 ((top_page == VM_PAGE_NULL) == (m->object == object)));
1342
1343 /*
1344 * How to clean up the result of vm_fault_page. This
1345 * happens whether the mapping is entered or not.
1346 */
1347
1348#define UNLOCK_AND_DEALLOCATE \
1349 MACRO_BEGIN \
1350 vm_fault_cleanup(m->object, top_page); \
1351 vm_object_deallocate(object); \
1352 MACRO_END
1353
1354 /*
1355 * What to do with the resulting page from vm_fault_page
1356 * if it doesn't get entered into the physical map:
1357 */
1358
1359#define RELEASE_PAGE(m) \
1360 MACRO_BEGIN \
1361 PAGE_WAKEUP_DONE(m); \
1362 vm_page_lock_queues(); \
1363 if (!m->active && !m->inactive) \
1364 vm_page_activate(m); \
1365 vm_page_unlock_queues(); \
1366 MACRO_END
1367
1368 /*
1369 * We must verify that the maps have not changed
1370 * since our last lookup.
1371 */
1372
1373 old_copy_object = m->object->copy;
1374
1375 vm_object_unlock(m->object);
1376 while (!vm_map_verify(map, &version)) {
1377 vm_object_t retry_object;
1378 vm_offset_t retry_offset;
1379 vm_prot_t retry_prot;
1380
1381 /*
1382 * To avoid trying to write_lock the map while another
1383 * thread has it read_locked (in vm_map_pageable), we
1384 * do not try for write permission. If the page is
1385 * still writable, we will get write permission. If it
1386 * is not, or has been marked needs_copy, we enter the
1387 * mapping without write permission, and will merely
1388 * take another fault.
1389 */
1390 kr = vm_map_lookup(&map, vaddr,
1391 fault_type & ~VM_PROT_WRITE((vm_prot_t) 0x02), &version,
1392 &retry_object, &retry_offset, &retry_prot,
1393 &wired);
1394
1395 if (kr != KERN_SUCCESS0) {
1396 vm_object_lock(m->object);
1397 RELEASE_PAGE(m);
1398 UNLOCK_AND_DEALLOCATE;
1399 goto done;
1400 }
1401
1402 vm_object_unlock(retry_object);
1403 vm_object_lock(m->object);
1404
1405 if ((retry_object != object) ||
1406 (retry_offset != offset)) {
1407 RELEASE_PAGE(m);
1408 UNLOCK_AND_DEALLOCATE;
1409 goto RetryFault;
1410 }
1411
1412 /*
1413 * Check whether the protection has changed or the object
1414 * has been copied while we left the map unlocked.
1415 */
1416 prot &= retry_prot;
1417 vm_object_unlock(m->object);
1418 }
1419 vm_object_lock(m->object);
1420
1421 /*
1422 * If the copy object changed while the top-level object
1423 * was unlocked, then we must take away write permission.
1424 */
1425
1426 if (m->object->copy != old_copy_object)
1427 prot &= ~VM_PROT_WRITE((vm_prot_t) 0x02);
1428
1429 /*
1430 * If we want to wire down this page, but no longer have
1431 * adequate permissions, we must start all over.
1432 */
1433
1434 if (wired && (prot != fault_type)) {
1435 vm_map_verify_done(map, &version)(lock_done(&(map)->lock));
1436 RELEASE_PAGE(m);
1437 UNLOCK_AND_DEALLOCATE;
1438 goto RetryFault;
1439 }
1440
1441 /*
1442 * It's critically important that a wired-down page be faulted
1443 * only once in each map for which it is wired.
1444 */
1445
1446 vm_object_unlock(m->object);
1447
1448 /*
1449 * Put this page into the physical map.
1450 * We had to do the unlock above because pmap_enter
1451 * may cause other faults. The page may be on
1452 * the pageout queues. If the pageout daemon comes
1453 * across the page, it will remove it from the queues.
1454 */
1455
1456	PMAP_ENTER(map->pmap, vaddr, m, prot, wired);
1457
1458 /*
1459 * If the page is not wired down and isn't already
1460 * on a pageout queue, then put it where the
1461 * pageout daemon can find it.
1462 */
1463 vm_object_lock(m->object);
1464 vm_page_lock_queues();
1465 if (change_wiring) {
1466 if (wired)
1467 vm_page_wire(m);
1468 else
1469 vm_page_unwire(m);
1470 } else if (software_reference_bits) {
1471 if (!m->active && !m->inactive)
1472 vm_page_activate(m);
1473 m->reference = TRUE((boolean_t) 1);
1474 } else {
1475 vm_page_activate(m);
1476 }
1477 vm_page_unlock_queues();
1478
1479 /*
1480 * Unlock everything, and return
1481 */
1482
1483 vm_map_verify_done(map, &version)(lock_done(&(map)->lock));
1484	PAGE_WAKEUP_DONE(m);
1485 kr = KERN_SUCCESS0;
1486	UNLOCK_AND_DEALLOCATE;
1487
1488#undef UNLOCK_AND_DEALLOCATE
1489#undef RELEASE_PAGE
1490
1491 done:
1492 if (continuation != (void (*)()) 0) {
1493 register vm_fault_state_t *state =
1494 (vm_fault_state_t *) current_thread()(active_threads[(0)])->ith_othersaved.other;
1495
1496 kmem_cache_free(&vm_fault_state_cache, (vm_offset_t) state);
1497 (*continuation)(kr);
1498 /*NOTREACHED*/
1499 }
1500
1501 return(kr);
1502}
1503
1504kern_return_t vm_fault_wire_fast();
1505
1506/*
1507 * vm_fault_wire:
1508 *
1509 * Wire down a range of virtual addresses in a map.
1510 */
1511void vm_fault_wire(map, entry)
1512 vm_map_t map;
1513 vm_map_entry_t entry;
1514{
1515
1516 register vm_offset_t va;
1517 register pmap_t pmap;
1518 register vm_offset_t end_addr = entry->vme_endlinks.end;
1519
1520 pmap = vm_map_pmap(map)((map)->pmap);
1521
1522 /*
1523 * Inform the physical mapping system that the
1524 * range of addresses may not fault, so that
1525 * page tables and such can be locked down as well.
1526 */
1527
1528 pmap_pageable(pmap, entry->vme_startlinks.start, end_addr, FALSE((boolean_t) 0));
1529
1530 /*
1531 * We simulate a fault to get the page and enter it
1532 * in the physical map.
1533 */
1534
1535 for (va = entry->vme_startlinks.start; va < end_addr; va += PAGE_SIZE(1 << 12)) {
1536 if (vm_fault_wire_fast(map, va, entry) != KERN_SUCCESS0)
1537 (void) vm_fault(map, va, VM_PROT_NONE((vm_prot_t) 0x00), TRUE((boolean_t) 1),
1538 FALSE((boolean_t) 0), (void (*)()) 0);
1539 }
1540}
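
For orientation, the routine above and vm_fault_unwire below are used as a pair: whoever wires an entry's range is expected to unwire the same range later with the same map and entry. The sketch below is a hypothetical helper, not code from vm_fault.c; real callers such as vm_map_pageable (mentioned in the comments earlier) also maintain the entry's wired count and hold the appropriate map lock, which is omitted here.

	/*
	 * Hypothetical helper (illustration only): pick the wire or
	 * unwire path for a single map entry.  Wired-count bookkeeping
	 * and map locking are assumed to be handled by the caller.
	 */
	void example_set_entry_wiring(
		vm_map_t	map,
		vm_map_entry_t	entry,
		boolean_t	wire)
	{
		if (wire)
			vm_fault_wire(map, entry);	/* fault in and wire each page */
		else
			vm_fault_unwire(map, entry);	/* drop the wiring again */
	}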
1541
1542/*
1543 * vm_fault_unwire:
1544 *
1545 * Unwire a range of virtual addresses in a map.
1546 */
1547void vm_fault_unwire(map, entry)
1548 vm_map_t map;
1549 vm_map_entry_t entry;
1550{
1551 register vm_offset_t va;
1552 register pmap_t pmap;
1553 register vm_offset_t end_addr = entry->vme_endlinks.end;
1554 vm_object_t object;
1555
1556 pmap = vm_map_pmap(map)((map)->pmap);
1557
1558 object = (entry->is_sub_map)
1559 ? VM_OBJECT_NULL((vm_object_t) 0) : entry->object.vm_object;
1560
1561 /*
1562 * Since the pages are wired down, we must be able to
1563 * get their mappings from the physical map system.
1564 */
1565
1566 for (va = entry->vme_startlinks.start; va < end_addr; va += PAGE_SIZE(1 << 12)) {
1567 pmap_change_wiring(pmap, va, FALSE((boolean_t) 0));
1568
1569 if (object == VM_OBJECT_NULL((vm_object_t) 0)) {
1570 vm_map_lock_set_recursive(map)lock_set_recursive(&(map)->lock);
1571 (void) vm_fault(map, va, VM_PROT_NONE((vm_prot_t) 0x00), TRUE((boolean_t) 1),
1572 FALSE((boolean_t) 0), (void (*)()) 0);
1573 vm_map_lock_clear_recursive(map)lock_clear_recursive(&(map)->lock);
1574 } else {
1575 vm_prot_t prot;
1576 vm_page_t result_page;
1577 vm_page_t top_page;
1578 vm_fault_return_t result;
1579
1580 do {
1581 prot = VM_PROT_NONE((vm_prot_t) 0x00);
1582
1583 vm_object_lock(object);
1584 vm_object_paging_begin(object)((object)->paging_in_progress++);
1585 result = vm_fault_page(object,
1586 entry->offset +
1587 (va - entry->vme_startlinks.start),
1588 VM_PROT_NONE((vm_prot_t) 0x00), TRUE((boolean_t) 1),
1589 FALSE((boolean_t) 0), &prot,
1590 &result_page,
1591 &top_page,
1592 FALSE((boolean_t) 0), (void (*)()) 0);
1593 } while (result == VM_FAULT_RETRY1);
1594
1595 if (result != VM_FAULT_SUCCESS0)
1596 panic("vm_fault_unwire: failure");
1597
1598 vm_page_lock_queues();
1599 vm_page_unwire(result_page);
1600 vm_page_unlock_queues();
1601			PAGE_WAKEUP_DONE(result_page);
1602
1603 vm_fault_cleanup(result_page->object, top_page);
1604 }
1605 }
1606
1607 /*
1608 * Inform the physical mapping system that the range
1609 * of addresses may fault, so that page tables and
1610 * such may be unwired themselves.
1611 */
1612
1613 pmap_pageable(pmap, entry->vme_startlinks.start, end_addr, TRUE((boolean_t) 1));
1614}
1615
1616/*
1617 * vm_fault_wire_fast:
1618 *
1619 * Handle common case of a wire down page fault at the given address.
1620 * If successful, the page is inserted into the associated physical map.
1621 * The map entry is passed in to avoid the overhead of a map lookup.
1622 *
1623 * NOTE: the given address should be truncated to the
1624 * proper page address.
1625 *
1626 * KERN_SUCCESS is returned if the page fault is handled; otherwise,
1627 * a standard error specifying why the fault is fatal is returned.
1628 *
1629 * The map in question must be referenced, and remains so.
1630 * Caller has a read lock on the map.
1631 *
1632 * This is a stripped version of vm_fault() for wiring pages. Anything
1633 * other than the common case will return KERN_FAILURE, and the caller
1634 * is expected to call vm_fault().
1635 */
1636kern_return_t vm_fault_wire_fast(map, va, entry)
1637 vm_map_t map;
1638 vm_offset_t va;
1639 vm_map_entry_t entry;
1640{
1641 vm_object_t object;
1642 vm_offset_t offset;
1643 register vm_page_t m;
1644 vm_prot_t prot;
1645
1646 vm_stat.faults++; /* needs lock XXX */
1647 current_task()((active_threads[(0)])->task)->faults++;
1648/*
1649 * Recovery actions
1650 */
1651
1652#undef RELEASE_PAGE
1653#define RELEASE_PAGE(m)	{				\
1654	PAGE_WAKEUP_DONE(m);				\
1655 vm_page_lock_queues(); \
1656 vm_page_unwire(m); \
1657 vm_page_unlock_queues(); \
1658}
1659
1660
1661#undef UNLOCK_THINGS{ object->paging_in_progress--; ; }
1662#define UNLOCK_THINGS{ object->paging_in_progress--; ; } { \
1663 object->paging_in_progress--; \
1664 vm_object_unlock(object); \
1665}
1666
1667#undef UNLOCK_AND_DEALLOCATE
1668#define UNLOCK_AND_DEALLOCATE	{			\
1669 UNLOCK_THINGS{ object->paging_in_progress--; ; }; \
1670 vm_object_deallocate(object); \
1671}
1672/*
1673 * Give up and have caller do things the hard way.
1674 */
1675
1676#define GIVE_UP	{					\
1677	UNLOCK_AND_DEALLOCATE;				\
1678 return(KERN_FAILURE5); \
1679}
1680
1681
1682 /*
1683 * If this entry is not directly to a vm_object, bail out.
1684 */
1685 if (entry->is_sub_map)
1686 return(KERN_FAILURE5);
1687
1688 /*
1689 * Find the backing store object and offset into it.
1690 */
1691
1692 object = entry->object.vm_object;
1693 offset = (va - entry->vme_startlinks.start) + entry->offset;
1694 prot = entry->protection;
1695
1696 /*
1697 * Make a reference to this object to prevent its
1698 * disposal while we are messing with it.
1699 */
1700
1701 vm_object_lock(object);
1702	assert(object->ref_count > 0);
1703 object->ref_count++;
1704 object->paging_in_progress++;
1705
1706 /*
1707 * INVARIANTS (through entire routine):
1708 *
1709 * 1) At all times, we must either have the object
1710 * lock or a busy page in some object to prevent
1711 * some other thread from trying to bring in
1712 * the same page.
1713 *
1714 * 2) Once we have a busy page, we must remove it from
1715 * the pageout queues, so that the pageout daemon
1716 * will not grab it away.
1717 *
1718 */
1719
1720 /*
1721 * Look for page in top-level object. If it's not there or
1722 * there's something going on, give up.
1723 */
1724 m = vm_page_lookup(object, offset);
1725 if ((m == VM_PAGE_NULL((vm_page_t) 0)) || (m->error) ||
1726 (m->busy) || (m->absent) || (prot & m->page_lock)) {
1727		GIVE_UP;
1728 }
1729
1730 /*
1731 * Wire the page down now. All bail outs beyond this
1732 * point must unwire the page.
1733 */
1734
1735 vm_page_lock_queues();
1736 vm_page_wire(m);
1737 vm_page_unlock_queues();
1738
1739 /*
1740 * Mark page busy for other threads.
1741 */
1742	assert(!m->busy);
1743 m->busy = TRUE((boolean_t) 1);
1744	assert(!m->absent);
1745
1746 /*
1747 * Give up if the page is being written and there's a copy object
1748 */
1749 if ((object->copy != VM_OBJECT_NULL((vm_object_t) 0)) && (prot & VM_PROT_WRITE((vm_prot_t) 0x02))) {
1750		RELEASE_PAGE(m);
1751		GIVE_UP;
1752 }
1753
1754 /*
1755 * Put this page into the physical map.
1756 * We have to unlock the object because pmap_enter
1757 * may cause other faults.
1758 */
1759 vm_object_unlock(object);
1760
1761	PMAP_ENTER(map->pmap, va, m, prot, TRUE);
1762
1763 /*
1764 * Must relock object so that paging_in_progress can be cleared.
1765 */
1766 vm_object_lock(object);
1767
1768 /*
1769 * Unlock everything, and return
1770 */
1771
1772	PAGE_WAKEUP_DONE(m);
1773	UNLOCK_AND_DEALLOCATE;
1774
1775 return(KERN_SUCCESS0);
1776
1777}
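
As a reading aid only, here is the fall-back pattern that the header comment of vm_fault_wire_fast describes, restated outside the wiring loop. It mirrors the call already made in vm_fault_wire and introduces nothing new.

	/*
	 * Illustration of the contract: attempt the stripped-down fast
	 * path first, and hand anything uncommon to the general fault
	 * handler, exactly as vm_fault_wire does.
	 */
	if (vm_fault_wire_fast(map, va, entry) != KERN_SUCCESS)
		(void) vm_fault(map, va, VM_PROT_NONE, TRUE,
				FALSE, (void (*)()) 0);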
1778
1779/*
1780 * Routine: vm_fault_copy_cleanup
1781 * Purpose:
1782 * Release a page used by vm_fault_copy.
1783 */
1784
1785void vm_fault_copy_cleanup(page, top_page)
1786 vm_page_t page;
1787 vm_page_t top_page;
1788{
1789 vm_object_t object = page->object;
1790
1791 vm_object_lock(object);
1792	PAGE_WAKEUP_DONE(page);
1793 vm_page_lock_queues();
1794 if (!page->active && !page->inactive)
1795 vm_page_activate(page);
1796 vm_page_unlock_queues();
1797 vm_fault_cleanup(object, top_page);
1798}
1799
1800/*
1801 * Routine: vm_fault_copy
1802 *
1803 * Purpose:
1804 * Copy pages from one virtual memory object to another --
1805 * neither the source nor destination pages need be resident.
1806 *
1807 * Before actually copying a page, the version associated with
1808 *	the destination address map will be verified.
1809 *
1810 * In/out conditions:
1811 * The caller must hold a reference, but not a lock, to
1812 * each of the source and destination objects and to the
1813 * destination map.
1814 *
1815 * Results:
1816 * Returns KERN_SUCCESS if no errors were encountered in
1817 * reading or writing the data. Returns KERN_INTERRUPTED if
1818 * the operation was interrupted (only possible if the
1819 * "interruptible" argument is asserted). Other return values
1820 * indicate a permanent error in copying the data.
1821 *
1822 * The actual amount of data copied will be returned in the
1823 * "copy_size" argument. In the event that the destination map
1824 * verification failed, this amount may be less than the amount
1825 * requested.
1826 */
1827kern_return_t vm_fault_copy(
1828 src_object,
1829 src_offset,
1830 src_size,
1831 dst_object,
1832 dst_offset,
1833 dst_map,
1834 dst_version,
1835 interruptible
1836 )
1837 vm_object_t src_object;
1838 vm_offset_t src_offset;
1839 vm_size_t *src_size; /* INOUT */
1840 vm_object_t dst_object;
1841 vm_offset_t dst_offset;
1842 vm_map_t dst_map;
1843 vm_map_version_t *dst_version;
1844 boolean_t interruptible;
1845{
1846 vm_page_t result_page;
1847 vm_prot_t prot;
1848
1849 vm_page_t src_page;
1850 vm_page_t src_top_page;
1851
1852 vm_page_t dst_page;
1853 vm_page_t dst_top_page;
1854
1855 vm_size_t amount_done;
1856 vm_object_t old_copy_object;
1857
1858#define RETURN(x) \
1859 MACRO_BEGIN({ \
1860 *src_size = amount_done; \
1861 MACRO_RETURNif (((boolean_t) 1)) return(x); \
1862 MACRO_END})
1863
1864 amount_done = 0;
1865 do { /* while (amount_done != *src_size) */
1866
1867 RetrySourceFault: ;
1868
1869 if (src_object == VM_OBJECT_NULL((vm_object_t) 0)) {
1870 /*
1871 * No source object. We will just
1872 * zero-fill the page in dst_object.
1873 */
1874
1875 src_page = VM_PAGE_NULL((vm_page_t) 0);
1876 } else {
1877 prot = VM_PROT_READ((vm_prot_t) 0x01);
1878
1879 vm_object_lock(src_object);
1880 vm_object_paging_begin(src_object)((src_object)->paging_in_progress++);
1881
1882 switch (vm_fault_page(src_object, src_offset,
1883 VM_PROT_READ((vm_prot_t) 0x01), FALSE((boolean_t) 0), interruptible,
1884 &prot, &result_page, &src_top_page,
1885 FALSE((boolean_t) 0), (void (*)()) 0)) {
1886
1887 case VM_FAULT_SUCCESS0:
1888 break;
1889 case VM_FAULT_RETRY1:
1890 goto RetrySourceFault;
1891 case VM_FAULT_INTERRUPTED2:
1892 RETURN(MACH_SEND_INTERRUPTED0x10000007);
1893 case VM_FAULT_MEMORY_SHORTAGE3:
1894 VM_PAGE_WAIT((void (*)()) 0)vm_page_wait((void (*)()) 0);
1895 goto RetrySourceFault;
1896 case VM_FAULT_FICTITIOUS_SHORTAGE4:
1897 vm_page_more_fictitious();
1898 goto RetrySourceFault;
1899 case VM_FAULT_MEMORY_ERROR5:
1900 return(KERN_MEMORY_ERROR10);
1901 }
1902
1903 src_page = result_page;
1904
1905			assert((src_top_page == VM_PAGE_NULL) ==
1906				(src_page->object == src_object));
1907
1908			assert ((prot & VM_PROT_READ) != VM_PROT_NONE);
1909
1910 vm_object_unlock(src_page->object);
1911 }
1912
1913 RetryDestinationFault: ;
1914
1915 prot = VM_PROT_WRITE((vm_prot_t) 0x02);
1916
1917 vm_object_lock(dst_object);
1918 vm_object_paging_begin(dst_object)((dst_object)->paging_in_progress++);
1919
1920 switch (vm_fault_page(dst_object, dst_offset, VM_PROT_WRITE((vm_prot_t) 0x02),
1921 FALSE((boolean_t) 0), FALSE((boolean_t) 0) /* interruptible */,
1922 &prot, &result_page, &dst_top_page,
1923 FALSE((boolean_t) 0), (void (*)()) 0)) {
1924
1925 case VM_FAULT_SUCCESS0:
1926 break;
1927 case VM_FAULT_RETRY1:
1928 goto RetryDestinationFault;
1929 case VM_FAULT_INTERRUPTED2:
1930 if (src_page != VM_PAGE_NULL((vm_page_t) 0))
1931 vm_fault_copy_cleanup(src_page,
1932 src_top_page);
1933 RETURN(MACH_SEND_INTERRUPTED0x10000007);
1934 case VM_FAULT_MEMORY_SHORTAGE3:
1935 VM_PAGE_WAIT((void (*)()) 0)vm_page_wait((void (*)()) 0);
1936 goto RetryDestinationFault;
1937 case VM_FAULT_FICTITIOUS_SHORTAGE4:
1938 vm_page_more_fictitious();
1939 goto RetryDestinationFault;
1940 case VM_FAULT_MEMORY_ERROR5:
1941 if (src_page != VM_PAGE_NULL((vm_page_t) 0))
1942 vm_fault_copy_cleanup(src_page,
1943 src_top_page);
1944 return(KERN_MEMORY_ERROR10);
1945 }
1946		assert ((prot & VM_PROT_WRITE) != VM_PROT_NONE);
1947
1948 dst_page = result_page;
1949
1950 old_copy_object = dst_page->object->copy;
1951
1952 vm_object_unlock(dst_page->object);
1953
1954 if (!vm_map_verify(dst_map, dst_version)) {
1955
1956 BailOut: ;
1957
1958 if (src_page != VM_PAGE_NULL((vm_page_t) 0))
1959 vm_fault_copy_cleanup(src_page, src_top_page);
1960 vm_fault_copy_cleanup(dst_page, dst_top_page);
1961 break;
1962 }
1963
1964
1965 vm_object_lock(dst_page->object);
1966 if (dst_page->object->copy != old_copy_object) {
1967 vm_object_unlock(dst_page->object);
1968 vm_map_verify_done(dst_map, dst_version)(lock_done(&(dst_map)->lock));
1969 goto BailOut;
1970 }
1971 vm_object_unlock(dst_page->object);
1972
1973 /*
1974 * Copy the page, and note that it is dirty
1975 * immediately.
1976 */
1977
1978 if (src_page == VM_PAGE_NULL((vm_page_t) 0))
1979 vm_page_zero_fill(dst_page);
1980 else
1981 vm_page_copy(src_page, dst_page);
1982 dst_page->dirty = TRUE((boolean_t) 1);
1983
1984 /*
1985 * Unlock everything, and return
1986 */
1987
1988 vm_map_verify_done(dst_map, dst_version)(lock_done(&(dst_map)->lock));
1989
1990 if (src_page != VM_PAGE_NULL((vm_page_t) 0))
1991 vm_fault_copy_cleanup(src_page, src_top_page);
1992 vm_fault_copy_cleanup(dst_page, dst_top_page);
1993
1994 amount_done += PAGE_SIZE(1 << 12);
1995 src_offset += PAGE_SIZE(1 << 12);
1996 dst_offset += PAGE_SIZE(1 << 12);
1997
1998 } while (amount_done != *src_size);
1999
2000 RETURN(KERN_SUCCESS0);
2001#undef RETURN
2002
2003 /*NOTREACHED*/
2004}
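
To make the in/out conditions documented above concrete, here is a hedged caller-side sketch of one vm_fault_copy invocation. The function name example_fault_copy and its extra "copied" parameter are illustrative assumptions; obtaining dst_version (normally through the caller's destination map lookup) and the object and map references required above is assumed to have happened already.

	/*
	 * Hypothetical caller sketch (not part of vm_fault.c): copy "size"
	 * bytes with vm_fault_copy and report how much was actually done.
	 */
	kern_return_t example_fault_copy(
		vm_object_t		src_object,
		vm_offset_t		src_offset,
		vm_object_t		dst_object,
		vm_offset_t		dst_offset,
		vm_map_t		dst_map,
		vm_map_version_t	*dst_version,
		vm_size_t		size,
		vm_size_t		*copied)	/* OUT: bytes actually copied */
	{
		vm_size_t	copy_size = size;	/* INOUT for vm_fault_copy */
		kern_return_t	kr;

		kr = vm_fault_copy(src_object, src_offset, &copy_size,
				   dst_object, dst_offset,
				   dst_map, dst_version, FALSE);

		*copied = copy_size;
		/*
		 * KERN_SUCCESS with copy_size < size means destination map
		 * verification failed part way through; the caller must
		 * refresh dst_version before retrying the remainder at the
		 * advanced offsets.
		 */
		return kr;
	}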
2005
2006
2007
2008
2009
2010#ifdef notdef
2011
2012/*
2013 * Routine: vm_fault_page_overwrite
2014 *
2015 * Description:
2016 * A form of vm_fault_page that assumes that the
2017 * resulting page will be overwritten in its entirety,
2018 * making it unnecessary to obtain the correct *contents*
2019 * of the page.
2020 *
2021 * Implementation:
2022 * XXX Untested. Also unused. Eventually, this technology
2023 * could be used in vm_fault_copy() to advantage.
2024 */
2025vm_fault_return_t vm_fault_page_overwrite(dst_object, dst_offset, result_page)
2026 register
2027 vm_object_t dst_object;
2028 vm_offset_t dst_offset;
2029 vm_page_t *result_page; /* OUT */
2030{
2031 register
2032 vm_page_t dst_page;
2033
2034#define interruptible FALSE((boolean_t) 0) /* XXX */
2035
2036 while (TRUE((boolean_t) 1)) {
2037 /*
2038 * Look for a page at this offset
2039 */
2040
2041 while ((dst_page = vm_page_lookup(dst_object, dst_offset))
2042 == VM_PAGE_NULL((vm_page_t) 0)) {
2043 /*
2044 * No page, no problem... just allocate one.
2045 */
2046
2047 dst_page = vm_page_alloc(dst_object, dst_offset);
2048 if (dst_page == VM_PAGE_NULL((vm_page_t) 0)) {
2049 vm_object_unlock(dst_object);
2050 VM_PAGE_WAIT((void (*)()) 0)vm_page_wait((void (*)()) 0);
2051 vm_object_lock(dst_object);
2052 continue;
2053 }
2054
2055 /*
2056 * Pretend that the memory manager
2057 * write-protected the page.
2058 *
2059 * Note that we will be asking for write
2060 * permission without asking for the data
2061 * first.
2062 */
2063
2064 dst_page->overwriting = TRUE((boolean_t) 1);
2065 dst_page->page_lock = VM_PROT_WRITE((vm_prot_t) 0x02);
2066 dst_page->absent = TRUE((boolean_t) 1);
2067 dst_object->absent_count++;
2068
2069 break;
2070
2071 /*
2072 * When we bail out, we might have to throw
2073 * away the page created here.
2074 */
2075
2076#define DISCARD_PAGE \
2077 MACRO_BEGIN({ \
2078 vm_object_lock(dst_object); \
2079 dst_page = vm_page_lookup(dst_object, dst_offset); \
2080 if ((dst_page != VM_PAGE_NULL((vm_page_t) 0)) && dst_page->overwriting) \
2081 VM_PAGE_FREE(dst_page)({ ; vm_page_free(dst_page); ; }); \
2082 vm_object_unlock(dst_object); \
2083 MACRO_END})
2084 }
2085
2086 /*
2087 * If the page is write-protected...
2088 */
2089
2090 if (dst_page->page_lock & VM_PROT_WRITE((vm_prot_t) 0x02)) {
2091 /*
2092 * ... and an unlock request hasn't been sent
2093 */
2094
2095 if ( ! (dst_page->unlock_request & VM_PROT_WRITE((vm_prot_t) 0x02))) {
2096 vm_prot_t u;
2097 kern_return_t rc;
2098
2099 /*
2100 * ... then send one now.
2101 */
2102
2103 if (!dst_object->pager_ready) {
2104					vm_object_assert_wait(dst_object,
2105						VM_OBJECT_EVENT_PAGER_READY,
2106						interruptible);
2107 vm_object_unlock(dst_object);
2108 thread_block((void (*)()) 0);
2109 if (current_thread()(active_threads[(0)])->wait_result !=
2110 THREAD_AWAKENED0) {
2111 DISCARD_PAGE;
2112 return(VM_FAULT_INTERRUPTED2);
2113 }
2114 continue;
2115 }
2116
2117 u = dst_page->unlock_request |= VM_PROT_WRITE((vm_prot_t) 0x02);
2118 vm_object_unlock(dst_object);
2119
2120 if ((rc = memory_object_data_unlock(
2121 dst_object->pager,
2122 dst_object->pager_request,
2123 dst_offset + dst_object->paging_offset,
2124 PAGE_SIZE(1 << 12),
2125 u)) != KERN_SUCCESS0) {
2126 printf("vm_object_overwrite: memory_object_data_unlock failed\n");
2127 DISCARD_PAGE;
2128 return((rc == MACH_SEND_INTERRUPTED0x10000007) ?
2129 VM_FAULT_INTERRUPTED2 :
2130 VM_FAULT_MEMORY_ERROR5);
2131 }
2132 vm_object_lock(dst_object);
2133 continue;
2134 }
2135
2136 /* ... fall through to wait below */
2137 } else {
2138 /*
2139 * If the page isn't being used for other
2140 * purposes, then we're done.
2141 */
2142 if ( ! (dst_page->busy || dst_page->absent || dst_page->error) )
2143 break;
2144 }
2145
2146		PAGE_ASSERT_WAIT(dst_page, interruptible);
2147 vm_object_unlock(dst_object);
2148 thread_block((void (*)()) 0);
2149 if (current_thread()(active_threads[(0)])->wait_result != THREAD_AWAKENED0) {
2150 DISCARD_PAGE;
2151 return(VM_FAULT_INTERRUPTED2);
2152 }
2153 }
2154
2155 *result_page = dst_page;
2156 return(VM_FAULT_SUCCESS0);
2157
2158#undef interruptible
2159#undef DISCARD_PAGE
2160}
2161
2162#endif /* notdef */