Bug Summary

File: obj-scan-build/../vm/vm_fault.c
Location: line 552, column 6
Description: Access to field 'task' results in a dereference of a null pointer
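
In short: the reported path reaches line 552 after the expansion of vm_stat_sample() at line 550 assumes current_thread() (i.e. active_threads[cpu_number()]) may be THREAD_NULL, while current_task() at line 552 expands to that same pointer's ->task field with no check. The stand-alone C sketch below only models that shape to make the report easier to follow; the types and names are simplified stand-ins, not Mach code, and it is not a proposed patch.

/* Minimal model of the diagnostic (hypothetical names, not Mach code). */
#include <stddef.h>
#include <stdio.h>

struct task   { int reactivations; };
struct thread { struct task *task; };

static struct thread *active_threads[1];        /* stands in for active_threads[cpu_number()] */
#define current_thread() (active_threads[0])
#define current_task()   (current_thread()->task)

static void vm_stat_sample_model(void)          /* models the macro defined at lines 152-158 */
{
        struct thread *t = current_thread();
        if (t != NULL)                          /* the real macro checks for THREAD_NULL ... */
                printf("sample taken\n");
}

int main(void)
{
        active_threads[0] = NULL;               /* the state the analyzer assumes at step 39 */
        vm_stat_sample_model();                 /* line 550: tolerates a null thread */
        current_task()->reactivations++;        /* line 552: NULL->task, the reported dereference */
        return 0;
}

If a fault can really be taken while active_threads[cpu_number()] is still THREAD_NULL (for example very early in boot), guarding the counter update the same way the macro guards the sample would address the report; if it cannot, the warning is a false positive driven by the null check inside the macro expansion.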

Annotated Source Code

1/*
2 * Mach Operating System
3 * Copyright (c) 1994,1990,1989,1988,1987 Carnegie Mellon University.
4 * Copyright (c) 1993,1994 The University of Utah and
5 * the Computer Systems Laboratory (CSL).
6 * All rights reserved.
7 *
8 * Permission to use, copy, modify and distribute this software and its
9 * documentation is hereby granted, provided that both the copyright
10 * notice and this permission notice appear in all copies of the
11 * software, derivative works or modified versions, and any portions
12 * thereof, and that both notices appear in supporting documentation.
13 *
14 * CARNEGIE MELLON, THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF
15 * THIS SOFTWARE IN ITS "AS IS" CONDITION, AND DISCLAIM ANY LIABILITY
16 * OF ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF
17 * THIS SOFTWARE.
18 *
19 * Carnegie Mellon requests users of this software to return to
20 *
21 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
22 * School of Computer Science
23 * Carnegie Mellon University
24 * Pittsburgh PA 15213-3890
25 *
26 * any improvements or extensions that they make and grant Carnegie Mellon
27 * the rights to redistribute these changes.
28 */
29/*
30 * File: vm_fault.c
31 * Author: Avadis Tevanian, Jr., Michael Wayne Young
32 *
33 * Page fault handling module.
34 */
35
36#include <kern/printf.h>
37#include <vm/vm_fault.h>
38#include <mach/kern_return.h>
39#include <mach/message.h> /* for error codes */
40#include <kern/counters.h>
41#include <kern/debug.h>
42#include <kern/thread.h>
43#include <kern/sched_prim.h>
44#include <vm/vm_map.h>
45#include <vm/vm_object.h>
46#include <vm/vm_page.h>
47#include <vm/pmap.h>
48#include <mach/vm_statistics.h>
49#include <vm/vm_pageout.h>
50#include <mach/vm_param.h>
51#include <mach/memory_object.h>
52#include <vm/memory_object_user.user.h>
53 /* For memory_object_data_{request,unlock} */
54#include <kern/macro_help.h>
55#include <kern/slab.h>
56
57#if MACH_PCSAMPLE1
58#include <kern/pc_sample.h>
59#endif
60
61
62
63/*
64 * State needed by vm_fault_continue.
65 * This is a little hefty to drop directly
66 * into the thread structure.
67 */
68typedef struct vm_fault_state {
69 struct vm_map *vmf_map;
70 vm_offset_t vmf_vaddr;
71 vm_prot_t vmf_fault_type;
72 boolean_t vmf_change_wiring;
73 void (*vmf_continuation)();
74 vm_map_version_t vmf_version;
75 boolean_t vmf_wired;
76 struct vm_object *vmf_object;
77 vm_offset_t vmf_offset;
78 vm_prot_t vmf_prot;
79
80 boolean_t vmfp_backoff;
81 struct vm_object *vmfp_object;
82 vm_offset_t vmfp_offset;
83 struct vm_page *vmfp_first_m;
84 vm_prot_t vmfp_access;
85} vm_fault_state_t;
86
87struct kmem_cache vm_fault_state_cache;
88
89int vm_object_absent_max = 50;
90
91boolean_t vm_fault_dirty_handling = FALSE((boolean_t) 0);
92boolean_t vm_fault_interruptible = TRUE((boolean_t) 1);
93
94boolean_t software_reference_bits = TRUE((boolean_t) 1);
95
96#if MACH_KDB0
97extern struct db_watchpoint *db_watchpoint_list;
98#endif /* MACH_KDB */
99
100/*
101 * Routine: vm_fault_init
102 * Purpose:
103 * Initialize our private data structures.
104 */
105void vm_fault_init(void)
106{
107 kmem_cache_init(&vm_fault_state_cache, "vm_fault_state",
108 sizeof(vm_fault_state_t), 0, NULL((void *) 0), NULL((void *) 0), NULL((void *) 0), 0);
109}
110
111/*
112 * Routine: vm_fault_cleanup
113 * Purpose:
114 * Clean up the result of vm_fault_page.
115 * Results:
116 * The paging reference for "object" is released.
117 * "object" is unlocked.
118 * If "top_page" is not null, "top_page" is
119 * freed and the paging reference for the object
120 * containing it is released.
121 *
122 * In/out conditions:
123 * "object" must be locked.
124 */
125void
126vm_fault_cleanup(object, top_page)
127 vm_object_t object;
128 vm_page_t top_page;
129{
130 vm_object_paging_end(object);
131 vm_object_unlock(object);
132
133 if (top_page != VM_PAGE_NULL((vm_page_t) 0)) {
134 object = top_page->object;
135 vm_object_lock(object);
136 VM_PAGE_FREE(top_page)({ ; vm_page_free(top_page); ; });
137 vm_object_paging_end(object);
138 vm_object_unlock(object);
139 }
140}
141
142
143#if MACH_PCSAMPLE1
144/*
145 * Do PC sampling on current thread, assuming
146 * that it is the thread taking this page fault.
147 *
148 * Must check for THREAD_NULL, since faults
149 * can occur before threads are running.
150 */
151
152#define vm_stat_sample(flavor) \
153 MACRO_BEGIN({ \
154 thread_t _thread_ = current_thread()(active_threads[(0)]); \
155 \
156 if (_thread_ != THREAD_NULL((thread_t) 0)) \
157 take_pc_sample_macro(_thread_, (flavor)); \
158 MACRO_END})
159
160#else
161#define vm_stat_sample(x)
162#endif /* MACH_PCSAMPLE */
163
164
165
166/*
167 * Routine: vm_fault_page
168 * Purpose:
169 * Find the resident page for the virtual memory
170 * specified by the given virtual memory object
171 * and offset.
172 * Additional arguments:
173 * The required permissions for the page is given
174 * in "fault_type". Desired permissions are included
175 * in "protection".
176 *
177 * If the desired page is known to be resident (for
178 * example, because it was previously wired down), asserting
179 * the "unwiring" parameter will speed the search.
180 *
181 * If the operation can be interrupted (by thread_abort
182 * or thread_terminate), then the "interruptible"
183 * parameter should be asserted.
184 *
185 * Results:
186 * The page containing the proper data is returned
187 * in "result_page".
188 *
189 * In/out conditions:
190 * The source object must be locked and referenced,
191 * and must donate one paging reference. The reference
192 * is not affected. The paging reference and lock are
193 * consumed.
194 *
195 * If the call succeeds, the object in which "result_page"
196 * resides is left locked and holding a paging reference.
197 * If this is not the original object, a busy page in the
198 * original object is returned in "top_page", to prevent other
199 * callers from pursuing this same data, along with a paging
200 * reference for the original object. The "top_page" should
201 * be destroyed when this guarantee is no longer required.
202 * The "result_page" is also left busy. It is not removed
203 * from the pageout queues.
204 */
205vm_fault_return_t vm_fault_page(first_object, first_offset,
206 fault_type, must_be_resident, interruptible,
207 protection,
208 result_page, top_page,
209 resume, continuation)
210 /* Arguments: */
211 vm_object_t first_object; /* Object to begin search */
212 vm_offset_t first_offset; /* Offset into object */
213 vm_prot_t fault_type; /* What access is requested */
214 boolean_t must_be_resident;/* Must page be resident? */
215 boolean_t interruptible; /* May fault be interrupted? */
216 /* Modifies in place: */
217 vm_prot_t *protection; /* Protection for mapping */
218 /* Returns: */
219 vm_page_t *result_page; /* Page found, if successful */
220 vm_page_t *top_page; /* Page in top object, if
221 * not result_page.
222 */
223 /* More arguments: */
224 boolean_t resume; /* We are restarting. */
225 void (*continuation)(); /* Continuation for blocking. */
226{
227 vm_page_t m;
228 vm_object_t object;
229 vm_offset_t offset;
230 vm_page_t first_m;
231 vm_object_t next_object;
232 vm_object_t copy_object;
233 boolean_t look_for_page;
234 vm_prot_t access_required;
235
236 if (resume) {
1. Assuming 'resume' is 0
2. Taking false branch
237 vm_fault_state_t *state =
238 (vm_fault_state_t *) current_thread()(active_threads[(0)])->ith_othersaved.other;
239
240 if (state->vmfp_backoff)
241 goto after_block_and_backoff;
242
243 object = state->vmfp_object;
244 offset = state->vmfp_offset;
245 first_m = state->vmfp_first_m;
246 access_required = state->vmfp_access;
247 goto after_thread_block;
248 }
249
250 vm_stat_sample(SAMPLED_PC_VM_FAULTS_ANY);
251 vm_stat.faults++; /* needs lock XXX */
252 current_task()((active_threads[(0)])->task)->faults++;
253
254/*
255 * Recovery actions
256 */
257#define RELEASE_PAGE(m) \
258 MACRO_BEGIN({ \
259 PAGE_WAKEUP_DONE(m); \
260 vm_page_lock_queues(); \
261 if (!m->active && !m->inactive) \
262 vm_page_activate(m); \
263 vm_page_unlock_queues(); \
264 MACRO_END})
265
266 if (vm_fault_dirty_handling
3. Assuming 'vm_fault_dirty_handling' is 0
4. Taking false branch
267#if MACH_KDB0
268 /*
269 * If there are watchpoints set, then
270 * we don't want to give away write permission
271 * on a read fault. Make the task write fault,
272 * so that the watchpoint code notices the access.
273 */
274 || db_watchpoint_list
275#endif /* MACH_KDB */
276 ) {
277 /*
278 * If we aren't asking for write permission,
279 * then don't give it away. We're using write
280 * faults to set the dirty bit.
281 */
282 if (!(fault_type & VM_PROT_WRITE((vm_prot_t) 0x02)))
283 *protection &= ~VM_PROT_WRITE((vm_prot_t) 0x02);
284 }
285
286 if (!vm_fault_interruptible)
5. Assuming 'vm_fault_interruptible' is not equal to 0
6. Taking false branch
287 interruptible = FALSE((boolean_t) 0);
288
289 /*
290 * INVARIANTS (through entire routine):
291 *
292 * 1) At all times, we must either have the object
293 * lock or a busy page in some object to prevent
294 * some other thread from trying to bring in
295 * the same page.
296 *
297 * Note that we cannot hold any locks during the
298 * pager access or when waiting for memory, so
299 * we use a busy page then.
300 *
301 * Note also that we aren't as concerned about more than
302 * one thread attempting to memory_object_data_unlock
303 * the same page at once, so we don't hold the page
304 * as busy then, but do record the highest unlock
305 * value so far. [Unlock requests may also be delivered
306 * out of order.]
307 *
308 * 2) To prevent another thread from racing us down the
309 * shadow chain and entering a new page in the top
310 * object before we do, we must keep a busy page in
311 * the top object while following the shadow chain.
312 *
313 * 3) We must increment paging_in_progress on any object
314 * for which we have a busy page, to prevent
315 * vm_object_collapse from removing the busy page
316 * without our noticing.
317 *
318 * 4) We leave busy pages on the pageout queues.
319 * If the pageout daemon comes across a busy page,
320 * it will remove the page from the pageout queues.
321 */
322
323 /*
324 * Search for the page at object/offset.
325 */
326
327 object = first_object;
328 offset = first_offset;
329 first_m = VM_PAGE_NULL((vm_page_t) 0);
330 access_required = fault_type;
331
332 /*
333 * See whether this page is resident
334 */
335
336 while (TRUE((boolean_t) 1)) {
7. Loop condition is true. Entering loop body
16. Loop condition is true. Entering loop body
23. Loop condition is true. Entering loop body
29. Loop condition is true. Entering loop body
337 m = vm_page_lookup(object, offset);
338 if (m != VM_PAGE_NULL((vm_page_t) 0)) {
8. Assuming 'm' is equal to null
9. Taking false branch
17. Assuming 'm' is equal to null
18. Taking false branch
24. Assuming 'm' is equal to null
25. Taking false branch
30. Assuming 'm' is not equal to null
31. Taking true branch
339 /*
340 * If the page is being brought in,
341 * wait for it and then retry.
342 *
343 * A possible optimization: if the page
344 * is known to be resident, we can ignore
345 * pages that are absent (regardless of
346 * whether they're busy).
347 */
348
349 if (m->busy) {
32. Taking false branch
350 kern_return_t wait_result;
351
352 PAGE_ASSERT_WAIT(m, interruptible);
353 vm_object_unlock(object);
354 if (continuation != (void (*)()) 0) {
355 vm_fault_state_t *state =
356 (vm_fault_state_t *) current_thread()(active_threads[(0)])->ith_othersaved.other;
357
358 /*
359 * Save variables in case
360 * thread_block discards
361 * our kernel stack.
362 */
363
364 state->vmfp_backoff = FALSE((boolean_t) 0);
365 state->vmfp_object = object;
366 state->vmfp_offset = offset;
367 state->vmfp_first_m = first_m;
368 state->vmfp_access =
369 access_required;
370 state->vmf_prot = *protection;
371
372 counter(c_vm_fault_page_block_busy_user++);
373 thread_block(continuation);
374 } else
375 {
376 counter(c_vm_fault_page_block_busy_kernel++);
377 thread_block((void (*)()) 0);
378 }
379 after_thread_block:
380 wait_result = current_thread()(active_threads[(0)])->wait_result;
381 vm_object_lock(object);
382 if (wait_result != THREAD_AWAKENED0) {
383 vm_fault_cleanup(object, first_m);
384 if (wait_result == THREAD_RESTART3)
385 return(VM_FAULT_RETRY1);
386 else
387 return(VM_FAULT_INTERRUPTED2);
388 }
389 continue;
390 }
391
392 /*
393 * If the page is in error, give up now.
394 */
395
396 if (m->error) {
33. Taking false branch
397 VM_PAGE_FREE(m)({ ; vm_page_free(m); ; });
398 vm_fault_cleanup(object, first_m);
399 return(VM_FAULT_MEMORY_ERROR5);
400 }
401
402 /*
403 * If the page isn't busy, but is absent,
404 * then it was deemed "unavailable".
405 */
406
407 if (m->absent) {
34. Taking false branch
408 /*
409 * Remove the non-existent page (unless it's
410 * in the top object) and move on down to the
411 * next object (if there is one).
412 */
413
414 offset += object->shadow_offset;
415 access_required = VM_PROT_READ((vm_prot_t) 0x01);
416 next_object = object->shadow;
417 if (next_object == VM_OBJECT_NULL((vm_object_t) 0)) {
418 vm_page_t real_m;
419
420 assert(!must_be_resident);
421
422 /*
423 * Absent page at bottom of shadow
424 * chain; zero fill the page we left
425 * busy in the first object, and flush
426 * the absent page. But first we
427 * need to allocate a real page.
428 */
429
430 real_m = vm_page_grab(!object->internal);
431 if (real_m == VM_PAGE_NULL((vm_page_t) 0)) {
432 vm_fault_cleanup(object, first_m);
433 return(VM_FAULT_MEMORY_SHORTAGE3);
434 }
435
436 if (object != first_object) {
437 VM_PAGE_FREE(m)({ ; vm_page_free(m); ; });
438 vm_object_paging_end(object);
439 vm_object_unlock(object);
440 object = first_object;
441 offset = first_offset;
442 m = first_m;
443 first_m = VM_PAGE_NULL((vm_page_t) 0);
444 vm_object_lock(object);
445 }
446
447 VM_PAGE_FREE(m)({ ; vm_page_free(m); ; });
448 assert(real_m->busy);
449 vm_page_lock_queues();
450 vm_page_insert(real_m, object, offset);
451 vm_page_unlock_queues();
452 m = real_m;
453
454 /*
455 * Drop the lock while zero filling
456 * page. Then break because this
457 * is the page we wanted. Checking
458 * the page lock is a waste of time;
459 * this page was either absent or
460 * newly allocated -- in both cases
461 * it can't be page locked by a pager.
462 */
463 vm_object_unlock(object);
464
465 vm_page_zero_fill(m);
466
467 vm_stat_sample(SAMPLED_PC_VM_ZFILL_FAULTS);
468
469 vm_stat.zero_fill_count++;
470 current_task()((active_threads[(0)])->task)->zero_fills++;
471 vm_object_lock(object);
472 pmap_clear_modify(m->phys_addr);
473 break;
474 } else {
475 if (must_be_resident) {
476 vm_object_paging_end(object);
477 } else if (object != first_object) {
478 vm_object_paging_end(object);
479 VM_PAGE_FREE(m)({ ; vm_page_free(m); ; });
480 } else {
481 first_m = m;
482 m->absent = FALSE((boolean_t) 0);
483 vm_object_absent_release(object);
484 m->busy = TRUE((boolean_t) 1);
485
486 vm_page_lock_queues();
487 VM_PAGE_QUEUES_REMOVE(m);
488 vm_page_unlock_queues();
489 }
490 vm_object_lock(next_object);
491 vm_object_unlock(object);
492 object = next_object;
493 vm_object_paging_begin(object)((object)->paging_in_progress++);
494 continue;
495 }
496 }
497
498 /*
499 * If the desired access to this page has
500 * been locked out, request that it be unlocked.
501 */
502
503 if (access_required & m->page_lock) {
35. Taking false branch
504 if ((access_required & m->unlock_request) != access_required) {
505 vm_prot_t new_unlock_request;
506 kern_return_t rc;
507
508 if (!object->pager_ready) {
509 vm_object_assert_wait(object,
510 VM_OBJECT_EVENT_PAGER_READY,
511 interruptible);
512 goto block_and_backoff;
513 }
514
515 new_unlock_request = m->unlock_request =
516 (access_required | m->unlock_request);
517 vm_object_unlock(object);
518 if ((rc = memory_object_data_unlock(
519 object->pager,
520 object->pager_request,
521 offset + object->paging_offset,
522 PAGE_SIZE(1 << 12),
523 new_unlock_request))
524 != KERN_SUCCESS0) {
525 printf("vm_fault: memory_object_data_unlock failed\n");
526 vm_object_lock(object);
527 vm_fault_cleanup(object, first_m);
528 return((rc == MACH_SEND_INTERRUPTED0x10000007) ?
529 VM_FAULT_INTERRUPTED2 :
530 VM_FAULT_MEMORY_ERROR5);
531 }
532 vm_object_lock(object);
533 continue;
534 }
535
536 PAGE_ASSERT_WAIT(m, interruptible);
537 goto block_and_backoff;
538 }
539
540 /*
541 * We mark the page busy and leave it on
542 * the pageout queues. If the pageout
543 * daemon comes across it, then it will
544 * remove the page.
545 */
546
547 if (!software_reference_bits) {
36. Assuming 'software_reference_bits' is 0
37. Taking true branch
548 vm_page_lock_queues();
549 if (m->inactive) {
38. Taking true branch
550 vm_stat_sample(SAMPLED_PC_VM_REACTIVATION_FAULTS);
39. Within the expansion of the macro 'vm_stat_sample':
a. Assuming '_thread_' is equal to null
551 vm_stat.reactivations++;
552 current_task()((active_threads[(0)])->task)->reactivations++;
40. Within the expansion of the macro 'current_task':
a. Access to field 'task' results in a dereference of a null pointer
553 }
554
555 VM_PAGE_QUEUES_REMOVE(m);
556 vm_page_unlock_queues();
557 }
558
559 assert(!m->busy);
560 m->busy = TRUE((boolean_t) 1);
561 assert(!m->absent);
562 break;
563 }
564
565 look_for_page =
566 (object->pager_created)
567#if MACH_PAGEMAP1
568 && (vm_external_state_get(object->existence_info, offset + object->paging_offset) !=
569 VM_EXTERNAL_STATE_ABSENT)
570#endif /* MACH_PAGEMAP */
571 ;
572
573 if ((look_for_page || (object == first_object))
11. Taking false branch
574 && !must_be_resident) {
10. Assuming 'must_be_resident' is not equal to 0
575 /*
576 * Allocate a new page for this object/offset
577 * pair.
578 */
579
580 m = vm_page_grab_fictitious();
581 if (m == VM_PAGE_NULL((vm_page_t) 0)) {
582 vm_fault_cleanup(object, first_m);
583 return(VM_FAULT_FICTITIOUS_SHORTAGE4);
584 }
585
586 vm_page_lock_queues();
587 vm_page_insert(m, object, offset);
588 vm_page_unlock_queues();
589 }
590
591 if (look_for_page && !must_be_resident) {
592 kern_return_t rc;
593
594 /*
595 * If the memory manager is not ready, we
596 * cannot make requests.
597 */
598 if (!object->pager_ready) {
599 vm_object_assert_wait(object,
600 VM_OBJECT_EVENT_PAGER_READY,
601 interruptible);
602 VM_PAGE_FREE(m)({ ; vm_page_free(m); ; });
603 goto block_and_backoff;
604 }
605
606 if (object->internal) {
607 /*
608 * Requests to the default pager
609 * must reserve a real page in advance,
610 * because the pager's data-provided
611 * won't block for pages.
612 */
613
614 if (m->fictitious && !vm_page_convert(m, FALSE((boolean_t) 0))) {
615 VM_PAGE_FREE(m)({ ; vm_page_free(m); ; });
616 vm_fault_cleanup(object, first_m);
617 return(VM_FAULT_MEMORY_SHORTAGE3);
618 }
619 } else if (object->absent_count >
620 vm_object_absent_max) {
621 /*
622 * If there are too many outstanding page
623 * requests pending on this object, we
624 * wait for them to be resolved now.
625 */
626
627 vm_object_absent_assert_wait(object, interruptible);
628 VM_PAGE_FREE(m)({ ; vm_page_free(m); ; });
629 goto block_and_backoff;
630 }
631
632 /*
633 * Indicate that the page is waiting for data
634 * from the memory manager.
635 */
636
637 m->absent = TRUE((boolean_t) 1);
638 object->absent_count++;
639
640 /*
641 * We have a busy page, so we can
642 * release the object lock.
643 */
644 vm_object_unlock(object);
645
646 /*
647 * Call the memory manager to retrieve the data.
648 */
649
650 vm_stat.pageins++;
651 vm_stat_sample(SAMPLED_PC_VM_PAGEIN_FAULTS);
652 current_task()((active_threads[(0)])->task)->pageins++;
653
654 if ((rc = memory_object_data_request(object->pager,
655 object->pager_request,
656 m->offset + object->paging_offset,
657 PAGE_SIZE(1 << 12), access_required)) != KERN_SUCCESS0) {
658 if (rc != MACH_SEND_INTERRUPTED0x10000007)
659 printf("%s(0x%p, 0x%p, 0x%lx, 0x%x, 0x%x) failed, %x\n",
660 "memory_object_data_request",
661 object->pager,
662 object->pager_request,
663 m->offset + object->paging_offset,
664 PAGE_SIZE(1 << 12), access_required, rc);
665 /*
666 * Don't want to leave a busy page around,
667 * but the data request may have blocked,
668 * so check if it's still there and busy.
669 */
670 vm_object_lock(object);
671 if (m == vm_page_lookup(object,offset) &&
672 m->absent && m->busy)
673 VM_PAGE_FREE(m)({ ; vm_page_free(m); ; });
674 vm_fault_cleanup(object, first_m);
675 return((rc == MACH_SEND_INTERRUPTED0x10000007) ?
676 VM_FAULT_INTERRUPTED2 :
677 VM_FAULT_MEMORY_ERROR5);
678 }
679
680 /*
681 * Retry with same object/offset, since new data may
682 * be in a different page (i.e., m is meaningless at
683 * this point).
684 */
685 vm_object_lock(object);
686 continue;
687 }
688
689 /*
690 * For the XP system, the only case in which we get here is if
691 * object has no pager (or unwiring). If the pager doesn't
692 * have the page this is handled in the m->absent case above
693 * (and if you change things here you should look above).
694 */
695 if (object == first_object)
12. Taking true branch
19. Taking false branch
26. Taking false branch
696 first_m = m;
697 else
698 {
699 assert(m == VM_PAGE_NULL);
700 }
701
702 /*
703 * Move on to the next object. Lock the next
704 * object before unlocking the current one.
705 */
706 access_required = VM_PROT_READ((vm_prot_t) 0x01);
707
708 offset += object->shadow_offset;
709 next_object = object->shadow;
710 if (next_object == VM_OBJECT_NULL((vm_object_t) 0)) {
13. Assuming 'next_object' is not equal to null
14. Taking false branch
20. Assuming 'next_object' is not equal to null
21. Taking false branch
27. Assuming 'next_object' is not equal to null
28. Taking false branch
711 assert(!must_be_resident);
712
713 /*
714 * If there's no object left, fill the page
715 * in the top object with zeros. But first we
716 * need to allocate a real page.
717 */
718
719 if (object != first_object) {
720 vm_object_paging_end(object);
721 vm_object_unlock(object);
722
723 object = first_object;
724 offset = first_offset;
725 vm_object_lock(object);
726 }
727
728 m = first_m;
729 assert(m->object == object);
730 first_m = VM_PAGE_NULL((vm_page_t) 0);
731
732 if (m->fictitious && !vm_page_convert(m, !object->internal)) {
733 VM_PAGE_FREE(m)({ ; vm_page_free(m); ; });
734 vm_fault_cleanup(object, VM_PAGE_NULL((vm_page_t) 0));
735 return(VM_FAULT_MEMORY_SHORTAGE3);
736 }
737
738 vm_object_unlock(object);
739 vm_page_zero_fill(m);
740 vm_stat_sample(SAMPLED_PC_VM_ZFILL_FAULTS);
741 vm_stat.zero_fill_count++;
742 current_task()((active_threads[(0)])->task)->zero_fills++;
743 vm_object_lock(object);
744 pmap_clear_modify(m->phys_addr);
745 break;
746 }
747 else {
748 vm_object_lock(next_object);
749 if ((object != first_object) || must_be_resident)
15. Taking true branch
22. Taking true branch
750 vm_object_paging_end(object);
751 vm_object_unlock(object);
752 object = next_object;
753 vm_object_paging_begin(object)((object)->paging_in_progress++);
754 }
755 }
756
757 /*
758 * PAGE HAS BEEN FOUND.
759 *
760 * This page (m) is:
761 * busy, so that we can play with it;
762 * not absent, so that nobody else will fill it;
763 * possibly eligible for pageout;
764 *
765 * The top-level page (first_m) is:
766 * VM_PAGE_NULL if the page was found in the
767 * top-level object;
768 * busy, not absent, and ineligible for pageout.
769 *
770 * The current object (object) is locked. A paging
771 * reference is held for the current and top-level
772 * objects.
773 */
774
775#if EXTRA_ASSERTIONS
776 assert(m->busy && !m->absent);
777 assert((first_m == VM_PAGE_NULL) ||
778 (first_m->busy && !first_m->absent &&
779 !first_m->active && !first_m->inactive));
780#endif /* EXTRA_ASSERTIONS */
781
782 /*
783 * If the page is being written, but isn't
784 * already owned by the top-level object,
785 * we have to copy it into a new page owned
786 * by the top-level object.
787 */
788
789 if (object != first_object) {
790 /*
791 * We only really need to copy if we
792 * want to write it.
793 */
794
795 if (fault_type & VM_PROT_WRITE((vm_prot_t) 0x02)) {
796 vm_page_t copy_m;
797
798 assert(!must_be_resident);
799
800 /*
801 * If we try to collapse first_object at this
802 * point, we may deadlock when we try to get
803 * the lock on an intermediate object (since we
804 * have the bottom object locked). We can't
805 * unlock the bottom object, because the page
806 * we found may move (by collapse) if we do.
807 *
808 * Instead, we first copy the page. Then, when
809 * we have no more use for the bottom object,
810 * we unlock it and try to collapse.
811 *
812 * Note that we copy the page even if we didn't
813 * need to... that's the breaks.
814 */
815
816 /*
817 * Allocate a page for the copy
818 */
819 copy_m = vm_page_grab(!first_object->internal);
820 if (copy_m == VM_PAGE_NULL((vm_page_t) 0)) {
821 RELEASE_PAGE(m);
822 vm_fault_cleanup(object, first_m);
823 return(VM_FAULT_MEMORY_SHORTAGE3);
824 }
825
826 vm_object_unlock(object);
827 vm_page_copy(m, copy_m);
828 vm_object_lock(object);
829
830 /*
831 * If another map is truly sharing this
832 * page with us, we have to flush all
833 * uses of the original page, since we
834 * can't distinguish those which want the
835 * original from those which need the
836 * new copy.
837 *
838 * XXXO If we know that only one map has
839 * access to this page, then we could
840 * avoid the pmap_page_protect() call.
841 */
842
843 vm_page_lock_queues();
844 vm_page_deactivate(m);
845 pmap_page_protect(m->phys_addr, VM_PROT_NONE((vm_prot_t) 0x00));
846 vm_page_unlock_queues();
847
848 /*
849 * We no longer need the old page or object.
850 */
851
852 PAGE_WAKEUP_DONE(m);
853 vm_object_paging_end(object);
854 vm_object_unlock(object);
855
856 vm_stat.cow_faults++;
857 vm_stat_sample(SAMPLED_PC_VM_COW_FAULTS);
858 current_task()((active_threads[(0)])->task)->cow_faults++;
859 object = first_object;
860 offset = first_offset;
861
862 vm_object_lock(object);
863 VM_PAGE_FREE(first_m)({ ; vm_page_free(first_m); ; });
864 first_m = VM_PAGE_NULL((vm_page_t) 0);
865 assert(copy_m->busy);
866 vm_page_lock_queues();
867 vm_page_insert(copy_m, object, offset);
868 vm_page_unlock_queues();
869 m = copy_m;
870
871 /*
872 * Now that we've gotten the copy out of the
873 * way, let's try to collapse the top object.
874 * But we have to play ugly games with
875 * paging_in_progress to do that...
876 */
877
878 vm_object_paging_end(object);
879 vm_object_collapse(object);
880 vm_object_paging_begin(object)((object)->paging_in_progress++);
881 }
882 else {
883 *protection &= (~VM_PROT_WRITE((vm_prot_t) 0x02));
884 }
885 }
886
887 /*
888 * Now check whether the page needs to be pushed into the
889 * copy object. The use of asymmetric copy on write for
890 * shared temporary objects means that we may do two copies to
891 * satisfy the fault; one above to get the page from a
892 * shadowed object, and one here to push it into the copy.
893 */
894
895 while ((copy_object = first_object->copy) != VM_OBJECT_NULL((vm_object_t) 0)) {
896 vm_offset_t copy_offset;
897 vm_page_t copy_m;
898
899 /*
900 * If the page is being written, but hasn't been
901 * copied to the copy-object, we have to copy it there.
902 */
903
904 if ((fault_type & VM_PROT_WRITE((vm_prot_t) 0x02)) == 0) {
905 *protection &= ~VM_PROT_WRITE((vm_prot_t) 0x02);
906 break;
907 }
908
909 /*
910 * If the page was guaranteed to be resident,
911 * we must have already performed the copy.
912 */
913
914 if (must_be_resident)
915 break;
916
917 /*
918 * Try to get the lock on the copy_object.
919 */
920 if (!vm_object_lock_try(copy_object)(((boolean_t) 1))) {
921 vm_object_unlock(object);
922
923 simple_lock_pause(); /* wait a bit */
924
925 vm_object_lock(object);
926 continue;
927 }
928
929 /*
930 * Make another reference to the copy-object,
931 * to keep it from disappearing during the
932 * copy.
933 */
934 assert(copy_object->ref_count > 0);
935 copy_object->ref_count++;
936
937 /*
938 * Does the page exist in the copy?
939 */
940 copy_offset = first_offset - copy_object->shadow_offset;
941 copy_m = vm_page_lookup(copy_object, copy_offset);
942 if (copy_m != VM_PAGE_NULL((vm_page_t) 0)) {
943 if (copy_m->busy) {
944 /*
945 * If the page is being brought
946 * in, wait for it and then retry.
947 */
948 PAGE_ASSERT_WAIT(copy_m, interruptible);
949 RELEASE_PAGE(m);
950 copy_object->ref_count--;
951 assert(copy_object->ref_count > 0);
952 vm_object_unlock(copy_object);
953 goto block_and_backoff;
954 }
955 }
956 else {
957 /*
958 * Allocate a page for the copy
959 */
960 copy_m = vm_page_alloc(copy_object, copy_offset);
961 if (copy_m == VM_PAGE_NULL((vm_page_t) 0)) {
962 RELEASE_PAGE(m);
963 copy_object->ref_count--;
964 assert(copy_object->ref_count > 0);
965 vm_object_unlock(copy_object);
966 vm_fault_cleanup(object, first_m);
967 return(VM_FAULT_MEMORY_SHORTAGE3);
968 }
969
970 /*
971 * Must copy page into copy-object.
972 */
973
974 vm_page_copy(m, copy_m);
975
976 /*
977 * If the old page was in use by any users
978 * of the copy-object, it must be removed
979 * from all pmaps. (We can't know which
980 * pmaps use it.)
981 */
982
983 vm_page_lock_queues();
984 pmap_page_protect(m->phys_addr, VM_PROT_NONE((vm_prot_t) 0x00));
985 copy_m->dirty = TRUE((boolean_t) 1);
986 vm_page_unlock_queues();
987
988 /*
989 * If there's a pager, then immediately
990 * page out this page, using the "initialize"
991 * option. Else, we use the copy.
992 */
993
994 if (!copy_object->pager_created) {
995 vm_page_lock_queues();
996 vm_page_activate(copy_m);
997 vm_page_unlock_queues();
998 PAGE_WAKEUP_DONE(copy_m);
999 } else {
1000 /*
1001 * The page is already ready for pageout:
1002 * not on pageout queues and busy.
1003 * Unlock everything except the
1004 * copy_object itself.
1005 */
1006
1007 vm_object_unlock(object);
1008
1009 /*
1010 * Write the page to the copy-object,
1011 * flushing it from the kernel.
1012 */
1013
1014 vm_pageout_page(copy_m, TRUE((boolean_t) 1), TRUE((boolean_t) 1));
1015
1016 /*
1017 * Since the pageout may have
1018 * temporarily dropped the
1019 * copy_object's lock, we
1020 * check whether we'll have
1021 * to deallocate the hard way.
1022 */
1023
1024 if ((copy_object->shadow != object) ||
1025 (copy_object->ref_count == 1)) {
1026 vm_object_unlock(copy_object);
1027 vm_object_deallocate(copy_object);
1028 vm_object_lock(object);
1029 continue;
1030 }
1031
1032 /*
1033 * Pick back up the old object's
1034 * lock. [It is safe to do so,
1035 * since it must be deeper in the
1036 * object tree.]
1037 */
1038
1039 vm_object_lock(object);
1040 }
1041
1042 /*
1043 * Because we're pushing a page upward
1044 * in the object tree, we must restart
1045 * any faults that are waiting here.
1046 * [Note that this is an expansion of
1047 * PAGE_WAKEUP that uses the THREAD_RESTART
1048 * wait result]. Can't turn off the page's
1049 * busy bit because we're not done with it.
1050 */
1051
1052 if (m->wanted) {
1053 m->wanted = FALSE((boolean_t) 0);
1054 thread_wakeup_with_result((event_t) m,
1055 THREAD_RESTART);
1056 }
1057 }
1058
1059 /*
1060 * The reference count on copy_object must be
1061 * at least 2: one for our extra reference,
1062 * and at least one from the outside world
1063 * (we checked that when we last locked
1064 * copy_object).
1065 */
1066 copy_object->ref_count--;
1067 assert(copy_object->ref_count > 0);
1068 vm_object_unlock(copy_object);
1069
1070 break;
1071 }
1072
1073 *result_page = m;
1074 *top_page = first_m;
1075
1076 /*
1077 * If the page can be written, assume that it will be.
1078 * [Earlier, we restrict the permission to allow write
1079 * access only if the fault so required, so we don't
1080 * mark read-only data as dirty.]
1081 */
1082
1083 if (vm_fault_dirty_handling && (*protection & VM_PROT_WRITE((vm_prot_t) 0x02)))
1084 m->dirty = TRUE((boolean_t) 1);
1085
1086 return(VM_FAULT_SUCCESS0);
1087
1088 block_and_backoff:
1089 vm_fault_cleanup(object, first_m);
1090
1091 if (continuation != (void (*)()) 0) {
1092 vm_fault_state_t *state =
1093 (vm_fault_state_t *) current_thread()(active_threads[(0)])->ith_othersaved.other;
1094
1095 /*
1096 * Save variables in case we must restart.
1097 */
1098
1099 state->vmfp_backoff = TRUE((boolean_t) 1);
1100 state->vmf_prot = *protection;
1101
1102 counter(c_vm_fault_page_block_backoff_user++);
1103 thread_block(continuation);
1104 } else
1105 {
1106 counter(c_vm_fault_page_block_backoff_kernel++);
1107 thread_block((void (*)()) 0);
1108 }
1109 after_block_and_backoff:
1110 if (current_thread()(active_threads[(0)])->wait_result == THREAD_AWAKENED0)
1111 return VM_FAULT_RETRY1;
1112 else
1113 return VM_FAULT_INTERRUPTED2;
1114
1115#undef RELEASE_PAGE
1116}
1117
1118/*
1119 * Routine: vm_fault
1120 * Purpose:
1121 * Handle page faults, including pseudo-faults
1122 * used to change the wiring status of pages.
1123 * Returns:
1124 * If an explicit (expression) continuation is supplied,
1125 * then we call the continuation instead of returning.
1126 * Implementation:
1127 * Explicit continuations make this a little icky,
1128 * because it hasn't been rewritten to embrace CPS.
1129 * Instead, we have resume arguments for vm_fault and
1130 * vm_fault_page, to let the fault computation continue.
1131 *
1132 * vm_fault and vm_fault_page save mucho state
1133 * in the moral equivalent of a closure. The state
1134 * structure is allocated when first entering vm_fault
1135 * and deallocated when leaving vm_fault.
1136 */
1137
1138void
1139vm_fault_continue()
1140{
1141 vm_fault_state_t *state =
1142 (vm_fault_state_t *) current_thread()(active_threads[(0)])->ith_othersaved.other;
1143
1144 (void) vm_fault(state->vmf_map,
1145 state->vmf_vaddr,
1146 state->vmf_fault_type,
1147 state->vmf_change_wiring,
1148 TRUE((boolean_t) 1), state->vmf_continuation);
1149 /*NOTREACHED*/
1150}
1151
1152kern_return_t vm_fault(map, vaddr, fault_type, change_wiring,
1153 resume, continuation)
1154 vm_map_t map;
1155 vm_offset_t vaddr;
1156 vm_prot_t fault_type;
1157 boolean_t change_wiring;
1158 boolean_t resume;
1159 void (*continuation)();
1160{
1161 vm_map_version_t version; /* Map version for verification */
1162 boolean_t wired; /* Should mapping be wired down? */
1163 vm_object_t object; /* Top-level object */
1164 vm_offset_t offset; /* Top-level offset */
1165 vm_prot_t prot; /* Protection for mapping */
1166 vm_object_t old_copy_object; /* Saved copy object */
1167 vm_page_t result_page; /* Result of vm_fault_page */
1168 vm_page_t top_page; /* Placeholder page */
1169 kern_return_t kr;
1170
1171 vm_page_t m; /* Fast access to result_page */
1172
1173 if (resume) {
1174 vm_fault_state_t *state =
1175 (vm_fault_state_t *) current_thread()(active_threads[(0)])->ith_othersaved.other;
1176
1177 /*
1178 * Retrieve cached variables and
1179 * continue vm_fault_page.
1180 */
1181
1182 object = state->vmf_object;
1183 if (object == VM_OBJECT_NULL((vm_object_t) 0))
1184 goto RetryFault;
1185 version = state->vmf_version;
1186 wired = state->vmf_wired;
1187 offset = state->vmf_offset;
1188 prot = state->vmf_prot;
1189
1190 kr = vm_fault_page(object, offset, fault_type,
1191 (change_wiring && !wired), !change_wiring,
1192 &prot, &result_page, &top_page,
1193 TRUE((boolean_t) 1), vm_fault_continue);
1194 goto after_vm_fault_page;
1195 }
1196
1197 if (continuation != (void (*)()) 0) {
1198 /*
1199 * We will probably need to save state.
1200 */
1201
1202 char * state;
1203
1204 /*
1205 * if this assignment stmt is written as
1206 * 'active_threads[cpu_number()] = kmem_cache_alloc()',
1207 * cpu_number may be evaluated before kmem_cache_alloc;
1208 * if kmem_cache_alloc blocks, cpu_number will be wrong
1209 */
1210
1211 state = (char *) kmem_cache_alloc(&vm_fault_state_cache);
1212 current_thread()(active_threads[(0)])->ith_othersaved.other = state;
1213
1214 }
1215
1216 RetryFault: ;
1217
1218 /*
1219 * Find the backing store object and offset into
1220 * it to begin the search.
1221 */
1222
1223 if ((kr = vm_map_lookup(&map, vaddr, fault_type, &version,
1224 &object, &offset,
1225 &prot, &wired)) != KERN_SUCCESS0) {
1226 goto done;
1227 }
1228
1229 /*
1230 * If the page is wired, we must fault for the current protection
1231 * value, to avoid further faults.
1232 */
1233
1234 if (wired)
1235 fault_type = prot;
1236
1237 /*
1238 * Make a reference to this object to
1239 * prevent its disposal while we are messing with
1240 * it. Once we have the reference, the map is free
1241 * to be diddled. Since objects reference their
1242 * shadows (and copies), they will stay around as well.
1243 */
1244
1245 assert(object->ref_count > 0);
1246 object->ref_count++;
1247 vm_object_paging_begin(object)((object)->paging_in_progress++);
1248
1249 if (continuation != (void (*)()) 0) {
1250 vm_fault_state_t *state =
1251 (vm_fault_state_t *) current_thread()(active_threads[(0)])->ith_othersaved.other;
1252
1253 /*
1254 * Save variables, in case vm_fault_page discards
1255 * our kernel stack and we have to restart.
1256 */
1257
1258 state->vmf_map = map;
1259 state->vmf_vaddr = vaddr;
1260 state->vmf_fault_type = fault_type;
1261 state->vmf_change_wiring = change_wiring;
1262 state->vmf_continuation = continuation;
1263
1264 state->vmf_version = version;
1265 state->vmf_wired = wired;
1266 state->vmf_object = object;
1267 state->vmf_offset = offset;
1268 state->vmf_prot = prot;
1269
1270 kr = vm_fault_page(object, offset, fault_type,
1271 (change_wiring && !wired), !change_wiring,
1272 &prot, &result_page, &top_page,
1273 FALSE((boolean_t) 0), vm_fault_continue);
1274 } else
1275 {
1276 kr = vm_fault_page(object, offset, fault_type,
1277 (change_wiring && !wired), !change_wiring,
1278 &prot, &result_page, &top_page,
1279 FALSE((boolean_t) 0), (void (*)()) 0);
1280 }
1281 after_vm_fault_page:
1282
1283 /*
1284 * If we didn't succeed, lose the object reference immediately.
1285 */
1286
1287 if (kr != VM_FAULT_SUCCESS0)
1288 vm_object_deallocate(object);
1289
1290 /*
1291 * See why we failed, and take corrective action.
1292 */
1293
1294 switch (kr) {
1295 case VM_FAULT_SUCCESS0:
1296 break;
1297 case VM_FAULT_RETRY1:
1298 goto RetryFault;
1299 case VM_FAULT_INTERRUPTED2:
1300 kr = KERN_SUCCESS0;
1301 goto done;
1302 case VM_FAULT_MEMORY_SHORTAGE3:
1303 if (continuation != (void (*)()) 0) {
1304 vm_fault_state_t *state =
1305 (vm_fault_state_t *) current_thread()(active_threads[(0)])->ith_othersaved.other;
1306
1307 /*
1308 * Save variables in case VM_PAGE_WAIT
1309 * discards our kernel stack.
1310 */
1311
1312 state->vmf_map = map;
1313 state->vmf_vaddr = vaddr;
1314 state->vmf_fault_type = fault_type;
1315 state->vmf_change_wiring = change_wiring;
1316 state->vmf_continuation = continuation;
1317 state->vmf_object = VM_OBJECT_NULL((vm_object_t) 0);
1318
1319 VM_PAGE_WAIT(vm_fault_continue)vm_page_wait(vm_fault_continue);
1320 } else
1321 VM_PAGE_WAIT((void (*)()) 0)vm_page_wait((void (*)()) 0);
1322 goto RetryFault;
1323 case VM_FAULT_FICTITIOUS_SHORTAGE4:
1324 vm_page_more_fictitious();
1325 goto RetryFault;
1326 case VM_FAULT_MEMORY_ERROR5:
1327 kr = KERN_MEMORY_ERROR10;
1328 goto done;
1329 }
1330
1331 m = result_page;
1332
1333 assert((change_wiring && !wired) ?
1334 (top_page == VM_PAGE_NULL) :
1335 ((top_page == VM_PAGE_NULL) == (m->object == object)));
1336
1337 /*
1338 * How to clean up the result of vm_fault_page. This
1339 * happens whether the mapping is entered or not.
1340 */
1341
1342#define UNLOCK_AND_DEALLOCATE \
1343 MACRO_BEGIN({ \
1344 vm_fault_cleanup(m->object, top_page); \
1345 vm_object_deallocate(object); \
1346 MACRO_END})
1347
1348 /*
1349 * What to do with the resulting page from vm_fault_page
1350 * if it doesn't get entered into the physical map:
1351 */
1352
1353#define RELEASE_PAGE(m) \
1354 MACRO_BEGIN({ \
1355 PAGE_WAKEUP_DONE(m); \
1356 vm_page_lock_queues(); \
1357 if (!m->active && !m->inactive) \
1358 vm_page_activate(m); \
1359 vm_page_unlock_queues(); \
1360 MACRO_END})
1361
1362 /*
1363 * We must verify that the maps have not changed
1364 * since our last lookup.
1365 */
1366
1367 old_copy_object = m->object->copy;
1368
1369 vm_object_unlock(m->object);
1370 while (!vm_map_verify(map, &version)) {
1371 vm_object_t retry_object;
1372 vm_offset_t retry_offset;
1373 vm_prot_t retry_prot;
1374
1375 /*
1376 * To avoid trying to write_lock the map while another
1377 * thread has it read_locked (in vm_map_pageable), we
1378 * do not try for write permission. If the page is
1379 * still writable, we will get write permission. If it
1380 * is not, or has been marked needs_copy, we enter the
1381 * mapping without write permission, and will merely
1382 * take another fault.
1383 */
1384 kr = vm_map_lookup(&map, vaddr,
1385 fault_type & ~VM_PROT_WRITE((vm_prot_t) 0x02), &version,
1386 &retry_object, &retry_offset, &retry_prot,
1387 &wired);
1388
1389 if (kr != KERN_SUCCESS0) {
1390 vm_object_lock(m->object);
1391 RELEASE_PAGE(m);
1392 UNLOCK_AND_DEALLOCATE;
1393 goto done;
1394 }
1395
1396 vm_object_unlock(retry_object);
1397 vm_object_lock(m->object);
1398
1399 if ((retry_object != object) ||
1400 (retry_offset != offset)) {
1401 RELEASE_PAGE(m);
1402 UNLOCK_AND_DEALLOCATE;
1403 goto RetryFault;
1404 }
1405
1406 /*
1407 * Check whether the protection has changed or the object
1408 * has been copied while we left the map unlocked.
1409 */
1410 prot &= retry_prot;
1411 vm_object_unlock(m->object);
1412 }
1413 vm_object_lock(m->object);
1414
1415 /*
1416 * If the copy object changed while the top-level object
1417 * was unlocked, then we must take away write permission.
1418 */
1419
1420 if (m->object->copy != old_copy_object)
1421 prot &= ~VM_PROT_WRITE((vm_prot_t) 0x02);
1422
1423 /*
1424 * If we want to wire down this page, but no longer have
1425 * adequate permissions, we must start all over.
1426 */
1427
1428 if (wired && (prot != fault_type)) {
1429 vm_map_verify_done(map, &version)(lock_done(&(map)->lock));
1430 RELEASE_PAGE(m);
1431 UNLOCK_AND_DEALLOCATE;
1432 goto RetryFault;
1433 }
1434
1435 /*
1436 * It's critically important that a wired-down page be faulted
1437 * only once in each map for which it is wired.
1438 */
1439
1440 vm_object_unlock(m->object);
1441
1442 /*
1443 * Put this page into the physical map.
1444 * We had to do the unlock above because pmap_enter
1445 * may cause other faults. The page may be on
1446 * the pageout queues. If the pageout daemon comes
1447 * across the page, it will remove it from the queues.
1448 */
1449
1450 PMAP_ENTER(map->pmap, vaddr, m, prot, wired);
1451
1452 /*
1453 * If the page is not wired down and isn't already
1454 * on a pageout queue, then put it where the
1455 * pageout daemon can find it.
1456 */
1457 vm_object_lock(m->object);
1458 vm_page_lock_queues();
1459 if (change_wiring) {
1460 if (wired)
1461 vm_page_wire(m);
1462 else
1463 vm_page_unwire(m);
1464 } else if (software_reference_bits) {
1465 if (!m->active && !m->inactive)
1466 vm_page_activate(m);
1467 m->reference = TRUE((boolean_t) 1);
1468 } else {
1469 vm_page_activate(m);
1470 }
1471 vm_page_unlock_queues();
1472
1473 /*
1474 * Unlock everything, and return
1475 */
1476
1477 vm_map_verify_done(map, &version)(lock_done(&(map)->lock));
1478 PAGE_WAKEUP_DONE(m);
1479 kr = KERN_SUCCESS0;
1480 UNLOCK_AND_DEALLOCATE;
1481
1482#undef UNLOCK_AND_DEALLOCATE
1483#undef RELEASE_PAGE
1484
1485 done:
1486 if (continuation != (void (*)()) 0) {
1487 vm_fault_state_t *state =
1488 (vm_fault_state_t *) current_thread()(active_threads[(0)])->ith_othersaved.other;
1489
1490 kmem_cache_free(&vm_fault_state_cache, (vm_offset_t) state);
1491 (*continuation)(kr);
1492 /*NOTREACHED*/
1493 }
1494
1495 return(kr);
1496}
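
The loop above is an optimistic-concurrency pattern: vm_map_lookup() records a map version, the fault code then runs with the map unlocked, and vm_map_verify() later confirms the map is unchanged before the result is committed; on a mismatch the whole fault is retried, deliberately without asking for write permission so the map never has to be write-locked while another thread holds it read-locked. Below is a minimal standalone sketch of that version-check-and-retry idea; the toy_* types and helpers are illustrative stand-ins, not the Mach interfaces.

#include <stdio.h>
#include <stdbool.h>

/* Hypothetical stand-ins for vm_map_t and vm_map_version_t. */
struct toy_map { unsigned timestamp; };
typedef unsigned toy_version_t;

/* Record the version seen at lookup time. */
static toy_version_t toy_lookup(struct toy_map *map) { return map->timestamp; }

/* Verify that the map has not been modified since the lookup. */
static bool toy_verify(struct toy_map *map, toy_version_t v) { return map->timestamp == v; }

/* Something else modifies the map, bumping its version. */
static void toy_modify(struct toy_map *map) { map->timestamp++; }

int main(void)
{
    struct toy_map map = { .timestamp = 0 };
    int attempts = 0;

    for (;;) {
        toy_version_t v = toy_lookup(&map);
        attempts++;

        /* Simulate work done with the map unlocked: on the first pass,
         * pretend a concurrent thread changed the map under us. */
        if (attempts == 1)
            toy_modify(&map);

        if (toy_verify(&map, v))
            break;          /* versions match: safe to commit */
        /* mismatch: redo the lookup, like the RetryFault path above */
    }

    printf("committed after %d attempt(s)\n", attempts);
    return 0;
}
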
1497
1498kern_return_t vm_fault_wire_fast();
1499
1500/*
1501 * vm_fault_wire:
1502 *
1503 * Wire down a range of virtual addresses in a map.
1504 */
1505void vm_fault_wire(map, entry)
1506 vm_map_t map;
1507 vm_map_entry_t entry;
1508{
1509
1510 vm_offset_t va;
1511 pmap_t pmap;
1512 vm_offset_t end_addr = entry->vme_endlinks.end;
1513
1514 pmap = vm_map_pmap(map)((map)->pmap);
1515
1516 /*
1517 * Inform the physical mapping system that the
1518 * range of addresses may not fault, so that
1519 * page tables and such can be locked down as well.
1520 */
1521
1522 pmap_pageable(pmap, entry->vme_startlinks.start, end_addr, FALSE((boolean_t) 0));
1523
1524 /*
1525 * We simulate a fault to get the page and enter it
1526 * in the physical map.
1527 */
1528
1529 for (va = entry->vme_startlinks.start; va < end_addr; va += PAGE_SIZE(1 << 12)) {
1530 if (vm_fault_wire_fast(map, va, entry) != KERN_SUCCESS0)
1531 (void) vm_fault(map, va, VM_PROT_NONE((vm_prot_t) 0x00), TRUE((boolean_t) 1),
1532 FALSE((boolean_t) 0), (void (*)()) 0);
1533 }
1534}
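
vm_fault_wire() walks the entry one page at a time and prefers vm_fault_wire_fast(), falling back to the full vm_fault() path only when the fast case does not apply. Here is a small standalone sketch of that fast-path/fallback loop; toy_wire_fast() and toy_wire_slow() are hypothetical helpers standing in for the real routines.

#include <stdio.h>
#include <stdint.h>

#define TOY_PAGE_SIZE 4096u   /* stands in for PAGE_SIZE (1 << 12) */

/* Hypothetical fast path: succeeds only for "easy" pages. */
static int toy_wire_fast(uintptr_t va) { return (va / TOY_PAGE_SIZE) % 2 == 0 ? 0 : -1; }

/* Hypothetical general path, analogous to falling back to vm_fault(). */
static void toy_wire_slow(uintptr_t va) { printf("slow path wired 0x%lx\n", (unsigned long)va); }

int main(void)
{
    uintptr_t start = 0x10000, end = start + 4 * TOY_PAGE_SIZE;

    /* Walk the range one page at a time, preferring the cheap path. */
    for (uintptr_t va = start; va < end; va += TOY_PAGE_SIZE) {
        if (toy_wire_fast(va) != 0)
            toy_wire_slow(va);
    }
    return 0;
}
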
1535
1536/*
1537 * vm_fault_unwire:
1538 *
1539 * Unwire a range of virtual addresses in a map.
1540 */
1541void vm_fault_unwire(map, entry)
1542 vm_map_t map;
1543 vm_map_entry_t entry;
1544{
1545 vm_offset_t va;
1546 pmap_t pmap;
1547 vm_offset_t end_addr = entry->vme_endlinks.end;
1548 vm_object_t object;
1549
1550 pmap = vm_map_pmap(map)((map)->pmap);
1551
1552 object = (entry->is_sub_map)
1553 ? VM_OBJECT_NULL((vm_object_t) 0) : entry->object.vm_object;
1554
1555 /*
1556 * Since the pages are wired down, we must be able to
1557 * get their mappings from the physical map system.
1558 */
1559
1560 for (va = entry->vme_startlinks.start; va < end_addr; va += PAGE_SIZE(1 << 12)) {
1561 pmap_change_wiring(pmap, va, FALSE((boolean_t) 0));
1562
1563 if (object == VM_OBJECT_NULL((vm_object_t) 0)) {
1564 vm_map_lock_set_recursive(map)lock_set_recursive(&(map)->lock);
1565 (void) vm_fault(map, va, VM_PROT_NONE((vm_prot_t) 0x00), TRUE((boolean_t) 1),
1566 FALSE((boolean_t) 0), (void (*)()) 0);
1567 vm_map_lock_clear_recursive(map)lock_clear_recursive(&(map)->lock);
1568 } else {
1569 vm_prot_t prot;
1570 vm_page_t result_page;
1571 vm_page_t top_page;
1572 vm_fault_return_t result;
1573
1574 do {
1575 prot = VM_PROT_NONE((vm_prot_t) 0x00);
1576
1577 vm_object_lock(object);
1578 vm_object_paging_begin(object)((object)->paging_in_progress++);
1579 result = vm_fault_page(object,
1580 entry->offset +
1581 (va - entry->vme_startlinks.start),
1582 VM_PROT_NONE((vm_prot_t) 0x00), TRUE((boolean_t) 1),
1583 FALSE((boolean_t) 0), &prot,
1584 &result_page,
1585 &top_page,
1586 FALSE((boolean_t) 0), (void (*)()) 0);
1587 } while (result == VM_FAULT_RETRY1);
1588
1589 if (result != VM_FAULT_SUCCESS0)
1590 panic("vm_fault_unwire: failure");
1591
1592 vm_page_lock_queues();
1593 vm_page_unwire(result_page);
1594 vm_page_unlock_queues();
1595 PAGE_WAKEUP_DONE(result_page);
1596
1597 vm_fault_cleanup(result_page->object, top_page);
1598 }
1599 }
1600
1601 /*
1602 * Inform the physical mapping system that the range
1603 * of addresses may fault, so that page tables and
1604 * such may be unwired themselves.
1605 */
1606
1607 pmap_pageable(pmap, entry->vme_startlinks.start, end_addr, TRUE((boolean_t) 1));
1608}
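
Note the shape of the inner loop in vm_fault_unwire(): vm_fault_page() is simply retried while it reports a transient condition, and any other outcome is treated as a kernel bug (panic), because a page that is wired down must be resident. The following is a hedged standalone sketch of that retry-or-assert pattern, using invented toy_* names rather than the real VM_FAULT_* interface.

#include <assert.h>
#include <stdio.h>

/* Illustrative result codes mirroring VM_FAULT_SUCCESS / VM_FAULT_RETRY. */
enum toy_result { TOY_SUCCESS, TOY_RETRY, TOY_ERROR };

/* Hypothetical page lookup that reports a transient condition twice. */
static enum toy_result toy_fault_page(int *calls)
{
    return (++(*calls) < 3) ? TOY_RETRY : TOY_SUCCESS;
}

int main(void)
{
    int calls = 0;
    enum toy_result result;

    /* Retry as long as the condition is transient... */
    do {
        result = toy_fault_page(&calls);
    } while (result == TOY_RETRY);

    /* ...but treat a hard failure as a bug, as vm_fault_unwire() does
     * with panic(): a wired page is supposed to be resident. */
    assert(result == TOY_SUCCESS);

    printf("page found after %d attempt(s)\n", calls);
    return 0;
}
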
1609
1610/*
1611 * vm_fault_wire_fast:
1612 *
1613 * Handle common case of a wire down page fault at the given address.
1614 * If successful, the page is inserted into the associated physical map.
1615 * The map entry is passed in to avoid the overhead of a map lookup.
1616 *
1617 * NOTE: the given address should be truncated to the
1618 * proper page address.
1619 *
1620 * KERN_SUCCESS is returned if the page fault is handled; otherwise,
1621 * a standard error specifying why the fault is fatal is returned.
1622 *
1623 * The map in question must be referenced, and remains so.
1624 * Caller has a read lock on the map.
1625 *
1626 * This is a stripped version of vm_fault() for wiring pages. Anything
1627 * other than the common case will return KERN_FAILURE, and the caller
1628 * is expected to call vm_fault().
1629 */
1630kern_return_t vm_fault_wire_fast(map, va, entry)
1631 vm_map_t map;
1632 vm_offset_t va;
1633 vm_map_entry_t entry;
1634{
1635 vm_object_t object;
1636 vm_offset_t offset;
1637 vm_page_t m;
1638 vm_prot_t prot;
1639
1640 vm_stat.faults++; /* needs lock XXX */
1641 current_task()((active_threads[(0)])->task)->faults++;
1642/*
1643 * Recovery actions
1644 */
1645
1646#undef RELEASE_PAGE
1647#define RELEASE_PAGE(m) { \
1648 PAGE_WAKEUP_DONE(m); \
1649 vm_page_lock_queues(); \
1650 vm_page_unwire(m); \
1651 vm_page_unlock_queues(); \
1652}
1653
1654
1655#undef UNLOCK_THINGS{ object->paging_in_progress--; ; }
1656#define UNLOCK_THINGS{ object->paging_in_progress--; ; } { \
1657 object->paging_in_progress--; \
1658 vm_object_unlock(object); \
1659}
1660
1661#undef UNLOCK_AND_DEALLOCATE
1662#define UNLOCK_AND_DEALLOCATE { \
1663 UNLOCK_THINGS{ object->paging_in_progress--; ; }; \
1664 vm_object_deallocate(object); \
1665}
1666/*
1667 * Give up and have caller do things the hard way.
1668 */
1669
1670#define GIVE_UP { \
1671 UNLOCK_AND_DEALLOCATE; \
1672 return(KERN_FAILURE5); \
1673}
1674
1675
1676 /*
1677 * If this entry is not directly to a vm_object, bail out.
1678 */
1679 if (entry->is_sub_map)
1680 return(KERN_FAILURE5);
1681
1682 /*
1683 * Find the backing store object and offset into it.
1684 */
1685
1686 object = entry->object.vm_object;
1687 offset = (va - entry->vme_startlinks.start) + entry->offset;
1688 prot = entry->protection;
1689
1690 /*
1691 * Make a reference to this object to prevent its
1692 * disposal while we are messing with it.
1693 */
1694
1695 vm_object_lock(object);
1696 assert(object->ref_count > 0);
1697 object->ref_count++;
1698 object->paging_in_progress++;
1699
1700 /*
1701 * INVARIANTS (through entire routine):
1702 *
1703 * 1) At all times, we must either have the object
1704 * lock or a busy page in some object to prevent
1705 * some other thread from trying to bring in
1706 * the same page.
1707 *
1708 * 2) Once we have a busy page, we must remove it from
1709 * the pageout queues, so that the pageout daemon
1710 * will not grab it away.
1711 *
1712 */
1713
1714 /*
1715 * Look for page in top-level object. If it's not there or
1716 * there's something going on, give up.
1717 */
1718 m = vm_page_lookup(object, offset);
1719 if ((m == VM_PAGE_NULL((vm_page_t) 0)) || (m->error) ||
1720 (m->busy) || (m->absent) || (prot & m->page_lock)) {
1721 GIVE_UP;
1722 }
1723
1724 /*
1725 * Wire the page down now. All bail outs beyond this
1726 * point must unwire the page.
1727 */
1728
1729 vm_page_lock_queues();
1730 vm_page_wire(m);
1731 vm_page_unlock_queues();
1732
1733 /*
1734 * Mark page busy for other threads.
1735 */
1736 assert(!m->busy);
1737 m->busy = TRUE((boolean_t) 1);
1738 assert(!m->absent);
1739
1740 /*
1741 * Give up if the page is being written and there's a copy object
1742 */
1743 if ((object->copy != VM_OBJECT_NULL((vm_object_t) 0)) && (prot & VM_PROT_WRITE((vm_prot_t) 0x02))) {
1744 RELEASE_PAGE(m);
1745 GIVE_UP;
1746 }
1747
1748 /*
1749 * Put this page into the physical map.
1750 * We have to unlock the object because pmap_enter
1751 * may cause other faults.
1752 */
1753 vm_object_unlock(object);
1754
1755 PMAP_ENTER(map->pmap, va, m, prot, TRUE);
1756
1757 /*
1758 * Must relock object so that paging_in_progress can be cleared.
1759 */
1760 vm_object_lock(object);
1761
1762 /*
1763 * Unlock everything, and return
1764 */
1765
1766 PAGE_WAKEUP_DONE(m);
1767 UNLOCK_AND_DEALLOCATE;
1768
1769 return(KERN_SUCCESS0);
1770
1771}
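
vm_fault_wire_fast() relies on the busy/wanted handshake: the page is marked busy while it is being entered into the pmap, and PAGE_WAKEUP_DONE() later clears busy and wakes any thread that recorded interest by setting wanted. The following is a single-threaded toy model of just those two flags; struct toy_page and toy_page_wakeup_done() are made-up illustrations, not kernel definitions.

#include <stdbool.h>
#include <stdio.h>

/* A toy page with just the fields the busy/wanted protocol touches. */
struct toy_page {
    bool busy;     /* set while one thread owns the page */
    bool wanted;   /* set by waiters who must be woken later */
};

/* Model of PAGE_WAKEUP_DONE: clear busy and wake any recorded waiter. */
static void toy_page_wakeup_done(struct toy_page *p)
{
    p->busy = false;
    if (p->wanted) {
        p->wanted = false;
        printf("waking threads that wanted this page\n");
    }
}

int main(void)
{
    struct toy_page page = { .busy = false, .wanted = false };

    page.busy = true;      /* claim the page, as vm_fault_wire_fast() does */
    page.wanted = true;    /* pretend another thread queued up meanwhile */

    /* ... enter the page into the pmap, etc. ... */

    toy_page_wakeup_done(&page);
    return 0;
}
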
1772
1773/*
1774 * Routine: vm_fault_copy_cleanup
1775 * Purpose:
1776 * Release a page used by vm_fault_copy.
1777 */
1778
1779void vm_fault_copy_cleanup(page, top_page)
1780 vm_page_t page;
1781 vm_page_t top_page;
1782{
1783 vm_object_t object = page->object;
1784
1785 vm_object_lock(object);
1786 PAGE_WAKEUP_DONE(page);
1787 vm_page_lock_queues();
1788 if (!page->active && !page->inactive)
1789 vm_page_activate(page);
1790 vm_page_unlock_queues();
1791 vm_fault_cleanup(object, top_page);
1792}
1793
1794/*
1795 * Routine: vm_fault_copy
1796 *
1797 * Purpose:
1798 * Copy pages from one virtual memory object to another --
1799 * neither the source nor destination pages need be resident.
1800 *
1801 * Before actually copying a page, the version associated with
1802 * the destination address map will be verified.
1803 *
1804 * In/out conditions:
1805 * The caller must hold a reference, but not a lock, to
1806 * each of the source and destination objects and to the
1807 * destination map.
1808 *
1809 * Results:
1810 * Returns KERN_SUCCESS if no errors were encountered in
1811 * reading or writing the data. Returns KERN_INTERRUPTED if
1812 * the operation was interrupted (only possible if the
1813 * "interruptible" argument is asserted). Other return values
1814 * indicate a permanent error in copying the data.
1815 *
1816 * The actual amount of data copied will be returned in the
1817 * "copy_size" argument. In the event that the destination map
1818 * verification failed, this amount may be less than the amount
1819 * requested.
1820 */
1821kern_return_t vm_fault_copy(
1822 src_object,
1823 src_offset,
1824 src_size,
1825 dst_object,
1826 dst_offset,
1827 dst_map,
1828 dst_version,
1829 interruptible
1830 )
1831 vm_object_t src_object;
1832 vm_offset_t src_offset;
1833 vm_size_t *src_size; /* INOUT */
1834 vm_object_t dst_object;
1835 vm_offset_t dst_offset;
1836 vm_map_t dst_map;
1837 vm_map_version_t *dst_version;
1838 boolean_t interruptible;
1839{
1840 vm_page_t result_page;
1841 vm_prot_t prot;
1842
1843 vm_page_t src_page;
1844 vm_page_t src_top_page;
1845
1846 vm_page_t dst_page;
1847 vm_page_t dst_top_page;
1848
1849 vm_size_t amount_done;
1850 vm_object_t old_copy_object;
1851
1852#define RETURN(x) \
1853 MACRO_BEGIN({ \
1854 *src_size = amount_done; \
1855 MACRO_RETURNif (((boolean_t) 1)) return(x); \
1856 MACRO_END})
1857
1858 amount_done = 0;
1859 do { /* while (amount_done != *src_size) */
1860
1861 RetrySourceFault: ;
1862
1863 if (src_object == VM_OBJECT_NULL((vm_object_t) 0)) {
1864 /*
1865 * No source object. We will just
1866 * zero-fill the page in dst_object.
1867 */
1868
1869 src_page = VM_PAGE_NULL((vm_page_t) 0);
1870 } else {
1871 prot = VM_PROT_READ((vm_prot_t) 0x01);
1872
1873 vm_object_lock(src_object);
1874 vm_object_paging_begin(src_object)((src_object)->paging_in_progress++);
1875
1876 switch (vm_fault_page(src_object, src_offset,
1877 VM_PROT_READ((vm_prot_t) 0x01), FALSE((boolean_t) 0), interruptible,
1878 &prot, &result_page, &src_top_page,
1879 FALSE((boolean_t) 0), (void (*)()) 0)) {
1880
1881 case VM_FAULT_SUCCESS0:
1882 break;
1883 case VM_FAULT_RETRY1:
1884 goto RetrySourceFault;
1885 case VM_FAULT_INTERRUPTED2:
1886 RETURN(MACH_SEND_INTERRUPTED0x10000007);
1887 case VM_FAULT_MEMORY_SHORTAGE3:
1888 VM_PAGE_WAIT((void (*)()) 0)vm_page_wait((void (*)()) 0);
1889 goto RetrySourceFault;
1890 case VM_FAULT_FICTITIOUS_SHORTAGE4:
1891 vm_page_more_fictitious();
1892 goto RetrySourceFault;
1893 case VM_FAULT_MEMORY_ERROR5:
1894 return(KERN_MEMORY_ERROR10);
1895 }
1896
1897 src_page = result_page;
1898
1899 assert((src_top_page == VM_PAGE_NULL) ==
1900 (src_page->object == src_object));
1901
1902 assert ((prot & VM_PROT_READ) != VM_PROT_NONE);
1903
1904 vm_object_unlock(src_page->object);
1905 }
1906
1907 RetryDestinationFault: ;
1908
1909 prot = VM_PROT_WRITE((vm_prot_t) 0x02);
1910
1911 vm_object_lock(dst_object);
1912 vm_object_paging_begin(dst_object)((dst_object)->paging_in_progress++);
1913
1914 switch (vm_fault_page(dst_object, dst_offset, VM_PROT_WRITE((vm_prot_t) 0x02),
1915 FALSE((boolean_t) 0), FALSE((boolean_t) 0) /* interruptible */,
1916 &prot, &result_page, &dst_top_page,
1917 FALSE((boolean_t) 0), (void (*)()) 0)) {
1918
1919 case VM_FAULT_SUCCESS0:
1920 break;
1921 case VM_FAULT_RETRY1:
1922 goto RetryDestinationFault;
1923 case VM_FAULT_INTERRUPTED2:
1924 if (src_page != VM_PAGE_NULL((vm_page_t) 0))
1925 vm_fault_copy_cleanup(src_page,
1926 src_top_page);
1927 RETURN(MACH_SEND_INTERRUPTED0x10000007);
1928 case VM_FAULT_MEMORY_SHORTAGE3:
1929 VM_PAGE_WAIT((void (*)()) 0)vm_page_wait((void (*)()) 0);
1930 goto RetryDestinationFault;
1931 case VM_FAULT_FICTITIOUS_SHORTAGE4:
1932 vm_page_more_fictitious();
1933 goto RetryDestinationFault;
1934 case VM_FAULT_MEMORY_ERROR5:
1935 if (src_page != VM_PAGE_NULL((vm_page_t) 0))
1936 vm_fault_copy_cleanup(src_page,
1937 src_top_page);
1938 return(KERN_MEMORY_ERROR10);
1939 }
1940 assert ((prot & VM_PROT_WRITE) != VM_PROT_NONE);
1941
1942 dst_page = result_page;
1943
1944 old_copy_object = dst_page->object->copy;
1945
1946 vm_object_unlock(dst_page->object);
1947
1948 if (!vm_map_verify(dst_map, dst_version)) {
1949
1950 BailOut: ;
1951
1952 if (src_page != VM_PAGE_NULL((vm_page_t) 0))
1953 vm_fault_copy_cleanup(src_page, src_top_page);
1954 vm_fault_copy_cleanup(dst_page, dst_top_page);
1955 break;
1956 }
1957
1958
1959 vm_object_lock(dst_page->object);
1960 if (dst_page->object->copy != old_copy_object) {
1961 vm_object_unlock(dst_page->object);
1962 vm_map_verify_done(dst_map, dst_version)(lock_done(&(dst_map)->lock));
1963 goto BailOut;
1964 }
1965 vm_object_unlock(dst_page->object);
1966
1967 /*
1968 * Copy the page, and note that it is dirty
1969 * immediately.
1970 */
1971
1972 if (src_page == VM_PAGE_NULL((vm_page_t) 0))
1973 vm_page_zero_fill(dst_page);
1974 else
1975 vm_page_copy(src_page, dst_page);
1976 dst_page->dirty = TRUE((boolean_t) 1);
1977
1978 /*
1979 * Unlock everything, and return
1980 */
1981
1982 vm_map_verify_done(dst_map, dst_version)(lock_done(&(dst_map)->lock));
1983
1984 if (src_page != VM_PAGE_NULL((vm_page_t) 0))
1985 vm_fault_copy_cleanup(src_page, src_top_page);
1986 vm_fault_copy_cleanup(dst_page, dst_top_page);
1987
1988 amount_done += PAGE_SIZE(1 << 12);
1989 src_offset += PAGE_SIZE(1 << 12);
1990 dst_offset += PAGE_SIZE(1 << 12);
1991
1992 } while (amount_done != *src_size);
1993
1994 RETURN(KERN_SUCCESS0);
1995#undef RETURN
1996
1997 /*NOTREACHED*/
1998}
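
The driving loop of vm_fault_copy() advances the source and destination offsets a page at a time, accumulates amount_done, and on a verification failure breaks out early so the INOUT *src_size argument reports how much was actually copied. A compact standalone sketch of that accounting pattern follows; toy_copy_page() is a hypothetical stand-in for the per-page fault-and-copy work.

#include <stdio.h>
#include <string.h>

#define TOY_PAGE_SIZE 4096u   /* stands in for PAGE_SIZE */

/* Hypothetical per-page copy that can fail partway through, the way a
 * vm_map_verify() failure ends vm_fault_copy() early. */
static int toy_copy_page(const char *src, char *dst, unsigned page_index)
{
    if (page_index == 2)
        return -1;                       /* simulate a verification failure */
    memcpy(dst, src, TOY_PAGE_SIZE);
    return 0;
}

int main(void)
{
    static char src[4 * TOY_PAGE_SIZE], dst[4 * TOY_PAGE_SIZE];
    unsigned size = sizeof(src);          /* requested size (INOUT) */
    unsigned amount_done = 0;

    while (amount_done != size) {
        if (toy_copy_page(src + amount_done, dst + amount_done,
                          amount_done / TOY_PAGE_SIZE) != 0)
            break;                        /* stop early; report what we did */
        amount_done += TOY_PAGE_SIZE;
    }

    size = amount_done;                   /* like *src_size = amount_done */
    printf("copied %u of %lu bytes\n", size, (unsigned long)sizeof(src));
    return 0;
}
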
1999
2000
2001
2002
2003
2004#ifdef notdef
2005
2006/*
2007 * Routine: vm_fault_page_overwrite
2008 *
2009 * Description:
2010 * A form of vm_fault_page that assumes that the
2011 * resulting page will be overwritten in its entirety,
2012 * making it unnecessary to obtain the correct *contents*
2013 * of the page.
2014 *
2015 * Implementation:
2016 * XXX Untested. Also unused. Eventually, this technology
2017 * could be used in vm_fault_copy() to advantage.
2018 */
2019vm_fault_return_t vm_fault_page_overwrite(dst_object, dst_offset, result_page)
2020 vm_object_t dst_object;
2021 vm_offset_t dst_offset;
2022 vm_page_t *result_page; /* OUT */
2023{
2024 vm_page_t dst_page;
2025
2026#define interruptible FALSE((boolean_t) 0) /* XXX */
2027
2028 while (TRUE((boolean_t) 1)) {
2029 /*
2030 * Look for a page at this offset
2031 */
2032
2033 while ((dst_page = vm_page_lookup(dst_object, dst_offset))
2034 == VM_PAGE_NULL((vm_page_t) 0)) {
2035 /*
2036 * No page, no problem... just allocate one.
2037 */
2038
2039 dst_page = vm_page_alloc(dst_object, dst_offset);
2040 if (dst_page == VM_PAGE_NULL((vm_page_t) 0)) {
2041 vm_object_unlock(dst_object);
2042 VM_PAGE_WAIT((void (*)()) 0)vm_page_wait((void (*)()) 0);
2043 vm_object_lock(dst_object);
2044 continue;
2045 }
2046
2047 /*
2048 * Pretend that the memory manager
2049 * write-protected the page.
2050 *
2051 * Note that we will be asking for write
2052 * permission without asking for the data
2053 * first.
2054 */
2055
2056 dst_page->overwriting = TRUE((boolean_t) 1);
2057 dst_page->page_lock = VM_PROT_WRITE((vm_prot_t) 0x02);
2058 dst_page->absent = TRUE((boolean_t) 1);
2059 dst_object->absent_count++;
2060
2061 break;
2062
2063 /*
2064 * When we bail out, we might have to throw
2065 * away the page created here.
2066 */
2067
2068#define DISCARD_PAGE \
2069 MACRO_BEGIN({ \
2070 vm_object_lock(dst_object); \
2071 dst_page = vm_page_lookup(dst_object, dst_offset); \
2072 if ((dst_page != VM_PAGE_NULL((vm_page_t) 0)) && dst_page->overwriting) \
2073 VM_PAGE_FREE(dst_page)({ ; vm_page_free(dst_page); ; }); \
2074 vm_object_unlock(dst_object); \
2075 MACRO_END})
2076 }
2077
2078 /*
2079 * If the page is write-protected...
2080 */
2081
2082 if (dst_page->page_lock & VM_PROT_WRITE((vm_prot_t) 0x02)) {
2083 /*
2084 * ... and an unlock request hasn't been sent
2085 */
2086
2087 if ( ! (dst_page->unlock_request & VM_PROT_WRITE((vm_prot_t) 0x02))) {
2088 vm_prot_t u;
2089 kern_return_t rc;
2090
2091 /*
2092 * ... then send one now.
2093 */
2094
2095 if (!dst_object->pager_ready) {
2096 vm_object_assert_wait(dst_object,
2097 VM_OBJECT_EVENT_PAGER_READY,
2098 interruptible);
2099 vm_object_unlock(dst_object);
2100 thread_block((void (*)()) 0);
2101 if (current_thread()(active_threads[(0)])->wait_result !=
2102 THREAD_AWAKENED0) {
2103 DISCARD_PAGE;
2104 return(VM_FAULT_INTERRUPTED2);
2105 }
2106 continue;
2107 }
2108
2109 u = dst_page->unlock_request |= VM_PROT_WRITE((vm_prot_t) 0x02);
2110 vm_object_unlock(dst_object);
2111
2112 if ((rc = memory_object_data_unlock(
2113 dst_object->pager,
2114 dst_object->pager_request,
2115 dst_offset + dst_object->paging_offset,
2116 PAGE_SIZE(1 << 12),
2117 u)) != KERN_SUCCESS0) {
2118 printf("vm_object_overwrite: memory_object_data_unlock failed\n");
2119 DISCARD_PAGE;
2120 return((rc == MACH_SEND_INTERRUPTED0x10000007) ?
2121 VM_FAULT_INTERRUPTED2 :
2122 VM_FAULT_MEMORY_ERROR5);
2123 }
2124 vm_object_lock(dst_object);
2125 continue;
2126 }
2127
2128 /* ... fall through to wait below */
2129 } else {
2130 /*
2131 * If the page isn't being used for other
2132 * purposes, then we're done.
2133 */
2134 if ( ! (dst_page->busy || dst_page->absent || dst_page->error) )
2135 break;
2136 }
2137
2138 PAGE_ASSERT_WAIT(dst_page, interruptible);
2139 vm_object_unlock(dst_object);
2140 thread_block((void (*)()) 0);
2141 if (current_thread()(active_threads[(0)])->wait_result != THREAD_AWAKENED0) {
2142 DISCARD_PAGE;
2143 return(VM_FAULT_INTERRUPTED2);
2144 }
2145 }
2146
2147 *result_page = dst_page;
2148 return(VM_FAULT_SUCCESS0);
2149
2150#undef interruptible
2151#undef DISCARD_PAGE
2152}
2153
2154#endif /* notdef */