File:        obj-scan-build/../vm/vm_fault.c
Location:    line 552, column 6
Description: Access to field 'task' results in a dereference of a null pointer
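
Note: the flagged expression is the current_task()->reactivations++ statement
at line 552 of the listing below.  current_task() expands to
current_thread()->task without a THREAD_NULL check, and faults can occur
before any thread is running (see the comment above vm_stat_sample), so the
analyzer assumes current_thread() may still be null here.  A minimal guarded
form of the counter update, mirroring the THREAD_NULL check vm_stat_sample
already performs, might look like the sketch below (the local variable
"thread" is illustrative; this is not the project's actual fix):

    thread_t thread = current_thread();
    if (thread != THREAD_NULL)
            thread->task->reactivations++;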
1 | /*
2 |  * Mach Operating System
3 |  * Copyright (c) 1994,1990,1989,1988,1987 Carnegie Mellon University.
4 |  * Copyright (c) 1993,1994 The University of Utah and
5 |  * the Computer Systems Laboratory (CSL).
6 |  * All rights reserved.
7 |  *
8 |  * Permission to use, copy, modify and distribute this software and its
9 |  * documentation is hereby granted, provided that both the copyright
10 |  * notice and this permission notice appear in all copies of the
11 |  * software, derivative works or modified versions, and any portions
12 |  * thereof, and that both notices appear in supporting documentation.
13 |  *
14 |  * CARNEGIE MELLON, THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF
15 |  * THIS SOFTWARE IN ITS "AS IS" CONDITION, AND DISCLAIM ANY LIABILITY
16 |  * OF ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF
17 |  * THIS SOFTWARE.
18 |  *
19 |  * Carnegie Mellon requests users of this software to return to
20 |  *
21 |  *  Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
22 |  *  School of Computer Science
23 |  *  Carnegie Mellon University
24 |  *  Pittsburgh PA 15213-3890
25 |  *
26 |  * any improvements or extensions that they make and grant Carnegie Mellon
27 |  * the rights to redistribute these changes.
28 |  */
29 | /*
30 |  *      File:   vm_fault.c
31 |  *      Author: Avadis Tevanian, Jr., Michael Wayne Young
32 |  *
33 |  *      Page fault handling module.
34 |  */
35 |
36 | #include <kern/printf.h>
37 | #include <vm/vm_fault.h>
38 | #include <mach/kern_return.h>
39 | #include <mach/message.h>       /* for error codes */
40 | #include <kern/counters.h>
41 | #include <kern/debug.h>
42 | #include <kern/thread.h>
43 | #include <kern/sched_prim.h>
44 | #include <vm/vm_map.h>
45 | #include <vm/vm_object.h>
46 | #include <vm/vm_page.h>
47 | #include <vm/pmap.h>
48 | #include <mach/vm_statistics.h>
49 | #include <vm/vm_pageout.h>
50 | #include <mach/vm_param.h>
51 | #include <mach/memory_object.h>
52 | #include <vm/memory_object_user.user.h>
53 |                                 /* For memory_object_data_{request,unlock} */
54 | #include <kern/macro_help.h>
55 | #include <kern/slab.h>
56 |
57 | #if MACH_PCSAMPLE
58 | #include <kern/pc_sample.h>
59 | #endif
60 |
61 |
62 |
63 | /*
64 |  *      State needed by vm_fault_continue.
65 |  *      This is a little hefty to drop directly
66 |  *      into the thread structure.
67 |  */
68 | typedef struct vm_fault_state {
69 |         struct vm_map *vmf_map;
70 |         vm_offset_t vmf_vaddr;
71 |         vm_prot_t vmf_fault_type;
72 |         boolean_t vmf_change_wiring;
73 |         void (*vmf_continuation)();
74 |         vm_map_version_t vmf_version;
75 |         boolean_t vmf_wired;
76 |         struct vm_object *vmf_object;
77 |         vm_offset_t vmf_offset;
78 |         vm_prot_t vmf_prot;
79 |
80 |         boolean_t vmfp_backoff;
81 |         struct vm_object *vmfp_object;
82 |         vm_offset_t vmfp_offset;
83 |         struct vm_page *vmfp_first_m;
84 |         vm_prot_t vmfp_access;
85 | } vm_fault_state_t;
86 |
87 | struct kmem_cache vm_fault_state_cache;
88 |
89 | int vm_object_absent_max = 50;
90 |
91 | boolean_t vm_fault_dirty_handling = FALSE;
92 | boolean_t vm_fault_interruptible = TRUE;
93 |
94 | boolean_t software_reference_bits = TRUE;
95 |
96 | #if MACH_KDB
97 | extern struct db_watchpoint *db_watchpoint_list;
98 | #endif /* MACH_KDB */
99 |
100 | /*
101 |  *      Routine:        vm_fault_init
102 |  *      Purpose:
103 |  *              Initialize our private data structures.
104 |  */
105 | void vm_fault_init(void)
106 | {
107 |         kmem_cache_init(&vm_fault_state_cache, "vm_fault_state",
108 |                         sizeof(vm_fault_state_t), 0, NULL, NULL, NULL, 0);
109 | }
110 |
111 | /*
112 |  *      Routine:        vm_fault_cleanup
113 |  *      Purpose:
114 |  *              Clean up the result of vm_fault_page.
115 |  *      Results:
116 |  *              The paging reference for "object" is released.
117 |  *              "object" is unlocked.
118 |  *              If "top_page" is not null, "top_page" is
119 |  *              freed and the paging reference for the object
120 |  *              containing it is released.
121 |  *
122 |  *      In/out conditions:
123 |  *              "object" must be locked.
124 |  */
125 | void
126 | vm_fault_cleanup(object, top_page)
127 |         vm_object_t object;
128 |         vm_page_t top_page;
129 | {
130 |         vm_object_paging_end(object);
131 |         vm_object_unlock(object);
132 |
133 |         if (top_page != VM_PAGE_NULL) {
134 |                 object = top_page->object;
135 |                 vm_object_lock(object);
136 |                 VM_PAGE_FREE(top_page);
137 |                 vm_object_paging_end(object);
138 |                 vm_object_unlock(object);
139 |         }
140 | }
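    | /*
    |  * [Annotation, not in the original source: a sketch of the calling
    |  * convention shared by vm_fault_page and vm_fault_cleanup, assuming
    |  * a caller that just wants the page resident and ignores the other
    |  * VM_FAULT_* return codes:
    |  *
    |  *      vm_object_lock(object);
    |  *      vm_object_paging_begin(object);
    |  *      kr = vm_fault_page(object, offset, VM_PROT_READ, FALSE, TRUE,
    |  *                         &prot, &result_page, &top_page,
    |  *                         FALSE, (void (*)()) 0);
    |  *      if (kr == VM_FAULT_SUCCESS) {
    |  *              ...use result_page, which is returned busy...
    |  *              PAGE_WAKEUP_DONE(result_page);
    |  *              vm_fault_cleanup(result_page->object, top_page);
    |  *      }
    |  */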
141 |
142 |
143 | #if     MACH_PCSAMPLE
144 | /*
145 |  *      Do PC sampling on current thread, assuming
146 |  *      that it is the thread taking this page fault.
147 |  *
148 |  *      Must check for THREAD_NULL, since faults
149 |  *      can occur before threads are running.
150 |  */
151 |
152 | #define vm_stat_sample(flavor) \
153 |     MACRO_BEGIN \
154 |       thread_t _thread_ = current_thread(); \
155 |  \
156 |       if (_thread_ != THREAD_NULL) \
157 |         take_pc_sample_macro(_thread_, (flavor)); \
158 |     MACRO_END
159 |
160 | #else
161 | #define vm_stat_sample(x)
162 | #endif /* MACH_PCSAMPLE */
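    | /*
    |  * [Annotation, not in the original source: the macro is invoked as a
    |  * statement, e.g. vm_stat_sample(SAMPLED_PC_VM_FAULTS_ANY);.  Note
    |  * that the THREAD_NULL guard above is exactly the check missing from
    |  * the bare current_task() accesses later in this file (e.g. lines
    |  * 252 and 552), which is what this report flags.]
    |  */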
163 |
164 |
165 |
166 | /*
167 |  *      Routine:        vm_fault_page
168 |  *      Purpose:
169 |  *              Find the resident page for the virtual memory
170 |  *              specified by the given virtual memory object
171 |  *              and offset.
172 |  *      Additional arguments:
173 |  *              The required permissions for the page are given
174 |  *              in "fault_type".  Desired permissions are included
175 |  *              in "protection".
176 |  *
177 |  *              If the desired page is known to be resident (for
178 |  *              example, because it was previously wired down), asserting
179 |  *              the "must_be_resident" parameter will speed the search.
180 |  *
181 |  *              If the operation can be interrupted (by thread_abort
182 |  *              or thread_terminate), then the "interruptible"
183 |  *              parameter should be asserted.
184 |  *
185 |  *      Results:
186 |  *              The page containing the proper data is returned
187 |  *              in "result_page".
188 |  *
189 |  *      In/out conditions:
190 |  *              The source object must be locked and referenced,
191 |  *              and must donate one paging reference.  The reference
192 |  *              is not affected.  The paging reference and lock are
193 |  *              consumed.
194 |  *
195 |  *              If the call succeeds, the object in which "result_page"
196 |  *              resides is left locked and holding a paging reference.
197 |  *              If this is not the original object, a busy page in the
198 |  *              original object is returned in "top_page", to prevent other
199 |  *              callers from pursuing this same data, along with a paging
200 |  *              reference for the original object.  The "top_page" should
201 |  *              be destroyed when this guarantee is no longer required.
202 |  *              The "result_page" is also left busy.  It is not removed
203 |  *              from the pageout queues.
204 |  */
205 | vm_fault_return_t vm_fault_page(first_object, first_offset,
206 |                                 fault_type, must_be_resident, interruptible,
207 |                                 protection,
208 |                                 result_page, top_page,
209 |                                 resume, continuation)
210 |  /* Arguments: */
211 |         vm_object_t first_object;       /* Object to begin search */
212 |         vm_offset_t first_offset;       /* Offset into object */
213 |         vm_prot_t fault_type;           /* What access is requested */
214 |         boolean_t must_be_resident;     /* Must page be resident? */
215 |         boolean_t interruptible;        /* May fault be interrupted? */
216 |  /* Modifies in place: */
217 |         vm_prot_t *protection;          /* Protection for mapping */
218 |  /* Returns: */
219 |         vm_page_t *result_page;         /* Page found, if successful */
220 |         vm_page_t *top_page;            /* Page in top object, if
221 |                                          * not result_page.
222 |                                          */
223 |  /* More arguments: */
224 |         boolean_t resume;               /* We are restarting. */
225 |         void (*continuation)();         /* Continuation for blocking. */
226 | {
227 |         vm_page_t m;
228 |         vm_object_t object;
229 |         vm_offset_t offset;
230 |         vm_page_t first_m;
231 |         vm_object_t next_object;
232 |         vm_object_t copy_object;
233 |         boolean_t look_for_page;
234 |         vm_prot_t access_required;
235 |
236 |         if (resume) {
237 |                 vm_fault_state_t *state =
238 |                         (vm_fault_state_t *) current_thread()->ith_othersaved.other;
239 |
240 |                 if (state->vmfp_backoff)
241 |                         goto after_block_and_backoff;
242 |
243 |                 object = state->vmfp_object;
244 |                 offset = state->vmfp_offset;
245 |                 first_m = state->vmfp_first_m;
246 |                 access_required = state->vmfp_access;
247 |                 goto after_thread_block;
248 |         }
249 |
250 |         vm_stat_sample(SAMPLED_PC_VM_FAULTS_ANY);
251 |         vm_stat.faults++;               /* needs lock XXX */
252 |         current_task()->faults++;
253 |
254 | /*
255 |  *      Recovery actions
256 |  */
257 | #define RELEASE_PAGE(m) \
258 |         MACRO_BEGIN \
259 |         PAGE_WAKEUP_DONE(m); \
260 |         vm_page_lock_queues(); \
261 |         if (!m->active && !m->inactive) \
262 |                 vm_page_activate(m); \
263 |         vm_page_unlock_queues(); \
264 |         MACRO_END
265 |
266 |         if (vm_fault_dirty_handling
267 | #if     MACH_KDB
268 |         /*
269 |          *      If there are watchpoints set, then
270 |          *      we don't want to give away write permission
271 |          *      on a read fault.  Make the task write fault,
272 |          *      so that the watchpoint code notices the access.
273 |          */
274 |             || db_watchpoint_list
275 | #endif /* MACH_KDB */
276 |             ) {
277 |                 /*
278 |                  *      If we aren't asking for write permission,
279 |                  *      then don't give it away.  We're using write
280 |                  *      faults to set the dirty bit.
281 |                  */
282 |                 if (!(fault_type & VM_PROT_WRITE))
283 |                         *protection &= ~VM_PROT_WRITE;
284 |         }
285 |
286 |         if (!vm_fault_interruptible)
287 |                 interruptible = FALSE;
288 |
289 |         /*
290 |          *      INVARIANTS (through entire routine):
291 |          *
292 |          *      1)      At all times, we must either have the object
293 |          *              lock or a busy page in some object to prevent
294 |          *              some other thread from trying to bring in
295 |          *              the same page.
296 |          *
297 |          *              Note that we cannot hold any locks during the
298 |          *              pager access or when waiting for memory, so
299 |          *              we use a busy page then.
300 |          *
301 |          *              Note also that we aren't as concerned about more than
302 |          *              one thread attempting to memory_object_data_unlock
303 |          *              the same page at once, so we don't hold the page
304 |          *              as busy then, but do record the highest unlock
305 |          *              value so far.  [Unlock requests may also be delivered
306 |          *              out of order.]
307 |          *
308 |          *      2)      To prevent another thread from racing us down the
309 |          *              shadow chain and entering a new page in the top
310 |          *              object before we do, we must keep a busy page in
311 |          *              the top object while following the shadow chain.
312 |          *
313 |          *      3)      We must increment paging_in_progress on any object
314 |          *              for which we have a busy page, to prevent
315 |          *              vm_object_collapse from removing the busy page
316 |          *              without our noticing.
317 |          *
318 |          *      4)      We leave busy pages on the pageout queues.
319 |          *              If the pageout daemon comes across a busy page,
320 |          *              it will remove the page from the pageout queues.
321 |          */
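    |         /*
    |          * [Annotation, not in the original source: invariant 1's
    |          * busy-page protocol, roughly as it recurs through this routine:
    |          *
    |          *      m->busy = TRUE;                 under the object lock
    |          *      vm_object_unlock(object);
    |          *      ...pager access or allocation that may block...
    |          *      vm_object_lock(object);
    |          *      PAGE_WAKEUP_DONE(m);            clear busy, wake waiters
    |          *
    |          * Other threads finding m->busy block via
    |          * PAGE_ASSERT_WAIT(m, interruptible), as in the lookup loop
    |          * below.]
    |          */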
322 |
323 |         /*
324 |          *      Search for the page at object/offset.
325 |          */
326 |
327 |         object = first_object;
328 |         offset = first_offset;
329 |         first_m = VM_PAGE_NULL;
330 |         access_required = fault_type;
331 |
332 |         /*
333 |          *      See whether this page is resident
334 |          */
335 |
336 |         while (TRUE) {
337 |                 m = vm_page_lookup(object, offset);
338 |                 if (m != VM_PAGE_NULL) {
339 |                         /*
340 |                          *      If the page is being brought in,
341 |                          *      wait for it and then retry.
342 |                          *
343 |                          *      A possible optimization: if the page
344 |                          *      is known to be resident, we can ignore
345 |                          *      pages that are absent (regardless of
346 |                          *      whether they're busy).
347 |                          */
348 |
349 |                         if (m->busy) {
350 |                                 kern_return_t wait_result;
351 |
352 |                                 PAGE_ASSERT_WAIT(m, interruptible);
353 |                                 vm_object_unlock(object);
354 |                                 if (continuation != (void (*)()) 0) {
355 |                                         vm_fault_state_t *state =
356 |                                                 (vm_fault_state_t *) current_thread()->ith_othersaved.other;
357 |
358 |                                         /*
359 |                                          *      Save variables in case
360 |                                          *      thread_block discards
361 |                                          *      our kernel stack.
362 |                                          */
363 |
364 |                                         state->vmfp_backoff = FALSE;
365 |                                         state->vmfp_object = object;
366 |                                         state->vmfp_offset = offset;
367 |                                         state->vmfp_first_m = first_m;
368 |                                         state->vmfp_access =
369 |                                                 access_required;
370 |                                         state->vmf_prot = *protection;
371 |
372 |                                         counter(c_vm_fault_page_block_busy_user++);
373 |                                         thread_block(continuation);
374 |                                 } else
375 |                                 {
376 |                                         counter(c_vm_fault_page_block_busy_kernel++);
377 |                                         thread_block((void (*)()) 0);
378 |                                 }
379 |                             after_thread_block:
380 |                                 wait_result = current_thread()->wait_result;
381 |                                 vm_object_lock(object);
382 |                                 if (wait_result != THREAD_AWAKENED) {
383 |                                         vm_fault_cleanup(object, first_m);
384 |                                         if (wait_result == THREAD_RESTART)
385 |                                                 return(VM_FAULT_RETRY);
386 |                                         else
387 |                                                 return(VM_FAULT_INTERRUPTED);
388 |                                 }
389 |                                 continue;
390 |                         }
391 |
392 |                         /*
393 |                          *      If the page is in error, give up now.
394 |                          */
395 |
396 |                         if (m->error) {
397 |                                 VM_PAGE_FREE(m);
398 |                                 vm_fault_cleanup(object, first_m);
399 |                                 return(VM_FAULT_MEMORY_ERROR);
400 |                         }
401 |
402 |                         /*
403 |                          *      If the page isn't busy, but is absent,
404 |                          *      then it was deemed "unavailable".
405 |                          */
406 |
407 |                         if (m->absent) {
408 |                                 /*
409 |                                  *      Remove the non-existent page (unless it's
410 |                                  *      in the top object) and move on down to the
411 |                                  *      next object (if there is one).
412 |                                  */
413 |
414 |                                 offset += object->shadow_offset;
415 |                                 access_required = VM_PROT_READ;
416 |                                 next_object = object->shadow;
417 |                                 if (next_object == VM_OBJECT_NULL) {
418 |                                         vm_page_t real_m;
419 |
420 |                                         assert(!must_be_resident);
421 |
422 |                                         /*
423 |                                          * Absent page at bottom of shadow
424 |                                          * chain; zero fill the page we left
425 |                                          * busy in the first object, and flush
426 |                                          * the absent page.  But first we
427 |                                          * need to allocate a real page.
428 |                                          */
429 |
430 |                                         real_m = vm_page_grab(!object->internal);
431 |                                         if (real_m == VM_PAGE_NULL) {
432 |                                                 vm_fault_cleanup(object, first_m);
433 |                                                 return(VM_FAULT_MEMORY_SHORTAGE);
434 |                                         }
435 |
436 |                                         if (object != first_object) {
437 |                                                 VM_PAGE_FREE(m);
438 |                                                 vm_object_paging_end(object);
439 |                                                 vm_object_unlock(object);
440 |                                                 object = first_object;
441 |                                                 offset = first_offset;
442 |                                                 m = first_m;
443 |                                                 first_m = VM_PAGE_NULL;
444 |                                                 vm_object_lock(object);
445 |                                         }
446 |
447 |                                         VM_PAGE_FREE(m);
448 |                                         assert(real_m->busy);
449 |                                         vm_page_lock_queues();
450 |                                         vm_page_insert(real_m, object, offset);
451 |                                         vm_page_unlock_queues();
452 |                                         m = real_m;
453 |
454 |                                         /*
455 |                                          *  Drop the lock while zero filling
456 |                                          *  page.  Then break because this
457 |                                          *  is the page we wanted.  Checking
458 |                                          *  the page lock is a waste of time;
459 |                                          *  this page was either absent or
460 |                                          *  newly allocated -- in both cases
461 |                                          *  it can't be page locked by a pager.
462 |                                          */
463 |                                         vm_object_unlock(object);
464 |
465 |                                         vm_page_zero_fill(m);
466 |
467 |                                         vm_stat_sample(SAMPLED_PC_VM_ZFILL_FAULTS);
468 |
469 |                                         vm_stat.zero_fill_count++;
470 |                                         current_task()->zero_fills++;
471 |                                         vm_object_lock(object);
472 |                                         pmap_clear_modify(m->phys_addr);
473 |                                         break;
474 |                                 } else {
475 |                                         if (must_be_resident) {
476 |                                                 vm_object_paging_end(object);
477 |                                         } else if (object != first_object) {
478 |                                                 vm_object_paging_end(object);
479 |                                                 VM_PAGE_FREE(m);
480 |                                         } else {
481 |                                                 first_m = m;
482 |                                                 m->absent = FALSE;
483 |                                                 vm_object_absent_release(object);
484 |                                                 m->busy = TRUE;
485 |
486 |                                                 vm_page_lock_queues();
487 |                                                 VM_PAGE_QUEUES_REMOVE(m);
488 |                                                 vm_page_unlock_queues();
489 |                                         }
490 |                                         vm_object_lock(next_object);
491 |                                         vm_object_unlock(object);
492 |                                         object = next_object;
493 |                                         vm_object_paging_begin(object);
494 |                                         continue;
495 |                                 }
496 |                         }
497 |
498 |                         /*
499 |                          *      If the desired access to this page has
500 |                          *      been locked out, request that it be unlocked.
501 |                          */
502 |
503 |                         if (access_required & m->page_lock) {
504 |                                 if ((access_required & m->unlock_request) != access_required) {
505 |                                         vm_prot_t new_unlock_request;
506 |                                         kern_return_t rc;
507 |
508 |                                         if (!object->pager_ready) {
509 |                                                 vm_object_assert_wait(object,
510 |                                                         VM_OBJECT_EVENT_PAGER_READY,
511 |                                                         interruptible);
512 |                                                 goto block_and_backoff;
513 |                                         }
514 |
515 |                                         new_unlock_request = m->unlock_request =
516 |                                                 (access_required | m->unlock_request);
517 |                                         vm_object_unlock(object);
518 |                                         if ((rc = memory_object_data_unlock(
519 |                                                 object->pager,
520 |                                                 object->pager_request,
521 |                                                 offset + object->paging_offset,
522 |                                                 PAGE_SIZE,
523 |                                                 new_unlock_request))
524 |                                              != KERN_SUCCESS) {
525 |                                                 printf("vm_fault: memory_object_data_unlock failed\n");
526 |                                                 vm_object_lock(object);
527 |                                                 vm_fault_cleanup(object, first_m);
528 |                                                 return((rc == MACH_SEND_INTERRUPTED) ?
529 |                                                         VM_FAULT_INTERRUPTED :
530 |                                                         VM_FAULT_MEMORY_ERROR);
531 |                                         }
532 |                                         vm_object_lock(object);
533 |                                         continue;
534 |                                 }
535 |
536 |                                 PAGE_ASSERT_WAIT(m, interruptible);
537 |                                 goto block_and_backoff;
538 |                         }
539 |
540 |                         /*
541 |                          *      We mark the page busy and leave it on
542 |                          *      the pageout queues.  If the pageout
543 |                          *      daemon comes across it, then it will
544 |                          *      remove the page.
545 |                          */
546 |
547 |                         if (!software_reference_bits) {
548 |                                 vm_page_lock_queues();
549 |                                 if (m->inactive) {
550 |                                         vm_stat_sample(SAMPLED_PC_VM_REACTIVATION_FAULTS);
551 |                                         vm_stat.reactivations++;
552 |                                         current_task()->reactivations++;
553 |                                 }
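    |                                 /*
    |                                  * [Annotation, not in the original
    |                                  * source: line 552 above is the access
    |                                  * flagged by this report.  current_task()
    |                                  * expands to current_thread()->task with
    |                                  * no THREAD_NULL guard, and faults can
    |                                  * occur before threads are running (see
    |                                  * the vm_stat_sample comment), so the
    |                                  * ->task access can dereference a null
    |                                  * pointer.]
    |                                  */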
554 |
555 |                                 VM_PAGE_QUEUES_REMOVE(m);
556 |                                 vm_page_unlock_queues();
557 |                         }
558 |
559 |                         assert(!m->busy);
560 |                         m->busy = TRUE;
561 |                         assert(!m->absent);
562 |                         break;
563 |                 }
564 |
565 |                 look_for_page =
566 |                         (object->pager_created)
567 | #if     MACH_PAGEMAP
568 |                         && (vm_external_state_get(object->existence_info, offset + object->paging_offset) !=
569 |                             VM_EXTERNAL_STATE_ABSENT)
570 | #endif /* MACH_PAGEMAP */
571 |                         ;
572 |
573 |                 if ((look_for_page || (object == first_object))
574 |                     && !must_be_resident) {
575 |                         /*
576 |                          *      Allocate a new page for this object/offset
577 |                          *      pair.
578 |                          */
579 |
580 |                         m = vm_page_grab_fictitious();
581 |                         if (m == VM_PAGE_NULL) {
582 |                                 vm_fault_cleanup(object, first_m);
583 |                                 return(VM_FAULT_FICTITIOUS_SHORTAGE);
584 |                         }
585 |
586 |                         vm_page_lock_queues();
587 |                         vm_page_insert(m, object, offset);
588 |                         vm_page_unlock_queues();
589 |                 }
590 |
591 |                 if (look_for_page && !must_be_resident) {
592 |                         kern_return_t rc;
593 |
594 |                         /*
595 |                          *      If the memory manager is not ready, we
596 |                          *      cannot make requests.
597 |                          */
598 |                         if (!object->pager_ready) {
599 |                                 vm_object_assert_wait(object,
600 |                                         VM_OBJECT_EVENT_PAGER_READY,
601 |                                         interruptible);
602 |                                 VM_PAGE_FREE(m);
603 |                                 goto block_and_backoff;
604 |                         }
605 |
606 |                         if (object->internal) {
607 |                                 /*
608 |                                  *      Requests to the default pager
609 |                                  *      must reserve a real page in advance,
610 |                                  *      because the pager's data-provided
611 |                                  *      won't block for pages.
612 |                                  */
613 |
614 |                                 if (m->fictitious && !vm_page_convert(m, FALSE)) {
615 |                                         VM_PAGE_FREE(m);
616 |                                         vm_fault_cleanup(object, first_m);
617 |                                         return(VM_FAULT_MEMORY_SHORTAGE);
618 |                                 }
619 |                         } else if (object->absent_count >
620 |                                         vm_object_absent_max) {
621 |                                 /*
622 |                                  *      If there are too many outstanding page
623 |                                  *      requests pending on this object, we
624 |                                  *      wait for them to be resolved now.
625 |                                  */
626 |
627 |                                 vm_object_absent_assert_wait(object, interruptible);
628 |                                 VM_PAGE_FREE(m);
629 |                                 goto block_and_backoff;
630 |                         }
631 |
632 |                         /*
633 |                          *      Indicate that the page is waiting for data
634 |                          *      from the memory manager.
635 |                          */
636 |
637 |                         m->absent = TRUE;
638 |                         object->absent_count++;
639 |
640 |                         /*
641 |                          *      We have a busy page, so we can
642 |                          *      release the object lock.
643 |                          */
644 |                         vm_object_unlock(object);
645 |
646 |                         /*
647 |                          *      Call the memory manager to retrieve the data.
648 |                          */
649 |
650 |                         vm_stat.pageins++;
651 |                         vm_stat_sample(SAMPLED_PC_VM_PAGEIN_FAULTS);
652 |                         current_task()->pageins++;
653 |
654 |                         if ((rc = memory_object_data_request(object->pager,
655 |                                 object->pager_request,
656 |                                 m->offset + object->paging_offset,
657 |                                 PAGE_SIZE, access_required)) != KERN_SUCCESS) {
658 |                                 if (rc != MACH_SEND_INTERRUPTED)
659 |                                         printf("%s(0x%p, 0x%p, 0x%lx, 0x%x, 0x%x) failed, %x\n",
660 |                                                 "memory_object_data_request",
661 |                                                 object->pager,
662 |                                                 object->pager_request,
663 |                                                 m->offset + object->paging_offset,
664 |                                                 PAGE_SIZE, access_required, rc);
665 |                                 /*
666 |                                  *      Don't want to leave a busy page around,
667 |                                  *      but the data request may have blocked,
668 |                                  *      so check if it's still there and busy.
669 |                                  */
670 |                                 vm_object_lock(object);
671 |                                 if (m == vm_page_lookup(object, offset) &&
672 |                                     m->absent && m->busy)
673 |                                         VM_PAGE_FREE(m);
674 |                                 vm_fault_cleanup(object, first_m);
675 |                                 return((rc == MACH_SEND_INTERRUPTED) ?
676 |                                         VM_FAULT_INTERRUPTED :
677 |                                         VM_FAULT_MEMORY_ERROR);
678 |                         }
679 |
680 |                         /*
681 |                          *      Retry with same object/offset, since new data may
682 |                          *      be in a different page (i.e., m is meaningless at
683 |                          *      this point).
684 |                          */
685 |                         vm_object_lock(object);
686 |                         continue;
687 |                 }
688 |
689 |                 /*
690 |                  * For the XP system, the only case in which we get here is if
691 |                  * object has no pager (or unwiring).  If the pager doesn't
692 |                  * have the page this is handled in the m->absent case above
693 |                  * (and if you change things here you should look above).
694 |                  */
695 |                 if (object == first_object)
696 |                         first_m = m;
697 |                 else
698 |                 {
699 |                         assert(m == VM_PAGE_NULL);
700 |                 }
701 |
702 |                 /*
703 |                  *      Move on to the next object.  Lock the next
704 |                  *      object before unlocking the current one.
705 |                  */
706 |                 access_required = VM_PROT_READ;
707 |
708 |                 offset += object->shadow_offset;
709 |                 next_object = object->shadow;
710 |                 if (next_object == VM_OBJECT_NULL) {
711 |                         assert(!must_be_resident);
712 |
713 |                         /*
714 |                          *      If there's no object left, fill the page
715 |                          *      in the top object with zeros.  But first we
716 |                          *      need to allocate a real page.
717 |                          */
718 |
719 |                         if (object != first_object) {
720 |                                 vm_object_paging_end(object);
721 |                                 vm_object_unlock(object);
722 |
723 |                                 object = first_object;
724 |                                 offset = first_offset;
725 |                                 vm_object_lock(object);
726 |                         }
727 |
728 |                         m = first_m;
729 |                         assert(m->object == object);
730 |                         first_m = VM_PAGE_NULL;
731 |
732 |                         if (m->fictitious && !vm_page_convert(m, !object->internal)) {
733 |                                 VM_PAGE_FREE(m);
734 |                                 vm_fault_cleanup(object, VM_PAGE_NULL);
735 |                                 return(VM_FAULT_MEMORY_SHORTAGE);
736 |                         }
737 |
738 |                         vm_object_unlock(object);
739 |                         vm_page_zero_fill(m);
740 |                         vm_stat_sample(SAMPLED_PC_VM_ZFILL_FAULTS);
741 |                         vm_stat.zero_fill_count++;
742 |                         current_task()->zero_fills++;
743 |                         vm_object_lock(object);
744 |                         pmap_clear_modify(m->phys_addr);
745 |                         break;
746 |                 }
747 |                 else {
748 |                         vm_object_lock(next_object);
749 |                         if ((object != first_object) || must_be_resident)
750 |                                 vm_object_paging_end(object);
751 |                         vm_object_unlock(object);
752 |                         object = next_object;
753 |                         vm_object_paging_begin(object);
754 |                 }
755 |         }
756 |
757 |         /*
758 |          *      PAGE HAS BEEN FOUND.
759 |          *
760 |          *      This page (m) is:
761 |          *              busy, so that we can play with it;
762 |          *              not absent, so that nobody else will fill it;
763 |          *              possibly eligible for pageout;
764 |          *
765 |          *      The top-level page (first_m) is:
766 |          *              VM_PAGE_NULL if the page was found in the
767 |          *               top-level object;
768 |          *              busy, not absent, and ineligible for pageout.
769 |          *
770 |          *      The current object (object) is locked.  A paging
771 |          *      reference is held for the current and top-level
772 |          *      objects.
773 |          */
774 |
775 | #if     EXTRA_ASSERTIONS
776 |         assert(m->busy && !m->absent);
777 |         assert((first_m == VM_PAGE_NULL) ||
778 |                 (first_m->busy && !first_m->absent &&
779 |                  !first_m->active && !first_m->inactive));
780 | #endif /* EXTRA_ASSERTIONS */
781 |
782 |         /*
783 |          *      If the page is being written, but isn't
784 |          *      already owned by the top-level object,
785 |          *      we have to copy it into a new page owned
786 |          *      by the top-level object.
787 |          */
788 |
789 |         if (object != first_object) {
790 |                 /*
791 |                  *      We only really need to copy if we
792 |                  *      want to write it.
793 |                  */
794 |
795 |                 if (fault_type & VM_PROT_WRITE) {
796 |                         vm_page_t copy_m;
797 |
798 |                         assert(!must_be_resident);
799 |
800 |                         /*
801 |                          *      If we try to collapse first_object at this
802 |                          *      point, we may deadlock when we try to get
803 |                          *      the lock on an intermediate object (since we
804 |                          *      have the bottom object locked).  We can't
805 |                          *      unlock the bottom object, because the page
806 |                          *      we found may move (by collapse) if we do.
807 |                          *
808 |                          *      Instead, we first copy the page.  Then, when
809 |                          *      we have no more use for the bottom object,
810 |                          *      we unlock it and try to collapse.
811 |                          *
812 |                          *      Note that we copy the page even if we didn't
813 |                          *      need to... that's the breaks.
814 |                          */
815 |
816 |                         /*
817 |                          *      Allocate a page for the copy
818 |                          */
819 |                         copy_m = vm_page_grab(!first_object->internal);
820 |                         if (copy_m == VM_PAGE_NULL) {
821 |                                 RELEASE_PAGE(m);
822 |                                 vm_fault_cleanup(object, first_m);
823 |                                 return(VM_FAULT_MEMORY_SHORTAGE);
824 |                         }
825 |
826 |                         vm_object_unlock(object);
827 |                         vm_page_copy(m, copy_m);
828 |                         vm_object_lock(object);
829 |
830 |                         /*
831 |                          *      If another map is truly sharing this
832 |                          *      page with us, we have to flush all
833 |                          *      uses of the original page, since we
834 |                          *      can't distinguish those which want the
835 |                          *      original from those which need the
836 |                          *      new copy.
837 |                          *
838 |                          *      XXXO If we know that only one map has
839 |                          *      access to this page, then we could
840 |                          *      avoid the pmap_page_protect() call.
841 |                          */
842 |
843 |                         vm_page_lock_queues();
844 |                         vm_page_deactivate(m);
845 |                         pmap_page_protect(m->phys_addr, VM_PROT_NONE);
846 |                         vm_page_unlock_queues();
847 |
848 |                         /*
849 |                          *      We no longer need the old page or object.
850 |                          */
851 |
852 |                         PAGE_WAKEUP_DONE(m);
853 |                         vm_object_paging_end(object);
854 |                         vm_object_unlock(object);
855 |
856 |                         vm_stat.cow_faults++;
857 |                         vm_stat_sample(SAMPLED_PC_VM_COW_FAULTS);
858 |                         current_task()->cow_faults++;
859 |                         object = first_object;
860 |                         offset = first_offset;
861 |
862 |                         vm_object_lock(object);
863 |                         VM_PAGE_FREE(first_m);
864 |                         first_m = VM_PAGE_NULL;
865 |                         assert(copy_m->busy);
866 |                         vm_page_lock_queues();
867 |                         vm_page_insert(copy_m, object, offset);
868 |                         vm_page_unlock_queues();
869 |                         m = copy_m;
870 |
871 |                         /*
872 |                          *      Now that we've gotten the copy out of the
873 |                          *      way, let's try to collapse the top object.
874 |                          *      But we have to play ugly games with
875 |                          *      paging_in_progress to do that...
876 |                          */
877 |
878 |                         vm_object_paging_end(object);
879 |                         vm_object_collapse(object);
880 |                         vm_object_paging_begin(object);
881 |                 }
882 |                 else {
883 |                         *protection &= (~VM_PROT_WRITE);
884 |                 }
885 |         }
886 |
887 |         /*
888 |          *      Now check whether the page needs to be pushed into the
889 |          *      copy object.  The use of asymmetric copy on write for
890 |          *      shared temporary objects means that we may do two copies to
891 |          *      satisfy the fault; one above to get the page from a
892 |          *      shadowed object, and one here to push it into the copy.
893 |          */
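    |         /*
    |          * [Annotation, not in the original source: a concrete two-copy
    |          * case, assuming a write fault on a first_object that both
    |          * shadows another object and has a copy object:
    |          *
    |          *      first_object->shadow --> object       (page pulled up above)
    |          *      first_object->copy   --> copy_object  (page pushed below)
    |          *
    |          * The data is first copied up from "object" into "first_object";
    |          * then its pre-write contents are pushed into "copy_object" so
    |          * that holders of the copy continue to see the old data.]
    |          */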
894 |
895 |         while ((copy_object = first_object->copy) != VM_OBJECT_NULL) {
896 |                 vm_offset_t copy_offset;
897 |                 vm_page_t copy_m;
898 |
899 |                 /*
900 |                  *      If the page is being written, but hasn't been
901 |                  *      copied to the copy-object, we have to copy it there.
902 |                  */
903 |
904 |                 if ((fault_type & VM_PROT_WRITE) == 0) {
905 |                         *protection &= ~VM_PROT_WRITE;
906 |                         break;
907 |                 }
908 |
909 |                 /*
910 |                  *      If the page was guaranteed to be resident,
911 |                  *      we must have already performed the copy.
912 |                  */
913 |
914 |                 if (must_be_resident)
915 |                         break;
916 |
917 |                 /*
918 |                  *      Try to get the lock on the copy_object.
919 |                  */
920 |                 if (!vm_object_lock_try(copy_object)) {
921 |                         vm_object_unlock(object);
922 |
923 |                         simple_lock_pause();    /* wait a bit */
924 |
925 |                         vm_object_lock(object);
926 |                         continue;
927 |                 }
928 |
929 |                 /*
930 |                  *      Make another reference to the copy-object,
931 |                  *      to keep it from disappearing during the
932 |                  *      copy.
933 |                  */
934 |                 assert(copy_object->ref_count > 0);
935 |                 copy_object->ref_count++;
936 |
937 |                 /*
938 |                  *      Does the page exist in the copy?
939 |                  */
940 |                 copy_offset = first_offset - copy_object->shadow_offset;
941 |                 copy_m = vm_page_lookup(copy_object, copy_offset);
942 |                 if (copy_m != VM_PAGE_NULL) {
943 |                         if (copy_m->busy) {
944 |                                 /*
945 |                                  *      If the page is being brought
946 |                                  *      in, wait for it and then retry.
947 |                                  */
948 |                                 PAGE_ASSERT_WAIT(copy_m, interruptible);
949 |                                 RELEASE_PAGE(m);
950 |                                 copy_object->ref_count--;
951 |                                 assert(copy_object->ref_count > 0);
952 |                                 vm_object_unlock(copy_object);
953 |                                 goto block_and_backoff;
954 |                         }
955 |                 }
956 |                 else {
957 |                         /*
958 |                          *      Allocate a page for the copy
959 |                          */
960 |                         copy_m = vm_page_alloc(copy_object, copy_offset);
961 |                         if (copy_m == VM_PAGE_NULL) {
962 |                                 RELEASE_PAGE(m);
963 |                                 copy_object->ref_count--;
964 |                                 assert(copy_object->ref_count > 0);
965 |                                 vm_object_unlock(copy_object);
966 |                                 vm_fault_cleanup(object, first_m);
967 |                                 return(VM_FAULT_MEMORY_SHORTAGE);
968 |                         }
969 |
970 |                         /*
971 |                          *      Must copy page into copy-object.
972 |                          */
973 |
974 |                         vm_page_copy(m, copy_m);
975 |
976 |                         /*
977 |                          *      If the old page was in use by any users
978 |                          *      of the copy-object, it must be removed
979 |                          *      from all pmaps.  (We can't know which
980 |                          *      pmaps use it.)
981 |                          */
982 |
983 |                         vm_page_lock_queues();
984 |                         pmap_page_protect(m->phys_addr, VM_PROT_NONE);
985 |                         copy_m->dirty = TRUE;
986 |                         vm_page_unlock_queues();
987 |
988 |                         /*
989 |                          *      If there's a pager, then immediately
990 |                          *      page out this page, using the "initialize"
991 |                          *      option.  Else, we use the copy.
992 |                          */
993 |
994 |                         if (!copy_object->pager_created) {
995 |                                 vm_page_lock_queues();
996 |                                 vm_page_activate(copy_m);
997 |                                 vm_page_unlock_queues();
998 |                                 PAGE_WAKEUP_DONE(copy_m);
999 |                         } else {
1000 |                                 /*
1001 |                                  *      The page is already ready for pageout:
1002 |                                  *      not on pageout queues and busy.
1003 |                                  *      Unlock everything except the
1004 |                                  *      copy_object itself.
1005 |                                  */
1006 |
1007 |                                 vm_object_unlock(object);
1008 |
1009 |                                 /*
1010 |                                  *      Write the page to the copy-object,
1011 |                                  *      flushing it from the kernel.
1012 |                                  */
1013 |
1014 |                                 vm_pageout_page(copy_m, TRUE, TRUE);
1015 |
1016 |                                 /*
1017 |                                  *      Since the pageout may have
1018 |                                  *      temporarily dropped the
1019 |                                  *      copy_object's lock, we
1020 |                                  *      check whether we'll have
1021 |                                  *      to deallocate the hard way.
1022 |                                  */
1023 |
1024 |                                 if ((copy_object->shadow != object) ||
1025 |                                     (copy_object->ref_count == 1)) {
1026 |                                         vm_object_unlock(copy_object);
1027 |                                         vm_object_deallocate(copy_object);
1028 |                                         vm_object_lock(object);
1029 |                                         continue;
1030 |                                 }
1031 |
1032 |                                 /*
1033 |                                  *      Pick back up the old object's
1034 |                                  *      lock.  [It is safe to do so,
1035 |                                  *      since it must be deeper in the
1036 |                                  *      object tree.]
1037 |                                  */
1038 |
1039 |                                 vm_object_lock(object);
1040 |                         }
1041 |
1042 |                         /*
1043 |                          *      Because we're pushing a page upward
1044 |                          *      in the object tree, we must restart
1045 |                          *      any faults that are waiting here.
1046 |                          *      [Note that this is an expansion of
1047 |                          *      PAGE_WAKEUP that uses the THREAD_RESTART
1048 |                          *      wait result].  Can't turn off the page's
1049 |                          *      busy bit because we're not done with it.
1050 |                          */
1051 |
1052 |                         if (m->wanted) {
1053 |                                 m->wanted = FALSE;
1054 |                                 thread_wakeup_with_result((event_t) m,
1055 |                                                           THREAD_RESTART);
1056 |                         }
1057 |                 }
1058 |
1059 |                 /*
1060 |                  *      The reference count on copy_object must be
1061 |                  *      at least 2: one for our extra reference,
1062 |                  *      and at least one from the outside world
1063 |                  *      (we checked that when we last locked
1064 |                  *      copy_object).
1065 |                  */
1066 |                 copy_object->ref_count--;
1067 |                 assert(copy_object->ref_count > 0);
1068 |                 vm_object_unlock(copy_object);
1069 |
1070 |                 break;
1071 |         }
1072 |
1073 |         *result_page = m;
1074 |         *top_page = first_m;
1075 |
1076 |         /*
1077 |          *      If the page can be written, assume that it will be.
1078 |          *      [Earlier, we restrict the permission to allow write
1079 |          *      access only if the fault so required, so we don't
1080 |          *      mark read-only data as dirty.]
1081 |          */
1082 |
1083 |         if (vm_fault_dirty_handling && (*protection & VM_PROT_WRITE))
1084 |                 m->dirty = TRUE;
1085 |
1086 |         return(VM_FAULT_SUCCESS);
1087 |
1088 |     block_and_backoff:
1089 |         vm_fault_cleanup(object, first_m);
1090 |
1091 |         if (continuation != (void (*)()) 0) {
1092 |                 vm_fault_state_t *state =
1093 |                         (vm_fault_state_t *) current_thread()->ith_othersaved.other;
1094 |
1095 |                 /*
1096 |                  *      Save variables in case we must restart.
1097 |                  */
1098 |
1099 |                 state->vmfp_backoff = TRUE;
1100 |                 state->vmf_prot = *protection;
1101 |
1102 |                 counter(c_vm_fault_page_block_backoff_user++);
1103 |                 thread_block(continuation);
1104 |         } else
1105 |         {
1106 |                 counter(c_vm_fault_page_block_backoff_kernel++);
1107 |                 thread_block((void (*)()) 0);
1108 |         }
1109 |     after_block_and_backoff:
1110 |         if (current_thread()->wait_result == THREAD_AWAKENED)
1111 |                 return VM_FAULT_RETRY;
1112 |         else
1113 |                 return VM_FAULT_INTERRUPTED;
1114 |
1115 | #undef RELEASE_PAGE
1116 | }
1117 |
1118 | /*
1119 |  *      Routine:        vm_fault
1120 |  *      Purpose:
1121 |  *              Handle page faults, including pseudo-faults
1122 |  *              used to change the wiring status of pages.
1123 |  *      Returns:
1124 |  *              If an explicit (expression) continuation is supplied,
1125 |  *              then we call the continuation instead of returning.
1126 |  *      Implementation:
1127 |  *              Explicit continuations make this a little icky,
1128 |  *              because it hasn't been rewritten to embrace CPS.
1129 |  *              Instead, we have resume arguments for vm_fault and
1130 |  *              vm_fault_page, to let the fault computation continue.
1131 |  *
1132 |  *              vm_fault and vm_fault_page save mucho state
1133 |  *              in the moral equivalent of a closure.  The state
1134 |  *              structure is allocated when first entering vm_fault
1135 |  *              and deallocated when leaving vm_fault.
1136 |  */

void
vm_fault_continue(void)
{
	vm_fault_state_t *state =
		(vm_fault_state_t *) current_thread()->ith_other;

	(void) vm_fault(state->vmf_map,
			state->vmf_vaddr,
			state->vmf_fault_type,
			state->vmf_change_wiring,
			TRUE, state->vmf_continuation);
	/*NOTREACHED*/
}
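
/*
 *	Illustrative sketch (not part of the original source): the two
 *	ways a caller can drive vm_fault.  A caller that can afford to
 *	block on its kernel stack passes a null continuation and gets
 *	the result back directly; a caller whose stack may be discarded
 *	passes a continuation and must not expect vm_fault to return.
 *	The function names below are hypothetical.
 */
#ifdef	notdef
static void example_fault_done(kern_return_t kr)
{
	/* Control resumes here if vm_fault discarded our stack. */
}

static void example_fault_callers(vm_map_t map, vm_offset_t addr)
{
	/* Blocking style: null continuation, result returned. */
	(void) vm_fault(map, addr, VM_PROT_READ, FALSE,
			FALSE, (void (*)()) 0);

	/* Continuation style: may invoke example_fault_done(kr)
	   instead of returning here. */
	(void) vm_fault(map, addr, VM_PROT_READ, FALSE,
			FALSE, example_fault_done);
}
#endif	/* notdef */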

kern_return_t vm_fault(map, vaddr, fault_type, change_wiring,
		       resume, continuation)
	vm_map_t	map;
	vm_offset_t	vaddr;
	vm_prot_t	fault_type;
	boolean_t	change_wiring;
	boolean_t	resume;
	void		(*continuation)();
{
	vm_map_version_t	version;	/* Map version for verification */
	boolean_t		wired;		/* Should mapping be wired down? */
	vm_object_t		object;		/* Top-level object */
	vm_offset_t		offset;		/* Top-level offset */
	vm_prot_t		prot;		/* Protection for mapping */
	vm_object_t		old_copy_object; /* Saved copy object */
	vm_page_t		result_page;	/* Result of vm_fault_page */
	vm_page_t		top_page;	/* Placeholder page */
	kern_return_t		kr;

	vm_page_t		m;	/* Fast access to result_page */

	if (resume) {
		vm_fault_state_t *state =
			(vm_fault_state_t *) current_thread()->ith_other;

		/*
		 *	Retrieve cached variables and
		 *	continue vm_fault_page.
		 */

		object = state->vmf_object;
		if (object == VM_OBJECT_NULL)
			goto RetryFault;
		version = state->vmf_version;
		wired = state->vmf_wired;
		offset = state->vmf_offset;
		prot = state->vmf_prot;

		kr = vm_fault_page(object, offset, fault_type,
				   (change_wiring && !wired), !change_wiring,
				   &prot, &result_page, &top_page,
				   TRUE, vm_fault_continue);
		goto after_vm_fault_page;
	}

	if (continuation != (void (*)()) 0) {
		/*
		 *	We will probably need to save state.
		 */

		char *	state;

		/*
		 *	If this assignment statement were written as
		 *	'active_threads[cpu_number()] = kmem_cache_alloc()',
		 *	cpu_number might be evaluated before kmem_cache_alloc;
		 *	if kmem_cache_alloc blocks, cpu_number will be wrong.
		 */

		state = (char *) kmem_cache_alloc(&vm_fault_state_cache);
		current_thread()->ith_other = state;

	}
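	/*
	 *	To make the hazard above concrete: the single-statement form
	 *
	 *		current_thread()->ith_other =
	 *		    (char *) kmem_cache_alloc(&vm_fault_state_cache);
	 *
	 *	expands current_thread() into an active_threads[cpu_number()]
	 *	reference whose index may be computed before the allocation;
	 *	if the allocation blocks and the thread resumes on another
	 *	processor, the store lands in the wrong per-CPU slot.  The
	 *	two-statement form forces the allocation to finish first.
	 */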

    RetryFault: ;

	/*
	 *	Find the backing store object and offset into
	 *	it to begin the search.
	 */

	if ((kr = vm_map_lookup(&map, vaddr, fault_type, &version,
				&object, &offset,
				&prot, &wired)) != KERN_SUCCESS) {
		goto done;
	}

	/*
	 *	If the page is wired, we must fault for the current protection
	 *	value, to avoid further faults.
	 */

	if (wired)
		fault_type = prot;

	/*
	 *	Make a reference to this object to
	 *	prevent its disposal while we are messing with
	 *	it.  Once we have the reference, the map is free
	 *	to be diddled.  Since objects reference their
	 *	shadows (and copies), they will stay around as well.
	 */

	assert(object->ref_count > 0);
	object->ref_count++;
	vm_object_paging_begin(object);

	if (continuation != (void (*)()) 0) {
		vm_fault_state_t *state =
			(vm_fault_state_t *) current_thread()->ith_other;

		/*
		 *	Save variables, in case vm_fault_page discards
		 *	our kernel stack and we have to restart.
		 */

		state->vmf_map = map;
		state->vmf_vaddr = vaddr;
		state->vmf_fault_type = fault_type;
		state->vmf_change_wiring = change_wiring;
		state->vmf_continuation = continuation;

		state->vmf_version = version;
		state->vmf_wired = wired;
		state->vmf_object = object;
		state->vmf_offset = offset;
		state->vmf_prot = prot;

		kr = vm_fault_page(object, offset, fault_type,
				   (change_wiring && !wired), !change_wiring,
				   &prot, &result_page, &top_page,
				   FALSE, vm_fault_continue);
	} else
	{
		kr = vm_fault_page(object, offset, fault_type,
				   (change_wiring && !wired), !change_wiring,
				   &prot, &result_page, &top_page,
				   FALSE, (void (*)()) 0);
	}
    after_vm_fault_page:

	/*
	 *	If we didn't succeed, lose the object reference immediately.
	 */

	if (kr != VM_FAULT_SUCCESS)
		vm_object_deallocate(object);

	/*
	 *	See why we failed, and take corrective action.
	 */

	switch (kr) {
		case VM_FAULT_SUCCESS:
			break;
		case VM_FAULT_RETRY:
			goto RetryFault;
		case VM_FAULT_INTERRUPTED:
			kr = KERN_SUCCESS;
			goto done;
		case VM_FAULT_MEMORY_SHORTAGE:
			if (continuation != (void (*)()) 0) {
				vm_fault_state_t *state =
					(vm_fault_state_t *) current_thread()->ith_other;

				/*
				 *	Save variables in case VM_PAGE_WAIT
				 *	discards our kernel stack.
				 */

				state->vmf_map = map;
				state->vmf_vaddr = vaddr;
				state->vmf_fault_type = fault_type;
				state->vmf_change_wiring = change_wiring;
				state->vmf_continuation = continuation;
				state->vmf_object = VM_OBJECT_NULL;

				VM_PAGE_WAIT(vm_fault_continue);
			} else
				VM_PAGE_WAIT((void (*)()) 0);
			goto RetryFault;
		case VM_FAULT_FICTITIOUS_SHORTAGE:
			vm_page_more_fictitious();
			goto RetryFault;
		case VM_FAULT_MEMORY_ERROR:
			kr = KERN_MEMORY_ERROR;
			goto done;
	}

	m = result_page;

	assert((change_wiring && !wired) ?
	       (top_page == VM_PAGE_NULL) :
	       ((top_page == VM_PAGE_NULL) == (m->object == object)));

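	/*
	 *	That is: when unwiring (change_wiring && !wired) the page
	 *	must have been resident in the top-level object, so there
	 *	is no placeholder page; otherwise a placeholder top_page
	 *	exists exactly when the result page came from a backing
	 *	object rather than from the top-level object itself.
	 */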
	/*
	 *	How to clean up the result of vm_fault_page.  This
	 *	happens whether the mapping is entered or not.
	 */

#define UNLOCK_AND_DEALLOCATE				\
	MACRO_BEGIN					\
	vm_fault_cleanup(m->object, top_page);		\
	vm_object_deallocate(object);			\
	MACRO_END

	/*
	 *	What to do with the resulting page from vm_fault_page
	 *	if it doesn't get entered into the physical map:
	 */

#define RELEASE_PAGE(m)					\
	MACRO_BEGIN					\
	PAGE_WAKEUP_DONE(m);				\
	vm_page_lock_queues();				\
	if (!m->active && !m->inactive)			\
		vm_page_activate(m);			\
	vm_page_unlock_queues();			\
	MACRO_END
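	/*
	 *	Note on the MACRO_BEGIN/MACRO_END wrappers: in this build
	 *	they bracket the body as a GCC statement expression,
	 *	({ ... }), so each macro behaves as a single statement
	 *	and can safely follow an unbracketed if or else.
	 */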

	/*
	 *	We must verify that the maps have not changed
	 *	since our last lookup.
	 */

	old_copy_object = m->object->copy;

	vm_object_unlock(m->object);
	while (!vm_map_verify(map, &version)) {
		vm_object_t	retry_object;
		vm_offset_t	retry_offset;
		vm_prot_t	retry_prot;

		/*
		 *	To avoid trying to write_lock the map while another
		 *	thread has it read_locked (in vm_map_pageable), we
		 *	do not try for write permission.  If the page is
		 *	still writable, we will get write permission.  If it
		 *	is not, or has been marked needs_copy, we enter the
		 *	mapping without write permission, and will merely
		 *	take another fault.
		 */
		kr = vm_map_lookup(&map, vaddr,
				   fault_type & ~VM_PROT_WRITE, &version,
				   &retry_object, &retry_offset, &retry_prot,
				   &wired);

		if (kr != KERN_SUCCESS) {
			vm_object_lock(m->object);
			RELEASE_PAGE(m);
			UNLOCK_AND_DEALLOCATE;
			goto done;
		}

		vm_object_unlock(retry_object);
		vm_object_lock(m->object);

		if ((retry_object != object) ||
		    (retry_offset != offset)) {
			RELEASE_PAGE(m);
			UNLOCK_AND_DEALLOCATE;
			goto RetryFault;
		}

		/*
		 *	Check whether the protection has changed or the object
		 *	has been copied while we left the map unlocked.
		 */
		prot &= retry_prot;
		vm_object_unlock(m->object);
	}
	vm_object_lock(m->object);

	/*
	 *	If the copy object changed while the top-level object
	 *	was unlocked, then we must take away write permission.
	 */

	if (m->object->copy != old_copy_object)
		prot &= ~VM_PROT_WRITE;

	/*
	 *	If we want to wire down this page, but no longer have
	 *	adequate permissions, we must start all over.
	 */

	if (wired && (prot != fault_type)) {
		vm_map_verify_done(map, &version);
		RELEASE_PAGE(m);
		UNLOCK_AND_DEALLOCATE;
		goto RetryFault;
	}

	/*
	 *	It's critically important that a wired-down page be faulted
	 *	only once in each map for which it is wired.
	 */

	vm_object_unlock(m->object);

	/*
	 *	Put this page into the physical map.
	 *	We had to do the unlock above because pmap_enter
	 *	may cause other faults.  The page may be on
	 *	the pageout queues.  If the pageout daemon comes
	 *	across the page, it will remove it from the queues.
	 */

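	/*
	 *	Descriptive note: as its expansion in this build shows,
	 *	PMAP_ENTER enters m->phys_addr with the requested
	 *	protection minus any access modes still locked by the
	 *	memory manager, i.e. prot & ~m->page_lock, so locked
	 *	modes never reach the physical map.
	 */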
	PMAP_ENTER(map->pmap, vaddr, m, prot, wired);

	/*
	 *	If the page is not wired down and isn't already
	 *	on a pageout queue, then put it where the
	 *	pageout daemon can find it.
	 */
	vm_object_lock(m->object);
	vm_page_lock_queues();
	if (change_wiring) {
		if (wired)
			vm_page_wire(m);
		else
			vm_page_unwire(m);
	} else if (software_reference_bits) {
		if (!m->active && !m->inactive)
			vm_page_activate(m);
		m->reference = TRUE;
	} else {
		vm_page_activate(m);
	}
	vm_page_unlock_queues();

	/*
	 *	Unlock everything, and return
	 */

	vm_map_verify_done(map, &version);
	PAGE_WAKEUP_DONE(m);
	kr = KERN_SUCCESS;
	UNLOCK_AND_DEALLOCATE;

#undef	UNLOCK_AND_DEALLOCATE
#undef	RELEASE_PAGE

    done:
	if (continuation != (void (*)()) 0) {
		vm_fault_state_t *state =
			(vm_fault_state_t *) current_thread()->ith_other;

		kmem_cache_free(&vm_fault_state_cache, (vm_offset_t) state);
		(*continuation)(kr);
		/*NOTREACHED*/
	}

	return(kr);
}

/*
 *	vm_fault_wire:
 *
 *	Wire down a range of virtual addresses in a map.
 */
void vm_fault_wire(map, entry)
	vm_map_t	map;
	vm_map_entry_t	entry;
{
	vm_offset_t	va;
	pmap_t		pmap;
	vm_offset_t	end_addr = entry->vme_end;

	pmap = vm_map_pmap(map);

	/*
	 *	Inform the physical mapping system that the
	 *	range of addresses may not fault, so that
	 *	page tables and such can be locked down as well.
	 */

	pmap_pageable(pmap, entry->vme_start, end_addr, FALSE);

	/*
	 *	We simulate a fault to get the page and enter it
	 *	in the physical map.
	 */

	for (va = entry->vme_start; va < end_addr; va += PAGE_SIZE) {
		if (vm_fault_wire_fast(map, va, entry) != KERN_SUCCESS)
			(void) vm_fault(map, va, VM_PROT_NONE, TRUE,
					FALSE, (void (*)()) 0);
	}
}
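
/*
 *	Illustrative sketch (not part of the original source): how a
 *	hypothetical caller might wire and later unwire every entry in
 *	a map.  The entry-iteration idiom below is an assumption about
 *	vm_map's entry list, not code taken from vm_map.
 */
#ifdef	notdef
static void example_wire_then_unwire(vm_map_t map)
{
	vm_map_entry_t	entry;

	for (entry = vm_map_first_entry(map);	/* assumed iterator */
	     entry != vm_map_to_entry(map);
	     entry = entry->vme_next) {
		vm_fault_wire(map, entry);	/* fast path, else vm_fault */
		vm_fault_unwire(map, entry);	/* undo, for illustration */
	}
}
#endif	/* notdef */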

/*
 *	vm_fault_unwire:
 *
 *	Unwire a range of virtual addresses in a map.
 */
void vm_fault_unwire(map, entry)
	vm_map_t	map;
	vm_map_entry_t	entry;
{
	vm_offset_t	va;
	pmap_t		pmap;
	vm_offset_t	end_addr = entry->vme_end;
	vm_object_t	object;

	pmap = vm_map_pmap(map);

	object = (entry->is_sub_map)
			? VM_OBJECT_NULL : entry->object.vm_object;

	/*
	 *	Since the pages are wired down, we must be able to
	 *	get their mappings from the physical map system.
	 */

	for (va = entry->vme_start; va < end_addr; va += PAGE_SIZE) {
		pmap_change_wiring(pmap, va, FALSE);

		if (object == VM_OBJECT_NULL) {
			vm_map_lock_set_recursive(map);
			(void) vm_fault(map, va, VM_PROT_NONE, TRUE,
					FALSE, (void (*)()) 0);
			vm_map_lock_clear_recursive(map);
		} else {
			vm_prot_t	prot;
			vm_page_t	result_page;
			vm_page_t	top_page;
			vm_fault_return_t result;

			do {
				prot = VM_PROT_NONE;

				vm_object_lock(object);
				vm_object_paging_begin(object);
				result = vm_fault_page(object,
						entry->offset +
						  (va - entry->vme_start),
						VM_PROT_NONE, TRUE,
						FALSE, &prot,
						&result_page,
						&top_page,
						FALSE, (void (*)()) 0);
			} while (result == VM_FAULT_RETRY);

			if (result != VM_FAULT_SUCCESS)
				panic("vm_fault_unwire: failure");

			vm_page_lock_queues();
			vm_page_unwire(result_page);
			vm_page_unlock_queues();
			PAGE_WAKEUP_DONE(result_page);

			vm_fault_cleanup(result_page->object, top_page);
		}
	}

	/*
	 *	Inform the physical mapping system that the range
	 *	of addresses may fault, so that page tables and
	 *	such may be unwired themselves.
	 */

	pmap_pageable(pmap, entry->vme_start, end_addr, TRUE);
}

/*
 *	vm_fault_wire_fast:
 *
 *	Handle common case of a wire down page fault at the given address.
 *	If successful, the page is inserted into the associated physical map.
 *	The map entry is passed in to avoid the overhead of a map lookup.
 *
 *	NOTE: the given address should be truncated to the
 *	proper page address.
 *
 *	KERN_SUCCESS is returned if the page fault is handled; otherwise,
 *	a standard error specifying why the fault is fatal is returned.
 *
 *	The map in question must be referenced, and remains so.
 *	Caller has a read lock on the map.
 *
 *	This is a stripped version of vm_fault() for wiring pages.  Anything
 *	other than the common case will return KERN_FAILURE, and the caller
 *	is expected to call vm_fault().
 */
kern_return_t vm_fault_wire_fast(map, va, entry)
	vm_map_t	map;
	vm_offset_t	va;
	vm_map_entry_t	entry;
{
	vm_object_t	object;
	vm_offset_t	offset;
	vm_page_t	m;
	vm_prot_t	prot;

	vm_stat.faults++;		/* needs lock XXX */
	current_task()->faults++;
	/*
	 *	Recovery actions
	 */

#undef	RELEASE_PAGE
#define RELEASE_PAGE(m)	{				\
	PAGE_WAKEUP_DONE(m);				\
	vm_page_lock_queues();				\
	vm_page_unwire(m);				\
	vm_page_unlock_queues();			\
}


#undef	UNLOCK_THINGS
#define UNLOCK_THINGS	{				\
	object->paging_in_progress--;			\
	vm_object_unlock(object);			\
}

#undef	UNLOCK_AND_DEALLOCATE
#define UNLOCK_AND_DEALLOCATE	{			\
	UNLOCK_THINGS;					\
	vm_object_deallocate(object);			\
}
/*
 *	Give up and have caller do things the hard way.
 */

#define GIVE_UP	{					\
	UNLOCK_AND_DEALLOCATE;				\
	return(KERN_FAILURE);				\
}


	/*
	 *	If this entry is not directly to a vm_object, bail out.
	 */
	if (entry->is_sub_map)
		return(KERN_FAILURE);

	/*
	 *	Find the backing store object and offset into it.
	 */

	object = entry->object.vm_object;
	offset = (va - entry->vme_start) + entry->offset;
	prot = entry->protection;

	/*
	 *	Make a reference to this object to prevent its
	 *	disposal while we are messing with it.
	 */

	vm_object_lock(object);
	assert(object->ref_count > 0);
	object->ref_count++;
	object->paging_in_progress++;

	/*
	 *	INVARIANTS (through entire routine):
	 *
	 *	1)	At all times, we must either have the object
	 *		lock or a busy page in some object to prevent
	 *		some other thread from trying to bring in
	 *		the same page.
	 *
	 *	2)	Once we have a busy page, we must remove it from
	 *		the pageout queues, so that the pageout daemon
	 *		will not grab it away.
	 *
	 */

	/*
	 *	Look for page in top-level object.  If it's not there or
	 *	there's something going on, give up.
	 */
	m = vm_page_lookup(object, offset);
	if ((m == VM_PAGE_NULL) || (m->error) ||
	    (m->busy) || (m->absent) || (prot & m->page_lock)) {
		GIVE_UP;
	}

	/*
	 *	Wire the page down now.  All bail outs beyond this
	 *	point must unwire the page.
	 */

	vm_page_lock_queues();
	vm_page_wire(m);
	vm_page_unlock_queues();

	/*
	 *	Mark page busy for other threads.
	 */
	assert(!m->busy);
	m->busy = TRUE;
	assert(!m->absent);

	/*
	 *	Give up if the page is being written and there's a copy object
	 */
	if ((object->copy != VM_OBJECT_NULL) && (prot & VM_PROT_WRITE)) {
		RELEASE_PAGE(m);
		GIVE_UP;
	}

	/*
	 *	Put this page into the physical map.
	 *	We have to unlock the object because pmap_enter
	 *	may cause other faults.
	 */
	vm_object_unlock(object);

	PMAP_ENTER(map->pmap, va, m, prot, TRUE);

	/*
	 *	Must relock object so that paging_in_progress can be cleared.
	 */
	vm_object_lock(object);

	/*
	 *	Unlock everything, and return
	 */

	PAGE_WAKEUP_DONE(m);
	UNLOCK_AND_DEALLOCATE;

	return(KERN_SUCCESS);

}

/*
 *	Routine:	vm_fault_copy_cleanup
 *	Purpose:
 *		Release a page used by vm_fault_copy.
 */

void	vm_fault_copy_cleanup(page, top_page)
	vm_page_t	page;
	vm_page_t	top_page;
{
	vm_object_t	object = page->object;

	vm_object_lock(object);
	PAGE_WAKEUP_DONE(page);
	vm_page_lock_queues();
	if (!page->active && !page->inactive)
		vm_page_activate(page);
	vm_page_unlock_queues();
	vm_fault_cleanup(object, top_page);
}

/*
 *	Routine:	vm_fault_copy
 *
 *	Purpose:
 *		Copy pages from one virtual memory object to another --
 *		neither the source nor destination pages need be resident.
 *
 *		Before actually copying a page, the version associated with
 *		the destination address map will be verified.
 *
 *	In/out conditions:
 *		The caller must hold a reference, but not a lock, to
 *		each of the source and destination objects and to the
 *		destination map.
 *
 *	Results:
 *		Returns KERN_SUCCESS if no errors were encountered in
 *		reading or writing the data.  Returns KERN_INTERRUPTED if
 *		the operation was interrupted (only possible if the
 *		"interruptible" argument is asserted).  Other return values
 *		indicate a permanent error in copying the data.
 *
 *		The actual amount of data copied will be returned in the
 *		"src_size" argument.  In the event that the destination map
 *		verification failed, this amount may be less than the amount
 *		requested.
 */
kern_return_t vm_fault_copy(
		src_object,
		src_offset,
		src_size,
		dst_object,
		dst_offset,
		dst_map,
		dst_version,
		interruptible
		)
	vm_object_t	src_object;
	vm_offset_t	src_offset;
	vm_size_t	*src_size;		/* INOUT */
	vm_object_t	dst_object;
	vm_offset_t	dst_offset;
	vm_map_t	dst_map;
	vm_map_version_t *dst_version;
	boolean_t	interruptible;
{
	vm_page_t	result_page;
	vm_prot_t	prot;

	vm_page_t	src_page;
	vm_page_t	src_top_page;

	vm_page_t	dst_page;
	vm_page_t	dst_top_page;

	vm_size_t	amount_done;
	vm_object_t	old_copy_object;

#define	RETURN(x)					\
	MACRO_BEGIN					\
	*src_size = amount_done;			\
	MACRO_RETURN(x);				\
	MACRO_END

	amount_done = 0;
	do { /* while (amount_done != *src_size) */

	    RetrySourceFault: ;

		if (src_object == VM_OBJECT_NULL) {
			/*
			 *	No source object.  We will just
			 *	zero-fill the page in dst_object.
			 */

			src_page = VM_PAGE_NULL;
		} else {
			prot = VM_PROT_READ;

			vm_object_lock(src_object);
			vm_object_paging_begin(src_object);

			switch (vm_fault_page(src_object, src_offset,
					VM_PROT_READ, FALSE, interruptible,
					&prot, &result_page, &src_top_page,
					FALSE, (void (*)()) 0)) {

				case VM_FAULT_SUCCESS:
					break;
				case VM_FAULT_RETRY:
					goto RetrySourceFault;
				case VM_FAULT_INTERRUPTED:
					RETURN(MACH_SEND_INTERRUPTED);
				case VM_FAULT_MEMORY_SHORTAGE:
					VM_PAGE_WAIT((void (*)()) 0);
					goto RetrySourceFault;
				case VM_FAULT_FICTITIOUS_SHORTAGE:
					vm_page_more_fictitious();
					goto RetrySourceFault;
				case VM_FAULT_MEMORY_ERROR:
					return(KERN_MEMORY_ERROR);
			}

			src_page = result_page;

			assert((src_top_page == VM_PAGE_NULL) ==
			       (src_page->object == src_object));

			assert((prot & VM_PROT_READ) != VM_PROT_NONE);

			vm_object_unlock(src_page->object);
		}

	    RetryDestinationFault: ;

		prot = VM_PROT_WRITE;

		vm_object_lock(dst_object);
		vm_object_paging_begin(dst_object);

		switch (vm_fault_page(dst_object, dst_offset, VM_PROT_WRITE,
				FALSE, FALSE /* interruptible */,
				&prot, &result_page, &dst_top_page,
				FALSE, (void (*)()) 0)) {

			case VM_FAULT_SUCCESS:
				break;
			case VM_FAULT_RETRY:
				goto RetryDestinationFault;
			case VM_FAULT_INTERRUPTED:
				if (src_page != VM_PAGE_NULL)
					vm_fault_copy_cleanup(src_page,
							      src_top_page);
				RETURN(MACH_SEND_INTERRUPTED);
			case VM_FAULT_MEMORY_SHORTAGE:
				VM_PAGE_WAIT((void (*)()) 0);
				goto RetryDestinationFault;
			case VM_FAULT_FICTITIOUS_SHORTAGE:
				vm_page_more_fictitious();
				goto RetryDestinationFault;
			case VM_FAULT_MEMORY_ERROR:
				if (src_page != VM_PAGE_NULL)
					vm_fault_copy_cleanup(src_page,
							      src_top_page);
				return(KERN_MEMORY_ERROR);
		}
		assert((prot & VM_PROT_WRITE) != VM_PROT_NONE);

		dst_page = result_page;

		old_copy_object = dst_page->object->copy;

		vm_object_unlock(dst_page->object);

		if (!vm_map_verify(dst_map, dst_version)) {

		    BailOut: ;

			if (src_page != VM_PAGE_NULL)
				vm_fault_copy_cleanup(src_page, src_top_page);
			vm_fault_copy_cleanup(dst_page, dst_top_page);
			break;
		}


		vm_object_lock(dst_page->object);
		if (dst_page->object->copy != old_copy_object) {
			vm_object_unlock(dst_page->object);
			vm_map_verify_done(dst_map, dst_version);
			goto BailOut;
		}
		vm_object_unlock(dst_page->object);

		/*
		 *	Copy the page, and note that it is dirty
		 *	immediately.
		 */

		if (src_page == VM_PAGE_NULL)
			vm_page_zero_fill(dst_page);
		else
			vm_page_copy(src_page, dst_page);
		dst_page->dirty = TRUE;

		/*
		 *	Unlock everything, and return
		 */

		vm_map_verify_done(dst_map, dst_version);

		if (src_page != VM_PAGE_NULL)
			vm_fault_copy_cleanup(src_page, src_top_page);
		vm_fault_copy_cleanup(dst_page, dst_top_page);

		amount_done += PAGE_SIZE;
		src_offset += PAGE_SIZE;
		dst_offset += PAGE_SIZE;

	} while (amount_done != *src_size);

	RETURN(KERN_SUCCESS);
#undef	RETURN

	/*NOTREACHED*/
}
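
/*
 *	Illustrative sketch (not part of the original source): callers
 *	of vm_fault_copy should be prepared for a short copy, since a
 *	failed destination-map verification ends the loop early with
 *	KERN_SUCCESS and a reduced *src_size.  The function name below
 *	is hypothetical, and re-establishing *dst_version between
 *	rounds is elided.
 */
#ifdef	notdef
static kern_return_t example_copy_all(
	vm_object_t	src_object,
	vm_offset_t	src_offset,
	vm_object_t	dst_object,
	vm_offset_t	dst_offset,
	vm_map_t	dst_map,
	vm_map_version_t *dst_version,
	vm_size_t	size)
{
	kern_return_t	kr;
	vm_size_t	chunk;

	while (size != 0) {
		chunk = size;
		kr = vm_fault_copy(src_object, src_offset, &chunk,
				   dst_object, dst_offset, dst_map,
				   dst_version, FALSE);
		if (kr != KERN_SUCCESS)
			return kr;	/* error or interruption */
		if (chunk == 0)
			break;		/* no progress; give up */
		src_offset += chunk;
		dst_offset += chunk;
		size -= chunk;
		/* Caller must refresh *dst_version here before retrying. */
	}
	return KERN_SUCCESS;
}
#endif	/* notdef */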


#ifdef	notdef

/*
 *	Routine:	vm_fault_page_overwrite
 *
 *	Description:
 *		A form of vm_fault_page that assumes that the
 *		resulting page will be overwritten in its entirety,
 *		making it unnecessary to obtain the correct *contents*
 *		of the page.
 *
 *	Implementation:
 *		XXX Untested.  Also unused.  Eventually, this technology
 *		could be used in vm_fault_copy() to advantage.
 */
vm_fault_return_t vm_fault_page_overwrite(dst_object, dst_offset, result_page)
	vm_object_t	dst_object;
	vm_offset_t	dst_offset;
	vm_page_t	*result_page;	/* OUT */
{
	vm_page_t	dst_page;

#define	interruptible	FALSE	/* XXX */

	while (TRUE) {
		/*
		 *	Look for a page at this offset
		 */

		while ((dst_page = vm_page_lookup(dst_object, dst_offset))
				== VM_PAGE_NULL) {
			/*
			 *	No page, no problem... just allocate one.
			 */

			dst_page = vm_page_alloc(dst_object, dst_offset);
			if (dst_page == VM_PAGE_NULL) {
				vm_object_unlock(dst_object);
				VM_PAGE_WAIT((void (*)()) 0);
				vm_object_lock(dst_object);
				continue;
			}

			/*
			 *	Pretend that the memory manager
			 *	write-protected the page.
			 *
			 *	Note that we will be asking for write
			 *	permission without asking for the data
			 *	first.
			 */

			dst_page->overwriting = TRUE;
			dst_page->page_lock = VM_PROT_WRITE;
			dst_page->absent = TRUE;
			dst_object->absent_count++;

			break;

			/*
			 *	When we bail out, we might have to throw
			 *	away the page created here.
			 */

#define	DISCARD_PAGE							\
	MACRO_BEGIN							\
	vm_object_lock(dst_object);					\
	dst_page = vm_page_lookup(dst_object, dst_offset);		\
	if ((dst_page != VM_PAGE_NULL) && dst_page->overwriting)	\
		VM_PAGE_FREE(dst_page);					\
	vm_object_unlock(dst_object);					\
	MACRO_END
		}

		/*
		 *	If the page is write-protected...
		 */

		if (dst_page->page_lock & VM_PROT_WRITE) {
			/*
			 *	... and an unlock request hasn't been sent
			 */

			if ( ! (dst_page->unlock_request & VM_PROT_WRITE)) {
				vm_prot_t	u;
				kern_return_t	rc;

				/*
				 *	... then send one now.
				 */

				if (!dst_object->pager_ready) {
					vm_object_assert_wait(dst_object,
						VM_OBJECT_EVENT_PAGER_READY,
						interruptible);
					vm_object_unlock(dst_object);
					thread_block((void (*)()) 0);
					if (current_thread()->wait_result !=
					    THREAD_AWAKENED) {
						DISCARD_PAGE;
						return(VM_FAULT_INTERRUPTED);
					}
					continue;
				}

				u = dst_page->unlock_request |= VM_PROT_WRITE;
				vm_object_unlock(dst_object);

				if ((rc = memory_object_data_unlock(
						dst_object->pager,
						dst_object->pager_request,
						dst_offset + dst_object->paging_offset,
						PAGE_SIZE,
						u)) != KERN_SUCCESS) {
					printf("vm_object_overwrite: memory_object_data_unlock failed\n");
					DISCARD_PAGE;
					return((rc == MACH_SEND_INTERRUPTED) ?
						VM_FAULT_INTERRUPTED :
						VM_FAULT_MEMORY_ERROR);
				}
				vm_object_lock(dst_object);
				continue;
			}

			/* ... fall through to wait below */
		} else {
			/*
			 *	If the page isn't being used for other
			 *	purposes, then we're done.
			 */
			if ( ! (dst_page->busy || dst_page->absent || dst_page->error) )
				break;
		}

		PAGE_ASSERT_WAIT(dst_page, interruptible);
		vm_object_unlock(dst_object);
		thread_block((void (*)()) 0);
		if (current_thread()->wait_result != THREAD_AWAKENED) {
			DISCARD_PAGE;
			return(VM_FAULT_INTERRUPTED);
		}
	}

	*result_page = dst_page;
	return(VM_FAULT_SUCCESS);

#undef	interruptible
#undef	DISCARD_PAGE
}

#endif	/* notdef */