File:		obj-scan-build/../vm/vm_fault.c
Location:	line 252, column 2
Description:	Access to field 'task' results in a dereference of a null pointer
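The dereference is reachable because page faults can be taken before any thread is running (the comment at line 148 below notes exactly this). current_task() expands to active_threads[0]->task, so line 252 dereferences the thread pointer unguarded, while the vm_stat_sample() macro invoked just above it checks THREAD_NULL first (line 156). A guarded variant of the flagged statement, as an illustrative sketch only (the local variable is an assumption, not the project's actual fix):

	thread_t thread = current_thread();

	if (thread != THREAD_NULL)	/* faults can occur before threads run */
		thread->task->faults++;	/* was: current_task()->faults++; */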
   1	/*
   2	 * Mach Operating System
   3	 * Copyright (c) 1994,1990,1989,1988,1987 Carnegie Mellon University.
   4	 * Copyright (c) 1993,1994 The University of Utah and
   5	 * the Computer Systems Laboratory (CSL).
   6	 * All rights reserved.
   7	 *
   8	 * Permission to use, copy, modify and distribute this software and its
   9	 * documentation is hereby granted, provided that both the copyright
  10	 * notice and this permission notice appear in all copies of the
  11	 * software, derivative works or modified versions, and any portions
  12	 * thereof, and that both notices appear in supporting documentation.
  13	 *
  14	 * CARNEGIE MELLON, THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF
  15	 * THIS SOFTWARE IN ITS "AS IS" CONDITION, AND DISCLAIM ANY LIABILITY
  16	 * OF ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF
  17	 * THIS SOFTWARE.
  18	 *
  19	 * Carnegie Mellon requests users of this software to return to
  20	 *
  21	 *	Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
  22	 *	School of Computer Science
  23	 *	Carnegie Mellon University
  24	 *	Pittsburgh PA 15213-3890
  25	 *
  26	 * any improvements or extensions that they make and grant Carnegie Mellon
  27	 * the rights to redistribute these changes.
  28	 */
  29	/*
  30	 *	File:	vm_fault.c
  31	 *	Author:	Avadis Tevanian, Jr., Michael Wayne Young
  32	 *
  33	 *	Page fault handling module.
  34	 */
  35	
  36	#include <kern/printf.h>
  37	#include <vm/vm_fault.h>
  38	#include <mach/kern_return.h>
  39	#include <mach/message.h>	/* for error codes */
  40	#include <kern/counters.h>
  41	#include <kern/debug.h>
  42	#include <kern/thread.h>
  43	#include <kern/sched_prim.h>
  44	#include <vm/vm_map.h>
  45	#include <vm/vm_object.h>
  46	#include <vm/vm_page.h>
  47	#include <vm/pmap.h>
  48	#include <mach/vm_statistics.h>
  49	#include <vm/vm_pageout.h>
  50	#include <mach/vm_param.h>
  51	#include <mach/memory_object.h>
  52	#include <vm/memory_object_user.user.h>
  53				/* For memory_object_data_{request,unlock} */
  54	#include <kern/macro_help.h>
  55	#include <kern/slab.h>
  56	
  57	#if	MACH_PCSAMPLE
  58	#include <kern/pc_sample.h>
  59	#endif
  60	
  61	
  62	
  63	/*
  64	 *	State needed by vm_fault_continue.
  65	 *	This is a little hefty to drop directly
  66	 *	into the thread structure.
  67	 */
  68	typedef struct vm_fault_state {
  69		struct vm_map *vmf_map;
  70		vm_offset_t vmf_vaddr;
  71		vm_prot_t vmf_fault_type;
  72		boolean_t vmf_change_wiring;
  73		void (*vmf_continuation)();
  74		vm_map_version_t vmf_version;
  75		boolean_t vmf_wired;
  76		struct vm_object *vmf_object;
  77		vm_offset_t vmf_offset;
  78		vm_prot_t vmf_prot;
  79	
  80		boolean_t vmfp_backoff;
  81		struct vm_object *vmfp_object;
  82		vm_offset_t vmfp_offset;
  83		struct vm_page *vmfp_first_m;
  84		vm_prot_t vmfp_access;
  85	} vm_fault_state_t;
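/*
 * [Editor's annotation, not part of the original file: vm_fault_page
 *  stashes one of these in current_thread()->ith_othersaved.other before
 *  blocking with a continuation -- the busy-wait path at lines 355-373
 *  below saves the vmfp_* fields before thread_block, and the resume path
 *  at lines 236-248 restores them after the kernel stack was discarded.]
 */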
  86	
  87	struct kmem_cache	vm_fault_state_cache;
  88	
  89	int		vm_object_absent_max = 50;
  90	
  91	boolean_t	vm_fault_dirty_handling = FALSE;
  92	boolean_t	vm_fault_interruptible = TRUE;
  93	
  94	boolean_t	software_reference_bits = TRUE;
  95	
  96	#if	MACH_KDB
  97	extern struct db_watchpoint *db_watchpoint_list;
  98	#endif	/* MACH_KDB */
  99	
 100	/*
 101	 *	Routine:	vm_fault_init
 102	 *	Purpose:
 103	 *		Initialize our private data structures.
 104	 */
 105	void vm_fault_init(void)
 106	{
 107		kmem_cache_init(&vm_fault_state_cache, "vm_fault_state",
 108				sizeof(vm_fault_state_t), 0, NULL, NULL, NULL, 0);
 109	}
 110	
 111	/*
 112	 *	Routine:	vm_fault_cleanup
 113	 *	Purpose:
 114	 *		Clean up the result of vm_fault_page.
 115	 *	Results:
 116	 *		The paging reference for "object" is released.
 117	 *		"object" is unlocked.
 118	 *		If "top_page" is not null, "top_page" is
 119	 *		freed and the paging reference for the object
 120	 *		containing it is released.
 121	 *
 122	 *	In/out conditions:
 123	 *		"object" must be locked.
 124	 */
 125	void
 126	vm_fault_cleanup(object, top_page)
 127		vm_object_t	object;
 128		vm_page_t	top_page;
 129	{
 130		vm_object_paging_end(object);
 131		vm_object_unlock(object);
 132	
 133		if (top_page != VM_PAGE_NULL) {
 134			object = top_page->object;
 135			vm_object_lock(object);
 136			VM_PAGE_FREE(top_page);
 137			vm_object_paging_end(object);
 138			vm_object_unlock(object);
 139		}
 140	}
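/*
 * [Editor's annotation, not part of the original file: a hedged sketch of
 *  the caller-side pairing described above and in vm_fault_page's header
 *  comment below.  The locals and the VM_FAULT_SUCCESS test are
 *  assumptions for illustration, not code from this file:
 *
 *	vm_object_lock(object);
 *	vm_object_paging_begin(object);		-- donate a paging reference
 *	kr = vm_fault_page(object, offset, VM_PROT_READ, FALSE, TRUE,
 *			   &prot, &result_page, &top_page,
 *			   FALSE, (void (*)()) 0);
 *	if (kr == VM_FAULT_SUCCESS) {
 *		-- result_page's object is locked; result_page is busy
 *		PAGE_WAKEUP_DONE(result_page);
 *		vm_fault_cleanup(result_page->object, top_page);
 *	}
 * ]
 */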
 141	
 142	
 143	#if	MACH_PCSAMPLE
 144	/*
 145	 *	Do PC sampling on current thread, assuming
 146	 *	that it is the thread taking this page fault.
 147	 *
 148	 *	Must check for THREAD_NULL, since faults
 149	 *	can occur before threads are running.
 150	 */
 151	
 152	#define	vm_stat_sample(flavor) \
 153	    MACRO_BEGIN \
 154	      thread_t _thread_ = current_thread(); \
 155	 \
 156	      if (_thread_ != THREAD_NULL) \
 157	          take_pc_sample_macro(_thread_, (flavor)); \
 158	    MACRO_END
 159	
 160	#else
 161	#define	vm_stat_sample(x)
 162	#endif	/* MACH_PCSAMPLE */
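/*
 * [Editor's annotation, not part of the original file: take_pc_sample_macro
 *  dereferences _thread_->task only behind the THREAD_NULL check at line
 *  156 above.  The bare current_task()->faults++ at line 252 performs the
 *  same active_threads[0]->task dereference with no such guard, which is
 *  the path the analyzer flags.]
 */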
 163	
 164	
 165	
 166	/*
 167	 *	Routine:	vm_fault_page
 168	 *	Purpose:
 169	 *		Find the resident page for the virtual memory
 170	 *		specified by the given virtual memory object
 171	 *		and offset.
 172	 *	Additional arguments:
 173	 *		The required permissions for the page are given
 174	 *		in "fault_type".  Desired permissions are included
 175	 *		in "protection".
 176	 *
 177	 *		If the desired page is known to be resident (for
 178	 *		example, because it was previously wired down), asserting
 179	 *		the "unwiring" parameter will speed the search.
 180	 *
 181	 *		If the operation can be interrupted (by thread_abort
 182	 *		or thread_terminate), then the "interruptible"
 183	 *		parameter should be asserted.
 184	 *
 185	 *	Results:
 186	 *		The page containing the proper data is returned
 187	 *		in "result_page".
 188	 *
 189	 *	In/out conditions:
 190	 *		The source object must be locked and referenced,
 191	 *		and must donate one paging reference.  The reference
 192	 *		is not affected.  The paging reference and lock are
 193	 *		consumed.
 194	 *
 195	 *		If the call succeeds, the object in which "result_page"
 196	 *		resides is left locked and holding a paging reference.
 197	 *		If this is not the original object, a busy page in the
 198	 *		original object is returned in "top_page", to prevent other
 199	 *		callers from pursuing this same data, along with a paging
 200	 *		reference for the original object.  The "top_page" should
 201	 *		be destroyed when this guarantee is no longer required.
 202	 *		The "result_page" is also left busy.  It is not removed
 203	 *		from the pageout queues.
 204	 */
 205	vm_fault_return_t vm_fault_page(first_object, first_offset,
 206				fault_type, must_be_resident, interruptible,
 207				protection,
 208				result_page, top_page,
 209				resume, continuation)
 210	 /* Arguments: */
 211		vm_object_t	first_object;	/* Object to begin search */
 212		vm_offset_t	first_offset;	/* Offset into object */
 213		vm_prot_t	fault_type;	/* What access is requested */
 214		boolean_t	must_be_resident;/* Must page be resident? */
 215		boolean_t	interruptible;	/* May fault be interrupted? */
 216	 /* Modifies in place: */
 217		vm_prot_t	*protection;	/* Protection for mapping */
 218	 /* Returns: */
 219		vm_page_t	*result_page;	/* Page found, if successful */
 220		vm_page_t	*top_page;	/* Page in top object, if
 221					 * not result_page.
 222					 */
 223	 /* More arguments: */
 224		boolean_t	resume;		/* We are restarting. */
 225		void		(*continuation)(); /* Continuation for blocking. */
 226	{
 227		vm_page_t	m;
 228		vm_object_t	object;
 229		vm_offset_t	offset;
 230		vm_page_t	first_m;
 231		vm_object_t	next_object;
 232		vm_object_t	copy_object;
 233		boolean_t	look_for_page;
 234		vm_prot_t	access_required;
 235	
 236		if (resume) {
 237			vm_fault_state_t *state =
 238				(vm_fault_state_t *) current_thread()->ith_othersaved.other;
 239	
 240			if (state->vmfp_backoff)
 241				goto after_block_and_backoff;
 242	
 243			object = state->vmfp_object;
 244			offset = state->vmfp_offset;
 245			first_m = state->vmfp_first_m;
 246			access_required = state->vmfp_access;
 247			goto after_thread_block;
 248		}
 249	
 250		vm_stat_sample(SAMPLED_PC_VM_FAULTS_ANY);
 251		vm_stat.faults++;		/* needs lock XXX */
 252		current_task()->faults++;
 253	
 254	/*
 255	 *	Recovery actions
 256	 */
 257	#define	RELEASE_PAGE(m)				\
 258		MACRO_BEGIN				\
 259		PAGE_WAKEUP_DONE(m);			\
 260		vm_page_lock_queues();			\
 261		if (!m->active && !m->inactive)		\
 262			vm_page_activate(m);		\
 263		vm_page_unlock_queues();		\
 264		MACRO_END
 265	
 266		if (vm_fault_dirty_handling
 267	#if	MACH_KDB
 268		    /*
 269		     *	If there are watchpoints set, then
 270		     *	we don't want to give away write permission
 271		     *	on a read fault.  Make the task write fault,
 272		     *	so that the watchpoint code notices the access.
 273		     */
 274		    || db_watchpoint_list
 275	#endif	/* MACH_KDB */
 276		    ) {
 277			/*
 278			 *	If we aren't asking for write permission,
 279			 *	then don't give it away.  We're using write
 280			 *	faults to set the dirty bit.
 281			 */
 282			if (!(fault_type & VM_PROT_WRITE))
 283				*protection &= ~VM_PROT_WRITE;
 284		}
 285	
 286		if (!vm_fault_interruptible)
 287			interruptible = FALSE;
 288	
 289		/*
 290		 *	INVARIANTS (through entire routine):
 291		 *
 292		 *	1)	At all times, we must either have the object
 293		 *		lock or a busy page in some object to prevent
 294		 *		some other thread from trying to bring in
 295		 *		the same page.
 296		 *
 297		 *		Note that we cannot hold any locks during the
 298		 *		pager access or when waiting for memory, so
 299		 *		we use a busy page then.
 300		 *
 301		 *		Note also that we aren't as concerned about more than
 302		 *		one thread attempting to memory_object_data_unlock
 303		 *		the same page at once, so we don't hold the page
 304		 *		as busy then, but do record the highest unlock
 305		 *		value so far.  [Unlock requests may also be delivered
 306		 *		out of order.]
 307		 *
 308		 *	2)	To prevent another thread from racing us down the
 309		 *		shadow chain and entering a new page in the top
 310		 *		object before we do, we must keep a busy page in
 311		 *		the top object while following the shadow chain.
 312		 *
 313		 *	3)	We must increment paging_in_progress on any object
 314		 *		for which we have a busy page, to prevent
 315		 *		vm_object_collapse from removing the busy page
 316		 *		without our noticing.
 317		 *
 318		 *	4)	We leave busy pages on the pageout queues.
 319		 *		If the pageout daemon comes across a busy page,
 320		 *		it will remove the page from the pageout queues.
 321		 */
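	/*
	 * [Editor's annotation, not part of the original file: a minimal
	 *  sketch of invariant (1), under assumed names -- a busy page
	 *  stands in for the object lock across a blocking operation:
	 *
	 *	m->busy = TRUE;			-- claim the page
	 *	vm_object_unlock(object);	-- busy keeps other threads out
	 *	... block, e.g. ask the pager for data ...
	 *	vm_object_lock(object);
	 *	PAGE_WAKEUP_DONE(m);		-- clear busy, wake waiters
	 * ]
	 */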
 322	
 323		/*
 324		 *	Search for the page at object/offset.
 325		 */
 326	
 327		object = first_object;
 328		offset = first_offset;
 329		first_m = VM_PAGE_NULL;
 330		access_required = fault_type;
 331	
 332		/*
 333		 *	See whether this page is resident
 334		 */
 335	
 336		while (TRUE) {
 337			m = vm_page_lookup(object, offset);
 338			if (m != VM_PAGE_NULL) {
 339				/*
 340				 *	If the page is being brought in,
 341				 *	wait for it and then retry.
 342				 *
 343				 *	A possible optimization: if the page
 344				 *	is known to be resident, we can ignore
 345				 *	pages that are absent (regardless of
 346				 *	whether they're busy).
 347				 */
 348	
 349				if (m->busy) {
 350					kern_return_t	wait_result;
 351	
 352					PAGE_ASSERT_WAIT(m, interruptible);
 353					vm_object_unlock(object);
 354					if (continuation != (void (*)()) 0) {
 355						vm_fault_state_t *state =
 356							(vm_fault_state_t *) current_thread()->ith_othersaved.other;
 357	
 358						/*
 359						 *	Save variables in case
 360						 *	thread_block discards
 361						 *	our kernel stack.
 362						 */
 363	
 364						state->vmfp_backoff = FALSE;
 365						state->vmfp_object = object;
 366						state->vmfp_offset = offset;
 367						state->vmfp_first_m = first_m;
 368						state->vmfp_access =
 369							access_required;
 370						state->vmf_prot = *protection;
 371	
 372						counter(c_vm_fault_page_block_busy_user++);
 373						thread_block(continuation);
 374					} else
 375					{
 376						counter(c_vm_fault_page_block_busy_kernel++);
 377						thread_block((void (*)()) 0);
 378					}
 379				    after_thread_block:
 380					wait_result = current_thread()->wait_result;
 381					vm_object_lock(object);
 382					if (wait_result != THREAD_AWAKENED) {
 383						vm_fault_cleanup(object, first_m);
 384						if (wait_result == THREAD_RESTART)
 385							return(VM_FAULT_RETRY);
 386						else
 387							return(VM_FAULT_INTERRUPTED);
 388					}
 389					continue;
 390				}
 391	
 392			/*
 393			 *	If the page is in error, give up now.
 394			 */
 395	
 396			if (m->error) {
 397				VM_PAGE_FREE(m);
 398				vm_fault_cleanup(object, first_m);
 399				return(VM_FAULT_MEMORY_ERROR);
 400			}
 401	
 402			/*
 403			 *	If the page isn't busy, but is absent,
 404			 *	then it was deemed "unavailable".
 405			 */
 406	
 407			if (m->absent) {
 408				/*
 409				 * Remove the non-existent page (unless it's
 410				 * in the top object) and move on down to the
 411				 * next object (if there is one).
 412				 */
 413	
 414				offset += object->shadow_offset;
 415				access_required = VM_PROT_READ;
 416				next_object = object->shadow;
 417				if (next_object == VM_OBJECT_NULL) {
 418					vm_page_t real_m;
 419	
 420					assert(!must_be_resident);
 421	
 422					/*
 423					 * Absent page at bottom of shadow
 424					 * chain; zero fill the page we left
 425					 * busy in the first object, and flush
 426					 * the absent page.  But first we
 427					 * need to allocate a real page.
 428					 */
 429	
 430					real_m = vm_page_grab(!object->internal);
 431					if (real_m == VM_PAGE_NULL) {
 432						vm_fault_cleanup(object, first_m);
 433						return(VM_FAULT_MEMORY_SHORTAGE);
 434					}
 435	
 436					if (object != first_object) {
 437						VM_PAGE_FREE(m);
 438						vm_object_paging_end(object);
 439						vm_object_unlock(object);
 440						object = first_object;
 441						offset = first_offset;
 442						m = first_m;
 443						first_m = VM_PAGE_NULL;
 444						vm_object_lock(object);
 445					}
 446	
 447					VM_PAGE_FREE(m);
 448					assert(real_m->busy);
 449					vm_page_lock_queues();
 450					vm_page_insert(real_m, object, offset);
 451					vm_page_unlock_queues();
 452					m = real_m;
 453	
 454					/*
 455					 *  Drop the lock while zero filling
 456					 *  page.  Then break because this
 457					 *  is the page we wanted.  Checking
 458					 *  the page lock is a waste of time;
 459					 *  this page was either absent or
 460					 *  newly allocated -- in both cases
 461					 *  it can't be page locked by a pager.
 462					 */
 463					vm_object_unlock(object);
 464	
 465					vm_page_zero_fill(m);
 466	
 467					vm_stat_sample(SAMPLED_PC_VM_ZFILL_FAULTS);
 468	
 469					vm_stat.zero_fill_count++;
 470					current_task()->zero_fills++;
 471					vm_object_lock(object);
 472					pmap_clear_modify(m->phys_addr);
 473					break;
 474				} else {
 475					if (must_be_resident) {
 476						vm_object_paging_end(object);
 477					} else if (object != first_object) {
 478						vm_object_paging_end(object);
 479						VM_PAGE_FREE(m);
 480					} else {
 481						first_m = m;
 482						m->absent = FALSE;
 483						vm_object_absent_release(object);
 484						m->busy = TRUE;
 485	
 486						vm_page_lock_queues();
 487						VM_PAGE_QUEUES_REMOVE(m);
 488						vm_page_unlock_queues();
 489					}
 490					vm_object_lock(next_object);
 491					vm_object_unlock(object);
 492					object = next_object;
 493					vm_object_paging_begin(object);
 494					continue;
 495				}
 496			}
 497	
 498			/*
 499			 *	If the desired access to this page has
 500			 *	been locked out, request that it be unlocked.
 501			 */
 502	
 503			if (access_required & m->page_lock) {
 504				if ((access_required & m->unlock_request) != access_required) {
 505					vm_prot_t	new_unlock_request;
 506					kern_return_t	rc;
 507	
 508					if (!object->pager_ready) {
 509						vm_object_assert_wait(object,
 510							VM_OBJECT_EVENT_PAGER_READY,
 511							interruptible);
 512						goto block_and_backoff;
 513					}
 514	
 515					new_unlock_request = m->unlock_request =
 516						(access_required | m->unlock_request);
 517					vm_object_unlock(object);
 518					if ((rc = memory_object_data_unlock(
 519						object->pager,
 520						object->pager_request,
 521						offset + object->paging_offset,
 522						PAGE_SIZE,
 523						new_unlock_request))
 524					     != KERN_SUCCESS) {
 525						printf("vm_fault: memory_object_data_unlock failed\n");
 526						vm_object_lock(object);
 527						vm_fault_cleanup(object, first_m);
 528						return((rc == MACH_SEND_INTERRUPTED) ?
 529							VM_FAULT_INTERRUPTED :
 530							VM_FAULT_MEMORY_ERROR);
 531					}
 532					vm_object_lock(object);
 533					continue;
 534				}
 535	
 536				PAGE_ASSERT_WAIT(m, interruptible);
 537				goto block_and_backoff;
 538			}
 539	
 540			/*
 541			 *	We mark the page busy and leave it on
 542			 *	the pageout queues.  If the pageout
 543			 *	daemon comes across it, then it will
 544			 *	remove the page.
 545			 */
 546	
 547			if (!software_reference_bits) {
 548				vm_page_lock_queues();
 549				if (m->inactive) {
 550					vm_stat_sample(SAMPLED_PC_VM_REACTIVATION_FAULTS);
 551					vm_stat.reactivations++;
 552					current_task()->reactivations++;
 553				}
 554	
 555				VM_PAGE_QUEUES_REMOVE(m);
 556				vm_page_unlock_queues();
 557			}
 558	
 559			assert(!m->busy);
 560			m->busy = TRUE;
 561			assert(!m->absent);
 562			break;
 563		}
 564	
 565		look_for_page =
 566			(object->pager_created)
 567	#if	MACH_PAGEMAP
 568		   && (vm_external_state_get(object->existence_info, offset + object->paging_offset) !=
 569		       VM_EXTERNAL_STATE_ABSENT)
 570	#endif	/* MACH_PAGEMAP */
 571		   ;
 572	
 573		if ((look_for_page || (object == first_object))
 574				 && !must_be_resident) {
 575			/*
 576			 *	Allocate a new page for this object/offset
 577			 *	pair.
 578			 */
 579	
 580			m = vm_page_grab_fictitious();
 581			if (m == VM_PAGE_NULL) {
 582				vm_fault_cleanup(object, first_m);
 583				return(VM_FAULT_FICTITIOUS_SHORTAGE);
 584			}
 585	
 586			vm_page_lock_queues();
 587			vm_page_insert(m, object, offset);
 588			vm_page_unlock_queues();
 589		}
 590	
 591		if (look_for_page && !must_be_resident) {
 592			kern_return_t	rc;
 593	
 594			/*
 595			 *	If the memory manager is not ready, we
 596			 *	cannot make requests.
 597			 */
 598			if (!object->pager_ready) {
 599				vm_object_assert_wait(object,
 600					VM_OBJECT_EVENT_PAGER_READY,
 601					interruptible);
 602				VM_PAGE_FREE(m);
 603				goto block_and_backoff;
 604			}
 605	
 606			if (object->internal) {
 607				/*
 608				 *	Requests to the default pager
 609				 *	must reserve a real page in advance,
 610				 *	because the pager's data-provided
 611				 *	won't block for pages.
 612				 */
 613	
 614				if (m->fictitious && !vm_page_convert(m, FALSE)) {
 615					VM_PAGE_FREE(m);
 616					vm_fault_cleanup(object, first_m);
 617					return(VM_FAULT_MEMORY_SHORTAGE);
 618				}
 619			} else if (object->absent_count >
 620						vm_object_absent_max) {
 621				/*
 622				 *	If there are too many outstanding page
 623				 *	requests pending on this object, we
 624				 *	wait for them to be resolved now.
 625				 */
 626	
 627				vm_object_absent_assert_wait(object, interruptible);
 628				VM_PAGE_FREE(m);
 629				goto block_and_backoff;
 630			}
 631	
 632			/*
 633			 *	Indicate that the page is waiting for data
 634			 *	from the memory manager.
 635			 */
 636	
 637			m->absent = TRUE;
 638			object->absent_count++;
 639	
 640			/*
 641			 *	We have a busy page, so we can
 642			 *	release the object lock.
 643			 */
 644			vm_object_unlock(object);
 645	
 646			/*
 647			 *	Call the memory manager to retrieve the data.
 648			 */
 649	
 650			vm_stat.pageins++;
 651			vm_stat_sample(SAMPLED_PC_VM_PAGEIN_FAULTS);
 652			current_task()->pageins++;
 653	
 654			if ((rc = memory_object_data_request(object->pager,
 655				object->pager_request,
 656				m->offset + object->paging_offset,
 657				PAGE_SIZE, access_required)) != KERN_SUCCESS) {
 658				if (rc != MACH_SEND_INTERRUPTED)
 659					printf("%s(0x%p, 0x%p, 0x%lx, 0x%x, 0x%x) failed, %x\n",
 660						"memory_object_data_request",
 661						object->pager,
 662						object->pager_request,
 663						m->offset + object->paging_offset,
 664						PAGE_SIZE, access_required, rc);
 665				/*
 666				 *	Don't want to leave a busy page around,
 667				 *	but the data request may have blocked,
 668				 *	so check if it's still there and busy.
 669				 */
 670				vm_object_lock(object);
 671				if (m == vm_page_lookup(object,offset) &&
 672				    m->absent && m->busy)
 673					VM_PAGE_FREE(m);
 674				vm_fault_cleanup(object, first_m);
 675				return((rc == MACH_SEND_INTERRUPTED) ?
 676					VM_FAULT_INTERRUPTED :
 677					VM_FAULT_MEMORY_ERROR);
 678			}
 679	
 680			/*
 681			 * Retry with same object/offset, since new data may
 682			 * be in a different page (i.e., m is meaningless at
 683			 * this point).
 684			 */
 685			vm_object_lock(object);
 686			continue;
 687		}
 688	
 689		/*
 690		 * For the XP system, the only case in which we get here is if
 691		 * object has no pager (or unwiring).  If the pager doesn't
 692		 * have the page this is handled in the m->absent case above
 693		 * (and if you change things here you should look above).
 694		 */
 695		if (object == first_object)
 696			first_m = m;
 697		else
 698		{
 699			assert(m == VM_PAGE_NULL);
 700		}
 701	
 702		/*
 703		 *	Move on to the next object.  Lock the next
 704		 *	object before unlocking the current one.
 705		 */
 706		access_required = VM_PROT_READ;
 707	
 708		offset += object->shadow_offset;
 709		next_object = object->shadow;
 710		if (next_object == VM_OBJECT_NULL) {
 711			assert(!must_be_resident);
 712	
 713			/*
 714			 *	If there's no object left, fill the page
 715			 *	in the top object with zeros.  But first we
 716			 *	need to allocate a real page.
 717			 */
 718	
 719			if (object != first_object) {
 720				vm_object_paging_end(object);
 721				vm_object_unlock(object);
 722	
 723				object = first_object;
 724				offset = first_offset;
 725				vm_object_lock(object);
 726			}
 727	
 728			m = first_m;
 729			assert(m->object == object);
 730			first_m = VM_PAGE_NULL;
 731	
 732			if (m->fictitious && !vm_page_convert(m, !object->internal)) {
 733				VM_PAGE_FREE(m);
 734				vm_fault_cleanup(object, VM_PAGE_NULL);
 735				return(VM_FAULT_MEMORY_SHORTAGE);
 736			}
 737	
 738			vm_object_unlock(object);
 739			vm_page_zero_fill(m);
 740			vm_stat_sample(SAMPLED_PC_VM_ZFILL_FAULTS);
 741			vm_stat.zero_fill_count++;
 742			current_task()->zero_fills++;
 743			vm_object_lock(object);
 744			pmap_clear_modify(m->phys_addr);
 745			break;
 746		}
 747		else {
 748			vm_object_lock(next_object);
 749			if ((object != first_object) || must_be_resident)
 750				vm_object_paging_end(object);
 751			vm_object_unlock(object);
 752			object = next_object;
 753			vm_object_paging_begin(object);
 754		}
 755	}
 756	
 757		/*
 758		 *	PAGE HAS BEEN FOUND.
 759		 *
 760		 *	This page (m) is:
 761		 *		busy, so that we can play with it;
 762		 *		not absent, so that nobody else will fill it;
 763		 *		possibly eligible for pageout;
 764		 *
 765		 *	The top-level page (first_m) is:
 766		 *		VM_PAGE_NULL if the page was found in the
 767		 *		 top-level object;
 768		 *		busy, not absent, and ineligible for pageout.
 769		 *
 770		 *	The current object (object) is locked.  A paging
 771		 *	reference is held for the current and top-level
 772		 *	objects.
 773		 */
 774	
 775	#if	EXTRA_ASSERTIONS
 776		assert(m->busy && !m->absent);
 777		assert((first_m == VM_PAGE_NULL) ||
 778			(first_m->busy && !first_m->absent &&
 779			 !first_m->active && !first_m->inactive));
 780	#endif	/* EXTRA_ASSERTIONS */
 781	
 782		/*
 783		 *	If the page is being written, but isn't
 784		 *	already owned by the top-level object,
 785		 *	we have to copy it into a new page owned
 786		 *	by the top-level object.
 787		 */
 788	
 789		if (object != first_object) {
 790		    /*
 791		     *	We only really need to copy if we
 792		     *	want to write it.
 793		     */
 794	
 795		    if (fault_type & VM_PROT_WRITE) {
 796			vm_page_t copy_m;
 797	
 798			assert(!must_be_resident);
 799	
 800			/*
 801			 *	If we try to collapse first_object at this
 802			 *	point, we may deadlock when we try to get
 803			 *	the lock on an intermediate object (since we
 804			 *	have the bottom object locked).  We can't
 805			 *	unlock the bottom object, because the page
 806			 *	we found may move (by collapse) if we do.
 807			 *
 808			 *	Instead, we first copy the page.  Then, when
 809			 *	we have no more use for the bottom object,
 810			 *	we unlock it and try to collapse.
 811			 *
 812			 *	Note that we copy the page even if we didn't
 813			 *	need to... that's the breaks.
 814			 */
 815	
 816			/*
 817			 *	Allocate a page for the copy
 818			 */
 819			copy_m = vm_page_grab(!first_object->internal);
 820			if (copy_m == VM_PAGE_NULL) {
 821				RELEASE_PAGE(m);
 822				vm_fault_cleanup(object, first_m);
 823				return(VM_FAULT_MEMORY_SHORTAGE);
 824			}
 825	
 826			vm_object_unlock(object);
 827			vm_page_copy(m, copy_m);
 828			vm_object_lock(object);
 829	
 830			/*
 831			 *	If another map is truly sharing this
 832			 *	page with us, we have to flush all
 833			 *	uses of the original page, since we
 834			 *	can't distinguish those which want the
 835			 *	original from those which need the
 836			 *	new copy.
 837			 *
 838			 *	XXXO If we know that only one map has
 839			 *	access to this page, then we could
 840			 *	avoid the pmap_page_protect() call.
 841			 */
 842	
 843			vm_page_lock_queues();
 844			vm_page_deactivate(m);
 845			pmap_page_protect(m->phys_addr, VM_PROT_NONE);
 846			vm_page_unlock_queues();
 847	
 848			/*
 849			 *	We no longer need the old page or object.
 850			 */
 851	
 852			PAGE_WAKEUP_DONE(m);
 853			vm_object_paging_end(object);
 854			vm_object_unlock(object);
 855	
 856			vm_stat.cow_faults++;
 857			vm_stat_sample(SAMPLED_PC_VM_COW_FAULTS);
 858			current_task()->cow_faults++;
 859			object = first_object;
 860			offset = first_offset;
 861	
 862			vm_object_lock(object);
 863			VM_PAGE_FREE(first_m);
 864			first_m = VM_PAGE_NULL;
 865			assert(copy_m->busy);
 866			vm_page_lock_queues();
 867			vm_page_insert(copy_m, object, offset);
 868			vm_page_unlock_queues();
 869			m = copy_m;
 870	
 871			/*
 872			 *	Now that we've gotten the copy out of the
 873			 *	way, let's try to collapse the top object.
 874			 *	But we have to play ugly games with
 875			 *	paging_in_progress to do that...
 876			 */
 877	
 878			vm_object_paging_end(object);
 879			vm_object_collapse(object);
 880			vm_object_paging_begin(object);
 881		    }
 882		    else {
 883			*protection &= (~VM_PROT_WRITE);
 884		    }
 885	}
 886	
 887		/*
 888		 *	Now check whether the page needs to be pushed into the
 889		 *	copy object.  The use of asymmetric copy on write for
 890		 *	shared temporary objects means that we may do two copies to
 891		 *	satisfy the fault; one above to get the page from a
 892		 *	shadowed object, and one here to push it into the copy.
 893		 */
 894	
 895		while ((copy_object = first_object->copy) != VM_OBJECT_NULL) {
 896			vm_offset_t	copy_offset;
 897			vm_page_t	copy_m;
 898	
 899			/*
 900			 *	If the page is being written, but hasn't been
 901			 *	copied to the copy-object, we have to copy it there.
 902			 */
 903	
 904			if ((fault_type & VM_PROT_WRITE) == 0) {
 905				*protection &= ~VM_PROT_WRITE;
 906				break;
 907			}
 908	
 909			/*
 910			 *	If the page was guaranteed to be resident,
 911			 *	we must have already performed the copy.
 912			 */
 913	
 914			if (must_be_resident)
 915				break;
 916	
 917			/*
 918			 *	Try to get the lock on the copy_object.
 919			 */
 920			if (!vm_object_lock_try(copy_object)) {
 921				vm_object_unlock(object);
 922	
 923				simple_lock_pause();	/* wait a bit */
 924	
 925				vm_object_lock(object);
 926				continue;
 927			}
 928	
 929			/*
 930			 *	Make another reference to the copy-object,
 931			 *	to keep it from disappearing during the
 932			 *	copy.
 933			 */
 934			assert(copy_object->ref_count > 0);
 935			copy_object->ref_count++;
 936	
 937			/*
 938			 *	Does the page exist in the copy?
 939			 */
 940			copy_offset = first_offset - copy_object->shadow_offset;
 941			copy_m = vm_page_lookup(copy_object, copy_offset);
 942			if (copy_m != VM_PAGE_NULL) {
 943				if (copy_m->busy) {
 944					/*
 945					 *	If the page is being brought
 946					 *	in, wait for it and then retry.
 947					 */
 948					PAGE_ASSERT_WAIT(copy_m, interruptible);
 949					RELEASE_PAGE(m);
 950					copy_object->ref_count--;
 951					assert(copy_object->ref_count > 0);
 952					vm_object_unlock(copy_object);
 953					goto block_and_backoff;
 954				}
 955			}
 956			else {
 957				/*
 958				 *	Allocate a page for the copy
 959				 */
 960				copy_m = vm_page_alloc(copy_object, copy_offset);
 961				if (copy_m == VM_PAGE_NULL) {
 962					RELEASE_PAGE(m);
 963					copy_object->ref_count--;
 964					assert(copy_object->ref_count > 0);
 965					vm_object_unlock(copy_object);
 966					vm_fault_cleanup(object, first_m);
 967					return(VM_FAULT_MEMORY_SHORTAGE);
 968				}
 969	
 970				/*
 971				 *	Must copy page into copy-object.
 972				 */
 973	
 974				vm_page_copy(m, copy_m);
 975	
 976				/*
 977				 *	If the old page was in use by any users
 978				 *	of the copy-object, it must be removed
 979				 *	from all pmaps.  (We can't know which
 980				 *	pmaps use it.)
 981				 */
 982	
 983				vm_page_lock_queues();
 984				pmap_page_protect(m->phys_addr, VM_PROT_NONE);
 985				copy_m->dirty = TRUE;
 986				vm_page_unlock_queues();
 987	
 988				/*
 989				 *	If there's a pager, then immediately
 990				 *	page out this page, using the "initialize"
 991				 *	option.  Else, we use the copy.
 992				 */
 993	
 994				if (!copy_object->pager_created) {
 995					vm_page_lock_queues();
 996					vm_page_activate(copy_m);
 997					vm_page_unlock_queues();
 998					PAGE_WAKEUP_DONE(copy_m);
 999				} else {
1000					/*
1001					 *	The page is already ready for pageout:
1002					 *	not on pageout queues and busy.
1003					 *	Unlock everything except the
1004					 *	copy_object itself.
1005					 */
1006	
1007					vm_object_unlock(object);
1008	
1009					/*
1010					 *	Write the page to the copy-object,
1011					 *	flushing it from the kernel.
1012					 */
1013	
1014					vm_pageout_page(copy_m, TRUE, TRUE);
1015	
1016					/*
1017					 *	Since the pageout may have
1018					 *	temporarily dropped the
1019					 *	copy_object's lock, we
1020					 *	check whether we'll have
1021					 *	to deallocate the hard way.
1022					 */
1023	
1024					if ((copy_object->shadow != object) ||
1025					    (copy_object->ref_count == 1)) {
1026						vm_object_unlock(copy_object);
1027						vm_object_deallocate(copy_object);
1028						vm_object_lock(object);
1029						continue;
1030					}
1031	
1032					/*
1033					 *	Pick back up the old object's
1034					 *	lock.  [It is safe to do so,
1035					 *	since it must be deeper in the
1036					 *	object tree.]
1037					 */
1038	
1039					vm_object_lock(object);
1040				}
1041	
1042				/*
1043				 *	Because we're pushing a page upward
1044				 *	in the object tree, we must restart
1045				 *	any faults that are waiting here.
1046				 *	[Note that this is an expansion of
1047				 *	PAGE_WAKEUP that uses the THREAD_RESTART
1048				 *	wait result].  Can't turn off the page's
1049				 *	busy bit because we're not done with it.
1050				 */
1051	
1052				if (m->wanted) {
1053					m->wanted = FALSE;
1054					thread_wakeup_with_result((event_t) m,
1055								  THREAD_RESTART);
1056				}
1057			}
1058	
1059			/*
1060			 *	The reference count on copy_object must be
1061			 *	at least 2: one for our extra reference,
1062			 *	and at least one from the outside world
1063			 *	(we checked that when we last locked
1064			 *	copy_object).
1065			 */
1066			copy_object->ref_count--;
1067			assert(copy_object->ref_count > 0);
1068			vm_object_unlock(copy_object);
1069	
1070			break;
1071		}
1072	
1073		*result_page = m;
1074		*top_page = first_m;
1075	
1076		/*
1077		 *	If the page can be written, assume that it will be.
1078		 *	[Earlier, we restrict the permission to allow write
1079	