File: | obj-scan-build/../vm/vm_pageout.c |
Location: | line 587, column 4 |
Description: | Value stored to 'obj' is never read |
1 | /* |
2 | * Mach Operating System |
3 | * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University. |
4 | * Copyright (c) 1993,1994 The University of Utah and |
5 | * the Computer Systems Laboratory (CSL). |
6 | * All rights reserved. |
7 | * |
8 | * Permission to use, copy, modify and distribute this software and its |
9 | * documentation is hereby granted, provided that both the copyright |
10 | * notice and this permission notice appear in all copies of the |
11 | * software, derivative works or modified versions, and any portions |
12 | * thereof, and that both notices appear in supporting documentation. |
13 | * |
14 | * CARNEGIE MELLON, THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF |
15 | * THIS SOFTWARE IN ITS "AS IS" CONDITION, AND DISCLAIM ANY LIABILITY |
16 | * OF ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF |
17 | * THIS SOFTWARE. |
18 | * |
19 | * Carnegie Mellon requests users of this software to return to |
20 | * |
21 | * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU |
22 | * School of Computer Science |
23 | * Carnegie Mellon University |
24 | * Pittsburgh PA 15213-3890 |
25 | * |
26 | * any improvements or extensions that they make and grant Carnegie Mellon |
27 | * the rights to redistribute these changes. |
28 | */ |
29 | /* |
30 | * File: vm/vm_pageout.c |
31 | * Author: Avadis Tevanian, Jr., Michael Wayne Young |
32 | * Date: 1985 |
33 | * |
34 | * The proverbial page-out daemon. |
35 | */ |
36 | |
37 | #include <device/net_io.h> |
38 | #include <mach/mach_types.h> |
39 | #include <mach/memory_object.h> |
40 | #include <vm/memory_object_default.user.h> |
41 | #include <vm/memory_object_user.user.h> |
42 | #include <mach/vm_param.h> |
43 | #include <mach/vm_statistics.h> |
44 | #include <kern/counters.h> |
45 | #include <kern/debug.h> |
46 | #include <kern/slab.h> |
47 | #include <kern/task.h> |
48 | #include <kern/thread.h> |
49 | #include <vm/pmap.h> |
50 | #include <vm/vm_map.h> |
51 | #include <vm/vm_object.h> |
52 | #include <vm/vm_page.h> |
53 | #include <vm/vm_pageout.h> |
54 | #include <machine/locore.h> |
55 | #include <machine/vm_tuning.h> |
56 | |
57 | |
58 | |
59 | #ifndef VM_PAGEOUT_BURST_MAX |
60 | #define VM_PAGEOUT_BURST_MAX 10 /* number of pages */ |
61 | #endif /* VM_PAGEOUT_BURST_MAX */ |
62 | |
63 | #ifndef VM_PAGEOUT_BURST_MIN |
64 | #define VM_PAGEOUT_BURST_MIN 5 /* number of pages */ |
65 | #endif /* VM_PAGEOUT_BURST_MIN */ |
66 | |
67 | #ifndef VM_PAGEOUT_BURST_WAIT |
68 | #define VM_PAGEOUT_BURST_WAIT 10 /* milliseconds per page */ |
69 | #endif /* VM_PAGEOUT_BURST_WAIT */ |
70 | |
71 | #ifndef VM_PAGEOUT_EMPTY_WAIT |
72 | #define VM_PAGEOUT_EMPTY_WAIT 75 /* milliseconds */ |
73 | #endif /* VM_PAGEOUT_EMPTY_WAIT */ |
74 | |
75 | #ifndef VM_PAGEOUT_PAUSE_MAX |
76 | #define VM_PAGEOUT_PAUSE_MAX 10 /* number of pauses */ |
77 | #endif /* VM_PAGEOUT_PAUSE_MAX */ |
78 | |
79 | /* |
80 | * To obtain a reasonable LRU approximation, the inactive queue |
81 | * needs to be large enough to give pages on it a chance to be |
82 | * referenced a second time. This macro defines the fraction |
83 | * of active+inactive pages that should be inactive. |
84 | * The pageout daemon uses it to update vm_page_inactive_target. |
85 | * |
86 | * If vm_page_free_count falls below vm_page_free_target and |
87 | * vm_page_inactive_count is below vm_page_inactive_target, |
88 | * then the pageout daemon starts running. |
89 | */ |
90 | |
91 | #ifndef VM_PAGE_INACTIVE_TARGET |
92 | #define VM_PAGE_INACTIVE_TARGET(avail) ((avail) * 2 / 3) |
93 | #endif /* VM_PAGE_INACTIVE_TARGET */ |
94 | |
95 | /* |
96 | * Once the pageout daemon starts running, it keeps going |
97 | * until vm_page_free_count meets or exceeds vm_page_free_target. |
98 | */ |
99 | |
100 | #ifndef VM_PAGE_FREE_TARGET |
101 | #define VM_PAGE_FREE_TARGET(free) (15 + (free) / 80) |
102 | #endif /* VM_PAGE_FREE_TARGET */ |
103 | |
104 | /* |
105 | * The pageout daemon always starts running once vm_page_free_count |
106 | * falls below vm_page_free_min. |
107 | */ |
108 | |
109 | #ifndef VM_PAGE_FREE_MIN |
110 | #define VM_PAGE_FREE_MIN(free) (10 + (free) / 100) |
111 | #endif /* VM_PAGE_FREE_MIN */ |
112 | |
113 | /* When vm_page_external_count exceeds vm_page_external_limit, |
114 | * allocation of externally paged pages stops. |
115 | */ |
116 | |
117 | #ifndef VM_PAGE_EXTERNAL_LIMIT |
118 | #define VM_PAGE_EXTERNAL_LIMIT(free) ((free) / 2) |
119 | #endif /* VM_PAGE_EXTERNAL_LIMIT */ |
120 | |
121 | /* Attempt to keep the number of externally paged pages less |
122 | * than vm_page_external_target. |
123 | */ |
124 | #ifndef VM_PAGE_EXTERNAL_TARGET |
125 | #define VM_PAGE_EXTERNAL_TARGET(free) ((free) / 4) |
126 | #endif /* VM_PAGE_EXTERNAL_TARGET */ |
127 | |
128 | /* |
129 | * When vm_page_free_count falls below vm_page_free_reserved, |
130 | * only vm-privileged threads can allocate pages. vm-privilege |
131 | * allows the pageout daemon and default pager (and any other |
132 | * associated threads needed for default pageout) to continue |
133 | * operation by dipping into the reserved pool of pages. */ |
134 | |
135 | #ifndef VM_PAGE_FREE_RESERVED |
136 | #define VM_PAGE_FREE_RESERVED 50 |
137 | #endif /* VM_PAGE_FREE_RESERVED */ |
138 | |
139 | /* |
140 | * When vm_page_free_count falls below vm_pageout_reserved_internal, |
141 | * the pageout daemon no longer trusts external pagers to clean pages. |
142 | * External pagers are probably all wedged waiting for a free page. |
143 | * It forcibly double-pages dirty pages belonging to external objects, |
144 | * getting the pages to the default pager to clean. |
145 | */ |
146 | |
147 | #ifndef VM_PAGEOUT_RESERVED_INTERNAL |
148 | #define VM_PAGEOUT_RESERVED_INTERNAL(reserve) ((reserve) - 25) |
149 | #endif /* VM_PAGEOUT_RESERVED_INTERNAL */ |
150 | |
151 | /* |
152 | * When vm_page_free_count falls below vm_pageout_reserved_really, |
153 | * the pageout daemon stops work entirely to let the default pager |
154 | * catch up (assuming the default pager has pages to clean). |
155 | * Beyond this point, it is too dangerous to consume memory |
156 | * even for memory_object_data_write messages to the default pager. |
157 | */ |
158 | |
159 | #ifndef VM_PAGEOUT_RESERVED_REALLY |
160 | #define VM_PAGEOUT_RESERVED_REALLY(reserve) ((reserve) - 40) |
161 | #endif /* VM_PAGEOUT_RESERVED_REALLY */ |
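To make the tuning arithmetic above concrete, here is a minimal standalone sketch (not part of vm_pageout.c; the boot-time free count of 8242 pages is a hypothetical figure) that re-derives the default thresholds the same way vm_pageout() does near the end of this file:

#include <stdio.h>

int main(void)
{
	unsigned int free_count = 8242;      /* hypothetical boot-time free pages */
	unsigned int free_reserved = 50;     /* VM_PAGE_FREE_RESERVED */
	unsigned int free_after_reserve = free_count - free_reserved;   /* 8192 */

	printf("reserved_internal = %u\n", free_reserved - 25);                            /* 25 */
	printf("reserved_really   = %u\n", free_reserved - 40);                            /* 10 */
	printf("external_limit    = %u\n", free_after_reserve / 2);                        /* 4096 */
	printf("external_target   = %u\n", free_after_reserve / 4);                        /* 2048 */
	printf("free_min          = %u\n", free_reserved + 10 + free_after_reserve / 100); /* 141 */
	printf("free_target       = %u\n", free_reserved + 15 + free_after_reserve / 80);  /* 167 */
	return 0;
}

Since free_target (167) already exceeds free_min + 5, the final clamp in vm_pageout() leaves it unchanged; vm_page_inactive_target is not set here at all, because vm_pageout_scan() recomputes it on every pass as two thirds of the active+inactive total.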
162 | |
163 | extern void vm_pageout_continue(); |
164 | extern void vm_pageout_scan_continue(); |
165 | |
166 | unsigned int vm_pageout_reserved_internal = 0; |
167 | unsigned int vm_pageout_reserved_really = 0; |
168 | |
169 | unsigned int vm_page_external_target = 0; |
170 | |
171 | unsigned int vm_pageout_burst_max = 0; |
172 | unsigned int vm_pageout_burst_min = 0; |
173 | unsigned int vm_pageout_burst_wait = 0; /* milliseconds per page */ |
174 | unsigned int vm_pageout_empty_wait = 0; /* milliseconds */ |
175 | unsigned int vm_pageout_pause_count = 0; |
176 | unsigned int vm_pageout_pause_max = 0; |
177 | |
178 | /* |
179 | * These variables record the pageout daemon's actions: |
180 | * how many pages it looks at and what happens to those pages. |
181 | * No locking needed because only one thread modifies the variables. |
182 | */ |
183 | |
184 | unsigned int vm_pageout_active = 0; /* debugging */ |
185 | unsigned int vm_pageout_inactive = 0; /* debugging */ |
186 | unsigned int vm_pageout_inactive_nolock = 0; /* debugging */ |
187 | unsigned int vm_pageout_inactive_busy = 0; /* debugging */ |
188 | unsigned int vm_pageout_inactive_absent = 0; /* debugging */ |
189 | unsigned int vm_pageout_inactive_used = 0; /* debugging */ |
190 | unsigned int vm_pageout_inactive_clean = 0; /* debugging */ |
191 | unsigned int vm_pageout_inactive_dirty = 0; /* debugging */ |
192 | unsigned int vm_pageout_inactive_double = 0; /* debugging */ |
193 | unsigned int vm_pageout_inactive_cleaned_external = 0; |
194 | |
195 | /* |
196 | * Routine: vm_pageout_setup |
197 | * Purpose: |
198 | * Set up a page for pageout. |
199 | * |
200 | * Move or copy the page to a new object, as part |
201 | * of which it will be sent to its memory manager |
202 | * in a memory_object_data_write or memory_object_initialize |
203 | * message. |
204 | * |
205 | * The "paging_offset" argument specifies the offset |
206 | * of the page within its external memory object. |
207 | * |
208 | * The "new_object" and "new_offset" arguments |
209 | * indicate where the page should be moved. |
210 | * |
211 | * The "flush" argument specifies whether the page |
212 | * should be flushed from its object. If not, a |
213 | * copy of the page is moved to the new object. |
214 | * |
215 | * In/Out conditions: |
216 | * The page in question must not be on any pageout queues, |
217 | * and must be busy. The object to which it belongs |
218 | * must be unlocked, and the caller must hold a paging |
219 | * reference to it. The new_object must not be locked. |
220 | * |
221 | * If the page is flushed from its original object, |
222 | * this routine returns a pointer to a place-holder page, |
223 | * inserted at the same offset, to block out-of-order |
224 | * requests for the page. The place-holder page must |
225 | * be freed after the data_write or initialize message |
226 | * has been sent. If the page is copied, |
227 | * the holding page is VM_PAGE_NULL. |
228 | * |
229 | * The original page is put on a paging queue and marked |
230 | * not busy on exit. |
231 | */ |
232 | vm_page_t |
233 | vm_pageout_setup(m, paging_offset, new_object, new_offset, flush) |
234 | register vm_page_t m; |
235 | vm_offset_t paging_offset; |
236 | register vm_object_t new_object; |
237 | vm_offset_t new_offset; |
238 | boolean_t flush; |
239 | { |
240 | register vm_object_t old_object = m->object; |
241 | register vm_page_t holding_page = 0; /* '=0' to quiet gcc warnings */ |
242 | register vm_page_t new_m; |
243 | |
244 | assert(m->busy && !m->absent && !m->fictitious); |
245 | |
246 | /* |
247 | * If we are not flushing the page, allocate a |
248 | * page in the object. If we cannot get the |
249 | * page, flush instead. |
250 | */ |
251 | if (!flush) { |
252 | vm_object_lock(new_object); |
253 | new_m = vm_page_alloc(new_object, new_offset); |
254 | if (new_m == VM_PAGE_NULL) |
255 | flush = TRUE; |
256 | vm_object_unlock(new_object); |
257 | } |
258 | |
259 | if (flush) { |
260 | /* |
261 | * Create a place-holder page where the old one was, |
262 | * to prevent anyone from attempting to page in this |
263 | * page while we`re unlocked. |
264 | */ |
265 | while ((holding_page = vm_page_grab_fictitious()) |
266 | == VM_PAGE_NULL) |
267 | vm_page_more_fictitious(); |
268 | |
269 | vm_object_lock(old_object); |
270 | vm_page_lock_queues(); |
271 | vm_page_remove(m); |
272 | vm_page_unlock_queues(); |
273 | PAGE_WAKEUP_DONE(m); |
274 | |
275 | vm_page_lock_queues(); |
276 | vm_page_insert(holding_page, old_object, m->offset); |
277 | vm_page_unlock_queues(); |
278 | |
279 | /* |
280 | * Record that this page has been written out |
281 | */ |
282 | #if MACH_PAGEMAP |
283 | vm_external_state_set(old_object->existence_info, |
284 | paging_offset, |
285 | VM_EXTERNAL_STATE_EXISTS); |
286 | #endif /* MACH_PAGEMAP */ |
287 | |
288 | vm_object_unlock(old_object); |
289 | |
290 | vm_object_lock(new_object); |
291 | |
292 | /* |
293 | * Move this page into the new object |
294 | */ |
295 | |
296 | vm_page_lock_queues(); |
297 | vm_page_insert(m, new_object, new_offset); |
298 | vm_page_unlock_queues(); |
299 | |
300 | m->dirty = TRUE; |
301 | m->precious = FALSE; |
302 | m->page_lock = VM_PROT_NONE; |
303 | m->unlock_request = VM_PROT_NONE; |
304 | } |
305 | else { |
306 | /* |
307 | * Copy the data into the new page, |
308 | * and mark the new page as clean. |
309 | */ |
310 | vm_page_copy(m, new_m); |
311 | |
312 | vm_object_lock(old_object); |
313 | m->dirty = FALSE; |
314 | pmap_clear_modify(m->phys_addr); |
315 | |
316 | /* |
317 | * Deactivate old page. |
318 | */ |
319 | vm_page_lock_queues(); |
320 | vm_page_deactivate(m); |
321 | vm_page_unlock_queues(); |
322 | |
323 | PAGE_WAKEUP_DONE(m); |
324 | |
325 | /* |
326 | * Record that this page has been written out |
327 | */ |
328 | |
329 | #if MACH_PAGEMAP |
330 | vm_external_state_set(old_object->existence_info, |
331 | paging_offset, |
332 | VM_EXTERNAL_STATE_EXISTS); |
333 | #endif /* MACH_PAGEMAP */ |
334 | |
335 | vm_object_unlock(old_object); |
336 | |
337 | vm_object_lock(new_object); |
338 | |
339 | /* |
340 | * Use the new page below. |
341 | */ |
342 | m = new_m; |
343 | m->dirty = TRUE; |
344 | assert(!m->precious); |
345 | PAGE_WAKEUP_DONE(m); |
346 | } |
347 | |
348 | /* |
349 | * Make the old page eligible for replacement again; if a |
350 | * user-supplied memory manager fails to release the page, |
351 | * it will be paged out again to the default memory manager. |
352 | * |
353 | * Note that pages written to the default memory manager |
354 | * must be wired down -- in return, it guarantees to free |
355 | * this page, rather than reusing it. |
356 | */ |
357 | |
358 | vm_page_lock_queues(); |
359 | vm_stat.pageouts++; |
360 | if (m->laundry) { |
361 | /* |
362 | * vm_pageout_scan is telling us to put this page |
363 | * at the front of the inactive queue, so it will |
364 | * be immediately paged out to the default pager. |
365 | */ |
366 | |
367 | assert(!old_object->internal); |
368 | m->laundry = FALSE; |
369 | |
370 | queue_enter_first(&vm_page_queue_inactive, m, |
371 | vm_page_t, pageq); |
372 | m->inactive = TRUE; |
373 | vm_page_inactive_count++; |
374 | } else if (old_object->internal) { |
375 | m->laundry = TRUE; |
376 | vm_page_laundry_count++; |
377 | |
378 | vm_page_wire(m); |
379 | } else |
380 | vm_page_activate(m); |
381 | vm_page_unlock_queues(); |
382 | |
383 | /* |
384 | * Since IPC operations may block, we drop locks now. |
385 | * [The placeholder page is busy, and we still have |
386 | * paging_in_progress incremented.] |
387 | */ |
388 | |
389 | vm_object_unlock(new_object); |
390 | |
391 | /* |
392 | * Return the placeholder page to simplify cleanup. |
393 | */ |
394 | return (flush ? holding_page : VM_PAGE_NULL); |
395 | } |
396 | |
397 | /* |
398 | * Routine: vm_pageout_page |
399 | * Purpose: |
400 | * Causes the specified page to be written back to |
401 | * the appropriate memory object. |
402 | * |
403 | * The "initial" argument specifies whether this |
404 | * data is an initialization only, and should use |
405 | * memory_object_data_initialize instead of |
406 | * memory_object_data_write. |
407 | * |
408 | * The "flush" argument specifies whether the page |
409 | * should be flushed from the object. If not, a |
410 | * copy of the data is sent to the memory object. |
411 | * |
412 | * In/out conditions: |
413 | * The page in question must not be on any pageout queues. |
414 | * The object to which it belongs must be locked. |
415 | * Implementation: |
416 | * Move this page to a completely new object, if flushing; |
417 | * copy to a new page in a new object, if not. |
418 | */ |
419 | void |
420 | vm_pageout_page(m, initial, flush) |
421 | register vm_page_t m; |
422 | boolean_t initial; |
423 | boolean_t flush; |
424 | { |
425 | vm_map_copy_t copy; |
426 | register vm_object_t old_object; |
427 | register vm_object_t new_object; |
428 | register vm_page_t holding_page; |
429 | vm_offset_t paging_offset; |
430 | kern_return_t rc; |
431 | boolean_t precious_clean; |
432 | |
433 | assert(m->busy); |
434 | |
435 | /* |
436 | * Cleaning but not flushing a clean precious page is a |
437 | * no-op. Remember whether page is clean and precious now |
438 | * because vm_pageout_setup will mark it dirty and not precious. |
439 | * |
440 | * XXX Check if precious_clean && !flush can really happen. |
441 | */ |
442 | precious_clean = (!m->dirty) && m->precious; |
443 | if (precious_clean && !flush) { |
444 | PAGE_WAKEUP_DONE(m); |
445 | return; |
446 | } |
447 | |
448 | /* |
449 | * Verify that we really want to clean this page. |
450 | */ |
451 | if (m->absent || m->error || (!m->dirty && !m->precious)) { |
452 | VM_PAGE_FREE(m); |
453 | return; |
454 | } |
455 | |
456 | /* |
457 | * Create a paging reference to let us play with the object. |
458 | */ |
459 | old_object = m->object; |
460 | paging_offset = m->offset + old_object->paging_offset; |
461 | vm_object_paging_begin(old_object); |
462 | vm_object_unlock(old_object); |
463 | |
464 | /* |
465 | * Allocate a new object into which we can put the page. |
466 | */ |
467 | new_object = vm_object_allocate(PAGE_SIZE); |
468 | |
469 | /* |
470 | * Move the page into the new object. |
471 | */ |
472 | holding_page = vm_pageout_setup(m, |
473 | paging_offset, |
474 | new_object, |
475 | 0, /* new offset */ |
476 | flush); /* flush */ |
477 | |
478 | rc = vm_map_copyin_object(new_object, 0, PAGE_SIZE, &copy); |
479 | assert(rc == KERN_SUCCESS); |
480 | |
481 | if (initial || old_object->use_old_pageout) { |
482 | rc = (*(initial ? memory_object_data_initialize |
483 | : memory_object_data_write)) |
484 | (old_object->pager, |
485 | old_object->pager_request, |
486 | paging_offset, (pointer_t) copy, PAGE_SIZE); |
487 | } |
488 | else { |
489 | rc = memory_object_data_return( |
490 | old_object->pager, |
491 | old_object->pager_request, |
492 | paging_offset, (pointer_t) copy, PAGE_SIZE, |
493 | !precious_clean, !flush); |
494 | } |
495 | |
496 | if (rc != KERN_SUCCESS) |
497 | vm_map_copy_discard(copy); |
498 | |
499 | /* |
500 | * Clean up. |
501 | */ |
502 | vm_object_lock(old_object); |
503 | if (holding_page != VM_PAGE_NULL) |
504 | VM_PAGE_FREE(holding_page); |
505 | vm_object_paging_end(old_object); |
506 | } |
507 | |
508 | /* |
509 | * vm_pageout_scan does the dirty work for the pageout daemon. |
510 | * It returns with vm_page_queue_free_lock held and |
511 | * vm_page_free_wanted == 0. |
512 | */ |
513 | |
514 | void vm_pageout_scan() |
515 | { |
516 | unsigned int burst_count; |
517 | unsigned int want_pages; |
518 | |
519 | /* |
520 | * We want to gradually dribble pages from the active queue |
521 | * to the inactive queue. If we let the inactive queue get |
522 | * very small, and then suddenly dump many pages into it, |
523 | * those pages won't get a sufficient chance to be referenced |
524 | * before we start taking them from the inactive queue. |
525 | * |
526 | * We must limit the rate at which we send pages to the pagers. |
527 | * data_write messages consume memory, for message buffers and |
528 | * for map-copy objects. If we get too far ahead of the pagers, |
529 | * we can potentially run out of memory. |
530 | * |
531 | * We can use the laundry count to limit directly the number |
532 | * of pages outstanding to the default pager. A similar |
533 | * strategy for external pagers doesn't work, because |
534 | * external pagers don't have to deallocate the pages sent them, |
535 | * and because we might have to send pages to external pagers |
536 | * even if they aren't processing writes. So we also |
537 | * use a burst count to limit writes to external pagers. |
538 | * |
539 | * When memory is very tight, we can't rely on external pagers to |
540 | * clean pages. They probably aren't running, because they |
541 | * aren't vm-privileged. If we kept sending dirty pages to them, |
542 | * we could exhaust the free list. However, we can't just ignore |
543 | * pages belonging to external objects, because there might be no |
544 | * pages belonging to internal objects. Hence, we get the page |
545 | * into an internal object and then immediately double-page it, |
546 | * sending it to the default pager. |
547 | * |
548 | * slab_collect should be last, because the other operations |
549 | * might return memory to caches. When we pause we use |
550 | * vm_pageout_scan_continue as our continuation, so we will |
551 | * reenter vm_pageout_scan periodically and attempt to reclaim |
552 | * internal memory even if we never reach vm_page_free_target. |
553 | */ |
554 | |
555 | stack_collect(); |
556 | net_kmsg_collect(); |
557 | consider_task_collect(); |
558 | consider_thread_collect(); |
559 | slab_collect(); |
560 | |
561 | for (burst_count = 0;;) { |
562 | register vm_page_t m; |
563 | register vm_object_t object; |
564 | unsigned int free_count; |
565 | |
566 | /* |
567 | * Recalculate vm_page_inactive_target. |
568 | */ |
569 | |
570 | vm_page_lock_queues(); |
571 | vm_page_inactive_target = |
572 | VM_PAGE_INACTIVE_TARGET(vm_page_active_count + |
573 | vm_page_inactive_count); |
574 | |
575 | /* |
576 | * Move pages from active to inactive. |
577 | */ |
578 | |
579 | while ((vm_page_inactive_count < vm_page_inactive_target) && |
580 | !queue_empty(&vm_page_queue_active)) { |
581 | register vm_object_t obj; |
582 | |
583 | vm_pageout_active++; |
584 | m = (vm_page_t) queue_first(&vm_page_queue_active); |
585 | assert(m->active && !m->inactive); |
586 | |
587 | obj = m->object; |
Value stored to 'obj' is never read | |
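Why the checker reports this: in this build the object-locking primitives are compiled out (vm_object_lock_try(obj) expands to ((boolean_t) 1), and the later vm_object_unlock(obj) calls evidently reduce to no-ops as well), so the value stored to 'obj' at line 587 is never read in this configuration. On a configuration with real object locking the assignment is meaningful, which makes this a configuration-dependent dead store rather than a logic error. One hypothetical way to avoid the report without changing behaviour (a sketch only, not the upstream fix) would be to drop the temporary and pass the object expression straight to the locking macros:

    if (!vm_object_lock_try(m->object)) {
            /* requeue the page at the tail of the active queue, as below */
    }
    /* ... busy check and deactivation exactly as in the listing ... */
    vm_page_deactivate(m);
    vm_object_unlock(m->object);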
588 | if (!vm_object_lock_try(obj)) { |
589 | /* |
590 | * Move page to end and continue. |
591 | */ |
592 | |
593 | queue_remove(&vm_page_queue_active, m, |
594 | vm_page_t, pageq); |
595 | queue_enter(&vm_page_queue_active, m, |
596 | vm_page_t, pageq); |
597 | vm_page_unlock_queues(); |
598 | vm_page_lock_queues(); |
599 | continue; |
600 | } |
601 | |
602 | /* |
603 | * If the page is busy, then we pull it |
604 | * off the active queue and leave it alone. |
605 | */ |
606 | |
607 | if (m->busy) { |
608 | vm_object_unlock(obj); |
609 | queue_remove(&vm_page_queue_active, m, |
610 | vm_page_t, pageq); |
611 | m->active = FALSE; |
612 | vm_page_active_count--; |
613 | continue; |
614 | } |
615 | |
616 | /* |
617 | * Deactivate the page while holding the object |
618 | * locked, so we know the page is still not busy. |
619 | * This should prevent races between pmap_enter |
620 | * and pmap_clear_reference. The page might be |
621 | * absent or fictitious, but vm_page_deactivate |
622 | * can handle that. |
623 | */ |
624 | |
625 | vm_page_deactivate(m); |
626 | vm_object_unlock(obj); |
627 | } |
628 | |
629 | /* |
630 | * We are done if we have met our targets *and* |
631 | * nobody is still waiting for a page. |
632 | */ |
633 | |
634 | simple_lock(&vm_page_queue_free_lock); |
635 | free_count = vm_page_free_count; |
636 | if ((free_count >= vm_page_free_target) && |
637 | (vm_page_external_count <= vm_page_external_target) && |
638 | (vm_page_free_wanted == 0)) { |
639 | vm_page_unlock_queues(); |
640 | break; |
641 | } |
642 | want_pages = ((free_count < vm_page_free_target) || |
643 | vm_page_free_wanted); |
644 | simple_unlock(&vm_page_queue_free_lock); |
645 | |
646 | /* |
647 | * Sometimes we have to pause: |
648 | * 1) No inactive pages - nothing to do. |
649 | * 2) Flow control - wait for pagers to catch up. |
650 | * 3) Extremely low memory - sending out dirty pages |
651 | * consumes memory. We don't take the risk of doing |
652 | * this if the default pager already has work to do. |
653 | */ |
654 | pause: |
655 | if (queue_empty(&vm_page_queue_inactive) || |
656 | (burst_count >= vm_pageout_burst_max) || |
657 | (vm_page_laundry_count >= vm_pageout_burst_max) || |
658 | ((free_count < vm_pageout_reserved_really) && |
659 | (vm_page_laundry_count > 0))) { |
660 | unsigned int pages, msecs; |
661 | |
662 | /* |
663 | * vm_pageout_burst_wait is msecs/page. |
664 | * If there is nothing for us to do, we wait |
665 | * at least vm_pageout_empty_wait msecs. |
666 | */ |
667 | |
668 | if (vm_page_laundry_count > burst_count) |
669 | pages = vm_page_laundry_count; |
670 | else |
671 | pages = burst_count; |
672 | msecs = pages * vm_pageout_burst_wait; |
673 | |
674 | if (queue_empty(&vm_page_queue_inactive) && |
675 | (msecs < vm_pageout_empty_wait)) |
676 | msecs = vm_pageout_empty_wait; |
677 | vm_page_unlock_queues(); |
678 | |
679 | thread_will_wait_with_timeout(current_thread(), msecs); |
680 | counter(c_vm_pageout_scan_block++); |
681 | thread_block(vm_pageout_scan_continue); |
682 | call_continuation(vm_pageout_scan_continue); |
683 | /*NOTREACHED*/ |
684 | } |
685 | |
686 | vm_pageout_inactive++; |
687 | |
688 | /* Find a page we are interested in paging out. If we |
689 | need pages, then we'll page anything out; otherwise |
690 | we only page out external pages. */ |
691 | m = (vm_page_t) queue_first (&vm_page_queue_inactive); |
692 | while (1) |
693 | { |
694 | assert (!m->active && m->inactive); |
695 | if (want_pages || m->external) |
696 | break; |
697 | |
698 | m = (vm_page_t) queue_next (m); |
699 | if (!m) |
700 | goto pause; |
701 | } |
702 | |
703 | object = m->object; |
704 | |
705 | /* |
706 | * Try to lock object; since we've got the |
707 | * page queues lock, we can only try for this one. |
708 | */ |
709 | |
710 | if (!vm_object_lock_try(object)) { |
711 | /* |
712 | * Move page to end and continue. |
713 | */ |
714 | |
715 | queue_remove(&vm_page_queue_inactive, m, |
716 | vm_page_t, pageq); |
717 | queue_enter(&vm_page_queue_inactive, m, |
718 | vm_page_t, pageq); |
719 | vm_page_unlock_queues(); |
720 | vm_pageout_inactive_nolock++; |
721 | continue; |
722 | } |
723 | |
724 | /* |
725 | * Remove the page from the inactive list. |
726 | */ |
727 | |
728 | queue_remove(&vm_page_queue_inactive, m, vm_page_t, pageq); |
729 | vm_page_inactive_count--; |
730 | m->inactive = FALSE; |
731 | |
732 | if (m->busy || !object->alive) { |
733 | /* |
734 | * Somebody is already playing with this page. |
735 | * Leave it off the pageout queues. |
736 | */ |
737 | |
738 | vm_page_unlock_queues(); |
739 | vm_object_unlock(object); |
740 | vm_pageout_inactive_busy++; |
741 | continue; |
742 | } |
743 | |
744 | /* |
745 | * If it's absent, we can reclaim the page. |
746 | */ |
747 | |
748 | if (want_pages && m->absent) { |
749 | vm_pageout_inactive_absent++; |
750 | reclaim_page: |
751 | vm_page_free(m); |
752 | vm_page_unlock_queues(); |
753 | vm_object_unlock(object); |
754 | continue; |
755 | } |
756 | |
757 | /* |
758 | * If it's being used, reactivate. |
759 | * (Fictitious pages are either busy or absent.) |
760 | */ |
761 | |
762 | assert(!m->fictitious); |
763 | if (m->reference || pmap_is_referenced(m->phys_addr)) { |
764 | vm_object_unlock(object); |
765 | vm_page_activate(m); |
766 | vm_stat.reactivations++; |
767 | current_task()->reactivations++; |
768 | vm_page_unlock_queues(); |
769 | vm_pageout_inactive_used++; |
770 | continue; |
771 | } |
772 | |
773 | /* |
774 | * Eliminate all mappings. |
775 | */ |
776 | |
777 | m->busy = TRUE; |
778 | pmap_page_protect(m->phys_addr, VM_PROT_NONE); |
779 | if (!m->dirty) |
780 | m->dirty = pmap_is_modified(m->phys_addr); |
781 | |
782 | if (m->external) { |
783 | /* Figure out if we still care about this |
784 | page in the limit of externally managed pages. |
785 | Clean pages don't actually cause system hosage, |
786 | so it's ok to stop considering them as |
787 | "consumers" of memory. */ |
788 | if (m->dirty && !m->extcounted) { |
789 | m->extcounted = TRUE; |
790 | vm_page_external_count++; |
791 | } else if (!m->dirty && m->extcounted) { |
792 | m->extcounted = FALSE; |
793 | vm_page_external_count--; |
794 | } |
795 | } |
796 | |
797 | /* If we don't actually need more memory, and the page |
798 | is not dirty, put it on the tail of the inactive queue |
799 | and move on to the next page. */ |
800 | if (!want_pages && !m->dirty) { |
801 | queue_remove (&vm_page_queue_inactive, m, |
802 | vm_page_t, pageq); |
803 | queue_enter (&vm_page_queue_inactive, m, |
804 | vm_page_t, pageq); |
805 | vm_page_unlock_queues(); |
806 | vm_pageout_inactive_cleaned_external++; |
807 | continue; |
808 | } |
809 | |
810 | /* |
811 | * If it's clean and not precious, we can free the page. |
812 | */ |
813 | |
814 | if (!m->dirty && !m->precious) { |
815 | vm_pageout_inactive_clean++; |
816 | goto reclaim_page; |
817 | } |
818 | |
819 | /* |
820 | * If we are very low on memory, then we can't |
821 | * rely on an external pager to clean a dirty page, |
822 | * because external pagers are not vm-privileged. |
823 | * |
824 | * The laundry bit tells vm_pageout_setup to |
825 | * put the page back at the front of the inactive |
826 | * queue instead of activating the page. Hence, |
827 | * we will pick the page up again immediately and |
828 | * resend it to the default pager. |
829 | */ |
830 | |
831 | assert(!m->laundry); |
832 | if ((free_count < vm_pageout_reserved_internal) && |
833 | !object->internal) { |
834 | m->laundry = TRUE; |
835 | vm_pageout_inactive_double++; |
836 | } |
837 | vm_page_unlock_queues(); |
838 | |
839 | /* |
840 | * If there is no memory object for the page, create |
841 | * one and hand it to the default pager. |
842 | * [First try to collapse, so we don't create |
843 | * one unnecessarily.] |
844 | */ |
845 | |
846 | if (!object->pager_initialized) |
847 | vm_object_collapse(object); |
848 | if (!object->pager_initialized) |
849 | vm_object_pager_create(object); |
850 | if (!object->pager_initialized) |
851 | panic("vm_pageout_scan"); |
852 | |
853 | vm_pageout_inactive_dirty++; |
854 | vm_pageout_page(m, FALSE, TRUE); /* flush it */ |
855 | vm_object_unlock(object); |
856 | burst_count++; |
857 | } |
858 | } |
859 | |
860 | void vm_pageout_scan_continue() |
861 | { |
862 | /* |
863 | * We just paused to let the pagers catch up. |
864 | * If vm_page_laundry_count is still high, |
865 | * then we aren't waiting long enough. |
866 | * If we have paused some vm_pageout_pause_max times without |
867 | * adjusting vm_pageout_burst_wait, it might be too big, |
868 | * so we decrease it. |
869 | */ |
870 | |
871 | vm_page_lock_queues(); |
872 | if (vm_page_laundry_count > vm_pageout_burst_min) { |
873 | vm_pageout_burst_wait++; |
874 | vm_pageout_pause_count = 0; |
875 | } else if (++vm_pageout_pause_count > vm_pageout_pause_max) { |
876 | vm_pageout_burst_wait = (vm_pageout_burst_wait * 3) / 4; |
877 | if (vm_pageout_burst_wait < 1) |
878 | vm_pageout_burst_wait = 1; |
879 | vm_pageout_pause_count = 0; |
880 | } |
881 | vm_page_unlock_queues(); |
882 | |
883 | vm_pageout_continue(); |
884 | /*NOTREACHED*/ |
885 | } |
886 | |
887 | /* |
888 | * vm_pageout is the high level pageout daemon. |
889 | */ |
890 | |
891 | void vm_pageout_continue() |
892 | { |
893 | /* |
894 | * The pageout daemon is never done, so loop forever. |
895 | * We should call vm_pageout_scan at least once each |
896 | * time we are woken, even if vm_page_free_wanted is |
897 | * zero, to check vm_page_free_target and |
898 | * vm_page_inactive_target. |
899 | */ |
900 | |
901 | for (;;) { |
902 | vm_pageout_scan(); |
903 | /* we hold vm_page_queue_free_lock now */ |
904 | assert(vm_page_free_wanted == 0); |
905 | |
906 | assert_wait(&vm_page_free_wanted, FALSE); |
907 | simple_unlock(&vm_page_queue_free_lock); |
908 | counter(c_vm_pageout_block++); |
909 | thread_block(vm_pageout_continue); |
910 | } |
911 | } |
912 | |
913 | void vm_pageout() |
914 | { |
915 | int free_after_reserve; |
916 | |
917 | current_thread()->vm_privilege = TRUE; |
918 | stack_privilege(current_thread()); |
919 | |
920 | /* |
921 | * Initialize some paging parameters. |
922 | */ |
923 | |
924 | if (vm_pageout_burst_max == 0) |
925 | vm_pageout_burst_max = VM_PAGEOUT_BURST_MAX; |
926 | |
927 | if (vm_pageout_burst_min == 0) |
928 | vm_pageout_burst_min = VM_PAGEOUT_BURST_MIN; |
929 | |
930 | if (vm_pageout_burst_wait == 0) |
931 | vm_pageout_burst_wait = VM_PAGEOUT_BURST_WAIT; |
932 | |
933 | if (vm_pageout_empty_wait == 0) |
934 | vm_pageout_empty_wait = VM_PAGEOUT_EMPTY_WAIT; |
935 | |
936 | if (vm_page_free_reserved == 0) |
937 | vm_page_free_reserved = VM_PAGE_FREE_RESERVED; |
938 | |
939 | if (vm_pageout_pause_max == 0) |
940 | vm_pageout_pause_max = VM_PAGEOUT_PAUSE_MAX; |
941 | |
942 | if (vm_pageout_reserved_internal == 0) |
943 | vm_pageout_reserved_internal = |
944 | VM_PAGEOUT_RESERVED_INTERNAL(vm_page_free_reserved); |
945 | |
946 | if (vm_pageout_reserved_really == 0) |
947 | vm_pageout_reserved_really = |
948 | VM_PAGEOUT_RESERVED_REALLY(vm_page_free_reserved); |
949 | |
950 | free_after_reserve = vm_page_free_count - vm_page_free_reserved; |
951 | |
952 | if (vm_page_external_limit == 0) |
953 | vm_page_external_limit = |
954 | VM_PAGE_EXTERNAL_LIMIT (free_after_reserve); |
955 | |
956 | if (vm_page_external_target == 0) |
957 | vm_page_external_target = |
958 | VM_PAGE_EXTERNAL_TARGET (free_after_reserve); |
959 | |
960 | if (vm_page_free_min == 0) |
961 | vm_page_free_min = vm_page_free_reserved + |
962 | VM_PAGE_FREE_MIN(free_after_reserve); |
963 | |
964 | if (vm_page_free_target == 0) |
965 | vm_page_free_target = vm_page_free_reserved + |
966 | VM_PAGE_FREE_TARGET(free_after_reserve); |
967 | |
968 | if (vm_page_free_target < vm_page_free_min + 5) |
969 | vm_page_free_target = vm_page_free_min + 5; |
970 | |
971 | /* |
972 | * vm_pageout_scan will set vm_page_inactive_target. |
973 | */ |
974 | |
975 | vm_pageout_continue(); |
976 | /*NOTREACHED*/ |
977 | } |