File: | obj-scan-build/../vm/vm_pageout.c |
Location: | line 586, column 4 |
Description: | Value stored to 'obj' is never read |
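
Note on the finding: at listing line 586 the scan loop stores `obj = m->object;`, but the only subsequent uses of `obj` are the calls to `vm_object_lock_try()` and `vm_object_unlock()`. In the analyzed configuration the listing itself shows `vm_object_lock_try(obj)` expanding to a constant (locking appears to be compiled out), so after preprocessing the stored value is never read, which is exactly what the dead-store checker reports. The stand-alone sketch below uses hypothetical names (it is not Mach code) and assumes a uniprocessor build where the locking macro is compiled out; it reproduces the same pattern so the diagnostic is easier to see. The full annotated listing follows.

/* toy_deadstore.c -- minimal reproduction of a "value stored ... is never
 * read" report caused by a locking macro that is compiled out.
 * Hypothetical illustration only; not part of vm_pageout.c. */
#include <stdio.h>

#define NCPUS 1                        /* assume a uniprocessor build */

#if NCPUS > 1
#define lock_try(o)  real_lock_try(o)  /* would actually read its argument */
#else
#define lock_try(o)  (1)               /* locking compiled out; argument vanishes */
#endif

struct object { int id; };
struct page   { struct object *object; };

static void scan_one(struct page *m)
{
	struct object *obj;

	obj = m->object;               /* analyzer: value stored to 'obj' is never read */
	if (!lock_try(obj))            /* expands to (1), so 'obj' is never used */
		return;
	/* ... work on the page without touching 'obj' ... */
}

int main(void)
{
	struct object o = { 42 };
	struct page   p = { &o };

	scan_one(&p);
	puts("done");
	return 0;
}

Running scan-build (or clang --analyze) on this toy file should produce the same deadcode.DeadStores diagnostic as the report above.
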
1 | /* |
2 | * Mach Operating System |
3 | * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University. |
4 | * Copyright (c) 1993,1994 The University of Utah and |
5 | * the Computer Systems Laboratory (CSL). |
6 | * All rights reserved. |
7 | * |
8 | * Permission to use, copy, modify and distribute this software and its |
9 | * documentation is hereby granted, provided that both the copyright |
10 | * notice and this permission notice appear in all copies of the |
11 | * software, derivative works or modified versions, and any portions |
12 | * thereof, and that both notices appear in supporting documentation. |
13 | * |
14 | * CARNEGIE MELLON, THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF |
15 | * THIS SOFTWARE IN ITS "AS IS" CONDITION, AND DISCLAIM ANY LIABILITY |
16 | * OF ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF |
17 | * THIS SOFTWARE. |
18 | * |
19 | * Carnegie Mellon requests users of this software to return to |
20 | * |
21 | * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU |
22 | * School of Computer Science |
23 | * Carnegie Mellon University |
24 | * Pittsburgh PA 15213-3890 |
25 | * |
26 | * any improvements or extensions that they make and grant Carnegie Mellon |
27 | * the rights to redistribute these changes. |
28 | */ |
29 | /* |
30 | * File: vm/vm_pageout.c |
31 | * Author: Avadis Tevanian, Jr., Michael Wayne Young |
32 | * Date: 1985 |
33 | * |
34 | * The proverbial page-out daemon. |
35 | */ |
36 | |
37 | #include <device/net_io.h> |
38 | #include <mach/mach_types.h> |
39 | #include <mach/memory_object.h> |
40 | #include <vm/memory_object_default.user.h> |
41 | #include <vm/memory_object_user.user.h> |
42 | #include <mach/vm_param.h> |
43 | #include <mach/vm_statistics.h> |
44 | #include <kern/counters.h> |
45 | #include <kern/debug.h> |
46 | #include <kern/slab.h> |
47 | #include <kern/task.h> |
48 | #include <kern/thread.h> |
49 | #include <vm/pmap.h> |
50 | #include <vm/vm_map.h> |
51 | #include <vm/vm_object.h> |
52 | #include <vm/vm_page.h> |
53 | #include <vm/vm_pageout.h> |
54 | #include <machine/locore.h> |
55 | |
56 | |
57 | |
58 | #ifndef VM_PAGEOUT_BURST_MAX |
59 | #define VM_PAGEOUT_BURST_MAX 10 /* number of pages */ |
60 | #endif /* VM_PAGEOUT_BURST_MAX */ |
61 | |
62 | #ifndef VM_PAGEOUT_BURST_MIN |
63 | #define VM_PAGEOUT_BURST_MIN 5 /* number of pages */ |
64 | #endif /* VM_PAGEOUT_BURST_MIN */ |
65 | |
66 | #ifndef VM_PAGEOUT_BURST_WAIT |
67 | #define VM_PAGEOUT_BURST_WAIT 10 /* milliseconds per page */ |
68 | #endif /* VM_PAGEOUT_BURST_WAIT */ |
69 | |
70 | #ifndef VM_PAGEOUT_EMPTY_WAIT |
71 | #define VM_PAGEOUT_EMPTY_WAIT 75 /* milliseconds */ |
72 | #endif /* VM_PAGEOUT_EMPTY_WAIT */ |
73 | |
74 | #ifndef VM_PAGEOUT_PAUSE_MAX |
75 | #define VM_PAGEOUT_PAUSE_MAX 10 /* number of pauses */ |
76 | #endif /* VM_PAGEOUT_PAUSE_MAX */ |
77 | |
78 | /* |
79 | * To obtain a reasonable LRU approximation, the inactive queue |
80 | * needs to be large enough to give pages on it a chance to be |
81 | * referenced a second time. This macro defines the fraction |
82 | * of active+inactive pages that should be inactive. |
83 | * The pageout daemon uses it to update vm_page_inactive_target. |
84 | * |
85 | * If vm_page_free_count falls below vm_page_free_target and |
86 | * vm_page_inactive_count is below vm_page_inactive_target, |
87 | * then the pageout daemon starts running. |
88 | */ |
89 | |
90 | #ifndef VM_PAGE_INACTIVE_TARGET |
91 | #define VM_PAGE_INACTIVE_TARGET(avail) ((avail) * 2 / 3) |
92 | #endif /* VM_PAGE_INACTIVE_TARGET */ |
93 | |
94 | /* |
95 | * Once the pageout daemon starts running, it keeps going |
96 | * until vm_page_free_count meets or exceeds vm_page_free_target. |
97 | */ |
98 | |
99 | #ifndef VM_PAGE_FREE_TARGET |
100 | #define VM_PAGE_FREE_TARGET(free) (15 + (free) / 80) |
101 | #endif /* VM_PAGE_FREE_TARGET */ |
102 | |
103 | /* |
104 | * The pageout daemon always starts running once vm_page_free_count |
105 | * falls below vm_page_free_min. |
106 | */ |
107 | |
108 | #ifndef VM_PAGE_FREE_MIN |
109 | #define VM_PAGE_FREE_MIN(free) (10 + (free) / 100) |
110 | #endif /* VM_PAGE_FREE_MIN */ |
111 | |
112 | /* When vm_page_external_count exceeds vm_page_external_limit, |
113 | * allocations of externally paged pages stops. |
114 | */ |
115 | |
116 | #ifndef VM_PAGE_EXTERNAL_LIMIT |
117 | #define VM_PAGE_EXTERNAL_LIMIT(free) ((free) / 2) |
118 | #endif /* VM_PAGE_EXTERNAL_LIMIT */ |
119 | |
120 | /* Attempt to keep the number of externally paged pages less |
121 | * than vm_pages_external_target. |
122 | */ |
123 | #ifndef VM_PAGE_EXTERNAL_TARGET |
124 | #define VM_PAGE_EXTERNAL_TARGET(free) ((free) / 4) |
125 | #endif /* VM_PAGE_EXTERNAL_TARGET */ |
126 | |
127 | /* |
128 | * When vm_page_free_count falls below vm_page_free_reserved, |
129 | * only vm-privileged threads can allocate pages. vm-privilege |
130 | * allows the pageout daemon and default pager (and any other |
131 | * associated threads needed for default pageout) to continue |
132 | * operation by dipping into the reserved pool of pages. */ |
133 | |
134 | #ifndef VM_PAGE_FREE_RESERVED |
135 | #define VM_PAGE_FREE_RESERVED 50 |
136 | #endif /* VM_PAGE_FREE_RESERVED */ |
137 | |
138 | /* |
139 | * When vm_page_free_count falls below vm_pageout_reserved_internal, |
140 | * the pageout daemon no longer trusts external pagers to clean pages. |
141 | * External pagers are probably all wedged waiting for a free page. |
142 | * It forcibly double-pages dirty pages belonging to external objects, |
143 | * getting the pages to the default pager to clean. |
144 | */ |
145 | |
146 | #ifndef VM_PAGEOUT_RESERVED_INTERNAL |
147 | #define VM_PAGEOUT_RESERVED_INTERNAL(reserve) ((reserve) - 25) |
148 | #endif /* VM_PAGEOUT_RESERVED_INTERNAL */ |
149 | |
150 | /* |
151 | * When vm_page_free_count falls below vm_pageout_reserved_really, |
152 | * the pageout daemon stops work entirely to let the default pager |
153 | * catch up (assuming the default pager has pages to clean). |
154 | * Beyond this point, it is too dangerous to consume memory |
155 | * even for memory_object_data_write messages to the default pager. |
156 | */ |
157 | |
158 | #ifndef VM_PAGEOUT_RESERVED_REALLY |
159 | #define VM_PAGEOUT_RESERVED_REALLY(reserve) ((reserve) - 40) |
160 | #endif /* VM_PAGEOUT_RESERVED_REALLY */ |
161 | |
162 | extern void vm_pageout_continue(); |
163 | extern void vm_pageout_scan_continue(); |
164 | |
165 | unsigned int vm_pageout_reserved_internal = 0; |
166 | unsigned int vm_pageout_reserved_really = 0; |
167 | |
168 | unsigned int vm_page_external_target = 0; |
169 | |
170 | unsigned int vm_pageout_burst_max = 0; |
171 | unsigned int vm_pageout_burst_min = 0; |
172 | unsigned int vm_pageout_burst_wait = 0; /* milliseconds per page */ |
173 | unsigned int vm_pageout_empty_wait = 0; /* milliseconds */ |
174 | unsigned int vm_pageout_pause_count = 0; |
175 | unsigned int vm_pageout_pause_max = 0; |
176 | |
177 | /* |
178 | * These variables record the pageout daemon's actions: |
179 | * how many pages it looks at and what happens to those pages. |
180 | * No locking needed because only one thread modifies the variables. |
181 | */ |
182 | |
183 | unsigned int vm_pageout_active = 0; /* debugging */ |
184 | unsigned int vm_pageout_inactive = 0; /* debugging */ |
185 | unsigned int vm_pageout_inactive_nolock = 0; /* debugging */ |
186 | unsigned int vm_pageout_inactive_busy = 0; /* debugging */ |
187 | unsigned int vm_pageout_inactive_absent = 0; /* debugging */ |
188 | unsigned int vm_pageout_inactive_used = 0; /* debugging */ |
189 | unsigned int vm_pageout_inactive_clean = 0; /* debugging */ |
190 | unsigned int vm_pageout_inactive_dirty = 0; /* debugging */ |
191 | unsigned int vm_pageout_inactive_double = 0; /* debugging */ |
192 | unsigned int vm_pageout_inactive_cleaned_external = 0; |
193 | |
194 | /* |
195 | * Routine: vm_pageout_setup |
196 | * Purpose: |
197 | * Set up a page for pageout. |
198 | * |
199 | * Move or copy the page to a new object, as part |
200 | * of which it will be sent to its memory manager |
201 | * in a memory_object_data_write or memory_object_initialize |
202 | * message. |
203 | * |
204 | * The "paging_offset" argument specifies the offset |
205 | * of the page within its external memory object. |
206 | * |
207 | * The "new_object" and "new_offset" arguments |
208 | * indicate where the page should be moved. |
209 | * |
210 | * The "flush" argument specifies whether the page |
211 | * should be flushed from its object. If not, a |
212 | * copy of the page is moved to the new object. |
213 | * |
214 | * In/Out conditions: |
215 | * The page in question must not be on any pageout queues, |
216 | * and must be busy. The object to which it belongs |
217 | * must be unlocked, and the caller must hold a paging |
218 | * reference to it. The new_object must not be locked. |
219 | * |
220 | * If the page is flushed from its original object, |
221 | * this routine returns a pointer to a place-holder page, |
222 | * inserted at the same offset, to block out-of-order |
223 | * requests for the page. The place-holder page must |
224 | * be freed after the data_write or initialize message |
225 | * has been sent. If the page is copied, |
226 | * the holding page is VM_PAGE_NULL. |
227 | * |
228 | * The original page is put on a paging queue and marked |
229 | * not busy on exit. |
230 | */ |
231 | vm_page_t |
232 | vm_pageout_setup(m, paging_offset, new_object, new_offset, flush) |
233 | vm_page_t m; |
234 | vm_offset_t paging_offset; |
235 | vm_object_t new_object; |
236 | vm_offset_t new_offset; |
237 | boolean_t flush; |
238 | { |
239 | vm_object_t old_object = m->object; |
240 | vm_page_t holding_page = 0; /* '=0' to quiet gcc warnings */ |
241 | vm_page_t new_m; |
242 | |
243 | assert(m->busy && !m->absent && !m->fictitious); |
244 | |
245 | /* |
246 | * If we are not flushing the page, allocate a |
247 | * page in the object. If we cannot get the |
248 | * page, flush instead. |
249 | */ |
250 | if (!flush) { |
251 | vm_object_lock(new_object); |
252 | new_m = vm_page_alloc(new_object, new_offset); |
253 | if (new_m == VM_PAGE_NULL) |
254 | flush = TRUE; |
255 | vm_object_unlock(new_object); |
256 | } |
257 | |
258 | if (flush) { |
259 | /* |
260 | * Create a place-holder page where the old one was, |
261 | * to prevent anyone from attempting to page in this |
262 | * page while we`re unlocked. |
263 | */ |
264 | while ((holding_page = vm_page_grab_fictitious()) |
265 | == VM_PAGE_NULL) |
266 | vm_page_more_fictitious(); |
267 | |
268 | vm_object_lock(old_object); |
269 | vm_page_lock_queues(); |
270 | vm_page_remove(m); |
271 | vm_page_unlock_queues(); |
272 | PAGE_WAKEUP_DONE(m); |
273 | |
274 | vm_page_lock_queues(); |
275 | vm_page_insert(holding_page, old_object, m->offset); |
276 | vm_page_unlock_queues(); |
277 | |
278 | /* |
279 | * Record that this page has been written out |
280 | */ |
281 | #if MACH_PAGEMAP |
282 | vm_external_state_set(old_object->existence_info, |
283 | paging_offset, |
284 | VM_EXTERNAL_STATE_EXISTS); |
285 | #endif /* MACH_PAGEMAP */ |
286 | |
287 | vm_object_unlock(old_object); |
288 | |
289 | vm_object_lock(new_object); |
290 | |
291 | /* |
292 | * Move this page into the new object |
293 | */ |
294 | |
295 | vm_page_lock_queues(); |
296 | vm_page_insert(m, new_object, new_offset); |
297 | vm_page_unlock_queues(); |
298 | |
299 | m->dirty = TRUE; |
300 | m->precious = FALSE; |
301 | m->page_lock = VM_PROT_NONE; |
302 | m->unlock_request = VM_PROT_NONE; |
303 | } |
304 | else { |
305 | /* |
306 | * Copy the data into the new page, |
307 | * and mark the new page as clean. |
308 | */ |
309 | vm_page_copy(m, new_m); |
310 | |
311 | vm_object_lock(old_object); |
312 | m->dirty = FALSE; |
313 | pmap_clear_modify(m->phys_addr); |
314 | |
315 | /* |
316 | * Deactivate old page. |
317 | */ |
318 | vm_page_lock_queues(); |
319 | vm_page_deactivate(m); |
320 | vm_page_unlock_queues(); |
321 | |
322 | PAGE_WAKEUP_DONE(m); |
323 | |
324 | /* |
325 | * Record that this page has been written out |
326 | */ |
327 | |
328 | #if MACH_PAGEMAP |
329 | vm_external_state_set(old_object->existence_info, |
330 | paging_offset, |
331 | VM_EXTERNAL_STATE_EXISTS); |
332 | #endif /* MACH_PAGEMAP */ |
333 | |
334 | vm_object_unlock(old_object); |
335 | |
336 | vm_object_lock(new_object); |
337 | |
338 | /* |
339 | * Use the new page below. |
340 | */ |
341 | m = new_m; |
342 | m->dirty = TRUE; |
343 | assert(!m->precious); |
344 | PAGE_WAKEUP_DONE(m); |
345 | } |
346 | |
347 | /* |
348 | * Make the old page eligible for replacement again; if a |
349 | * user-supplied memory manager fails to release the page, |
350 | * it will be paged out again to the default memory manager. |
351 | * |
352 | * Note that pages written to the default memory manager |
353 | * must be wired down -- in return, it guarantees to free |
354 | * this page, rather than reusing it. |
355 | */ |
356 | |
357 | vm_page_lock_queues(); |
358 | vm_stat.pageouts++; |
359 | if (m->laundry) { |
360 | /* |
361 | * vm_pageout_scan is telling us to put this page |
362 | * at the front of the inactive queue, so it will |
363 | * be immediately paged out to the default pager. |
364 | */ |
365 | |
366 | assert(!old_object->internal); |
367 | m->laundry = FALSE; |
368 | |
369 | queue_enter_first(&vm_page_queue_inactive, m, |
370 | vm_page_t, pageq); |
371 | m->inactive = TRUE; |
372 | vm_page_inactive_count++; |
373 | } else if (old_object->internal) { |
374 | m->laundry = TRUE; |
375 | vm_page_laundry_count++; |
376 | |
377 | vm_page_wire(m); |
378 | } else |
379 | vm_page_activate(m); |
380 | vm_page_unlock_queues(); |
381 | |
382 | /* |
383 | * Since IPC operations may block, we drop locks now. |
384 | * [The placeholder page is busy, and we still have |
385 | * paging_in_progress incremented.] |
386 | */ |
387 | |
388 | vm_object_unlock(new_object); |
389 | |
390 | /* |
391 | * Return the placeholder page to simplify cleanup. |
392 | */ |
393 | return (flush ? holding_page : VM_PAGE_NULL); |
394 | } |
395 | |
396 | /* |
397 | * Routine: vm_pageout_page |
398 | * Purpose: |
399 | * Causes the specified page to be written back to |
400 | * the appropriate memory object. |
401 | * |
402 | * The "initial" argument specifies whether this |
403 | * data is an initialization only, and should use |
404 | * memory_object_data_initialize instead of |
405 | * memory_object_data_write. |
406 | * |
407 | * The "flush" argument specifies whether the page |
408 | * should be flushed from the object. If not, a |
409 | * copy of the data is sent to the memory object. |
410 | * |
411 | * In/out conditions: |
412 | * The page in question must not be on any pageout queues. |
413 | * The object to which it belongs must be locked. |
414 | * Implementation: |
415 | * Move this page to a completely new object, if flushing; |
416 | * copy to a new page in a new object, if not. |
417 | */ |
418 | void |
419 | vm_pageout_page(m, initial, flush) |
420 | vm_page_t m; |
421 | boolean_t initial; |
422 | boolean_t flush; |
423 | { |
424 | vm_map_copy_t copy; |
425 | vm_object_t old_object; |
426 | vm_object_t new_object; |
427 | vm_page_t holding_page; |
428 | vm_offset_t paging_offset; |
429 | kern_return_t rc; |
430 | boolean_t precious_clean; |
431 | |
432 | assert(m->busy); |
433 | |
434 | /* |
435 | * Cleaning but not flushing a clean precious page is a |
436 | * no-op. Remember whether page is clean and precious now |
437 | * because vm_pageout_setup will mark it dirty and not precious. |
438 | * |
439 | * XXX Check if precious_clean && !flush can really happen. |
440 | */ |
441 | precious_clean = (!m->dirty) && m->precious; |
442 | if (precious_clean && !flush) { |
443 | PAGE_WAKEUP_DONE(m); |
444 | return; |
445 | } |
446 | |
447 | /* |
448 | * Verify that we really want to clean this page. |
449 | */ |
450 | if (m->absent || m->error || (!m->dirty && !m->precious)) { |
451 | VM_PAGE_FREE(m); |
452 | return; |
453 | } |
454 | |
455 | /* |
456 | * Create a paging reference to let us play with the object. |
457 | */ |
458 | old_object = m->object; |
459 | paging_offset = m->offset + old_object->paging_offset; |
460 | vm_object_paging_begin(old_object); |
461 | vm_object_unlock(old_object); |
462 | |
463 | /* |
464 | * Allocate a new object into which we can put the page. |
465 | */ |
466 | new_object = vm_object_allocate(PAGE_SIZE); |
467 | |
468 | /* |
469 | * Move the page into the new object. |
470 | */ |
471 | holding_page = vm_pageout_setup(m, |
472 | paging_offset, |
473 | new_object, |
474 | 0, /* new offset */ |
475 | flush); /* flush */ |
476 | |
477 | rc = vm_map_copyin_object(new_object, 0, PAGE_SIZE, &copy); |
478 | assert(rc == KERN_SUCCESS); |
479 | |
480 | if (initial || old_object->use_old_pageout) { |
481 | rc = (*(initial ? memory_object_data_initialize |
482 | : memory_object_data_write)) |
483 | (old_object->pager, |
484 | old_object->pager_request, |
485 | paging_offset, (pointer_t) copy, PAGE_SIZE); |
486 | } |
487 | else { |
488 | rc = memory_object_data_return( |
489 | old_object->pager, |
490 | old_object->pager_request, |
491 | paging_offset, (pointer_t) copy, PAGE_SIZE, |
492 | !precious_clean, !flush); |
493 | } |
494 | |
495 | if (rc != KERN_SUCCESS) |
496 | vm_map_copy_discard(copy); |
497 | |
498 | /* |
499 | * Clean up. |
500 | */ |
501 | vm_object_lock(old_object); |
502 | if (holding_page != VM_PAGE_NULL) |
503 | VM_PAGE_FREE(holding_page); |
504 | vm_object_paging_end(old_object); |
505 | } |
506 | |
507 | /* |
508 | * vm_pageout_scan does the dirty work for the pageout daemon. |
509 | * It returns with vm_page_queue_free_lock held and |
510 | * vm_page_free_wanted == 0. |
511 | */ |
512 | |
513 | void vm_pageout_scan() |
514 | { |
515 | unsigned int burst_count; |
516 | unsigned int want_pages; |
517 | |
518 | /* |
519 | * We want to gradually dribble pages from the active queue |
520 | * to the inactive queue. If we let the inactive queue get |
521 | * very small, and then suddenly dump many pages into it, |
522 | * those pages won't get a sufficient chance to be referenced |
523 | * before we start taking them from the inactive queue. |
524 | * |
525 | * We must limit the rate at which we send pages to the pagers. |
526 | * data_write messages consume memory, for message buffers and |
527 | * for map-copy objects. If we get too far ahead of the pagers, |
528 | * we can potentially run out of memory. |
529 | * |
530 | * We can use the laundry count to limit directly the number |
531 | * of pages outstanding to the default pager. A similar |
532 | * strategy for external pagers doesn't work, because |
533 | * external pagers don't have to deallocate the pages sent them, |
534 | * and because we might have to send pages to external pagers |
535 | * even if they aren't processing writes. So we also |
536 | * use a burst count to limit writes to external pagers. |
537 | * |
538 | * When memory is very tight, we can't rely on external pagers to |
539 | * clean pages. They probably aren't running, because they |
540 | * aren't vm-privileged. If we kept sending dirty pages to them, |
541 | * we could exhaust the free list. However, we can't just ignore |
542 | * pages belonging to external objects, because there might be no |
543 | * pages belonging to internal objects. Hence, we get the page |
544 | * into an internal object and then immediately double-page it, |
545 | * sending it to the default pager. |
546 | * |
547 | * slab_collect should be last, because the other operations |
548 | * might return memory to caches. When we pause we use |
549 | * vm_pageout_scan_continue as our continuation, so we will |
550 | * reenter vm_pageout_scan periodically and attempt to reclaim |
551 | * internal memory even if we never reach vm_page_free_target. |
552 | */ |
553 | |
554 | stack_collect(); |
555 | net_kmsg_collect(); |
556 | consider_task_collect(); |
557 | consider_thread_collect(); |
558 | slab_collect(); |
559 | |
560 | for (burst_count = 0;;) { |
561 | vm_page_t m; |
562 | vm_object_t object; |
563 | unsigned int free_count; |
564 | |
565 | /* |
566 | * Recalculate vm_page_inactivate_target. |
567 | */ |
568 | |
569 | vm_page_lock_queues(); |
570 | vm_page_inactive_target = |
571 | VM_PAGE_INACTIVE_TARGET(vm_page_active_count + |
572 | vm_page_inactive_count); |
573 | |
574 | /* |
575 | * Move pages from active to inactive. |
576 | */ |
577 | |
578 | while ((vm_page_inactive_count < vm_page_inactive_target) && |
579 | !queue_empty(&vm_page_queue_active)) { |
580 | vm_object_t obj; |
581 | |
582 | vm_pageout_active++; |
583 | m = (vm_page_t) queue_first(&vm_page_queue_active); |
584 | assert(m->active && !m->inactive); |
585 | |
586 | obj = m->object; |
Value stored to 'obj' is never read | |
587 | if (!vm_object_lock_try(obj)) { |
588 | /* |
589 | * Move page to end and continue. |
590 | */ |
591 | |
592 | queue_remove(&vm_page_queue_active, m, |
593 | vm_page_t, pageq); |
594 | queue_enter(&vm_page_queue_active, m, |
595 | vm_page_t, pageq); |
596 | vm_page_unlock_queues(); |
597 | vm_page_lock_queues(); |
598 | continue; |
599 | } |
600 | |
601 | /* |
602 | * If the page is busy, then we pull it |
603 | * off the active queue and leave it alone. |
604 | */ |
605 | |
606 | if (m->busy) { |
607 | vm_object_unlock(obj); |
608 | queue_remove(&vm_page_queue_active, m, |
609 | vm_page_t, pageq); |
610 | m->active = FALSE; |
611 | vm_page_active_count--; |
612 | continue; |
613 | } |
614 | |
615 | /* |
616 | * Deactivate the page while holding the object |
617 | * locked, so we know the page is still not busy. |
618 | * This should prevent races between pmap_enter |
619 | * and pmap_clear_reference. The page might be |
620 | * absent or fictitious, but vm_page_deactivate |
621 | * can handle that. |
622 | */ |
623 | |
624 | vm_page_deactivate(m); |
625 | vm_object_unlock(obj); |
626 | } |
627 | |
628 | /* |
629 | * We are done if we have met our targets *and* |
630 | * nobody is still waiting for a page. |
631 | */ |
632 | |
633 | simple_lock(&vm_page_queue_free_lock); |
634 | free_count = vm_page_free_count; |
635 | if ((free_count >= vm_page_free_target) && |
636 | (vm_page_external_count <= vm_page_external_target) && |
637 | (vm_page_free_wanted == 0)) { |
638 | vm_page_unlock_queues(); |
639 | break; |
640 | } |
641 | want_pages = ((free_count < vm_page_free_target) || |
642 | vm_page_free_wanted); |
643 | simple_unlock(&vm_page_queue_free_lock); |
644 | |
645 | /* |
646 | * Sometimes we have to pause: |
647 | * 1) No inactive pages - nothing to do. |
648 | * 2) Flow control - wait for pagers to catch up. |
649 | * 3) Extremely low memory - sending out dirty pages |
650 | * consumes memory. We don't take the risk of doing |
651 | * this if the default pager already has work to do. |
652 | */ |
653 | pause: |
654 | if (queue_empty(&vm_page_queue_inactive) || |
655 | (burst_count >= vm_pageout_burst_max) || |
656 | (vm_page_laundry_count >= vm_pageout_burst_max) || |
657 | ((free_count < vm_pageout_reserved_really) && |
658 | (vm_page_laundry_count > 0))) { |
659 | unsigned int pages, msecs; |
660 | |
661 | /* |
662 | * vm_pageout_burst_wait is msecs/page. |
663 | * If there is nothing for us to do, we wait |
664 | * at least vm_pageout_empty_wait msecs. |
665 | */ |
666 | |
667 | if (vm_page_laundry_count > burst_count) |
668 | pages = vm_page_laundry_count; |
669 | else |
670 | pages = burst_count; |
671 | msecs = pages * vm_pageout_burst_wait; |
672 | |
673 | if (queue_empty(&vm_page_queue_inactive) && |
674 | (msecs < vm_pageout_empty_wait)) |
675 | msecs = vm_pageout_empty_wait; |
676 | vm_page_unlock_queues(); |
677 | |
678 | thread_will_wait_with_timeout(current_thread(), msecs); |
679 | counter(c_vm_pageout_scan_block++); |
680 | thread_block(vm_pageout_scan_continue); |
681 | call_continuation(vm_pageout_scan_continue); |
682 | /*NOTREACHED*/ |
683 | } |
684 | |
685 | vm_pageout_inactive++; |
686 | |
687 | /* Find a page we are interested in paging out. If we |
688 | need pages, then we'll page anything out; otherwise |
689 | we only page out external pages. */ |
690 | m = (vm_page_t) queue_first (&vm_page_queue_inactive); |
691 | while (1) |
692 | { |
693 | assert (!m->active && m->inactive); |
694 | if (want_pages || m->external) |
695 | break; |
696 | |
697 | m = (vm_page_t) queue_next (m); |
698 | if (!m) |
699 | goto pause; |
700 | } |
701 | |
702 | object = m->object; |
703 | |
704 | /* |
705 | * Try to lock object; since we've got the |
706 | * page queues lock, we can only try for this one. |
707 | */ |
708 | |
709 | if (!vm_object_lock_try(object)) { |
710 | /* |
711 | * Move page to end and continue. |
712 | */ |
713 | |
714 | queue_remove(&vm_page_queue_inactive, m, |
715 | vm_page_t, pageq); |
716 | queue_enter(&vm_page_queue_inactive, m, |
717 | vm_page_t, pageq); |
718 | vm_page_unlock_queues(); |
719 | vm_pageout_inactive_nolock++; |
720 | continue; |
721 | } |
722 | |
723 | /* |
724 | * Remove the page from the inactive list. |
725 | */ |
726 | |
727 | queue_remove(&vm_page_queue_inactive, m, vm_page_t, pageq); |
728 | vm_page_inactive_count--; |
729 | m->inactive = FALSE; |
730 | |
731 | if (m->busy || !object->alive) { |
732 | /* |
733 | * Somebody is already playing with this page. |
734 | * Leave it off the pageout queues. |
735 | */ |
736 | |
737 | vm_page_unlock_queues(); |
738 | vm_object_unlock(object); |
739 | vm_pageout_inactive_busy++; |
740 | continue; |
741 | } |
742 | |
743 | /* |
744 | * If it's absent, we can reclaim the page. |
745 | */ |
746 | |
747 | if (want_pages && m->absent) { |
748 | vm_pageout_inactive_absent++; |
749 | reclaim_page: |
750 | vm_page_free(m); |
751 | vm_page_unlock_queues(); |
752 | vm_object_unlock(object); |
753 | continue; |
754 | } |
755 | |
756 | /* |
757 | * If it's being used, reactivate. |
758 | * (Fictitious pages are either busy or absent.) |
759 | */ |
760 | |
761 | assert(!m->fictitious); |
762 | if (m->reference || pmap_is_referenced(m->phys_addr)) { |
763 | vm_object_unlock(object); |
764 | vm_page_activate(m); |
765 | vm_stat.reactivations++; |
766 | current_task()->reactivations++; |
767 | vm_page_unlock_queues(); |
768 | vm_pageout_inactive_used++; |
769 | continue; |
770 | } |
771 | |
772 | /* |
773 | * Eliminate all mappings. |
774 | */ |
775 | |
776 | m->busy = TRUE; |
777 | pmap_page_protect(m->phys_addr, VM_PROT_NONE); |
778 | if (!m->dirty) |
779 | m->dirty = pmap_is_modified(m->phys_addr); |
780 | |
781 | if (m->external) { |
782 | /* Figure out if we still care about this |
783 | page in the limit of externally managed pages. |
784 | Clean pages don't actually cause system hosage, |
785 | so it's ok to stop considering them as |
786 | "consumers" of memory. */ |
787 | if (m->dirty && !m->extcounted) { |
788 | m->extcounted = TRUE; |
789 | vm_page_external_count++; |
790 | } else if (!m->dirty && m->extcounted) { |
791 | m->extcounted = FALSE; |
792 | vm_page_external_count--; |
793 | } |
794 | } |
795 | |
796 | /* If we don't actually need more memory, and the page |
797 | is not dirty, put it on the tail of the inactive queue |
798 | and move on to the next page. */ |
799 | if (!want_pages && !m->dirty) { |
800 | queue_remove (&vm_page_queue_inactive, m, |
801 | vm_page_t, pageq); |
802 | queue_enter (&vm_page_queue_inactive, m, |
803 | vm_page_t, pageq); |
804 | vm_page_unlock_queues(); |
805 | vm_pageout_inactive_cleaned_external++; |
806 | continue; |
807 | } |
808 | |
809 | /* |
810 | * If it's clean and not precious, we can free the page. |
811 | */ |
812 | |
813 | if (!m->dirty && !m->precious) { |
814 | vm_pageout_inactive_clean++; |
815 | goto reclaim_page; |
816 | } |
817 | |
818 | /* |
819 | * If we are very low on memory, then we can't |
820 | * rely on an external pager to clean a dirty page, |
821 | * because external pagers are not vm-privileged. |
822 | * |
823 | * The laundry bit tells vm_pageout_setup to |
824 | * put the page back at the front of the inactive |
825 | * queue instead of activating the page. Hence, |
826 | * we will pick the page up again immediately and |
827 | * resend it to the default pager. |
828 | */ |
829 | |
830 | assert(!m->laundry); |
831 | if ((free_count < vm_pageout_reserved_internal) && |
832 | !object->internal) { |
833 | m->laundry = TRUE; |
834 | vm_pageout_inactive_double++; |
835 | } |
836 | vm_page_unlock_queues(); |
837 | |
838 | /* |
839 | * If there is no memory object for the page, create |
840 | * one and hand it to the default pager. |
841 | * [First try to collapse, so we don't create |
842 | * one unnecessarily.] |
843 | */ |
844 | |
845 | if (!object->pager_initialized) |
846 | vm_object_collapse(object); |
847 | if (!object->pager_initialized) |
848 | vm_object_pager_create(object); |
849 | if (!object->pager_initialized) |
850 | panic("vm_pageout_scan"); |
851 | |
852 | vm_pageout_inactive_dirty++; |
853 | vm_pageout_page(m, FALSE, TRUE); /* flush it */ |
854 | vm_object_unlock(object); |
855 | burst_count++; |
856 | } |
857 | } |
858 | |
859 | void vm_pageout_scan_continue() |
860 | { |
861 | /* |
862 | * We just paused to let the pagers catch up. |
863 | * If vm_page_laundry_count is still high, |
864 | * then we aren't waiting long enough. |
865 | * If we have paused some vm_pageout_pause_max times without |
866 | * adjusting vm_pageout_burst_wait, it might be too big, |
867 | * so we decrease it. |
868 | */ |
869 | |
870 | vm_page_lock_queues(); |
871 | if (vm_page_laundry_count > vm_pageout_burst_min) { |
872 | vm_pageout_burst_wait++; |
873 | vm_pageout_pause_count = 0; |
874 | } else if (++vm_pageout_pause_count > vm_pageout_pause_max) { |
875 | vm_pageout_burst_wait = (vm_pageout_burst_wait * 3) / 4; |
876 | if (vm_pageout_burst_wait < 1) |
877 | vm_pageout_burst_wait = 1; |
878 | vm_pageout_pause_count = 0; |
879 | } |
880 | vm_page_unlock_queues(); |
881 | |
882 | vm_pageout_continue(); |
883 | /*NOTREACHED*/ |
884 | } |
885 | |
886 | /* |
887 | * vm_pageout is the high level pageout daemon. |
888 | */ |
889 | |
890 | void vm_pageout_continue() |
891 | { |
892 | /* |
893 | * The pageout daemon is never done, so loop forever. |
894 | * We should call vm_pageout_scan at least once each |
895 | * time we are woken, even if vm_page_free_wanted is |
896 | * zero, to check vm_page_free_target and |
897 | * vm_page_inactive_target. |
898 | */ |
899 | |
900 | for (;;) { |
901 | vm_pageout_scan(); |
902 | /* we hold vm_page_queue_free_lock now */ |
903 | assert(vm_page_free_wanted == 0); |
904 | |
905 | assert_wait(&vm_page_free_wanted, FALSE); |
906 | simple_unlock(&vm_page_queue_free_lock); |
907 | counter(c_vm_pageout_block++); |
908 | thread_block(vm_pageout_continue); |
909 | } |
910 | } |
911 | |
912 | void vm_pageout() |
913 | { |
914 | int free_after_reserve; |
915 | |
916 | current_thread()->vm_privilege = TRUE; |
917 | stack_privilege(current_thread()); |
918 | |
919 | /* |
920 | * Initialize some paging parameters. |
921 | */ |
922 | |
923 | if (vm_pageout_burst_max == 0) |
924 | vm_pageout_burst_max = VM_PAGEOUT_BURST_MAX; |
925 | |
926 | if (vm_pageout_burst_min == 0) |
927 | vm_pageout_burst_min = VM_PAGEOUT_BURST_MIN; |
928 | |
929 | if (vm_pageout_burst_wait == 0) |
930 | vm_pageout_burst_wait = VM_PAGEOUT_BURST_WAIT; |
931 | |
932 | if (vm_pageout_empty_wait == 0) |
933 | vm_pageout_empty_wait = VM_PAGEOUT_EMPTY_WAIT; |
934 | |
935 | if (vm_page_free_reserved == 0) |
936 | vm_page_free_reserved = VM_PAGE_FREE_RESERVED; |
937 | |
938 | if (vm_pageout_pause_max == 0) |
939 | vm_pageout_pause_max = VM_PAGEOUT_PAUSE_MAX; |
940 | |
941 | if (vm_pageout_reserved_internal == 0) |
942 | vm_pageout_reserved_internal = |
943 | VM_PAGEOUT_RESERVED_INTERNAL(vm_page_free_reserved); |
944 | |
945 | if (vm_pageout_reserved_really == 0) |
946 | vm_pageout_reserved_really = |
947 | VM_PAGEOUT_RESERVED_REALLY(vm_page_free_reserved); |
948 | |
949 | free_after_reserve = vm_page_free_count - vm_page_free_reserved; |
950 | |
951 | if (vm_page_external_limit == 0) |
952 | vm_page_external_limit = |
953 | VM_PAGE_EXTERNAL_LIMIT (free_after_reserve); |
954 | |
955 | if (vm_page_external_target == 0) |
956 | vm_page_external_target = |
957 | VM_PAGE_EXTERNAL_TARGET (free_after_reserve); |
958 | |
959 | if (vm_page_free_min == 0) |
960 | vm_page_free_min = vm_page_free_reserved + |
961 | VM_PAGE_FREE_MIN(free_after_reserve); |
962 | |
963 | if (vm_page_free_target == 0) |
964 | vm_page_free_target = vm_page_free_reserved + |
965 | VM_PAGE_FREE_TARGET(free_after_reserve); |
966 | |
967 | if (vm_page_free_target < vm_page_free_min + 5) |
968 | vm_page_free_target = vm_page_free_min + 5; |
969 | |
970 | /* |
971 | * vm_pageout_scan will set vm_page_inactive_target. |
972 | */ |
973 | |
974 | vm_pageout_continue(); |
975 | /*NOTREACHED*/ |
976 | } |
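
If one wanted to quiet this particular diagnostic, a common approach is to avoid the named temporary and use the expression directly at its single use site, or to confine the temporary to the scope where it is genuinely consumed. This is only a sketch on the toy example from the top of the report, under the same uniprocessor assumption; it does not claim to be the upstream GNU Mach resolution, and the store is live in configurations where the locking macros are real.

/* toy_deadstore_fixed.c -- restructured toy example: no named temporary,
 * hence no dead store to report when lock_try() is compiled out.
 * Hypothetical sketch only; not the actual vm_pageout.c change. */
#define lock_try(o)  (1)               /* locking still compiled out */

struct object { int id; };
struct page   { struct object *object; };

static int scan_one(struct page *m)
{
	/* The expression is passed in place of a temporary. */
	if (!lock_try(m->object))
		return 0;
	/* ... work on the page ... */
	return 1;
}

int main(void)
{
	struct object o = { 7 };
	struct page   p = { &o };

	return scan_one(&p) ? 0 : 1;
}
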