Bug Summary

File: obj-scan-build/../vm/memory_object.c
Location: line 145, column 3
Description: Dereference of null pointer
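
The path the analyzer reports starts in memory_object_data_provided (step 1) and ends at the assert on line 145 of memory_object_data_supply (step 24): the page-list continuation is invoked, returns KERN_SUCCESS, but hands back a null replacement copy, so data_copy is set to VM_MAP_COPY_NULL while data_cnt is still greater than zero, and the next loop iteration dereferences the null copy inside assert(data_copy->cpy_npages > 0). The sketch below reproduces the shape of that path against a simplified stand-in structure; the type, field, and function names are placeholders invented for illustration, not the Mach definitions.

    /*
     * Minimal model of the reported path (placeholder types, not the
     * Mach vm_map_copy definitions).  A page-list copy is consumed one
     * page per iteration; when it runs out, a continuation supplies the
     * next copy.  If the continuation reports success but yields a NULL
     * copy while bytes remain, the next iteration dereferences NULL --
     * the same shape as the assert at memory_object.c:145.
     */
    #include <assert.h>
    #include <stddef.h>

    #define PAGE_SIZE 4096

    struct copy {                           /* stand-in for vm_map_copy_t */
        int npages;
        int (*cont)(struct copy **out);     /* continuation; may yield NULL */
    };

    static int empty_cont(struct copy **out)
    {
        *out = NULL;                        /* "succeeds", but has nothing more to give */
        return 0;                           /* KERN_SUCCESS */
    }

    static void consume(struct copy *data_copy, size_t data_cnt)
    {
        for (; data_cnt > 0; data_cnt -= PAGE_SIZE) {
            assert(data_copy->npages > 0);  /* second pass: data_copy is NULL */
            data_copy->npages--;

            if (data_copy->npages == 0 && data_copy->cont != NULL) {
                struct copy *new_copy = NULL;
                if (data_copy->cont(&new_copy) == 0)
                    data_copy = new_copy;   /* may be NULL; the loop keeps going */
            }
        }
    }

    int main(void)
    {
        struct copy c = { .npages = 1, .cont = empty_cont };
        consume(&c, 2 * PAGE_SIZE);         /* second iteration hits the NULL dereference */
        return 0;
    }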

Annotated Source Code

1/*
2 * Mach Operating System
3 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University.
4 * Copyright (c) 1993,1994 The University of Utah and
5 * the Computer Systems Laboratory (CSL).
6 * All rights reserved.
7 *
8 * Permission to use, copy, modify and distribute this software and its
9 * documentation is hereby granted, provided that both the copyright
10 * notice and this permission notice appear in all copies of the
11 * software, derivative works or modified versions, and any portions
12 * thereof, and that both notices appear in supporting documentation.
13 *
14 * CARNEGIE MELLON, THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF
15 * THIS SOFTWARE IN ITS "AS IS" CONDITION, AND DISCLAIM ANY LIABILITY
16 * OF ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF
17 * THIS SOFTWARE.
18 *
19 * Carnegie Mellon requests users of this software to return to
20 *
21 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
22 * School of Computer Science
23 * Carnegie Mellon University
24 * Pittsburgh PA 15213-3890
25 *
26 * any improvements or extensions that they make and grant Carnegie Mellon
27 * the rights to redistribute these changes.
28 */
29/*
30 * File: vm/memory_object.c
31 * Author: Michael Wayne Young
32 *
33 * External memory management interface control functions.
34 */
35
36/*
37 * Interface dependencies:
38 */
39
40#include <mach/std_types.h> /* For pointer_t */
41#include <mach/mach_types.h>
42
43#include <mach/kern_return.h>
44#include <vm/vm_map.h>
45#include <vm/vm_object.h>
46#include <mach/memory_object.h>
47#include <mach/boolean.h>
48#include <mach/vm_prot.h>
49#include <mach/message.h>
50
51#include <vm/memory_object_user.user.h>
52#include <vm/memory_object_default.user.h>
53
54/*
55 * Implementation dependencies:
56 */
57#include <vm/memory_object.h>
58#include <vm/vm_page.h>
59#include <vm/vm_pageout.h>
60#include <vm/pmap.h> /* For copy_to_phys, pmap_clear_modify */
61#include <kern/debug.h> /* For panic() */
62#include <kern/thread.h> /* For current_thread() */
63#include <kern/host.h>
64#include <vm/vm_kern.h> /* For kernel_map, vm_move */
65#include <vm/vm_map.h> /* For vm_map_pageable */
66#include <ipc/ipc_port.h>
67
68#if MACH_PAGEMAP1
69#include <vm/vm_external.h>
70#endif /* MACH_PAGEMAP */
71
72typedef int memory_object_lock_result_t; /* moved from below */
73
74
75ipc_port_t memory_manager_default = IP_NULL((ipc_port_t) ((ipc_object_t) 0));
76decl_simple_lock_data(,memory_manager_default_lock)struct simple_lock_data_empty memory_manager_default_lock;
77
78/*
79 * Important note:
80 * All of these routines gain a reference to the
81 * object (first argument) as part of the automatic
82 * argument conversion. Explicit deallocation is necessary.
83 */
84
85kern_return_t memory_object_data_supply(object, offset, data_copy, data_cnt,
86 lock_value, precious, reply_to, reply_to_type)
87 vm_object_t object;
88 vm_offset_t offset;
89 vm_map_copy_t data_copy;
90 unsigned int data_cnt;
91 vm_prot_t lock_value;
92 boolean_t precious;
93 ipc_port_t reply_to;
94 mach_msg_type_name_t reply_to_type;
95{
96 kern_return_t result = KERN_SUCCESS0;
97 vm_offset_t error_offset = 0;
98 vm_page_t m;
99 vm_page_t data_m;
100 vm_size_t original_length;
101 vm_offset_t original_offset;
102 vm_page_t *page_list;
103 boolean_t was_absent;
104 vm_map_copy_t orig_copy = data_copy;
105
106 /*
107 * Look for bogus arguments
108 */
109
110 if (object == VM_OBJECT_NULL((vm_object_t) 0)) {
[2] Assuming 'object' is not equal to null
[3] Taking false branch
111 return(KERN_INVALID_ARGUMENT4);
112 }
113
114 if (lock_value & ~VM_PROT_ALL(((vm_prot_t) 0x01)|((vm_prot_t) 0x02)|((vm_prot_t) 0x04))) {
[4] Taking false branch
115 vm_object_deallocate(object);
116 return(KERN_INVALID_ARGUMENT4);
117 }
118
119 if ((data_cnt % PAGE_SIZE(1 << 12)) != 0) {
[5] Taking false branch
120 vm_object_deallocate(object);
121 return(KERN_INVALID_ARGUMENT4);
122 }
123
124 /*
125 * Adjust the offset from the memory object to the offset
126 * within the vm_object.
127 */
128
129 original_length = data_cnt;
130 original_offset = offset;
131
132 assert(data_copy->type == VM_MAP_COPY_PAGE_LIST)({ if (!(data_copy->type == 3)) Assert("data_copy->type == VM_MAP_COPY_PAGE_LIST"
, "../vm/memory_object.c", 132); })
;
133 page_list = &data_copy->cpy_page_listc_u.c_p.page_list[0];
134
135 vm_object_lock(object);
136 vm_object_paging_begin(object)((object)->paging_in_progress++);
137 offset -= object->paging_offset;
138
139 /*
140 * Loop over copy stealing pages for pagein.
141 */
142
143 for (; data_cnt > 0 ; data_cnt -= PAGE_SIZE(1 << 12), offset += PAGE_SIZE(1 << 12)) {
[6] Assuming 'data_cnt' is > 0
[7] Loop condition is true. Entering loop body
[22] Assuming 'data_cnt' is > 0
[23] Loop condition is true. Entering loop body
144
145 assert(data_copy->cpy_npages > 0)({ if (!(data_copy->c_u.c_p.npages > 0)) Assert("data_copy->cpy_npages > 0"
, "../vm/memory_object.c", 145); })
;
[24] Within the expansion of the macro 'assert': (a) Dereference of null pointer
146 data_m = *page_list;
147
148 if (data_m == VM_PAGE_NULL((vm_page_t) 0) || data_m->tabled ||
[8] Assuming 'data_m' is not equal to null
[9] Taking false branch
149 data_m->error || data_m->absent || data_m->fictitious) {
150
151 panic("Data_supply: bad page");
152 }
153
154 /*
155 * Look up target page and check its state.
156 */
157
158retry_lookup:
159 m = vm_page_lookup(object,offset);
160 if (m == VM_PAGE_NULL((vm_page_t) 0)) {
[10] Assuming 'm' is not equal to null
[11] Taking false branch
[14] Assuming 'm' is equal to null
[15] Taking true branch
161 was_absent = FALSE((boolean_t) 0);
162 }
163 else {
164 if (m->absent && m->busy) {
165
166 /*
167 * Page was requested. Free the busy
168 * page waiting for it. Insertion
169 * of new page happens below.
170 */
171
172 VM_PAGE_FREE(m)({ ; vm_page_free(m); ((void)(&vm_page_queue_lock)); });
173 was_absent = TRUE((boolean_t) 1);
174 }
175 else {
176
177 /*
178 * Have to wait for page that is busy and
179 * not absent. This is probably going to
180 * be an error, but go back and check.
181 */
182 if (m->busy) {
[12] Taking true branch
183 PAGE_ASSERT_WAIT(m, FALSE)({ (m)->wanted = ((boolean_t) 1); assert_wait((event_t) (m
), (((boolean_t) 0))); })
;
184 vm_object_unlock(object)((void)(&(object)->Lock));
185 thread_block((void (*)()) 0);
186 vm_object_lock(object);
187 goto retry_lookup;
[13] Control jumps to line 159
188 }
189
190 /*
191 * Page already present; error.
192 * This is an error if data is precious.
193 */
194 result = KERN_MEMORY_PRESENT23;
195 error_offset = offset + object->paging_offset;
196
197 break;
198 }
199 }
200
201 /*
202 * Ok to pagein page. Target object now has no page
203 * at offset. Set the page parameters, then drop
204 * in new page and set up pageout state. Object is
205 * still locked here.
206 *
207 * Must clear busy bit in page before inserting it.
208 * Ok to skip wakeup logic because nobody else
209 * can possibly know about this page.
210 */
211
212 data_m->busy = FALSE((boolean_t) 0);
213 data_m->dirty = FALSE((boolean_t) 0);
214 pmap_clear_modify(data_m->phys_addr);
215
216 data_m->page_lock = lock_value;
217 data_m->unlock_request = VM_PROT_NONE((vm_prot_t) 0x00);
218 data_m->precious = precious;
219
220 vm_page_lock_queues();
221 vm_page_insert(data_m, object, offset);
222
223 if (was_absent)
[16] Taking false branch
224 vm_page_activate(data_m);
225 else
226 vm_page_deactivate(data_m);
227
228 vm_page_unlock_queues()((void)(&vm_page_queue_lock));
229
230 /*
231 * Null out this page list entry, and advance to next
232 * page.
233 */
234
235 *page_list++ = VM_PAGE_NULL((vm_page_t) 0);
236
237 if (--(data_copy->cpy_npagesc_u.c_p.npages) == 0 &&
[17] Taking true branch
238 vm_map_copy_has_cont(data_copy)(((data_copy)->c_u.c_p.cont) != (kern_return_t (*)()) 0)) {
239 vm_map_copy_t new_copy;
240
241 vm_object_unlock(object)((void)(&(object)->Lock));
242
243 vm_map_copy_invoke_cont(data_copy, &new_copy, &result)({ vm_map_copy_page_discard(data_copy); *&result = (*((data_copy
)->c_u.c_p.cont))((data_copy)->c_u.c_p.cont_args, &
new_copy); (data_copy)->c_u.c_p.cont = (kern_return_t (*)(
)) 0; })
;
244
245 if (result == KERN_SUCCESS0) {
[18] Assuming 'result' is equal to 0
[19] Taking true branch
246
247 /*
248 * Consume on success requires that
249 * we keep the original vm_map_copy
250 * around in case something fails.
251 * Free the old copy if it's not the original
252 */
253 if (data_copy != orig_copy) {
[20] Taking false branch
254 vm_map_copy_discard(data_copy);
255 }
256
257 if ((data_copy = new_copy) != VM_MAP_COPY_NULL((vm_map_copy_t) 0))
[21] Taking false branch
258 page_list = &data_copy->cpy_page_listc_u.c_p.page_list[0];
259
260 vm_object_lock(object);
261 }
262 else {
263 vm_object_lock(object);
264 error_offset = offset + object->paging_offset +
265 PAGE_SIZE(1 << 12);
266 break;
267 }
268 }
269 }
270
271 /*
272 * Send reply if one was requested.
273 */
274 vm_object_paging_end(object)({ ({ if (!((object)->paging_in_progress != 0)) Assert("(object)->paging_in_progress != 0"
, "../vm/memory_object.c", 274); }); if (--(object)->paging_in_progress
== 0) { ({ if ((object)->all_wanted & (1 << (2)
)) thread_wakeup_prim(((event_t)(((vm_offset_t) object) + (2)
)), ((boolean_t) 0), 0); (object)->all_wanted &= ~(1 <<
(2)); }); } })
;
275 vm_object_unlock(object)((void)(&(object)->Lock));
276
277 if (vm_map_copy_has_cont(data_copy)(((data_copy)->c_u.c_p.cont) != (kern_return_t (*)()) 0))
278 vm_map_copy_abort_cont(data_copy)({ vm_map_copy_page_discard(data_copy); (*((data_copy)->c_u
.c_p.cont))((data_copy)->c_u.c_p.cont_args, (vm_map_copy_t
*) 0); (data_copy)->c_u.c_p.cont = (kern_return_t (*)()) 0
; (data_copy)->c_u.c_p.cont_args = (char *) 0; })
;
279
280 if (IP_VALID(reply_to)(((&(reply_to)->ip_target.ipt_object) != ((ipc_object_t
) 0)) && ((&(reply_to)->ip_target.ipt_object) !=
((ipc_object_t) -1)))
) {
281 memory_object_supply_completed(
282 reply_to, reply_to_type,
283 object->pager_request,
284 original_offset,
285 original_length,
286 result,
287 error_offset);
288 }
289
290 vm_object_deallocate(object);
291
292 /*
293 * Consume on success: The final data copy must be
294 * discarded if it is not the original. The original
295 * gets discarded only if this routine succeeds.
296 */
297 if (data_copy != orig_copy)
298 vm_map_copy_discard(data_copy);
299 if (result == KERN_SUCCESS0)
300 vm_map_copy_discard(orig_copy);
301
302
303 return(result);
304}
305
306
307/*
308 * If successful, destroys the map copy object.
309 */
310kern_return_t memory_object_data_provided(object, offset, data, data_cnt,
311 lock_value)
312 vm_object_t object;
313 vm_offset_t offset;
314 pointer_t data;
315 unsigned int data_cnt;
316 vm_prot_t lock_value;
317{
318 return memory_object_data_supply(object, offset, (vm_map_copy_t) data,
[1] Calling 'memory_object_data_supply'
319 data_cnt, lock_value, FALSE((boolean_t) 0), IP_NULL((ipc_port_t) ((ipc_object_t) 0)),
320 0);
321}
322
323
324kern_return_t memory_object_data_error(object, offset, size, error_value)
325 vm_object_t object;
326 vm_offset_t offset;
327 vm_size_t size;
328 kern_return_t error_value;
329{
330 if (object == VM_OBJECT_NULL((vm_object_t) 0))
331 return(KERN_INVALID_ARGUMENT4);
332
333 if (size != round_page(size)((vm_offset_t)((((vm_offset_t)(size)) + ((1 << 12)-1)) &
~((1 << 12)-1)))
)
334 return(KERN_INVALID_ARGUMENT4);
335
336 vm_object_lock(object);
337 offset -= object->paging_offset;
338
339 while (size != 0) {
340 vm_page_t m;
341
342 m = vm_page_lookup(object, offset);
343 if ((m != VM_PAGE_NULL((vm_page_t) 0)) && m->busy && m->absent) {
344 m->error = TRUE((boolean_t) 1);
345 m->absent = FALSE((boolean_t) 0);
346 vm_object_absent_release(object)({ (object)->absent_count--; ({ if (((object))->all_wanted
& (1 << (3))) thread_wakeup_prim(((event_t)(((vm_offset_t
) (object)) + (3))), ((boolean_t) 0), 0); ((object))->all_wanted
&= ~(1 << (3)); }); })
;
347
348 PAGE_WAKEUP_DONE(m)({ (m)->busy = ((boolean_t) 0); if ((m)->wanted) { (m)->
wanted = ((boolean_t) 0); thread_wakeup_prim((((event_t) m)),
((boolean_t) 0), 0); } })
;
349
350 vm_page_lock_queues();
351 vm_page_activate(m);
352 vm_page_unlock_queues()((void)(&vm_page_queue_lock));
353 }
354
355 size -= PAGE_SIZE(1 << 12);
356 offset += PAGE_SIZE(1 << 12);
357 }
358 vm_object_unlock(object)((void)(&(object)->Lock));
359
360 vm_object_deallocate(object);
361 return(KERN_SUCCESS0);
362}
363
364kern_return_t memory_object_data_unavailable(object, offset, size)
365 vm_object_t object;
366 vm_offset_t offset;
367 vm_size_t size;
368{
369#if MACH_PAGEMAP1
370 vm_external_t existence_info = VM_EXTERNAL_NULL((vm_external_t) 0);
371#endif /* MACH_PAGEMAP */
372
373 if (object == VM_OBJECT_NULL((vm_object_t) 0))
374 return(KERN_INVALID_ARGUMENT4);
375
376 if (size != round_page(size)((vm_offset_t)((((vm_offset_t)(size)) + ((1 << 12)-1)) &
~((1 << 12)-1)))
)
377 return(KERN_INVALID_ARGUMENT4);
378
379#if MACH_PAGEMAP1
380 if ((offset == 0) && (size > VM_EXTERNAL_LARGE_SIZE8192) &&
381 (object->existence_info == VM_EXTERNAL_NULL((vm_external_t) 0))) {
382 existence_info = vm_external_create(VM_EXTERNAL_SMALL_SIZE128);
383 }
384#endif /* MACH_PAGEMAP */
385
386 vm_object_lock(object);
387#if MACH_PAGEMAP1
388 if (existence_info != VM_EXTERNAL_NULL((vm_external_t) 0)) {
389 object->existence_info = existence_info;
390 }
391 if ((offset == 0) && (size > VM_EXTERNAL_LARGE_SIZE8192)) {
392 vm_object_unlock(object)((void)(&(object)->Lock));
393 vm_object_deallocate(object);
394 return(KERN_SUCCESS0);
395 }
396#endif /* MACH_PAGEMAP */
397 offset -= object->paging_offset;
398
399 while (size != 0) {
400 vm_page_t m;
401
402 /*
403 * We're looking for pages that are both busy and
404 * absent (waiting to be filled), converting them
405 * to just absent.
406 *
407 * Pages that are just busy can be ignored entirely.
408 */
409
410 m = vm_page_lookup(object, offset);
411 if ((m != VM_PAGE_NULL((vm_page_t) 0)) && m->busy && m->absent) {
412 PAGE_WAKEUP_DONE(m)({ (m)->busy = ((boolean_t) 0); if ((m)->wanted) { (m)->
wanted = ((boolean_t) 0); thread_wakeup_prim((((event_t) m)),
((boolean_t) 0), 0); } })
;
413
414 vm_page_lock_queues();
415 vm_page_activate(m);
416 vm_page_unlock_queues()((void)(&vm_page_queue_lock));
417 }
418 size -= PAGE_SIZE(1 << 12);
419 offset += PAGE_SIZE(1 << 12);
420 }
421
422 vm_object_unlock(object)((void)(&(object)->Lock));
423
424 vm_object_deallocate(object);
425 return(KERN_SUCCESS0);
426}
427
428/*
429 * Routine: memory_object_lock_page
430 *
431 * Description:
432 * Perform the appropriate lock operations on the
433 * given page. See the description of
434 * "memory_object_lock_request" for the meanings
435 * of the arguments.
436 *
437 * Returns an indication that the operation
438 * completed, blocked, or that the page must
439 * be cleaned.
440 */
441
442#define MEMORY_OBJECT_LOCK_RESULT_DONE0 0
443#define MEMORY_OBJECT_LOCK_RESULT_MUST_BLOCK1 1
444#define MEMORY_OBJECT_LOCK_RESULT_MUST_CLEAN2 2
445#define MEMORY_OBJECT_LOCK_RESULT_MUST_RETURN3 3
446
447memory_object_lock_result_t memory_object_lock_page(m, should_return,
448 should_flush, prot)
449 vm_page_t m;
450 memory_object_return_t should_return;
451 boolean_t should_flush;
452 vm_prot_t prot;
453{
454 /*
455 * Don't worry about pages for which the kernel
456 * does not have any data.
457 */
458
459 if (m->absent)
460 return(MEMORY_OBJECT_LOCK_RESULT_DONE0);
461
462 /*
463 * If we cannot change access to the page,
464 * either because a mapping is in progress
465 * (busy page) or because a mapping has been
466 * wired, then give up.
467 */
468
469 if (m->busy)
470 return(MEMORY_OBJECT_LOCK_RESULT_MUST_BLOCK1);
471
472 assert(!m->fictitious)({ if (!(!m->fictitious)) Assert("!m->fictitious", "../vm/memory_object.c"
, 472); })
;
473
474 if (m->wire_count != 0) {
475 /*
476 * If no change would take place
477 * anyway, return successfully.
478 *
479 * No change means:
480 * Not flushing AND
481 * No change to page lock [2 checks] AND
482 * Don't need to send page to manager
483 *
484 * Don't need to send page to manager means:
485 * No clean or return request OR (
486 * Page is not dirty [2 checks] AND (
487 * Page is not precious OR
488 * No request to return precious pages ))
489 *
490 * Now isn't that straightforward and obvious ?? ;-)
491 *
492 * XXX This doesn't handle sending a copy of a wired
493 * XXX page to the pager, but that will require some
494 * XXX significant surgery.
495 */
496
497 if (!should_flush &&
498 ((m->page_lock == prot) || (prot == VM_PROT_NO_CHANGE((vm_prot_t) 0x08))) &&
499 ((should_return == MEMORY_OBJECT_RETURN_NONE0) ||
500 (!m->dirty && !pmap_is_modified(m->phys_addr) &&
501 (!m->precious ||
502 should_return != MEMORY_OBJECT_RETURN_ALL2)))) {
503 /*
504 * Restart page unlock requests,
505 * even though no change took place.
506 * [Memory managers may be expecting
507 * to see new requests.]
508 */
509 m->unlock_request = VM_PROT_NONE((vm_prot_t) 0x00);
510 PAGE_WAKEUP(m)({ if ((m)->wanted) { (m)->wanted = ((boolean_t) 0); thread_wakeup_prim
(((event_t) (m)), ((boolean_t) 0), 0); } })
;
511
512 return(MEMORY_OBJECT_LOCK_RESULT_DONE0);
513 }
514
515 return(MEMORY_OBJECT_LOCK_RESULT_MUST_BLOCK1);
516 }
517
518 /*
519 * If the page is to be flushed, allow
520 * that to be done as part of the protection.
521 */
522
523 if (should_flush)
524 prot = VM_PROT_ALL(((vm_prot_t) 0x01)|((vm_prot_t) 0x02)|((vm_prot_t) 0x04));
525
526 /*
527 * Set the page lock.
528 *
529 * If we are decreasing permission, do it now;
530 * let the fault handler take care of increases
531 * (pmap_page_protect may not increase protection).
532 */
533
534 if (prot != VM_PROT_NO_CHANGE((vm_prot_t) 0x08)) {
535 if ((m->page_lock ^ prot) & prot) {
536 pmap_page_protect(m->phys_addr, VM_PROT_ALL(((vm_prot_t) 0x01)|((vm_prot_t) 0x02)|((vm_prot_t) 0x04)) & ~prot);
537 }
538 m->page_lock = prot;
539
540 /*
541 * Restart any past unlock requests, even if no
542 * change resulted. If the manager explicitly
543 * requested no protection change, then it is assumed
544 * to be remembering past requests.
545 */
546
547 m->unlock_request = VM_PROT_NONE((vm_prot_t) 0x00);
548 PAGE_WAKEUP(m)({ if ((m)->wanted) { (m)->wanted = ((boolean_t) 0); thread_wakeup_prim
(((event_t) (m)), ((boolean_t) 0), 0); } })
;
549 }
550
551 /*
552 * Handle cleaning.
553 */
554
555 if (should_return != MEMORY_OBJECT_RETURN_NONE0) {
556 /*
557 * Check whether the page is dirty. If
558 * write permission has not been removed,
559 * this may have unpredictable results.
560 */
561
562 if (!m->dirty)
563 m->dirty = pmap_is_modified(m->phys_addr);
564
565 if (m->dirty || (m->precious &&
566 should_return == MEMORY_OBJECT_RETURN_ALL2)) {
567 /*
568 * If we weren't planning
569 * to flush the page anyway,
570 * we may need to remove the
571 * page from the pageout
572 * system and from physical
573 * maps now.
574 */
575
576 vm_page_lock_queues();
577 VM_PAGE_QUEUES_REMOVE(m)({ if (m->active) { { queue_entry_t next, prev; next = (m)
->pageq.next; prev = (m)->pageq.prev; if ((&vm_page_queue_active
) == next) (&vm_page_queue_active)->prev = prev; else (
(vm_page_t)next)->pageq.prev = prev; if ((&vm_page_queue_active
) == prev) (&vm_page_queue_active)->next = next; else (
(vm_page_t)prev)->pageq.next = next; }; m->active = ((boolean_t
) 0); vm_page_active_count--; } if (m->inactive) { { queue_entry_t
next, prev; next = (m)->pageq.next; prev = (m)->pageq.
prev; if ((&vm_page_queue_inactive) == next) (&vm_page_queue_inactive
)->prev = prev; else ((vm_page_t)next)->pageq.prev = prev
; if ((&vm_page_queue_inactive) == prev) (&vm_page_queue_inactive
)->next = next; else ((vm_page_t)prev)->pageq.next = next
; }; m->inactive = ((boolean_t) 0); vm_page_inactive_count
--; } })
;
578 vm_page_unlock_queues()((void)(&vm_page_queue_lock));
579
580 if (!should_flush)
581 pmap_page_protect(m->phys_addr,
582 VM_PROT_NONE((vm_prot_t) 0x00));
583
584 /*
585 * Cleaning a page will cause
586 * it to be flushed.
587 */
588
589 if (m->dirty)
590 return(MEMORY_OBJECT_LOCK_RESULT_MUST_CLEAN2);
591 else
592 return(MEMORY_OBJECT_LOCK_RESULT_MUST_RETURN3);
593 }
594 }
595
596 /*
597 * Handle flushing
598 */
599
600 if (should_flush) {
601 VM_PAGE_FREE(m)({ ; vm_page_free(m); ((void)(&vm_page_queue_lock)); });
602 } else {
603 extern boolean_t vm_page_deactivate_hint;
604
605 /*
606 * XXX Make clean but not flush a paging hint,
607 * and deactivate the pages. This is a hack
608 * because it overloads flush/clean with
609 * implementation-dependent meaning. This only
610 * happens to pages that are already clean.
611 */
612
613 if (vm_page_deactivate_hint &&
614 (should_return != MEMORY_OBJECT_RETURN_NONE0)) {
615 vm_page_lock_queues();
616 vm_page_deactivate(m);
617 vm_page_unlock_queues()((void)(&vm_page_queue_lock));
618 }
619 }
620
621 return(MEMORY_OBJECT_LOCK_RESULT_DONE0);
622}
623
624/*
625 * Routine: memory_object_lock_request [user interface]
626 *
627 * Description:
628 * Control use of the data associated with the given
629 * memory object. For each page in the given range,
630 * perform the following operations, in order:
631 * 1) restrict access to the page (disallow
632 * forms specified by "prot");
633 * 2) return data to the manager (if "should_return"
634 * is RETURN_DIRTY and the page is dirty, or
635 * "should_return" is RETURN_ALL and the page
636 * is either dirty or precious); and,
637 * 3) flush the cached copy (if "should_flush"
638 * is asserted).
639 * The set of pages is defined by a starting offset
640 * ("offset") and size ("size"). Only pages with the
641 * same page alignment as the starting offset are
642 * considered.
643 *
644 * A single acknowledgement is sent (to the "reply_to"
645 * port) when these actions are complete. If successful,
646 * the naked send right for reply_to is consumed.
647 */
648
649kern_return_t
650memory_object_lock_request(object, offset, size,
651 should_return, should_flush, prot,
652 reply_to, reply_to_type)
653 vm_object_t object;
654 vm_offset_t offset;
655 vm_size_t size;
656 memory_object_return_t should_return;
657 boolean_t should_flush;
658 vm_prot_t prot;
659 ipc_port_t reply_to;
660 mach_msg_type_name_t reply_to_type;
661{
662 vm_page_t m;
663 vm_offset_t original_offset = offset;
664 vm_size_t original_size = size;
665 vm_offset_t paging_offset = 0;
666 vm_object_t new_object = VM_OBJECT_NULL((vm_object_t) 0);
667 vm_offset_t new_offset = 0;
668 vm_offset_t last_offset = offset;
669 int page_lock_result;
670 int pageout_action = 0; /* '=0' to quiet lint */
671
672#define DATA_WRITE_MAX32 32
673 vm_page_t holding_pages[DATA_WRITE_MAX32];
674
675 /*
676 * Check for bogus arguments.
677 */
678 if (object == VM_OBJECT_NULL((vm_object_t) 0) ||
679 ((prot & ~VM_PROT_ALL(((vm_prot_t) 0x01)|((vm_prot_t) 0x02)|((vm_prot_t) 0x04))) != 0 && prot != VM_PROT_NO_CHANGE((vm_prot_t) 0x08)))
680 return (KERN_INVALID_ARGUMENT4);
681
682 size = round_page(size)((vm_offset_t)((((vm_offset_t)(size)) + ((1 << 12)-1)) &
~((1 << 12)-1)))
;
683
684 /*
685 * Lock the object, and acquire a paging reference to
686 * prevent the memory_object and control ports from
687 * being destroyed.
688 */
689
690 vm_object_lock(object);
691 vm_object_paging_begin(object)((object)->paging_in_progress++);
692 offset -= object->paging_offset;
693
694 /*
695 * To avoid blocking while scanning for pages, save
696 * dirty pages to be cleaned all at once.
697 *
698 * XXXO A similar strategy could be used to limit the
699 * number of times that a scan must be restarted for
700 * other reasons. Those pages that would require blocking
701 * could be temporarily collected in another list, or
702 * their offsets could be recorded in a small array.
703 */
704
705 /*
706 * XXX NOTE: May want to consider converting this to a page list
707 * XXX vm_map_copy interface. Need to understand object
708 * XXX coalescing implications before doing so.
709 */
710
711#define PAGEOUT_PAGES({ vm_map_copy_t copy; int i; vm_page_t hp; ((void)(&(object
)->Lock)); (void) vm_map_copyin_object(new_object, 0, new_offset
, &copy); if (object->use_old_pageout) { ({ if (!(pageout_action
== 2)) Assert("pageout_action == MEMORY_OBJECT_LOCK_RESULT_MUST_CLEAN"
, "../vm/memory_object.c", 711); }); (void) memory_object_data_write
( object->pager, object->pager_request, paging_offset, (
pointer_t) copy, new_offset); } else { (void) memory_object_data_return
( object->pager, object->pager_request, paging_offset, (
pointer_t) copy, new_offset, (pageout_action == 2), !should_flush
); } ; for (i = 0; i < (((vm_size_t)(new_offset)) >>
12); i++) { hp = holding_pages[i]; if (hp != ((vm_page_t) 0)
) ({ ; vm_page_free(hp); ((void)(&vm_page_queue_lock)); }
); } new_object = ((vm_object_t) 0); })
\({
712MACRO_BEGIN({ \
713 vm_map_copy_t copy; \
714 int i; \
715 vm_page_t hp; \
716 \
717 vm_object_unlock(object)((void)(&(object)->Lock)); \
718 \
719 (void) vm_map_copyin_object(new_object, 0, new_offset, &copy); \
720 \
721 if (object->use_old_pageout) { \
722 assert(pageout_action == MEMORY_OBJECT_LOCK_RESULT_MUST_CLEAN)({ if (!(pageout_action == 2)) Assert("pageout_action == MEMORY_OBJECT_LOCK_RESULT_MUST_CLEAN"
, "../vm/memory_object.c", 722); })
; \
723 (void) memory_object_data_write( \
724 object->pager, \
725 object->pager_request, \
726 paging_offset, \
727 (pointer_t) copy, \
728 new_offset); \
729 } \
730 else { \
731 (void) memory_object_data_return( \
732 object->pager, \
733 object->pager_request, \
734 paging_offset, \
735 (pointer_t) copy, \
736 new_offset, \
737 (pageout_action == MEMORY_OBJECT_LOCK_RESULT_MUST_CLEAN2), \
738 !should_flush); \
739 } \
740 \
741 vm_object_lock(object); \
742 \
743 for (i = 0; i < atop(new_offset)(((vm_size_t)(new_offset)) >> 12); i++) { \
744 hp = holding_pages[i]; \
745 if (hp != VM_PAGE_NULL((vm_page_t) 0)) \
746 VM_PAGE_FREE(hp)({ ; vm_page_free(hp); ((void)(&vm_page_queue_lock)); }); \
747 } \
748 \
749 new_object = VM_OBJECT_NULL((vm_object_t) 0); \})
750MACRO_END})
751
752 for (;
753 size != 0;
754 size -= PAGE_SIZE(1 << 12), offset += PAGE_SIZE(1 << 12))
755 {
756 /*
757 * Limit the number of pages to be cleaned at once.
758 */
759 if (new_object != VM_OBJECT_NULL((vm_object_t) 0) &&
760 new_offset >= PAGE_SIZE(1 << 12) * DATA_WRITE_MAX32)
761 {
762 PAGEOUT_PAGES({ vm_map_copy_t copy; int i; vm_page_t hp; ((void)(&(object
)->Lock)); (void) vm_map_copyin_object(new_object, 0, new_offset
, &copy); if (object->use_old_pageout) { ({ if (!(pageout_action
== 2)) Assert("pageout_action == MEMORY_OBJECT_LOCK_RESULT_MUST_CLEAN"
, "../vm/memory_object.c", 762); }); (void) memory_object_data_write
( object->pager, object->pager_request, paging_offset, (
pointer_t) copy, new_offset); } else { (void) memory_object_data_return
( object->pager, object->pager_request, paging_offset, (
pointer_t) copy, new_offset, (pageout_action == 2), !should_flush
); } ; for (i = 0; i < (((vm_size_t)(new_offset)) >>
12); i++) { hp = holding_pages[i]; if (hp != ((vm_page_t) 0)
) ({ ; vm_page_free(hp); ((void)(&vm_page_queue_lock)); }
); } new_object = ((vm_object_t) 0); })
;
763 }
764
765 while ((m = vm_page_lookup(object, offset)) != VM_PAGE_NULL((vm_page_t) 0)) {
766 switch ((page_lock_result = memory_object_lock_page(m,
767 should_return,
768 should_flush,
769 prot)))
770 {
771 case MEMORY_OBJECT_LOCK_RESULT_DONE0:
772 /*
773 * End of a cluster of dirty pages.
774 */
775 if (new_object != VM_OBJECT_NULL((vm_object_t) 0)) {
776 PAGEOUT_PAGES({ vm_map_copy_t copy; int i; vm_page_t hp; ((void)(&(object
)->Lock)); (void) vm_map_copyin_object(new_object, 0, new_offset
, &copy); if (object->use_old_pageout) { ({ if (!(pageout_action
== 2)) Assert("pageout_action == MEMORY_OBJECT_LOCK_RESULT_MUST_CLEAN"
, "../vm/memory_object.c", 776); }); (void) memory_object_data_write
( object->pager, object->pager_request, paging_offset, (
pointer_t) copy, new_offset); } else { (void) memory_object_data_return
( object->pager, object->pager_request, paging_offset, (
pointer_t) copy, new_offset, (pageout_action == 2), !should_flush
); } ; for (i = 0; i < (((vm_size_t)(new_offset)) >>
12); i++) { hp = holding_pages[i]; if (hp != ((vm_page_t) 0)
) ({ ; vm_page_free(hp); ((void)(&vm_page_queue_lock)); }
); } new_object = ((vm_object_t) 0); })
;
777 continue;
778 }
779 break;
780
781 case MEMORY_OBJECT_LOCK_RESULT_MUST_BLOCK1:
782 /*
783 * Since it is necessary to block,
784 * clean any dirty pages now.
785 */
786 if (new_object != VM_OBJECT_NULL((vm_object_t) 0)) {
787 PAGEOUT_PAGES({ vm_map_copy_t copy; int i; vm_page_t hp; ((void)(&(object
)->Lock)); (void) vm_map_copyin_object(new_object, 0, new_offset
, &copy); if (object->use_old_pageout) { ({ if (!(pageout_action
== 2)) Assert("pageout_action == MEMORY_OBJECT_LOCK_RESULT_MUST_CLEAN"
, "../vm/memory_object.c", 787); }); (void) memory_object_data_write
( object->pager, object->pager_request, paging_offset, (
pointer_t) copy, new_offset); } else { (void) memory_object_data_return
( object->pager, object->pager_request, paging_offset, (
pointer_t) copy, new_offset, (pageout_action == 2), !should_flush
); } ; for (i = 0; i < (((vm_size_t)(new_offset)) >>
12); i++) { hp = holding_pages[i]; if (hp != ((vm_page_t) 0)
) ({ ; vm_page_free(hp); ((void)(&vm_page_queue_lock)); }
); } new_object = ((vm_object_t) 0); })
;
788 continue;
789 }
790
791 PAGE_ASSERT_WAIT(m, FALSE)({ (m)->wanted = ((boolean_t) 1); assert_wait((event_t) (m
), (((boolean_t) 0))); })
;
792 vm_object_unlock(object)((void)(&(object)->Lock));
793 thread_block((void (*)()) 0);
794 vm_object_lock(object);
795 continue;
796
797 case MEMORY_OBJECT_LOCK_RESULT_MUST_CLEAN2:
798 case MEMORY_OBJECT_LOCK_RESULT_MUST_RETURN3:
799 /*
800 * The clean and return cases are similar.
801 *
802 * Mark the page busy since we unlock the
803 * object below.
804 */
805 m->busy = TRUE((boolean_t) 1);
806
807 /*
808 * if this would form a discontiguous block,
809 * clean the old pages and start anew.
810 *
811 * NOTE: The first time through here, new_object
812 * is null, hiding the fact that pageout_action
813 * is not initialized.
814 */
815 if (new_object != VM_OBJECT_NULL((vm_object_t) 0) &&
816 (last_offset != offset ||
817 pageout_action != page_lock_result)) {
818 PAGEOUT_PAGES({ vm_map_copy_t copy; int i; vm_page_t hp; ((void)(&(object
)->Lock)); (void) vm_map_copyin_object(new_object, 0, new_offset
, &copy); if (object->use_old_pageout) { ({ if (!(pageout_action
== 2)) Assert("pageout_action == MEMORY_OBJECT_LOCK_RESULT_MUST_CLEAN"
, "../vm/memory_object.c", 818); }); (void) memory_object_data_write
( object->pager, object->pager_request, paging_offset, (
pointer_t) copy, new_offset); } else { (void) memory_object_data_return
( object->pager, object->pager_request, paging_offset, (
pointer_t) copy, new_offset, (pageout_action == 2), !should_flush
); } ; for (i = 0; i < (((vm_size_t)(new_offset)) >>
12); i++) { hp = holding_pages[i]; if (hp != ((vm_page_t) 0)
) ({ ; vm_page_free(hp); ((void)(&vm_page_queue_lock)); }
); } new_object = ((vm_object_t) 0); })
;
819 }
820
821 vm_object_unlock(object)((void)(&(object)->Lock));
822
823 /*
824 * If we have not already allocated an object
825 * for a range of pages to be written, do so
826 * now.
827 */
828 if (new_object == VM_OBJECT_NULL((vm_object_t) 0)) {
829 new_object = vm_object_allocate(original_size);
830 new_offset = 0;
831 paging_offset = m->offset +
832 object->paging_offset;
833 pageout_action = page_lock_result;
834 }
835
836 /*
837 * Move or copy the dirty page into the
838 * new object.
839 */
840 m = vm_pageout_setup(m,
841 m->offset + object->paging_offset,
842 new_object,
843 new_offset,
844 should_flush);
845
846 /*
847 * Save the holding page if there is one.
848 */
849 holding_pages[atop(new_offset)(((vm_size_t)(new_offset)) >> 12)] = m;
850 new_offset += PAGE_SIZE(1 << 12);
851 last_offset = offset + PAGE_SIZE(1 << 12);
852
853 vm_object_lock(object);
854 break;
855 }
856 break;
857 }
858 }
859
860 /*
861 * We have completed the scan for applicable pages.
862 * Clean any pages that have been saved.
863 */
864 if (new_object != VM_OBJECT_NULL((vm_object_t) 0)) {
865 PAGEOUT_PAGES({ vm_map_copy_t copy; int i; vm_page_t hp; ((void)(&(object
)->Lock)); (void) vm_map_copyin_object(new_object, 0, new_offset
, &copy); if (object->use_old_pageout) { ({ if (!(pageout_action
== 2)) Assert("pageout_action == MEMORY_OBJECT_LOCK_RESULT_MUST_CLEAN"
, "../vm/memory_object.c", 865); }); (void) memory_object_data_write
( object->pager, object->pager_request, paging_offset, (
pointer_t) copy, new_offset); } else { (void) memory_object_data_return
( object->pager, object->pager_request, paging_offset, (
pointer_t) copy, new_offset, (pageout_action == 2), !should_flush
); } ; for (i = 0; i < (((vm_size_t)(new_offset)) >>
12); i++) { hp = holding_pages[i]; if (hp != ((vm_page_t) 0)
) ({ ; vm_page_free(hp); ((void)(&vm_page_queue_lock)); }
); } new_object = ((vm_object_t) 0); })
;
866 }
867
868 if (IP_VALID(reply_to)(((&(reply_to)->ip_target.ipt_object) != ((ipc_object_t
) 0)) && ((&(reply_to)->ip_target.ipt_object) !=
((ipc_object_t) -1)))
) {
869 vm_object_unlock(object)((void)(&(object)->Lock));
870
871 /* consumes our naked send-once/send right for reply_to */
872 (void) memory_object_lock_completed(reply_to, reply_to_type,
873 object->pager_request, original_offset, original_size);
874
875 vm_object_lock(object);
876 }
877
878 vm_object_paging_end(object)({ ({ if (!((object)->paging_in_progress != 0)) Assert("(object)->paging_in_progress != 0"
, "../vm/memory_object.c", 878); }); if (--(object)->paging_in_progress
== 0) { ({ if ((object)->all_wanted & (1 << (2)
)) thread_wakeup_prim(((event_t)(((vm_offset_t) object) + (2)
)), ((boolean_t) 0), 0); (object)->all_wanted &= ~(1 <<
(2)); }); } })
;
879 vm_object_unlock(object)((void)(&(object)->Lock));
880 vm_object_deallocate(object);
881
882 return (KERN_SUCCESS0);
883}
884
885kern_return_t
886memory_object_set_attributes_common(object, object_ready, may_cache,
887 copy_strategy, use_old_pageout)
888 vm_object_t object;
889 boolean_t object_ready;
890 boolean_t may_cache;
891 memory_object_copy_strategy_t copy_strategy;
892 boolean_t use_old_pageout;
893{
894 if (object == VM_OBJECT_NULL((vm_object_t) 0))
895 return(KERN_INVALID_ARGUMENT4);
896
897 /*
898 * Verify the attributes of importance
899 */
900
901 switch(copy_strategy) {
902 case MEMORY_OBJECT_COPY_NONE0:
903 case MEMORY_OBJECT_COPY_CALL1:
904 case MEMORY_OBJECT_COPY_DELAY2:
905 case MEMORY_OBJECT_COPY_TEMPORARY3:
906 break;
907 default:
908 vm_object_deallocate(object);
909 return(KERN_INVALID_ARGUMENT4);
910 }
911
912 if (object_ready)
913 object_ready = TRUE((boolean_t) 1);
914 if (may_cache)
915 may_cache = TRUE((boolean_t) 1);
916
917 vm_object_lock(object);
918
919 /*
920 * Wake up anyone waiting for the ready attribute
921 * to become asserted.
922 */
923
924 if (object_ready && !object->pager_ready) {
925 object->use_old_pageout = use_old_pageout;
926 vm_object_wakeup(object, VM_OBJECT_EVENT_PAGER_READY)({ if ((object)->all_wanted & (1 << (1))) thread_wakeup_prim
(((event_t)(((vm_offset_t) object) + (1))), ((boolean_t) 0), 0
); (object)->all_wanted &= ~(1 << (1)); })
;
927 }
928
929 /*
930 * Copy the attributes
931 */
932
933 object->can_persist = may_cache;
934 object->pager_ready = object_ready;
935 if (copy_strategy == MEMORY_OBJECT_COPY_TEMPORARY3) {
936 object->temporary = TRUE((boolean_t) 1);
937 } else {
938 object->copy_strategy = copy_strategy;
939 }
940
941 vm_object_unlock(object)((void)(&(object)->Lock));
942
943 vm_object_deallocate(object);
944
945 return(KERN_SUCCESS0);
946}
947
948/*
949 * XXX rpd claims that reply_to could be obviated in favor of a client
950 * XXX stub that made change_attributes an RPC. Need investigation.
951 */
952
953kern_return_t memory_object_change_attributes(object, may_cache,
954 copy_strategy, reply_to, reply_to_type)
955 vm_object_t object;
956 boolean_t may_cache;
957 memory_object_copy_strategy_t copy_strategy;
958 ipc_port_t reply_to;
959 mach_msg_type_name_t reply_to_type;
960{
961 kern_return_t result;
962
963 /*
964 * Do the work and throw away our object reference. It
965 * is important that the object reference be deallocated
966 * BEFORE sending the reply. The whole point of the reply
967 * is that it shows up after the terminate message that
968 * may be generated by setting the object uncacheable.
969 *
970 * XXX may_cache may become a tri-valued variable to handle
971 * XXX uncache if not in use.
972 */
973 result = memory_object_set_attributes_common(object, TRUE((boolean_t) 1),
974 may_cache, copy_strategy,
975 FALSE((boolean_t) 0));
976
977 if (IP_VALID(reply_to)(((&(reply_to)->ip_target.ipt_object) != ((ipc_object_t
) 0)) && ((&(reply_to)->ip_target.ipt_object) !=
((ipc_object_t) -1)))
) {
978
979 /* consumes our naked send-once/send right for reply_to */
980 (void) memory_object_change_completed(reply_to, reply_to_type,
981 may_cache, copy_strategy);
982
983 }
984
985 return(result);
986}
987
988kern_return_t
989memory_object_set_attributes(object, object_ready, may_cache, copy_strategy)
990 vm_object_t object;
991 boolean_t object_ready;
992 boolean_t may_cache;
993 memory_object_copy_strategy_t copy_strategy;
994{
995 return memory_object_set_attributes_common(object, object_ready,
996 may_cache, copy_strategy,
997 TRUE((boolean_t) 1));
998}
999
1000kern_return_t memory_object_ready(object, may_cache, copy_strategy)
1001 vm_object_t object;
1002 boolean_t may_cache;
1003 memory_object_copy_strategy_t copy_strategy;
1004{
1005 return memory_object_set_attributes_common(object, TRUE((boolean_t) 1),
1006 may_cache, copy_strategy,
1007 FALSE((boolean_t) 0));
1008}
1009
1010kern_return_t memory_object_get_attributes(object, object_ready,
1011 may_cache, copy_strategy)
1012 vm_object_t object;
1013 boolean_t *object_ready;
1014 boolean_t *may_cache;
1015 memory_object_copy_strategy_t *copy_strategy;
1016{
1017 if (object == VM_OBJECT_NULL((vm_object_t) 0))
1018 return(KERN_INVALID_ARGUMENT4);
1019
1020 vm_object_lock(object);
1021 *may_cache = object->can_persist;
1022 *object_ready = object->pager_ready;
1023 *copy_strategy = object->copy_strategy;
1024 vm_object_unlock(object)((void)(&(object)->Lock));
1025
1026 vm_object_deallocate(object);
1027
1028 return(KERN_SUCCESS0);
1029}
1030
1031/*
1032 * If successful, consumes the supplied naked send right.
1033 */
1034kern_return_t vm_set_default_memory_manager(host, default_manager)
1035 const host_t host;
1036 ipc_port_t *default_manager;
1037{
1038 ipc_port_t current_manager;
1039 ipc_port_t new_manager;
1040 ipc_port_t returned_manager;
1041
1042 if (host == HOST_NULL((host_t)0))
1043 return(KERN_INVALID_HOST22);
1044
1045 new_manager = *default_manager;
1046 simple_lock(&memory_manager_default_lock);
1047 current_manager = memory_manager_default;
1048
1049 if (new_manager == IP_NULL((ipc_port_t) ((ipc_object_t) 0))) {
1050 /*
1051 * Retrieve the current value.
1052 */
1053
1054 returned_manager = ipc_port_copy_send(current_manager);
1055 } else {
1056 /*
1057 * Retrieve the current value,
1058 * and replace it with the supplied value.
1059 * We consume the supplied naked send right.
1060 */
1061
1062 returned_manager = current_manager;
1063 memory_manager_default = new_manager;
1064
1065 /*
1066 * In case anyone's been waiting for a memory
1067 * manager to be established, wake them up.
1068 */
1069
1070 thread_wakeup((event_t) &memory_manager_default)thread_wakeup_prim(((event_t) &memory_manager_default), (
(boolean_t) 0), 0)
;
1071 }
1072
1073 simple_unlock(&memory_manager_default_lock)((void)(&memory_manager_default_lock));
1074
1075 *default_manager = returned_manager;
1076 return(KERN_SUCCESS0);
1077}
1078
1079/*
1080 * Routine: memory_manager_default_reference
1081 * Purpose:
1082 * Returns a naked send right for the default
1083 * memory manager. The returned right is always
1084 * valid (not IP_NULL or IP_DEAD).
1085 */
1086
1087ipc_port_t memory_manager_default_reference(void)
1088{
1089 ipc_port_t current_manager;
1090
1091 simple_lock(&memory_manager_default_lock);
1092
1093 while (current_manager = ipc_port_copy_send(memory_manager_default),
1094 !IP_VALID(current_manager)(((&(current_manager)->ip_target.ipt_object) != ((ipc_object_t
) 0)) && ((&(current_manager)->ip_target.ipt_object
) != ((ipc_object_t) -1)))
) {
1095 thread_sleep((event_t) &memory_manager_default,
1096 simple_lock_addr(memory_manager_default_lock)((simple_lock_t)0),
1097 FALSE((boolean_t) 0));
1098 simple_lock(&memory_manager_default_lock);
1099 }
1100
1101 simple_unlock(&memory_manager_default_lock)((void)(&memory_manager_default_lock));
1102
1103 return current_manager;
1104}
1105
1106/*
1107 * Routine: memory_manager_default_port
1108 * Purpose:
1109 * Returns true if the receiver for the port
1110 * is the default memory manager.
1111 *
1112 * This is a hack to let ds_read_done
1113 * know when it should keep memory wired.
1114 */
1115
1116boolean_t memory_manager_default_port(port)
1117 const ipc_port_t port;
1118{
1119 ipc_port_t current;
1120 boolean_t result;
1121
1122 simple_lock(&memory_manager_default_lock);
1123 current = memory_manager_default;
1124 if (IP_VALID(current)(((&(current)->ip_target.ipt_object) != ((ipc_object_t
) 0)) && ((&(current)->ip_target.ipt_object) !=
((ipc_object_t) -1)))
) {
1125 /*
1126 * There is no point in bothering to lock
1127 * both ports, which would be painful to do.
1128 * If the receive rights are moving around,
1129 * we might be inaccurate.
1130 */
1131
1132 result = port->ip_receiverdata.receiver == current->ip_receiverdata.receiver;
1133 } else
1134 result = FALSE((boolean_t) 0);
1135 simple_unlock(&memory_manager_default_lock)((void)(&memory_manager_default_lock));
1136
1137 return result;
1138}
1139
1140void memory_manager_default_init(void)
1141{
1142 memory_manager_default = IP_NULL((ipc_port_t) ((ipc_object_t) 0));
1143 simple_lock_init(&memory_manager_default_lock);
1144}
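
As a closing note on the reported path: the guarded variant below shows one way the simplified model from the bug summary avoids the null dereference, by re-checking the copy at the top of each iteration. It is a sketch against the same placeholder types, not a proposed patch for memory_object_data_supply; the correct error handling in the Mach code (result code, reply, copy cleanup) would need separate review.

    /* Guarded variant of the model above -- placeholder types, not Mach code. */
    #include <stddef.h>

    #define PAGE_SIZE 4096

    struct copy {                           /* same placeholder as in the earlier sketch */
        int npages;
        int (*cont)(struct copy **out);
    };

    /* Returns 0 on success, -1 if the continuation runs dry early. */
    int consume_guarded(struct copy *data_copy, size_t data_cnt)
    {
        for (; data_cnt > 0; data_cnt -= PAGE_SIZE) {
            if (data_copy == NULL || data_copy->npages <= 0)
                return -1;                  /* bail out instead of dereferencing NULL */
            data_copy->npages--;

            if (data_copy->npages == 0 && data_copy->cont != NULL) {
                struct copy *new_copy = NULL;
                if (data_copy->cont(&new_copy) != 0)
                    return -1;
                data_copy = new_copy;       /* may be NULL; checked at the top of the loop */
            }
        }
        return 0;
    }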