Bug Summary

File: obj-scan-build/../vm/memory_object.c
Location: line 276, column 6
Description: Dereference of null pointer

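How the path below reaches the defect: the report enters through memory_object_data_provided (step 1), which forwards its data argument to memory_object_data_supply as a page-list vm_map_copy. In the pagein loop, once the last page of the current copy is consumed the continuation is invoked (line 242); when it returns KERN_SUCCESS but yields no new copy, the assignment at line 256 leaves data_copy equal to VM_MAP_COPY_NULL (step 21 takes the false branch). After the loop exits (step 23), line 276 hands that null pointer to vm_map_copy_has_cont, whose expansion reads data_copy->c_u.c_p.cont, which is the dereference being reported.

One possible guard is sketched here purely as an illustration of where a check could go; it assumes no caller invariant already guarantees a non-null data_copy at this point, and it is not a verified fix:

    /* Hypothetical guard: line 256 may have set data_copy to
     * VM_MAP_COPY_NULL when the copy chain was exhausted, so check
     * it before testing for a continuation to abort. */
    if (data_copy != VM_MAP_COPY_NULL && vm_map_copy_has_cont(data_copy))
        vm_map_copy_abort_cont(data_copy);
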
Annotated Source Code

1/*
2 * Mach Operating System
3 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University.
4 * Copyright (c) 1993,1994 The University of Utah and
5 * the Computer Systems Laboratory (CSL).
6 * All rights reserved.
7 *
8 * Permission to use, copy, modify and distribute this software and its
9 * documentation is hereby granted, provided that both the copyright
10 * notice and this permission notice appear in all copies of the
11 * software, derivative works or modified versions, and any portions
12 * thereof, and that both notices appear in supporting documentation.
13 *
14 * CARNEGIE MELLON, THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF
15 * THIS SOFTWARE IN ITS "AS IS" CONDITION, AND DISCLAIM ANY LIABILITY
16 * OF ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF
17 * THIS SOFTWARE.
18 *
19 * Carnegie Mellon requests users of this software to return to
20 *
21 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
22 * School of Computer Science
23 * Carnegie Mellon University
24 * Pittsburgh PA 15213-3890
25 *
26 * any improvements or extensions that they make and grant Carnegie Mellon
27 * the rights to redistribute these changes.
28 */
29/*
30 * File: vm/memory_object.c
31 * Author: Michael Wayne Young
32 *
33 * External memory management interface control functions.
34 */
35
36/*
37 * Interface dependencies:
38 */
39
40#include <mach/std_types.h> /* For pointer_t */
41#include <mach/mach_types.h>
42
43#include <mach/kern_return.h>
44#include <vm/vm_map.h>
45#include <vm/vm_object.h>
46#include <mach/memory_object.h>
47#include <mach/boolean.h>
48#include <mach/vm_prot.h>
49#include <mach/message.h>
50
51#include <vm/memory_object_user.user.h>
52#include <vm/memory_object_default.user.h>
53
54/*
55 * Implementation dependencies:
56 */
57#include <vm/memory_object.h>
58#include <vm/vm_page.h>
59#include <vm/vm_pageout.h>
60#include <vm/pmap.h> /* For copy_to_phys, pmap_clear_modify */
61#include <kern/debug.h> /* For panic() */
62#include <kern/thread.h> /* For current_thread() */
63#include <kern/host.h>
64#include <vm/vm_kern.h> /* For kernel_map, vm_move */
65#include <vm/vm_map.h> /* For vm_map_pageable */
66#include <ipc/ipc_port.h>
67
68#if MACH_PAGEMAP1
69#include <vm/vm_external.h>
70#endif /* MACH_PAGEMAP */
71
72typedef int memory_object_lock_result_t; /* moved from below */
73
74
75 ipc_port_t memory_manager_default = IP_NULL;
76 decl_simple_lock_data(,memory_manager_default_lock)
77
78/*
79 * Important note:
80 * All of these routines gain a reference to the
81 * object (first argument) as part of the automatic
82 * argument conversion. Explicit deallocation is necessary.
83 */
84
85 kern_return_t memory_object_data_supply(
86 vm_object_t object,
87 vm_offset_t offset,
88 vm_map_copy_t data_copy,
89 unsigned int data_cnt,
90 vm_prot_t lock_value,
91 boolean_t precious,
92 ipc_port_t reply_to,
93 mach_msg_type_name_t reply_to_type)
94 {
95 kern_return_t result = KERN_SUCCESS;
96 vm_offset_t error_offset = 0;
97 vm_page_t m;
98 vm_page_t data_m;
99 vm_size_t original_length;
100 vm_offset_t original_offset;
101 vm_page_t *page_list;
102 boolean_t was_absent;
103 vm_map_copy_t orig_copy = data_copy;
104
105 /*
106 * Look for bogus arguments
107 */
108
109 if (object == VM_OBJECT_NULL) {
        (2) Assuming 'object' is not equal to null
        (3) Taking false branch
110 return(KERN_INVALID_ARGUMENT);
111 }
112
113 if (lock_value & ~VM_PROT_ALL) {
        (4) Taking false branch
114 vm_object_deallocate(object);
115 return(KERN_INVALID_ARGUMENT);
116 }
117
118 if ((data_cnt % PAGE_SIZE) != 0) {
        (5) Taking false branch
119 vm_object_deallocate(object);
120 return(KERN_INVALID_ARGUMENT);
121 }
122
123 /*
124 * Adjust the offset from the memory object to the offset
125 * within the vm_object.
126 */
127
128 original_length = data_cnt;
129 original_offset = offset;
130
131 assert(data_copy->type == VM_MAP_COPY_PAGE_LIST);
132 page_list = &data_copy->cpy_page_list[0];
133
134 vm_object_lock(object);
135 vm_object_paging_begin(object);
136 offset -= object->paging_offset;
137
138 /*
139 * Loop over copy stealing pages for pagein.
140 */
141
142 for (; data_cnt > 0 ; data_cnt -= PAGE_SIZE, offset += PAGE_SIZE) {
        (6) Assuming 'data_cnt' is > 0
        (7) Loop condition is true. Entering loop body
        (22) Assuming 'data_cnt' is <= 0
        (23) Loop condition is false. Execution continues on line 273
143
144 assert(data_copy->cpy_npages > 0);
145 data_m = *page_list;
146
147 if (data_m == VM_PAGE_NULL || data_m->tabled ||
        (8) Assuming 'data_m' is not equal to null
        (9) Taking false branch
148 data_m->error || data_m->absent || data_m->fictitious) {
149
150 panic("Data_supply: bad page");
151 }
152
153 /*
154 * Look up target page and check its state.
155 */
156
157 retry_lookup:
158 m = vm_page_lookup(object,offset);
159 if (m == VM_PAGE_NULL) {
        (10) Assuming 'm' is not equal to null
        (11) Taking false branch
        (14) Assuming 'm' is equal to null
        (15) Taking true branch
160 was_absent = FALSE;
161 }
162 else {
163 if (m->absent && m->busy) {
164
165 /*
166 * Page was requested. Free the busy
167 * page waiting for it. Insertion
168 * of new page happens below.
169 */
170
171 VM_PAGE_FREE(m);
172 was_absent = TRUE;
173 }
174 else {
175
176 /*
177 * Have to wait for page that is busy and
178 * not absent. This is probably going to
179 * be an error, but go back and check.
180 */
181 if (m->busy) {
        (12) Taking true branch
182 PAGE_ASSERT_WAIT(m, FALSE);
183 vm_object_unlock(object);
184 thread_block((void (*)()) 0);
185 vm_object_lock(object);
186 goto retry_lookup;
        (13) Control jumps to line 158
187 }
188
189 /*
190 * Page already present; error.
191 * This is an error if data is precious.
192 */
193 result = KERN_MEMORY_PRESENT;
194 error_offset = offset + object->paging_offset;
195
196 break;
197 }
198 }
199
200 /*
201 * Ok to pagein page. Target object now has no page
202 * at offset. Set the page parameters, then drop
203 * in new page and set up pageout state. Object is
204 * still locked here.
205 *
206 * Must clear busy bit in page before inserting it.
207 * Ok to skip wakeup logic because nobody else
208 * can possibly know about this page.
209 */
210
211 data_m->busy = FALSE;
212 data_m->dirty = FALSE;
213 pmap_clear_modify(data_m->phys_addr);
214
215 data_m->page_lock = lock_value;
216 data_m->unlock_request = VM_PROT_NONE;
217 data_m->precious = precious;
218
219 vm_page_lock_queues();
220 vm_page_insert(data_m, object, offset);
221
222 if (was_absent)
        (16) Taking false branch
223 vm_page_activate(data_m);
224 else
225 vm_page_deactivate(data_m);
226
227 vm_page_unlock_queues();
228
229 /*
230 * Null out this page list entry, and advance to next
231 * page.
232 */
233
234 *page_list++ = VM_PAGE_NULL;
235
236 if (--(data_copy->cpy_npages) == 0 &&
        (17) Taking true branch
237 vm_map_copy_has_cont(data_copy)) {
238 vm_map_copy_t new_copy;
239
240 vm_object_unlock(object);
241
242 vm_map_copy_invoke_cont(data_copy, &new_copy, &result);
243
244 if (result == KERN_SUCCESS) {
        (18) Assuming 'result' is equal to 0
        (19) Taking true branch
245
246 /*
247 * Consume on success requires that
248 * we keep the original vm_map_copy
249 * around in case something fails.
250 * Free the old copy if it's not the original
251 */
252 if (data_copy != orig_copy) {
        (20) Taking false branch
253 vm_map_copy_discard(data_copy);
254 }
255
256 if ((data_copy = new_copy) != VM_MAP_COPY_NULL)
        (21) Taking false branch
257 page_list = &data_copy->cpy_page_list[0];
258
259 vm_object_lock(object);
260 }
261 else {
262 vm_object_lock(object);
263 error_offset = offset + object->paging_offset +
264 PAGE_SIZE;
265 break;
266 }
267 }
268 }
269
270 /*
271 * Send reply if one was requested.
272 */
273 vm_object_paging_end(object);
274 vm_object_unlock(object);
275
276 if (vm_map_copy_has_cont(data_copy))
        (24) Within the expansion of the macro 'vm_map_copy_has_cont':
            (a) Dereference of null pointer
277 vm_map_copy_abort_cont(data_copy);
278
279 if (IP_VALID(reply_to)) {
280 memory_object_supply_completed(
281 reply_to, reply_to_type,
282 object->pager_request,
283 original_offset,
284 original_length,
285 result,
286 error_offset);
287 }
288
289 vm_object_deallocate(object);
290
291 /*
292 * Consume on success: The final data copy must be
293 * be discarded if it is not the original. The original
294 * gets discarded only if this routine succeeds.
295 */
296 if (data_copy != orig_copy)
297 vm_map_copy_discard(data_copy);
298 if (result == KERN_SUCCESS)
299 vm_map_copy_discard(orig_copy);
300
301
302 return(result);
303 }
304
305 /*
306 * If successful, destroys the map copy object.
307 */
308 kern_return_t memory_object_data_provided(
309 vm_object_t object,
310 vm_offset_t offset,
311 pointer_t data,
312 unsigned int data_cnt,
313 vm_prot_t lock_value)
314 {
315 return memory_object_data_supply(object, offset, (vm_map_copy_t) data,
        (1) Calling 'memory_object_data_supply'
316 data_cnt, lock_value, FALSE, IP_NULL,
317 0);
318 }
319
320kern_return_t memory_object_data_error(
321 vm_object_t object,
322 vm_offset_t offset,
323 vm_size_t size,
324 kern_return_t error_value)
325{
326 if (object == VM_OBJECT_NULL((vm_object_t) 0))
327 return(KERN_INVALID_ARGUMENT4);
328
329 if (size != round_page(size))
330 return(KERN_INVALID_ARGUMENT4);
331
332 vm_object_lock(object);
333 offset -= object->paging_offset;
334
335 while (size != 0) {
336 vm_page_t m;
337
338 m = vm_page_lookup(object, offset);
339 if ((m != VM_PAGE_NULL((vm_page_t) 0)) && m->busy && m->absent) {
340 m->error = TRUE((boolean_t) 1);
341 m->absent = FALSE((boolean_t) 0);
342 vm_object_absent_release(object);
343
344 PAGE_WAKEUP_DONE(m);
345
346 vm_page_lock_queues();
347 vm_page_activate(m);
348 vm_page_unlock_queues()((void)(&vm_page_queue_lock));
349 }
350
351 size -= PAGE_SIZE(1 << 12);
352 offset += PAGE_SIZE(1 << 12);
353 }
354 vm_object_unlock(object)((void)(&(object)->Lock));
355
356 vm_object_deallocate(object);
357 return(KERN_SUCCESS0);
358}
359
360kern_return_t memory_object_data_unavailable(
361 vm_object_t object,
362 vm_offset_t offset,
363 vm_size_t size)
364{
365#if MACH_PAGEMAP1
366 vm_external_t existence_info = VM_EXTERNAL_NULL((vm_external_t) 0);
367#endif /* MACH_PAGEMAP */
368
369 if (object == VM_OBJECT_NULL((vm_object_t) 0))
370 return(KERN_INVALID_ARGUMENT4);
371
372 if (size != round_page(size))
373 return(KERN_INVALID_ARGUMENT4);
374
375#if MACH_PAGEMAP1
376 if ((offset == 0) && (size > VM_EXTERNAL_LARGE_SIZE8192) &&
377 (object->existence_info == VM_EXTERNAL_NULL((vm_external_t) 0))) {
378 existence_info = vm_external_create(VM_EXTERNAL_SMALL_SIZE128);
379 }
380#endif /* MACH_PAGEMAP */
381
382 vm_object_lock(object);
383#if MACH_PAGEMAP1
384 if (existence_info != VM_EXTERNAL_NULL((vm_external_t) 0)) {
385 object->existence_info = existence_info;
386 }
387 if ((offset == 0) && (size > VM_EXTERNAL_LARGE_SIZE8192)) {
388 vm_object_unlock(object)((void)(&(object)->Lock));
389 vm_object_deallocate(object);
390 return(KERN_SUCCESS0);
391 }
392#endif /* MACH_PAGEMAP */
393 offset -= object->paging_offset;
394
395 while (size != 0) {
396 vm_page_t m;
397
398 /*
399 * We're looking for pages that are both busy and
400 * absent (waiting to be filled), converting them
401 * to just absent.
402 *
403 * Pages that are just busy can be ignored entirely.
404 */
405
406 m = vm_page_lookup(object, offset);
407 if ((m != VM_PAGE_NULL((vm_page_t) 0)) && m->busy && m->absent) {
408 PAGE_WAKEUP_DONE(m);
409
410 vm_page_lock_queues();
411 vm_page_activate(m);
412 vm_page_unlock_queues()((void)(&vm_page_queue_lock));
413 }
414 size -= PAGE_SIZE(1 << 12);
415 offset += PAGE_SIZE(1 << 12);
416 }
417
418 vm_object_unlock(object)((void)(&(object)->Lock));
419
420 vm_object_deallocate(object);
421 return(KERN_SUCCESS0);
422}
423
424/*
425 * Routine: memory_object_lock_page
426 *
427 * Description:
428 * Perform the appropriate lock operations on the
429 * given page. See the description of
430 * "memory_object_lock_request" for the meanings
431 * of the arguments.
432 *
433 * Returns an indication that the operation
434 * completed, blocked, or that the page must
435 * be cleaned.
436 */
437
438#define MEMORY_OBJECT_LOCK_RESULT_DONE0 0
439#define MEMORY_OBJECT_LOCK_RESULT_MUST_BLOCK1 1
440#define MEMORY_OBJECT_LOCK_RESULT_MUST_CLEAN2 2
441#define MEMORY_OBJECT_LOCK_RESULT_MUST_RETURN3 3
442
443memory_object_lock_result_t memory_object_lock_page(
444 vm_page_t m,
445 memory_object_return_t should_return,
446 boolean_t should_flush,
447 vm_prot_t prot)
448{
449 /*
450 * Don't worry about pages for which the kernel
451 * does not have any data.
452 */
453
454 if (m->absent)
455 return(MEMORY_OBJECT_LOCK_RESULT_DONE0);
456
457 /*
458 * If we cannot change access to the page,
459 * either because a mapping is in progress
460 * (busy page) or because a mapping has been
461 * wired, then give up.
462 */
463
464 if (m->busy)
465 return(MEMORY_OBJECT_LOCK_RESULT_MUST_BLOCK1);
466
467 assert(!m->fictitious);
468
469 if (m->wire_count != 0) {
470 /*
471 * If no change would take place
472 * anyway, return successfully.
473 *
474 * No change means:
475 * Not flushing AND
476 * No change to page lock [2 checks] AND
477 * Don't need to send page to manager
478 *
479 * Don't need to send page to manager means:
480 * No clean or return request OR (
481 * Page is not dirty [2 checks] AND (
482 * Page is not precious OR
483 * No request to return precious pages ))
484 *
485 * Now isn't that straightforward and obvious ?? ;-)
486 *
487 * XXX This doesn't handle sending a copy of a wired
488 * XXX page to the pager, but that will require some
489 * XXX significant surgery.
490 */
491
492 if (!should_flush &&
493 ((m->page_lock == prot) || (prot == VM_PROT_NO_CHANGE((vm_prot_t) 0x08))) &&
494 ((should_return == MEMORY_OBJECT_RETURN_NONE0) ||
495 (!m->dirty && !pmap_is_modified(m->phys_addr) &&
496 (!m->precious ||
497 should_return != MEMORY_OBJECT_RETURN_ALL2)))) {
498 /*
499 * Restart page unlock requests,
500 * even though no change took place.
501 * [Memory managers may be expecting
502 * to see new requests.]
503 */
504 m->unlock_request = VM_PROT_NONE((vm_prot_t) 0x00);
505 PAGE_WAKEUP(m);
506
507 return(MEMORY_OBJECT_LOCK_RESULT_DONE0);
508 }
509
510 return(MEMORY_OBJECT_LOCK_RESULT_MUST_BLOCK1);
511 }
512
513 /*
514 * If the page is to be flushed, allow
515 * that to be done as part of the protection.
516 */
517
518 if (should_flush)
519 prot = VM_PROT_ALL(((vm_prot_t) 0x01)|((vm_prot_t) 0x02)|((vm_prot_t) 0x04));
520
521 /*
522 * Set the page lock.
523 *
524 * If we are decreasing permission, do it now;
525 * let the fault handler take care of increases
526 * (pmap_page_protect may not increase protection).
527 */
528
529 if (prot != VM_PROT_NO_CHANGE((vm_prot_t) 0x08)) {
530 if ((m->page_lock ^ prot) & prot) {
531 pmap_page_protect(m->phys_addr, VM_PROT_ALL(((vm_prot_t) 0x01)|((vm_prot_t) 0x02)|((vm_prot_t) 0x04)) & ~prot);
532 }
533 m->page_lock = prot;
534
535 /*
536 * Restart any past unlock requests, even if no
537 * change resulted. If the manager explicitly
538 * requested no protection change, then it is assumed
539 * to be remembering past requests.
540 */
541
542 m->unlock_request = VM_PROT_NONE((vm_prot_t) 0x00);
543 PAGE_WAKEUP(m);
544 }
545
546 /*
547 * Handle cleaning.
548 */
549
550 if (should_return != MEMORY_OBJECT_RETURN_NONE0) {
551 /*
552 * Check whether the page is dirty. If
553 * write permission has not been removed,
554 * this may have unpredictable results.
555 */
556
557 if (!m->dirty)
558 m->dirty = pmap_is_modified(m->phys_addr);
559
560 if (m->dirty || (m->precious &&
561 should_return == MEMORY_OBJECT_RETURN_ALL2)) {
562 /*
563 * If we weren't planning
564 * to flush the page anyway,
565 * we may need to remove the
566 * page from the pageout
567 * system and from physical
568 * maps now.
569 */
570
571 vm_page_lock_queues();
572 VM_PAGE_QUEUES_REMOVE(m);
573 vm_page_unlock_queues()((void)(&vm_page_queue_lock));
574
575 if (!should_flush)
576 pmap_page_protect(m->phys_addr,
577 VM_PROT_NONE((vm_prot_t) 0x00));
578
579 /*
580 * Cleaning a page will cause
581 * it to be flushed.
582 */
583
584 if (m->dirty)
585 return(MEMORY_OBJECT_LOCK_RESULT_MUST_CLEAN2);
586 else
587 return(MEMORY_OBJECT_LOCK_RESULT_MUST_RETURN3);
588 }
589 }
590
591 /*
592 * Handle flushing
593 */
594
595 if (should_flush) {
596 VM_PAGE_FREE(m)({ ; vm_page_free(m); ((void)(&vm_page_queue_lock)); });
597 } else {
598 extern boolean_t vm_page_deactivate_hint;
599
600 /*
601 * XXX Make clean but not flush a paging hint,
602 * and deactivate the pages. This is a hack
603 * because it overloads flush/clean with
604 * implementation-dependent meaning. This only
605 * happens to pages that are already clean.
606 */
607
608 if (vm_page_deactivate_hint &&
609 (should_return != MEMORY_OBJECT_RETURN_NONE0)) {
610 vm_page_lock_queues();
611 vm_page_deactivate(m);
612 vm_page_unlock_queues()((void)(&vm_page_queue_lock));
613 }
614 }
615
616 return(MEMORY_OBJECT_LOCK_RESULT_DONE0);
617}
618
619/*
620 * Routine: memory_object_lock_request [user interface]
621 *
622 * Description:
623 * Control use of the data associated with the given
624 * memory object. For each page in the given range,
625 * perform the following operations, in order:
626 * 1) restrict access to the page (disallow
627 * forms specified by "prot");
628 * 2) return data to the manager (if "should_return"
629 * is RETURN_DIRTY and the page is dirty, or
630 * "should_return" is RETURN_ALL and the page
631 * is either dirty or precious); and,
632 * 3) flush the cached copy (if "should_flush"
633 * is asserted).
634 * The set of pages is defined by a starting offset
635 * ("offset") and size ("size"). Only pages with the
636 * same page alignment as the starting offset are
637 * considered.
638 *
639 * A single acknowledgement is sent (to the "reply_to"
640 * port) when these actions are complete. If successful,
641 * the naked send right for reply_to is consumed.
642 */
643
644kern_return_t
645memory_object_lock_request(
646 vm_object_t object,
647 vm_offset_t offset,
648 vm_size_t size,
649 memory_object_return_t should_return,
650 boolean_t should_flush,
651 vm_prot_t prot,
652 ipc_port_t reply_to,
653 mach_msg_type_name_t reply_to_type)
654{
655 vm_page_t m;
656 vm_offset_t original_offset = offset;
657 vm_size_t original_size = size;
658 vm_offset_t paging_offset = 0;
659 vm_object_t new_object = VM_OBJECT_NULL((vm_object_t) 0);
660 vm_offset_t new_offset = 0;
661 vm_offset_t last_offset = offset;
662 int page_lock_result;
663 int pageout_action = 0; /* '=0' to quiet lint */
664
665#define DATA_WRITE_MAX32 32
666 vm_page_t holding_pages[DATA_WRITE_MAX32];
667
668 /*
669 * Check for bogus arguments.
670 */
671 if (object == VM_OBJECT_NULL((vm_object_t) 0) ||
672 ((prot & ~VM_PROT_ALL(((vm_prot_t) 0x01)|((vm_prot_t) 0x02)|((vm_prot_t) 0x04))) != 0 && prot != VM_PROT_NO_CHANGE((vm_prot_t) 0x08)))
673 return (KERN_INVALID_ARGUMENT4);
674
675 size = round_page(size);
676
677 /*
678 * Lock the object, and acquire a paging reference to
679 * prevent the memory_object and control ports from
680 * being destroyed.
681 */
682
683 vm_object_lock(object);
684 vm_object_paging_begin(object)((object)->paging_in_progress++);
685 offset -= object->paging_offset;
686
687 /*
688 * To avoid blocking while scanning for pages, save
689 * dirty pages to be cleaned all at once.
690 *
691 * XXXO A similar strategy could be used to limit the
692 * number of times that a scan must be restarted for
693 * other reasons. Those pages that would require blocking
694 * could be temporarily collected in another list, or
695 * their offsets could be recorded in a small array.
696 */
697
698 /*
699 * XXX NOTE: May want to consider converting this to a page list
700 * XXX vm_map_copy interface. Need to understand object
701 * XXX coalescing implications before doing so.
702 */
703
704 #define PAGEOUT_PAGES \
705 MACRO_BEGIN \
706 vm_map_copy_t copy; \
707 int i; \
708 vm_page_t hp; \
709 \
710 vm_object_unlock(object); \
711 \
712 (void) vm_map_copyin_object(new_object, 0, new_offset, &copy); \
713 \
714 if (object->use_old_pageout) { \
715 assert(pageout_action == MEMORY_OBJECT_LOCK_RESULT_MUST_CLEAN); \
716 (void) memory_object_data_write( \
717 object->pager, \
718 object->pager_request, \
719 paging_offset, \
720 (pointer_t) copy, \
721 new_offset); \
722 } \
723 else { \
724 (void) memory_object_data_return( \
725 object->pager, \
726 object->pager_request, \
727 paging_offset, \
728 (pointer_t) copy, \
729 new_offset, \
730 (pageout_action == MEMORY_OBJECT_LOCK_RESULT_MUST_CLEAN), \
731 !should_flush); \
732 } \
733 \
734 vm_object_lock(object); \
735 \
736 for (i = 0; i < atop(new_offset); i++) { \
737 hp = holding_pages[i]; \
738 if (hp != VM_PAGE_NULL) \
739 VM_PAGE_FREE(hp); \
740 } \
741 \
742 new_object = VM_OBJECT_NULL; \
743 MACRO_END
744
745 for (;
746 size != 0;
747 size -= PAGE_SIZE(1 << 12), offset += PAGE_SIZE(1 << 12))
748 {
749 /*
750 * Limit the number of pages to be cleaned at once.
751 */
752 if (new_object != VM_OBJECT_NULL((vm_object_t) 0) &&
753 new_offset >= PAGE_SIZE(1 << 12) * DATA_WRITE_MAX32)
754 {
755 PAGEOUT_PAGES;
756 }
757
758 while ((m = vm_page_lookup(object, offset)) != VM_PAGE_NULL((vm_page_t) 0)) {
759 switch ((page_lock_result = memory_object_lock_page(m,
760 should_return,
761 should_flush,
762 prot)))
763 {
764 case MEMORY_OBJECT_LOCK_RESULT_DONE0:
765 /*
766 * End of a cluster of dirty pages.
767 */
768 if (new_object != VM_OBJECT_NULL((vm_object_t) 0)) {
769 PAGEOUT_PAGES;
770 continue;
771 }
772 break;
773
774 case MEMORY_OBJECT_LOCK_RESULT_MUST_BLOCK1:
775 /*
776 * Since it is necessary to block,
777 * clean any dirty pages now.
778 */
779 if (new_object != VM_OBJECT_NULL((vm_object_t) 0)) {
780 PAGEOUT_PAGES;
781 continue;
782 }
783
784 PAGE_ASSERT_WAIT(m, FALSE);
785 vm_object_unlock(object)((void)(&(object)->Lock));
786 thread_block((void (*)()) 0);
787 vm_object_lock(object);
788 continue;
789
790 case MEMORY_OBJECT_LOCK_RESULT_MUST_CLEAN2:
791 case MEMORY_OBJECT_LOCK_RESULT_MUST_RETURN3:
792 /*
793 * The clean and return cases are similar.
794 *
795 * Mark the page busy since we unlock the
796 * object below.
797 */
798 m->busy = TRUE((boolean_t) 1);
799
800 /*
801 * if this would form a discontiguous block,
802 * clean the old pages and start anew.
803 *
804 * NOTE: The first time through here, new_object
805 * is null, hiding the fact that pageout_action
806 * is not initialized.
807 */
808 if (new_object != VM_OBJECT_NULL((vm_object_t) 0) &&
809 (last_offset != offset ||
810 pageout_action != page_lock_result)) {
811 PAGEOUT_PAGES;
812 }
813
814 vm_object_unlock(object)((void)(&(object)->Lock));
815
816 /*
817 * If we have not already allocated an object
818 * for a range of pages to be written, do so
819 * now.
820 */
821 if (new_object == VM_OBJECT_NULL((vm_object_t) 0)) {
822 new_object = vm_object_allocate(original_size);
823 new_offset = 0;
824 paging_offset = m->offset +
825 object->paging_offset;
826 pageout_action = page_lock_result;
827 }
828
829 /*
830 * Move or copy the dirty page into the
831 * new object.
832 */
833 m = vm_pageout_setup(m,
834 m->offset + object->paging_offset,
835 new_object,
836 new_offset,
837 should_flush);
838
839 /*
840 * Save the holding page if there is one.
841 */
842 holding_pages[atop(new_offset)(((vm_size_t)(new_offset)) >> 12)] = m;
843 new_offset += PAGE_SIZE(1 << 12);
844 last_offset = offset + PAGE_SIZE(1 << 12);
845
846 vm_object_lock(object);
847 break;
848 }
849 break;
850 }
851 }
852
853 /*
854 * We have completed the scan for applicable pages.
855 * Clean any pages that have been saved.
856 */
857 if (new_object != VM_OBJECT_NULL((vm_object_t) 0)) {
858 PAGEOUT_PAGES;
859 }
860
861 if (IP_VALID(reply_to)) {
862 vm_object_unlock(object)((void)(&(object)->Lock));
863
864 /* consumes our naked send-once/send right for reply_to */
865 (void) memory_object_lock_completed(reply_to, reply_to_type,
866 object->pager_request, original_offset, original_size);
867
868 vm_object_lock(object);
869 }
870
871 vm_object_paging_end(object);
872 vm_object_unlock(object)((void)(&(object)->Lock));
873 vm_object_deallocate(object);
874
875 return (KERN_SUCCESS0);
876}
877
878kern_return_t
879memory_object_set_attributes_common(
880 vm_object_t object,
881 boolean_t object_ready,
882 boolean_t may_cache,
883 memory_object_copy_strategy_t copy_strategy,
884 boolean_t use_old_pageout)
885{
886 if (object == VM_OBJECT_NULL((vm_object_t) 0))
887 return(KERN_INVALID_ARGUMENT4);
888
889 /*
890 * Verify the attributes of importance
891 */
892
893 switch(copy_strategy) {
894 case MEMORY_OBJECT_COPY_NONE0:
895 case MEMORY_OBJECT_COPY_CALL1:
896 case MEMORY_OBJECT_COPY_DELAY2:
897 case MEMORY_OBJECT_COPY_TEMPORARY3:
898 break;
899 default:
900 vm_object_deallocate(object);
901 return(KERN_INVALID_ARGUMENT4);
902 }
903
904 if (object_ready)
905 object_ready = TRUE((boolean_t) 1);
906 if (may_cache)
907 may_cache = TRUE((boolean_t) 1);
908
909 vm_object_lock(object);
910
911 /*
912 * Wake up anyone waiting for the ready attribute
913 * to become asserted.
914 */
915
916 if (object_ready && !object->pager_ready) {
917 object->use_old_pageout = use_old_pageout;
918 vm_object_wakeup(object, VM_OBJECT_EVENT_PAGER_READY);
919 }
920
921 /*
922 * Copy the attributes
923 */
924
925 object->can_persist = may_cache;
926 object->pager_ready = object_ready;
927 if (copy_strategy == MEMORY_OBJECT_COPY_TEMPORARY3) {
928 object->temporary = TRUE((boolean_t) 1);
929 } else {
930 object->copy_strategy = copy_strategy;
931 }
932
933 vm_object_unlock(object)((void)(&(object)->Lock));
934
935 vm_object_deallocate(object);
936
937 return(KERN_SUCCESS0);
938}
939
940/*
941 * XXX rpd claims that reply_to could be obviated in favor of a client
942 * XXX stub that made change_attributes an RPC. Need investigation.
943 */
944
945kern_return_t memory_object_change_attributes(
946 vm_object_t object,
947 boolean_t may_cache,
948 memory_object_copy_strategy_t copy_strategy,
949 ipc_port_t reply_to,
950 mach_msg_type_name_t reply_to_type)
951{
952 kern_return_t result;
953
954 /*
955 * Do the work and throw away our object reference. It
956 * is important that the object reference be deallocated
957 * BEFORE sending the reply. The whole point of the reply
958 * is that it shows up after the terminate message that
959 * may be generated by setting the object uncacheable.
960 *
961 * XXX may_cache may become a tri-valued variable to handle
962 * XXX uncache if not in use.
963 */
964 result = memory_object_set_attributes_common(object, TRUE((boolean_t) 1),
965 may_cache, copy_strategy,
966 FALSE((boolean_t) 0));
967
968 if (IP_VALID(reply_to)) {
969
970 /* consumes our naked send-once/send right for reply_to */
971 (void) memory_object_change_completed(reply_to, reply_to_type,
972 may_cache, copy_strategy);
973
974 }
975
976 return(result);
977}
978
979kern_return_t
980memory_object_set_attributes(
981 vm_object_t object,
982 boolean_t object_ready,
983 boolean_t may_cache,
984 memory_object_copy_strategy_t copy_strategy)
985{
986 return memory_object_set_attributes_common(object, object_ready,
987 may_cache, copy_strategy,
988 TRUE((boolean_t) 1));
989}
990
991kern_return_t memory_object_ready(
992 vm_object_t object,
993 boolean_t may_cache,
994 memory_object_copy_strategy_t copy_strategy)
995{
996 return memory_object_set_attributes_common(object, TRUE((boolean_t) 1),
997 may_cache, copy_strategy,
998 FALSE((boolean_t) 0));
999}
1000
1001kern_return_t memory_object_get_attributes(
1002 vm_object_t object,
1003 boolean_t *object_ready,
1004 boolean_t *may_cache,
1005 memory_object_copy_strategy_t *copy_strategy)
1006{
1007 if (object == VM_OBJECT_NULL((vm_object_t) 0))
1008 return(KERN_INVALID_ARGUMENT4);
1009
1010 vm_object_lock(object);
1011 *may_cache = object->can_persist;
1012 *object_ready = object->pager_ready;
1013 *copy_strategy = object->copy_strategy;
1014 vm_object_unlock(object)((void)(&(object)->Lock));
1015
1016 vm_object_deallocate(object);
1017
1018 return(KERN_SUCCESS0);
1019}
1020
1021/*
1022 * If successful, consumes the supplied naked send right.
1023 */
1024kern_return_t vm_set_default_memory_manager(host, default_manager)
1025 const host_t host;
1026 ipc_port_t *default_manager;
1027{
1028 ipc_port_t current_manager;
1029 ipc_port_t new_manager;
1030 ipc_port_t returned_manager;
1031
1032 if (host == HOST_NULL((host_t)0))
1033 return(KERN_INVALID_HOST22);
1034
1035 new_manager = *default_manager;
1036 simple_lock(&memory_manager_default_lock);
1037 current_manager = memory_manager_default;
1038
1039 if (new_manager == IP_NULL((ipc_port_t) ((ipc_object_t) 0))) {
1040 /*
1041 * Retrieve the current value.
1042 */
1043
1044 returned_manager = ipc_port_copy_send(current_manager);
1045 } else {
1046 /*
1047 * Retrieve the current value,
1048 * and replace it with the supplied value.
1049 * We consume the supplied naked send right.
1050 */
1051
1052 returned_manager = current_manager;
1053 memory_manager_default = new_manager;
1054
1055 /*
1056 * In case anyone's been waiting for a memory
1057 * manager to be established, wake them up.
1058 */
1059
1060 thread_wakeup((event_t) &memory_manager_default);
1061 }
1062
1063 simple_unlock(&memory_manager_default_lock)((void)(&memory_manager_default_lock));
1064
1065 *default_manager = returned_manager;
1066 return(KERN_SUCCESS0);
1067}
1068
1069/*
1070 * Routine: memory_manager_default_reference
1071 * Purpose:
1072 * Returns a naked send right for the default
1073 * memory manager. The returned right is always
1074 * valid (not IP_NULL or IP_DEAD).
1075 */
1076
1077ipc_port_t memory_manager_default_reference(void)
1078{
1079 ipc_port_t current_manager;
1080
1081 simple_lock(&memory_manager_default_lock);
1082
1083 while (current_manager = ipc_port_copy_send(memory_manager_default),
1084 !IP_VALID(current_manager)) {
1085 thread_sleep((event_t) &memory_manager_default,
1086 simple_lock_addr(memory_manager_default_lock)((simple_lock_t)0),
1087 FALSE((boolean_t) 0));
1088 simple_lock(&memory_manager_default_lock);
1089 }
1090
1091 simple_unlock(&memory_manager_default_lock)((void)(&memory_manager_default_lock));
1092
1093 return current_manager;
1094}
1095
1096/*
1097 * Routine: memory_manager_default_port
1098 * Purpose:
1099 * Returns true if the receiver for the port
1100 * is the default memory manager.
1101 *
1102 * This is a hack to let ds_read_done
1103 * know when it should keep memory wired.
1104 */
1105
1106boolean_t memory_manager_default_port(port)
1107 const ipc_port_t port;
1108{
1109 ipc_port_t current;
1110 boolean_t result;
1111
1112 simple_lock(&memory_manager_default_lock);
1113 current = memory_manager_default;
1114 if (IP_VALID(current)) {
1115 /*
1116 * There is no point in bothering to lock
1117 * both ports, which would be painful to do.
1118 * If the receive rights are moving around,
1119 * we might be inaccurate.
1120 */
1121
1122 result = port->ip_receiverdata.receiver == current->ip_receiverdata.receiver;
1123 } else
1124 result = FALSE((boolean_t) 0);
1125 simple_unlock(&memory_manager_default_lock)((void)(&memory_manager_default_lock));
1126
1127 return result;
1128}
1129
1130void memory_manager_default_init(void)
1131{
1132 memory_manager_default = IP_NULL((ipc_port_t) ((ipc_object_t) 0));
1133 simple_lock_init(&memory_manager_default_lock);
1134}