Bug Summary

File:        obj/../vm/memory_object.c
Location:    line 281, column 6
Description: Dereference of null pointer
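
Path summary: the analyzer enters memory_object_data_supply() from
memory_object_data_provided() (step 1). Inside the pagein loop, once the last
page of the copy has been consumed, the continuation is invoked and, on
success, data_copy is reassigned from new_copy (line 261). The reported path
assumes new_copy is VM_MAP_COPY_NULL, so data_copy becomes null; the loop then
exits (data_cnt has reached 0) and the unguarded vm_map_copy_has_cont(data_copy)
test at line 281 dereferences the null pointer. Whether this path is feasible
at run time depends on whether a page-list continuation can legitimately hand
back a null copy; the report only shows that nothing on this path rules it out.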

Annotated Source Code

1/*
2 * Mach Operating System
3 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University.
4 * Copyright (c) 1993,1994 The University of Utah and
5 * the Computer Systems Laboratory (CSL).
6 * All rights reserved.
7 *
8 * Permission to use, copy, modify and distribute this software and its
9 * documentation is hereby granted, provided that both the copyright
10 * notice and this permission notice appear in all copies of the
11 * software, derivative works or modified versions, and any portions
12 * thereof, and that both notices appear in supporting documentation.
13 *
14 * CARNEGIE MELLON, THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF
15 * THIS SOFTWARE IN ITS "AS IS" CONDITION, AND DISCLAIM ANY LIABILITY
16 * OF ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF
17 * THIS SOFTWARE.
18 *
19 * Carnegie Mellon requests users of this software to return to
20 *
21 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
22 * School of Computer Science
23 * Carnegie Mellon University
24 * Pittsburgh PA 15213-3890
25 *
26 * any improvements or extensions that they make and grant Carnegie Mellon
27 * the rights to redistribute these changes.
28 */
29/*
30 * File: vm/memory_object.c
31 * Author: Michael Wayne Young
32 *
33 * External memory management interface control functions.
34 */
35
36/*
37 * Interface dependencies:
38 */
39
40#include <mach/std_types.h> /* For pointer_t */
41#include <mach/mach_types.h>
42
43#include <mach/kern_return.h>
44#include <vm/vm_map.h>
45#include <vm/vm_object.h>
46#include <mach/memory_object.h>
47#include <mach/boolean.h>
48#include <mach/vm_prot.h>
49#include <mach/message.h>
50
51#include <vm/memory_object_user.user.h>
52#include <vm/memory_object_default.user.h>
53
54/*
55 * Implementation dependencies:
56 */
57#include <vm/memory_object.h>
58#include <vm/vm_page.h>
59#include <vm/vm_pageout.h>
60#include <vm/pmap.h> /* For copy_to_phys, pmap_clear_modify */
61#include <kern/debug.h> /* For panic() */
62#include <kern/thread.h> /* For current_thread() */
63#include <kern/host.h>
64#include <vm/vm_kern.h> /* For kernel_map, vm_move */
65#include <vm/vm_map.h> /* For vm_map_pageable */
66#include <ipc/ipc_port.h>
67
68#if MACH_PAGEMAP
69#include <vm/vm_external.h>
70#endif /* MACH_PAGEMAP */
71
72typedef int memory_object_lock_result_t; /* moved from below */
73
74
75ipc_port_t memory_manager_default = IP_NULL;
76decl_simple_lock_data(,memory_manager_default_lock)
77
78/*
79 * Important note:
80 * All of these routines gain a reference to the
81 * object (first argument) as part of the automatic
82 * argument conversion. Explicit deallocation is necessary.
83 */
84
85kern_return_t memory_object_data_supply(object, offset, data_copy, data_cnt,
86 lock_value, precious, reply_to, reply_to_type)
87 register
88 vm_object_t object;
89 register
90 vm_offset_t offset;
91 vm_map_copy_t data_copy;
92 unsigned int data_cnt;
93 vm_prot_t lock_value;
94 boolean_t precious;
95 ipc_port_t reply_to;
96 mach_msg_type_name_t reply_to_type;
97{
98 kern_return_t result = KERN_SUCCESS;
99 vm_offset_t error_offset = 0;
100 register
101 vm_page_t m;
102 register
103 vm_page_t data_m;
104 vm_size_t original_length;
105 vm_offset_t original_offset;
106 vm_page_t *page_list;
107 boolean_t was_absent;
108 vm_map_copy_t orig_copy = data_copy;
109
110 /*
111 * Look for bogus arguments
112 */
113
114 if (object == VM_OBJECT_NULL) {
    [2] Assuming 'object' is not equal to null
    [3] Taking false branch
115 return(KERN_INVALID_ARGUMENT);
116 }
117
118 if (lock_value & ~VM_PROT_ALL) {
    [4] Taking false branch
119 vm_object_deallocate(object);
120 return(KERN_INVALID_ARGUMENT);
121 }
122
123 if ((data_cnt % PAGE_SIZE) != 0) {
    [5] Taking false branch
124 vm_object_deallocate(object);
125 return(KERN_INVALID_ARGUMENT);
126 }
127
128 /*
129 * Adjust the offset from the memory object to the offset
130 * within the vm_object.
131 */
132
133 original_length = data_cnt;
134 original_offset = offset;
135
136 assert(data_copy->type == VM_MAP_COPY_PAGE_LIST);
137 page_list = &data_copy->cpy_page_list[0];
138
139 vm_object_lock(object);
140 vm_object_paging_begin(object);
141 offset -= object->paging_offset;
142
143 /*
144 * Loop over copy stealing pages for pagein.
145 */
146
147 for (; data_cnt > 0 ; data_cnt -= PAGE_SIZE, offset += PAGE_SIZE) {
    [6] Assuming 'data_cnt' is > 0
    [7] Loop condition is true. Entering loop body
    [22] Assuming 'data_cnt' is <= 0
    [23] Loop condition is false. Execution continues on line 278
148
149 assert(data_copy->cpy_npages > 0);
150 data_m = *page_list;
151
152 if (data_m == VM_PAGE_NULL || data_m->tabled ||
    [8] Assuming 'data_m' is not equal to null
    [9] Taking false branch
153     data_m->error || data_m->absent || data_m->fictitious) {
154
155 panic("Data_supply: bad page");
156 }
157
158 /*
159 * Look up target page and check its state.
160 */
161
162retry_lookup:
163 m = vm_page_lookup(object,offset);
164 if (m == VM_PAGE_NULL) {
    [10] Assuming 'm' is not equal to null
    [11] Taking false branch
    [14] Assuming 'm' is equal to null
    [15] Taking true branch
165 was_absent = FALSE;
166 }
167 else {
168 if (m->absent && m->busy) {
169
170 /*
171 * Page was requested. Free the busy
172 * page waiting for it. Insertion
173 * of new page happens below.
174 */
175
176 VM_PAGE_FREE(m);
177 was_absent = TRUE;
178 }
179 else {
180
181 /*
182 * Have to wait for page that is busy and
183 * not absent. This is probably going to
184 * be an error, but go back and check.
185 */
186 if (m->busy) {
    [12] Taking true branch
187 PAGE_ASSERT_WAIT(m, FALSE);
188 vm_object_unlock(object);
189 thread_block((void (*)()) 0);
190 vm_object_lock(object);
191 goto retry_lookup;
    [13] Control jumps to line 163
192 }
193
194 /*
195 * Page already present; error.
196 * This is an error if data is precious.
197 */
198 result = KERN_MEMORY_PRESENT;
199 error_offset = offset + object->paging_offset;
200
201 break;
202 }
203 }
204
205 /*
206 * Ok to pagein page. Target object now has no page
207 * at offset. Set the page parameters, then drop
208 * in new page and set up pageout state. Object is
209 * still locked here.
210 *
211 * Must clear busy bit in page before inserting it.
212 * Ok to skip wakeup logic because nobody else
213 * can possibly know about this page.
214 */
215
216 data_m->busy = FALSE;
217 data_m->dirty = FALSE;
218 pmap_clear_modify(data_m->phys_addr);
219
220 data_m->page_lock = lock_value;
221 data_m->unlock_request = VM_PROT_NONE;
222 data_m->precious = precious;
223
224 vm_page_lock_queues();
225 vm_page_insert(data_m, object, offset);
226
227 if (was_absent)
    [16] Taking false branch
228 vm_page_activate(data_m);
229 else
230 vm_page_deactivate(data_m);
231
232 vm_page_unlock_queues();
233
234 /*
235 * Null out this page list entry, and advance to next
236 * page.
237 */
238
239 *page_list++ = VM_PAGE_NULL;
240
241 if (--(data_copy->cpy_npages) == 0 &&
    [17] Taking true branch
242     vm_map_copy_has_cont(data_copy)) {
243 vm_map_copy_t new_copy;
244
245 vm_object_unlock(object);
246
247 vm_map_copy_invoke_cont(data_copy, &new_copy, &result);
248
249 if (result == KERN_SUCCESS) {
    [18] Assuming 'result' is equal to 0
    [19] Taking true branch
250
251 /*
252 * Consume on success requires that
253 * we keep the original vm_map_copy
254 * around in case something fails.
255 * Free the old copy if it's not the original
256 */
257 if (data_copy != orig_copy) {
    [20] Taking false branch
258 vm_map_copy_discard(data_copy);
259 }
260
261 if ((data_copy = new_copy) != VM_MAP_COPY_NULL)
    [21] Taking false branch
262 page_list = &data_copy->cpy_page_list[0];
263
264 vm_object_lock(object);
265 }
266 else {
267 vm_object_lock(object);
268 error_offset = offset + object->paging_offset +
269 PAGE_SIZE;
270 break;
271 }
272 }
273 }
274
275 /*
276 * Send reply if one was requested.
277 */
278 vm_object_paging_end(object);
279 vm_object_unlock(object);
280
281 if (vm_map_copy_has_cont(data_copy))
    [24] Within the expansion of the macro 'vm_map_copy_has_cont':
      [a] Dereference of null pointer
282 vm_map_copy_abort_cont(data_copy);
283
284 if (IP_VALID(reply_to)) {
285 memory_object_supply_completed(
286 reply_to, reply_to_type,
287 object->pager_request,
288 original_offset,
289 original_length,
290 result,
291 error_offset);
292 }
293
294 vm_object_deallocate(object);
295
296 /*
297 * Consume on success: The final data copy must be
298 * discarded if it is not the original. The original
299 * gets discarded only if this routine succeeds.
300 */
301 if (data_copy != orig_copy)
302 vm_map_copy_discard(data_copy);
303 if (result == KERN_SUCCESS)
304 vm_map_copy_discard(orig_copy);
305
306
307 return(result);
308}
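
A minimal sketch of one possible guard for the post-loop cleanup, assuming the
path reported above is feasible (i.e. that a page-list continuation can hand
back a null copy). This is only an illustration of the missing null check, not
a claim about how the code was or should be fixed upstream; it reuses only
macros that already appear in the listing.

	/*
	 * data_copy may have been set to VM_MAP_COPY_NULL at line 261,
	 * when the last continuation produced no further copy.  Check
	 * for that before touching the continuation fields.
	 */
	if (data_copy != VM_MAP_COPY_NULL &&
	    vm_map_copy_has_cont(data_copy))
		vm_map_copy_abort_cont(data_copy);
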
309
310
311/*
312 * If successful, destroys the map copy object.
313 */
314kern_return_t memory_object_data_provided(object, offset, data, data_cnt,
315 lock_value)
316 vm_object_t object;
317 vm_offset_t offset;
318 pointer_t data;
319 unsigned int data_cnt;
320 vm_prot_t lock_value;
321{
322 return memory_object_data_supply(object, offset, (vm_map_copy_t) data,
    [1] Calling 'memory_object_data_supply'
323 data_cnt, lock_value, FALSE, IP_NULL,
324 0);
325}
326
327
328kern_return_t memory_object_data_error(object, offset, size, error_value)
329 vm_object_t object;
330 vm_offset_t offset;
331 vm_size_t size;
332 kern_return_t error_value;
333{
334 if (object == VM_OBJECT_NULL)
335 return(KERN_INVALID_ARGUMENT);
336
337 if (size != round_page(size))
338 return(KERN_INVALID_ARGUMENT);
339
340#ifdef lint
341 /* Error value is ignored at this time */
342 error_value++;
343#endif
344
345 vm_object_lock(object);
346 offset -= object->paging_offset;
347
348 while (size != 0) {
349 register vm_page_t m;
350
351 m = vm_page_lookup(object, offset);
352 if ((m != VM_PAGE_NULL) && m->busy && m->absent) {
353 m->error = TRUE;
354 m->absent = FALSE;
355 vm_object_absent_release(object);
356
357 PAGE_WAKEUP_DONE(m);
358
359 vm_page_lock_queues();
360 vm_page_activate(m);
361 vm_page_unlock_queues();
362 }
363
364 size -= PAGE_SIZE;
365 offset += PAGE_SIZE;
366 }
367 vm_object_unlock(object);
368
369 vm_object_deallocate(object);
370 return(KERN_SUCCESS);
371}
372
373kern_return_t memory_object_data_unavailable(object, offset, size)
374 vm_object_t object;
375 vm_offset_t offset;
376 vm_size_t size;
377{
378#if MACH_PAGEMAP
379 vm_external_t existence_info = VM_EXTERNAL_NULL;
380#endif /* MACH_PAGEMAP */
381
382 if (object == VM_OBJECT_NULL)
383 return(KERN_INVALID_ARGUMENT);
384
385 if (size != round_page(size))
386 return(KERN_INVALID_ARGUMENT);
387
388#if MACH_PAGEMAP
389 if ((offset == 0) && (size > VM_EXTERNAL_LARGE_SIZE) &&
390     (object->existence_info == VM_EXTERNAL_NULL)) {
391 existence_info = vm_external_create(VM_EXTERNAL_SMALL_SIZE);
392 }
393#endif /* MACH_PAGEMAP */
394
395 vm_object_lock(object);
396#if MACH_PAGEMAP
397 if (existence_info != VM_EXTERNAL_NULL) {
398 object->existence_info = existence_info;
399 }
400 if ((offset == 0) && (size > VM_EXTERNAL_LARGE_SIZE)) {
401 vm_object_unlock(object);
402 vm_object_deallocate(object);
403 return(KERN_SUCCESS);
404 }
405#endif /* MACH_PAGEMAP */
406 offset -= object->paging_offset;
407
408 while (size != 0) {
409 register vm_page_t m;
410
411 /*
412 * We're looking for pages that are both busy and
413 * absent (waiting to be filled), converting them
414 * to just absent.
415 *
416 * Pages that are just busy can be ignored entirely.
417 */
418
419 m = vm_page_lookup(object, offset);
420 if ((m != VM_PAGE_NULL) && m->busy && m->absent) {
421 PAGE_WAKEUP_DONE(m);
422
423 vm_page_lock_queues();
424 vm_page_activate(m);
425 vm_page_unlock_queues();
426 }
427 size -= PAGE_SIZE;
428 offset += PAGE_SIZE;
429 }
430
431 vm_object_unlock(object);
432
433 vm_object_deallocate(object);
434 return(KERN_SUCCESS);
435}
436
437/*
438 * Routine: memory_object_lock_page
439 *
440 * Description:
441 * Perform the appropriate lock operations on the
442 * given page. See the description of
443 * "memory_object_lock_request" for the meanings
444 * of the arguments.
445 *
446 * Returns an indication that the operation
447 * completed, blocked, or that the page must
448 * be cleaned.
449 */
450
451#define MEMORY_OBJECT_LOCK_RESULT_DONE 0
452#define MEMORY_OBJECT_LOCK_RESULT_MUST_BLOCK 1
453#define MEMORY_OBJECT_LOCK_RESULT_MUST_CLEAN 2
454#define MEMORY_OBJECT_LOCK_RESULT_MUST_RETURN 3
455
456memory_object_lock_result_t memory_object_lock_page(m, should_return,
457 should_flush, prot)
458 vm_page_t m;
459 memory_object_return_t should_return;
460 boolean_t should_flush;
461 vm_prot_t prot;
462{
463 /*
464 * Don't worry about pages for which the kernel
465 * does not have any data.
466 */
467
468 if (m->absent)
469 return(MEMORY_OBJECT_LOCK_RESULT_DONE);
470
471 /*
472 * If we cannot change access to the page,
473 * either because a mapping is in progress
474 * (busy page) or because a mapping has been
475 * wired, then give up.
476 */
477
478 if (m->busy)
479 return(MEMORY_OBJECT_LOCK_RESULT_MUST_BLOCK);
480
481 assert(!m->fictitious);
482
483 if (m->wire_count != 0) {
484 /*
485 * If no change would take place
486 * anyway, return successfully.
487 *
488 * No change means:
489 * Not flushing AND
490 * No change to page lock [2 checks] AND
491 * Don't need to send page to manager
492 *
493 * Don't need to send page to manager means:
494 * No clean or return request OR (
495 * Page is not dirty [2 checks] AND (
496 * Page is not precious OR
497 * No request to return precious pages ))
498 *
499 * Now isn't that straightforward and obvious ?? ;-)
500 *
501 * XXX This doesn't handle sending a copy of a wired
502 * XXX page to the pager, but that will require some
503 * XXX significant surgery.
504 */
505
506 if (!should_flush &&
507 ((m->page_lock == prot) || (prot == VM_PROT_NO_CHANGE)) &&
508 ((should_return == MEMORY_OBJECT_RETURN_NONE) ||
509 (!m->dirty && !pmap_is_modified(m->phys_addr) &&
510 (!m->precious ||
511 should_return != MEMORY_OBJECT_RETURN_ALL)))) {
512 /*
513 * Restart page unlock requests,
514 * even though no change took place.
515 * [Memory managers may be expecting
516 * to see new requests.]
517 */
518 m->unlock_request = VM_PROT_NONE;
519 PAGE_WAKEUP(m);
520
521 return(MEMORY_OBJECT_LOCK_RESULT_DONE);
522 }
523
524 return(MEMORY_OBJECT_LOCK_RESULT_MUST_BLOCK);
525 }
526
527 /*
528 * If the page is to be flushed, allow
529 * that to be done as part of the protection.
530 */
531
532 if (should_flush)
533 prot = VM_PROT_ALL;
534
535 /*
536 * Set the page lock.
537 *
538 * If we are decreasing permission, do it now;
539 * let the fault handler take care of increases
540 * (pmap_page_protect may not increase protection).
541 */
542
543 if (prot != VM_PROT_NO_CHANGE) {
544 if ((m->page_lock ^ prot) & prot) {
545 pmap_page_protect(m->phys_addr, VM_PROT_ALL & ~prot);
546 }
547 m->page_lock = prot;
548
549 /*
550 * Restart any past unlock requests, even if no
551 * change resulted. If the manager explicitly
552 * requested no protection change, then it is assumed
553 * to be remembering past requests.
554 */
555
556 m->unlock_request = VM_PROT_NONE;
557 PAGE_WAKEUP(m);
558 }
559
560 /*
561 * Handle cleaning.
562 */
563
564 if (should_return != MEMORY_OBJECT_RETURN_NONE) {
565 /*
566 * Check whether the page is dirty. If
567 * write permission has not been removed,
568 * this may have unpredictable results.
569 */
570
571 if (!m->dirty)
572 m->dirty = pmap_is_modified(m->phys_addr);
573
574 if (m->dirty || (m->precious &&
575 should_return == MEMORY_OBJECT_RETURN_ALL)) {
576 /*
577 * If we weren't planning
578 * to flush the page anyway,
579 * we may need to remove the
580 * page from the pageout
581 * system and from physical
582 * maps now.
583 */
584
585 vm_page_lock_queues();
586 VM_PAGE_QUEUES_REMOVE(m);
587 vm_page_unlock_queues();
588
589 if (!should_flush)
590 pmap_page_protect(m->phys_addr,
591 VM_PROT_NONE);
592
593 /*
594 * Cleaning a page will cause
595 * it to be flushed.
596 */
597
598 if (m->dirty)
599 return(MEMORY_OBJECT_LOCK_RESULT_MUST_CLEAN);
600 else
601 return(MEMORY_OBJECT_LOCK_RESULT_MUST_RETURN);
602 }
603 }
604
605 /*
606 * Handle flushing
607 */
608
609 if (should_flush) {
610 VM_PAGE_FREE(m);
611 } else {
612 extern boolean_t vm_page_deactivate_hint;
613
614 /*
615 * XXX Make clean but not flush a paging hint,
616 * and deactivate the pages. This is a hack
617 * because it overloads flush/clean with
618 * implementation-dependent meaning. This only
619 * happens to pages that are already clean.
620 */
621
622 if (vm_page_deactivate_hint &&
623 (should_return != MEMORY_OBJECT_RETURN_NONE)) {
624 vm_page_lock_queues();
625 vm_page_deactivate(m);
626 vm_page_unlock_queues();
627 }
628 }
629
630 return(MEMORY_OBJECT_LOCK_RESULT_DONE);
631}
632
633/*
634 * Routine: memory_object_lock_request [user interface]
635 *
636 * Description:
637 * Control use of the data associated with the given
638 * memory object. For each page in the given range,
639 * perform the following operations, in order:
640 * 1) restrict access to the page (disallow
641 * forms specified by "prot");
642 * 2) return data to the manager (if "should_return"
643 * is RETURN_DIRTY and the page is dirty, or
644 * "should_return" is RETURN_ALL and the page
645 * is either dirty or precious); and,
646 * 3) flush the cached copy (if "should_flush"
647 * is asserted).
648 * The set of pages is defined by a starting offset
649 * ("offset") and size ("size"). Only pages with the
650 * same page alignment as the starting offset are
651 * considered.
652 *
653 * A single acknowledgement is sent (to the "reply_to"
654 * port) when these actions are complete. If successful,
655 * the naked send right for reply_to is consumed.
656 */
657
658kern_return_t
659memory_object_lock_request(object, offset, size,
660 should_return, should_flush, prot,
661 reply_to, reply_to_type)
662 register vm_object_t object;
663 register vm_offset_t offset;
664 register vm_size_t size;
665 memory_object_return_t should_return;
666 boolean_t should_flush;
667 vm_prot_t prot;
668 ipc_port_t reply_to;
669 mach_msg_type_name_t reply_to_type;
670{
671 register vm_page_t m;
672 vm_offset_t original_offset = offset;
673 vm_size_t original_size = size;
674 vm_offset_t paging_offset = 0;
675 vm_object_t new_object = VM_OBJECT_NULL;
676 vm_offset_t new_offset = 0;
677 vm_offset_t last_offset = offset;
678 int page_lock_result;
679 int pageout_action = 0; /* '=0' to quiet lint */
680
681#define DATA_WRITE_MAX 32
682 vm_page_t holding_pages[DATA_WRITE_MAX];
683
684 /*
685 * Check for bogus arguments.
686 */
687 if (object == VM_OBJECT_NULL ||
688     ((prot & ~VM_PROT_ALL) != 0 && prot != VM_PROT_NO_CHANGE))
689 return (KERN_INVALID_ARGUMENT);
690
691 size = round_page(size);
692
693 /*
694 * Lock the object, and acquire a paging reference to
695 * prevent the memory_object and control ports from
696 * being destroyed.
697 */
698
699 vm_object_lock(object);
700 vm_object_paging_begin(object);
701 offset -= object->paging_offset;
702
703 /*
704 * To avoid blocking while scanning for pages, save
705 * dirty pages to be cleaned all at once.
706 *
707 * XXXO A similar strategy could be used to limit the
708 * number of times that a scan must be restarted for
709 * other reasons. Those pages that would require blocking
710 * could be temporarily collected in another list, or
711 * their offsets could be recorded in a small array.
712 */
713
714 /*
715 * XXX NOTE: May want to consider converting this to a page list
716 * XXX vm_map_copy interface. Need to understand object
717 * XXX coalescing implications before doing so.
718 */
719
720#define PAGEOUT_PAGES \
721MACRO_BEGIN \
722 vm_map_copy_t copy; \
723 register int i; \
724 register vm_page_t hp; \
725 \
726 vm_object_unlock(object); \
727 \
728 (void) vm_map_copyin_object(new_object, 0, new_offset, &copy); \
729 \
730 if (object->use_old_pageout) { \
731     assert(pageout_action == MEMORY_OBJECT_LOCK_RESULT_MUST_CLEAN); \
732     (void) memory_object_data_write( \
733         object->pager, \
734         object->pager_request, \
735         paging_offset, \
736         (pointer_t) copy, \
737         new_offset); \
738 } \
739 else { \
740     (void) memory_object_data_return( \
741         object->pager, \
742         object->pager_request, \
743         paging_offset, \
744         (pointer_t) copy, \
745         new_offset, \
746         (pageout_action == MEMORY_OBJECT_LOCK_RESULT_MUST_CLEAN), \
747         !should_flush); \
748 } \
749 \
750 vm_object_lock(object); \
751 \
752 for (i = 0; i < atop(new_offset); i++) { \
753     hp = holding_pages[i]; \
754     if (hp != VM_PAGE_NULL) \
755         VM_PAGE_FREE(hp); \
756 } \
757 \
758 new_object = VM_OBJECT_NULL; \
759MACRO_END
760
761 for (;
762 size != 0;
763 size -= PAGE_SIZE(1 << 12), offset += PAGE_SIZE(1 << 12))
764 {
765 /*
766 * Limit the number of pages to be cleaned at once.
767 */
768 if (new_object != VM_OBJECT_NULL &&
769     new_offset >= PAGE_SIZE * DATA_WRITE_MAX)
770 {
771 PAGEOUT_PAGES;
773
774 while ((m = vm_page_lookup(object, offset)) != VM_PAGE_NULL) {
775 switch ((page_lock_result = memory_object_lock_page(m,
776 should_return,
777 should_flush,
778 prot)))
779 {
780 case MEMORY_OBJECT_LOCK_RESULT_DONE:
781 /*
782 * End of a cluster of dirty pages.
783 */
784 if (new_object != VM_OBJECT_NULL) {
785 PAGEOUT_PAGES;
786 continue;
787 }
788 break;
789
790 case MEMORY_OBJECT_LOCK_RESULT_MUST_BLOCK:
791 /*
792 * Since it is necessary to block,
793 * clean any dirty pages now.
794 */
795 if (new_object != VM_OBJECT_NULL) {
796 PAGEOUT_PAGES;
797 continue;
798 }
799
800 PAGE_ASSERT_WAIT(m, FALSE);
801 vm_object_unlock(object);
802 thread_block((void (*)()) 0);
803 vm_object_lock(object);
804 continue;
805
806 case MEMORY_OBJECT_LOCK_RESULT_MUST_CLEAN:
807 case MEMORY_OBJECT_LOCK_RESULT_MUST_RETURN:
808 /*
809 * The clean and return cases are similar.
810 *
811 * Mark the page busy since we unlock the
812 * object below.
813 */
814 m->busy = TRUE;
815
816 /*
817 * if this would form a discontiguous block,
818 * clean the old pages and start anew.
819 *
820 * NOTE: The first time through here, new_object
821 * is null, hiding the fact that pageout_action
822 * is not initialized.
823 */
824 if (new_object != VM_OBJECT_NULL &&
825 (last_offset != offset ||
826 pageout_action != page_lock_result)) {
827 PAGEOUT_PAGES;
828 }
829
830 vm_object_unlock(object);
831
832 /*
833 * If we have not already allocated an object
834 * for a range of pages to be written, do so
835 * now.
836 */
837 if (new_object == VM_OBJECT_NULL) {
838 new_object = vm_object_allocate(original_size);
839 new_offset = 0;
840 paging_offset = m->offset +
841 object->paging_offset;
842 pageout_action = page_lock_result;
843 }
844
845 /*
846 * Move or copy the dirty page into the
847 * new object.
848 */
849 m = vm_pageout_setup(m,
850 m->offset + object->paging_offset,
851 new_object,
852 new_offset,
853 should_flush);
854
855 /*
856 * Save the holding page if there is one.
857 */
858 holding_pages[atop(new_offset)] = m;
859 new_offset += PAGE_SIZE;
860 last_offset = offset + PAGE_SIZE;
861
862 vm_object_lock(object);
863 break;
864 }
865 break;
866 }
867 }
868
869 /*
870 * We have completed the scan for applicable pages.
871 * Clean any pages that have been saved.
872 */
873 if (new_object != VM_OBJECT_NULL) {
874 PAGEOUT_PAGES;
875 }
876
877 if (IP_VALID(reply_to)) {
878 vm_object_unlock(object);
879
880 /* consumes our naked send-once/send right for reply_to */
881 (void) memory_object_lock_completed(reply_to, reply_to_type,
882 object->pager_request, original_offset, original_size);
883
884 vm_object_lock(object);
885 }
886
887 vm_object_paging_end(object);
888 vm_object_unlock(object);
889 vm_object_deallocate(object);
890
891 return (KERN_SUCCESS);
892}
893
894kern_return_t
895memory_object_set_attributes_common(object, object_ready, may_cache,
896 copy_strategy, use_old_pageout)
897 vm_object_t object;
898 boolean_t object_ready;
899 boolean_t may_cache;
900 memory_object_copy_strategy_t copy_strategy;
901 boolean_t use_old_pageout;
902{
903 if (object == VM_OBJECT_NULL)
904 return(KERN_INVALID_ARGUMENT);
905
906 /*
907 * Verify the attributes of importance
908 */
909
910 switch(copy_strategy) {
911 case MEMORY_OBJECT_COPY_NONE:
912 case MEMORY_OBJECT_COPY_CALL:
913 case MEMORY_OBJECT_COPY_DELAY:
914 case MEMORY_OBJECT_COPY_TEMPORARY:
915 break;
916 default:
917 vm_object_deallocate(object);
918 return(KERN_INVALID_ARGUMENT);
919 }
920
921 if (object_ready)
922 object_ready = TRUE;
923 if (may_cache)
924 may_cache = TRUE;
925
926 vm_object_lock(object);
927
928 /*
929 * Wake up anyone waiting for the ready attribute
930 * to become asserted.
931 */
932
933 if (object_ready && !object->pager_ready) {
934 object->use_old_pageout = use_old_pageout;
935 vm_object_wakeup(object, VM_OBJECT_EVENT_PAGER_READY);
936 }
937
938 /*
939 * Copy the attributes
940 */
941
942 object->can_persist = may_cache;
943 object->pager_ready = object_ready;
944 if (copy_strategy == MEMORY_OBJECT_COPY_TEMPORARY) {
945 object->temporary = TRUE;
946 } else {
947 object->copy_strategy = copy_strategy;
948 }
949
950 vm_object_unlock(object);
951
952 vm_object_deallocate(object);
953
954 return(KERN_SUCCESS);
955}
956
957/*
958 * XXX rpd claims that reply_to could be obviated in favor of a client
959 * XXX stub that made change_attributes an RPC. Need investigation.
960 */
961
962kern_return_t memory_object_change_attributes(object, may_cache,
963 copy_strategy, reply_to, reply_to_type)
964 vm_object_t object;
965 boolean_t may_cache;
966 memory_object_copy_strategy_t copy_strategy;
967 ipc_port_t reply_to;
968 mach_msg_type_name_t reply_to_type;
969{
970 kern_return_t result;
971
972 /*
973 * Do the work and throw away our object reference. It
974 * is important that the object reference be deallocated
975 * BEFORE sending the reply. The whole point of the reply
976 * is that it shows up after the terminate message that
977 * may be generated by setting the object uncacheable.
978 *
979 * XXX may_cache may become a tri-valued variable to handle
980 * XXX uncache if not in use.
981 */
982 result = memory_object_set_attributes_common(object, TRUE,
983 may_cache, copy_strategy,
984 FALSE);
985
986 if (IP_VALID(reply_to)) {
987
988 /* consumes our naked send-once/send right for reply_to */
989 (void) memory_object_change_completed(reply_to, reply_to_type,
990 may_cache, copy_strategy);
991
992 }
993
994 return(result);
995}
996
997kern_return_t
998memory_object_set_attributes(object, object_ready, may_cache, copy_strategy)
999 vm_object_t object;
1000 boolean_t object_ready;
1001 boolean_t may_cache;
1002 memory_object_copy_strategy_t copy_strategy;
1003{
1004 return memory_object_set_attributes_common(object, object_ready,
1005 may_cache, copy_strategy,
1006 TRUE);
1007}
1008
1009kern_return_t memory_object_ready(object, may_cache, copy_strategy)
1010 vm_object_t object;
1011 boolean_t may_cache;
1012 memory_object_copy_strategy_t copy_strategy;
1013{
1014 return memory_object_set_attributes_common(object, TRUE,
1015 may_cache, copy_strategy,
1016 FALSE);
1017}
1018
1019kern_return_t memory_object_get_attributes(object, object_ready,
1020 may_cache, copy_strategy)
1021 vm_object_t object;
1022 boolean_t *object_ready;
1023 boolean_t *may_cache;
1024 memory_object_copy_strategy_t *copy_strategy;
1025{
1026 if (object == VM_OBJECT_NULL)
1027 return(KERN_INVALID_ARGUMENT);
1028
1029 vm_object_lock(object);
1030 *may_cache = object->can_persist;
1031 *object_ready = object->pager_ready;
1032 *copy_strategy = object->copy_strategy;
1033 vm_object_unlock(object);
1034
1035 vm_object_deallocate(object);
1036
1037 return(KERN_SUCCESS);
1038}
1039
1040/*
1041 * If successful, consumes the supplied naked send right.
1042 */
1043kern_return_t vm_set_default_memory_manager(host, default_manager)
1044 host_t host;
1045 ipc_port_t *default_manager;
1046{
1047 ipc_port_t current_manager;
1048 ipc_port_t new_manager;
1049 ipc_port_t returned_manager;
1050
1051 if (host == HOST_NULL)
1052 return(KERN_INVALID_HOST);
1053
1054 new_manager = *default_manager;
1055 simple_lock(&memory_manager_default_lock);
1056 current_manager = memory_manager_default;
1057
1058 if (new_manager == IP_NULL) {
1059 /*
1060 * Retrieve the current value.
1061 */
1062
1063 returned_manager = ipc_port_copy_send(current_manager);
1064 } else {
1065 /*
1066 * Retrieve the current value,
1067 * and replace it with the supplied value.
1068 * We consume the supplied naked send right.
1069 */
1070
1071 returned_manager = current_manager;
1072 memory_manager_default = new_manager;
1073
1074 /*
1075 * In case anyone's been waiting for a memory
1076 * manager to be established, wake them up.
1077 */
1078
1079 thread_wakeup((event_t) &memory_manager_default);
1080 }
1081
1082 simple_unlock(&memory_manager_default_lock);
1083
1084 *default_manager = returned_manager;
1085 return(KERN_SUCCESS);
1086}
1087
1088/*
1089 * Routine: memory_manager_default_reference
1090 * Purpose:
1091 * Returns a naked send right for the default
1092 * memory manager. The returned right is always
1093 * valid (not IP_NULL or IP_DEAD).
1094 */
1095
1096ipc_port_t memory_manager_default_reference(void)
1097{
1098 ipc_port_t current_manager;
1099
1100 simple_lock(&memory_manager_default_lock);
1101
1102 while (current_manager = ipc_port_copy_send(memory_manager_default),
1103        !IP_VALID(current_manager)) {
1104 thread_sleep((event_t) &memory_manager_default,
1105 simple_lock_addr(memory_manager_default_lock),
1106 FALSE);
1107 simple_lock(&memory_manager_default_lock);
1108 }
1109
1110 simple_unlock(&memory_manager_default_lock);
1111
1112 return current_manager;
1113}
1114
1115/*
1116 * Routine: memory_manager_default_port
1117 * Purpose:
1118 * Returns true if the receiver for the port
1119 * is the default memory manager.
1120 *
1121 * This is a hack to let ds_read_done
1122 * know when it should keep memory wired.
1123 */
1124
1125boolean_t memory_manager_default_port(port)
1126 ipc_port_t port;
1127{
1128 ipc_port_t current;
1129 boolean_t result;
1130
1131 simple_lock(&memory_manager_default_lock);
1132 current = memory_manager_default;
1133 if (IP_VALID(current)) {
1134 /*
1135 * There is no point in bothering to lock
1136 * both ports, which would be painful to do.
1137 * If the receive rights are moving around,
1138 * we might be inaccurate.
1139 */
1140
1141 result = port->ip_receiver == current->ip_receiver;
1142 } else
1143 result = FALSE;
1144 simple_unlock(&memory_manager_default_lock);
1145
1146 return result;
1147}
1148
1149void memory_manager_default_init(void)
1150{
1151 memory_manager_default = IP_NULL;
1152 simple_lock_init(&memory_manager_default_lock);
1153}