Bug Summary

File:        obj-scan-build/../vm/vm_map.c
Location:    line 3370, column 4
Description: Value stored to 'src_size' is never read

Annotated Source Code

1/*
2 * Mach Operating System
3 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University.
4 * Copyright (c) 1993,1994 The University of Utah and
5 * the Computer Systems Laboratory (CSL).
6 * All rights reserved.
7 *
8 * Permission to use, copy, modify and distribute this software and its
9 * documentation is hereby granted, provided that both the copyright
10 * notice and this permission notice appear in all copies of the
11 * software, derivative works or modified versions, and any portions
12 * thereof, and that both notices appear in supporting documentation.
13 *
14 * CARNEGIE MELLON, THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF
15 * THIS SOFTWARE IN ITS "AS IS" CONDITION, AND DISCLAIM ANY LIABILITY
16 * OF ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF
17 * THIS SOFTWARE.
18 *
19 * Carnegie Mellon requests users of this software to return to
20 *
21 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
22 * School of Computer Science
23 * Carnegie Mellon University
24 * Pittsburgh PA 15213-3890
25 *
26 * any improvements or extensions that they make and grant Carnegie Mellon
27 * the rights to redistribute these changes.
28 */
29/*
30 * File: vm/vm_map.c
31 * Author: Avadis Tevanian, Jr., Michael Wayne Young
32 * Date: 1985
33 *
34 * Virtual memory mapping module.
35 */
36
37#include <kern/printf.h>
38#include <mach/kern_return.h>
39#include <mach/port.h>
40#include <mach/vm_attributes.h>
41#include <mach/vm_param.h>
42#include <kern/assert.h>
43#include <kern/debug.h>
44#include <kern/kalloc.h>
45#include <kern/rbtree.h>
46#include <kern/slab.h>
47#include <vm/pmap.h>
48#include <vm/vm_fault.h>
49#include <vm/vm_map.h>
50#include <vm/vm_object.h>
51#include <vm/vm_page.h>
52#include <vm/vm_resident.h>
53#include <vm/vm_kern.h>
54#include <ipc/ipc_port.h>
55
56#if MACH_KDB
57#include <ddb/db_output.h>
58#include <vm/vm_print.h>
59#endif /* MACH_KDB */
60
61/*
62 * Macros to copy a vm_map_entry. We must be careful to correctly
63 * manage the wired page count. vm_map_entry_copy() creates a new
64 * map entry to the same memory - the wired count in the new entry
65 * must be set to zero. vm_map_entry_copy_full() creates a new
66 * entry that is identical to the old entry. This preserves the
67 * wire count; it's used for map splitting and cache changing in
68 * vm_map_copyout.
69 */
70#define vm_map_entry_copy(NEW,OLD) \
71MACRO_BEGIN \
 72	*(NEW) = *(OLD); \
 73	(NEW)->is_shared = FALSE; \
 74	(NEW)->needs_wakeup = FALSE; \
 75	(NEW)->in_transition = FALSE; \
 76	(NEW)->wired_count = 0; \
 77	(NEW)->user_wired_count = 0; \
78MACRO_END
 79
80#define vm_map_entry_copy_full(NEW,OLD) (*(NEW) = *(OLD))
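
Editorial note: a minimal sketch (not part of the annotated source) of how the two copy macros above differ in their handling of the wired counts; the variable names are illustrative assumptions.

    /* Hedged sketch: vm_map_entry_copy() maps the same memory again, so
     * wiring state is reset; vm_map_entry_copy_full() is for splitting an
     * entry, so wiring state is preserved. */
    struct vm_map_entry old, fresh, split;    /* illustrative names */

    old.wired_count = 1;
    old.user_wired_count = 1;

    vm_map_entry_copy(&fresh, &old);          /* fresh.wired_count == 0 */
    vm_map_entry_copy_full(&split, &old);     /* split.wired_count == 1 */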
81
82/*
83 * Virtual memory maps provide for the mapping, protection,
84 * and sharing of virtual memory objects. In addition,
85 * this module provides for an efficient virtual copy of
86 * memory from one map to another.
87 *
88 * Synchronization is required prior to most operations.
89 *
90 * Maps consist of an ordered doubly-linked list of simple
91 * entries; a hint and a red-black tree are used to speed up lookups.
92 *
93 * Sharing maps have been deleted from this version of Mach.
94 * All shared objects are now mapped directly into the respective
95 * maps. This requires a change in the copy on write strategy;
96 * the asymmetric (delayed) strategy is used for shared temporary
97 * objects instead of the symmetric (shadow) strategy. This is
98 * selected by the (new) use_shared_copy bit in the object. See
99 * vm_object_copy_temporary in vm_object.c for details. All maps
100 * are now "top level" maps (either task map, kernel map or submap
101 * of the kernel map).
102 *
 103 * Since portions of maps are specified by start/end addresses,
104 * which may not align with existing map entries, all
105 * routines merely "clip" entries to these start/end values.
106 * [That is, an entry is split into two, bordering at a
107 * start or end value.] Note that these clippings may not
108 * always be necessary (as the two resulting entries are then
109 * not changed); however, the clipping is done for convenience.
110 * No attempt is currently made to "glue back together" two
111 * abutting entries.
112 *
113 * The symmetric (shadow) copy strategy implements virtual copy
114 * by copying VM object references from one map to
115 * another, and then marking both regions as copy-on-write.
116 * It is important to note that only one writeable reference
117 * to a VM object region exists in any map when this strategy
118 * is used -- this means that shadow object creation can be
119 * delayed until a write operation occurs. The asymmetric (delayed)
120 * strategy allows multiple maps to have writeable references to
121 * the same region of a vm object, and hence cannot delay creating
122 * its copy objects. See vm_object_copy_temporary() in vm_object.c.
123 * Copying of permanent objects is completely different; see
124 * vm_object_copy_strategically() in vm_object.c.
125 */
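
Editorial note: the clipping convention described above is applied by the range operations later in this file; a hedged sketch of the usual pattern (map, start and end are assumed to be a locked map and a page-aligned range).

    /* Hedged sketch: narrow a [start, end) operation to whole entries
     * using the lookup and clip primitives defined later in this file. */
    vm_map_entry_t entry;

    if (vm_map_lookup_entry(map, start, &entry))
            vm_map_clip_start(map, entry, start);   /* split at 'start' */
    else
            entry = entry->vme_next;                /* range begins in a hole */

    while ((entry != vm_map_to_entry(map)) && (entry->vme_start < end)) {
            vm_map_clip_end(map, entry, end);       /* split last entry at 'end' */
            /* ... operate on this fully contained entry ... */
            entry = entry->vme_next;
    }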
126
127struct kmem_cache vm_map_cache; /* cache for vm_map structures */
128struct kmem_cache vm_map_entry_cache; /* cache for vm_map_entry structures */
129struct kmem_cache vm_map_kentry_cache; /* cache for kernel entry structures */
130struct kmem_cache vm_map_copy_cache; /* cache for vm_map_copy structures */
131
132/*
133 * Placeholder object for submap operations. This object is dropped
134 * into the range by a call to vm_map_find, and removed when
135 * vm_map_submap creates the submap.
136 */
137
138static struct vm_object vm_submap_object_store;
139vm_object_t vm_submap_object = &vm_submap_object_store;
140
141/*
142 * vm_map_init:
143 *
144 * Initialize the vm_map module. Must be called before
145 * any other vm_map routines.
146 *
147 * Map and entry structures are allocated from caches -- we must
148 * initialize those caches.
149 *
150 * There are three caches of interest:
151 *
152 * vm_map_cache: used to allocate maps.
153 * vm_map_entry_cache: used to allocate map entries.
154 * vm_map_kentry_cache: used to allocate map entries for the kernel.
155 *
156 * Kernel map entries are allocated from a special cache, using a custom
157 * page allocation function to avoid recursion. It would be difficult
158 * (perhaps impossible) for the kernel to allocate more memory to an entry
159 * cache when it became empty since the very act of allocating memory
160 * implies the creation of a new entry.
161 */
162
163vm_offset_t kentry_data;
164vm_size_t kentry_data_size = KENTRY_DATA_SIZE(256*(1 << 12));
165
166static vm_offset_t kentry_pagealloc(vm_size_t size)
167{
168 vm_offset_t result;
169
170 if (size > kentry_data_size)
171 panic("vm_map: kentry memory exhausted");
172
173 result = kentry_data;
174 kentry_data += size;
175 kentry_data_size -= size;
176 return result;
177}
178
179void vm_map_init(void)
180{
181 kmem_cache_init(&vm_map_cache, "vm_map", sizeof(struct vm_map), 0,
182 NULL((void *) 0), NULL((void *) 0), NULL((void *) 0), 0);
183 kmem_cache_init(&vm_map_entry_cache, "vm_map_entry",
184 sizeof(struct vm_map_entry), 0, NULL((void *) 0), NULL((void *) 0), NULL((void *) 0), 0);
185 kmem_cache_init(&vm_map_kentry_cache, "vm_map_kentry",
186 sizeof(struct vm_map_entry), 0, NULL((void *) 0), kentry_pagealloc,
187 NULL((void *) 0), KMEM_CACHE_NOCPUPOOL0x1 | KMEM_CACHE_NOOFFSLAB0x2
188 | KMEM_CACHE_NORECLAIM0x4);
189 kmem_cache_init(&vm_map_copy_cache, "vm_map_copy",
190 sizeof(struct vm_map_copy), 0, NULL((void *) 0), NULL((void *) 0), NULL((void *) 0), 0);
191
192 /*
193 * Submap object is initialized by vm_object_init.
194 */
195}
196
197void vm_map_setup(map, pmap, min, max, pageable)
198 vm_map_t map;
199 pmap_t pmap;
200 vm_offset_t min, max;
201 boolean_t pageable;
202{
203 vm_map_first_entry(map)((map)->hdr.links.next) = vm_map_to_entry(map)((struct vm_map_entry *) &(map)->hdr.links);
204 vm_map_last_entry(map)((map)->hdr.links.prev) = vm_map_to_entry(map)((struct vm_map_entry *) &(map)->hdr.links);
205 map->hdr.nentries = 0;
206 map->hdr.entries_pageable = pageable;
207 rbtree_init(&map->hdr.tree);
208
209 map->size = 0;
210 map->ref_count = 1;
211 map->pmap = pmap;
212 map->min_offsethdr.links.start = min;
213 map->max_offsethdr.links.end = max;
214 map->wiring_required = FALSE((boolean_t) 0);
215 map->wait_for_space = FALSE((boolean_t) 0);
216 map->first_free = vm_map_to_entry(map)((struct vm_map_entry *) &(map)->hdr.links);
217 map->hint = vm_map_to_entry(map)((struct vm_map_entry *) &(map)->hdr.links);
 218	vm_map_lock_init(map);
219 simple_lock_init(&map->ref_lock);
220 simple_lock_init(&map->hint_lock);
221}
222
223/*
224 * vm_map_create:
225 *
226 * Creates and returns a new empty VM map with
227 * the given physical map structure, and having
228 * the given lower and upper address bounds.
229 */
230vm_map_t vm_map_create(pmap, min, max, pageable)
231 pmap_t pmap;
232 vm_offset_t min, max;
233 boolean_t pageable;
234{
235 vm_map_t result;
236
237 result = (vm_map_t) kmem_cache_alloc(&vm_map_cache);
238 if (result == VM_MAP_NULL((vm_map_t) 0))
239 panic("vm_map_create");
240
241 vm_map_setup(result, pmap, min, max, pageable);
242
243 return(result);
244}
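
Editorial note: a hedged usage sketch; pmap_create() and the address bounds are assumptions taken from typical callers such as task creation.

    /* Hedged sketch: create a pageable task map and release it later. */
    vm_map_t map;

    map = vm_map_create(pmap_create((vm_size_t) 0),
                        VM_MIN_ADDRESS, VM_MAX_ADDRESS, TRUE);
    /* ... use the map; ref_count starts at 1 ... */
    vm_map_deallocate(map);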
245
246/*
247 * vm_map_entry_create: [ internal use only ]
248 *
249 * Allocates a VM map entry for insertion in the
250 * given map (or map copy). No fields are filled.
251 */
252#define vm_map_entry_create(map) \
 253	_vm_map_entry_create(&(map)->hdr)
 254
255#define vm_map_copy_entry_create(copy) \
 256	_vm_map_entry_create(&(copy)->cpy_hdr)
257
258vm_map_entry_t _vm_map_entry_create(map_header)
259 const struct vm_map_header *map_header;
260{
261 kmem_cache_t cache;
262 vm_map_entry_t entry;
263
264 if (map_header->entries_pageable)
265 cache = &vm_map_entry_cache;
266 else
267 cache = &vm_map_kentry_cache;
268
269 entry = (vm_map_entry_t) kmem_cache_alloc(cache);
270 if (entry == VM_MAP_ENTRY_NULL((vm_map_entry_t) 0))
271 panic("vm_map_entry_create");
272
273 return(entry);
274}
275
276/*
277 * vm_map_entry_dispose: [ internal use only ]
278 *
279 * Inverse of vm_map_entry_create.
280 */
281#define vm_map_entry_dispose(map, entry) \
 282	_vm_map_entry_dispose(&(map)->hdr, (entry))
 283
284#define vm_map_copy_entry_dispose(map, entry) \
 285	_vm_map_entry_dispose(&(copy)->cpy_hdr, (entry))
286
287void _vm_map_entry_dispose(map_header, entry)
288 const struct vm_map_header *map_header;
289 vm_map_entry_t entry;
290{
291 kmem_cache_t cache;
292
293 if (map_header->entries_pageable)
294 cache = &vm_map_entry_cache;
295 else
296 cache = &vm_map_kentry_cache;
297
298 kmem_cache_free(cache, (vm_offset_t) entry);
299}
300
301/*
302 * Red-black tree lookup/insert comparison functions
303 */
304static inline int vm_map_entry_cmp_lookup(vm_offset_t addr,
305 const struct rbtree_node *node)
306{
307 struct vm_map_entry *entry;
308
 309	entry = rbtree_entry(node, struct vm_map_entry, tree_node);
 310
 311	if (addr < entry->vme_start)
 312		return -1;
 313	else if (addr < entry->vme_end)
 314		return 0;
 315	else
 316		return 1;
317}
318
319static inline int vm_map_entry_cmp_insert(const struct rbtree_node *a,
320 const struct rbtree_node *b)
321{
322 struct vm_map_entry *entry;
323
 324	entry = rbtree_entry(a, struct vm_map_entry, tree_node);
 325	return vm_map_entry_cmp_lookup(entry->vme_start, b);
326}
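
Editorial note: a hedged sketch of how the lookup comparator is meant to be used with the tree; because [vme_start, vme_end) compares as "equal" to any contained address, a nearest-left search yields either the containing entry or the closest preceding one.

    /* Hedged sketch (mirrors vm_map_lookup_entry() below). */
    struct rbtree_node *node;

    node = rbtree_lookup_nearest(&map->hdr.tree, addr,
                                 vm_map_entry_cmp_lookup, RBTREE_LEFT);
    if (node != NULL) {
            vm_map_entry_t e =
                    rbtree_entry(node, struct vm_map_entry, tree_node);
            /* Either addr < e->vme_end (containing entry), or e is the
             * nearest entry that starts below addr. */
    }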
327
328/*
329 * vm_map_entry_{un,}link:
330 *
331 * Insert/remove entries from maps (or map copies).
332 *
333 * The start and end addresses of the entries must be properly set
334 * before using these macros.
335 */
336#define vm_map_entry_link(map, after_where, entry) \
 337	_vm_map_entry_link(&(map)->hdr, after_where, entry)
 338
339#define vm_map_copy_entry_link(copy, after_where, entry) \
 340	_vm_map_entry_link(&(copy)->cpy_hdr, after_where, entry)
 341
342#define _vm_map_entry_link(hdr, after_where, entry) \
 343	MACRO_BEGIN \
 344	(hdr)->nentries++; \
 345	(entry)->vme_prev = (after_where); \
 346	(entry)->vme_next = (after_where)->vme_next; \
 347	(entry)->vme_prev->vme_next = \
 348		(entry)->vme_next->vme_prev = (entry); \
 349	rbtree_insert(&(hdr)->tree, &(entry)->tree_node, \
 350		      vm_map_entry_cmp_insert); \
 351	MACRO_END
352
353#define vm_map_entry_unlink(map, entry) \
 354	_vm_map_entry_unlink(&(map)->hdr, entry)
 355
356#define vm_map_copy_entry_unlink(copy, entry) \
 357	_vm_map_entry_unlink(&(copy)->cpy_hdr, entry)
 358
359#define _vm_map_entry_unlink(hdr, entry) \
 360	MACRO_BEGIN \
 361	(hdr)->nentries--; \
 362	(entry)->vme_next->vme_prev = (entry)->vme_prev; \
 363	(entry)->vme_prev->vme_next = (entry)->vme_next; \
 364	rbtree_remove(&(hdr)->tree, &(entry)->tree_node); \
 365	MACRO_END
366
367/*
368 * vm_map_reference:
369 *
370 * Creates another valid reference to the given map.
371 *
372 */
373void vm_map_reference(map)
374 vm_map_t map;
375{
376 if (map == VM_MAP_NULL((vm_map_t) 0))
377 return;
378
379 simple_lock(&map->ref_lock);
380 map->ref_count++;
381 simple_unlock(&map->ref_lock)((void)(&map->ref_lock));
382}
383
384/*
385 * vm_map_deallocate:
386 *
387 * Removes a reference from the specified map,
388 * destroying it if no references remain.
389 * The map should not be locked.
390 */
391void vm_map_deallocate(map)
392 vm_map_t map;
393{
394 int c;
395
396 if (map == VM_MAP_NULL((vm_map_t) 0))
397 return;
398
399 simple_lock(&map->ref_lock);
400 c = --map->ref_count;
401 simple_unlock(&map->ref_lock)((void)(&map->ref_lock));
402
403 if (c > 0) {
404 return;
405 }
406
407 projected_buffer_collect(map);
408 (void) vm_map_delete(map, map->min_offsethdr.links.start, map->max_offsethdr.links.end);
409
410 pmap_destroy(map->pmap);
411
412 kmem_cache_free(&vm_map_cache, (vm_offset_t) map);
413}
414
415/*
416 * SAVE_HINT:
417 *
418 * Saves the specified entry as the hint for
419 * future lookups. Performs necessary interlocks.
420 */
421#define	SAVE_HINT(map,value) \
 422		simple_lock(&(map)->hint_lock); \
 423		(map)->hint = (value); \
 424		simple_unlock(&(map)->hint_lock);
425
426/*
427 * vm_map_lookup_entry: [ internal use only ]
428 *
429 * Finds the map entry containing (or
430 * immediately preceding) the specified address
431 * in the given map; the entry is returned
432 * in the "entry" parameter. The boolean
433 * result indicates whether the address is
434 * actually contained in the map.
435 */
436boolean_t vm_map_lookup_entry(map, address, entry)
437 vm_map_t map;
438 vm_offset_t address;
439 vm_map_entry_t *entry; /* OUT */
440{
441 struct rbtree_node *node;
442 vm_map_entry_t hint;
443
444 /*
445 * First, make a quick check to see if we are already
446 * looking at the entry we want (which is often the case).
447 */
448
449 simple_lock(&map->hint_lock);
450 hint = map->hint;
451 simple_unlock(&map->hint_lock)((void)(&map->hint_lock));
452
453 if ((hint != vm_map_to_entry(map)((struct vm_map_entry *) &(map)->hdr.links)) && (address >= hint->vme_startlinks.start)) {
454 if (address < hint->vme_endlinks.end) {
455 *entry = hint;
456 return(TRUE((boolean_t) 1));
457 } else {
458 vm_map_entry_t next = hint->vme_nextlinks.next;
459
460 if ((next == vm_map_to_entry(map)((struct vm_map_entry *) &(map)->hdr.links))
461 || (address < next->vme_startlinks.start)) {
462 *entry = hint;
463 return(FALSE((boolean_t) 0));
464 }
465 }
466 }
467
468 /*
469 * If the hint didn't help, use the red-black tree.
470 */
471
 472	node = rbtree_lookup_nearest(&map->hdr.tree, address,
 473				     vm_map_entry_cmp_lookup, RBTREE_LEFT);
474
 475	if (node == NULL) {
 476		*entry = vm_map_to_entry(map);
 477		SAVE_HINT(map, *entry);
 478		return(FALSE);
 479	} else {
 480		*entry = rbtree_entry(node, struct vm_map_entry, tree_node);
 481		SAVE_HINT(map, *entry);
 482		return((address < (*entry)->vme_end) ? TRUE : FALSE);
 483	}
484}
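
Editorial note: a hedged sketch of the caller-side convention for the result (TRUE means the address is inside *entry; FALSE means *entry is the preceding entry, or the map header when the address precedes every entry).

    /* Hedged sketch: typical handling of vm_map_lookup_entry()'s result. */
    vm_map_entry_t entry;

    if (vm_map_lookup_entry(map, addr, &entry)) {
            /* addr lies in [entry->vme_start, entry->vme_end) */
    } else if (entry == vm_map_to_entry(map)) {
            /* addr precedes every entry in the map */
    } else {
            /* addr falls in the hole after 'entry'; the next allocated
             * region, if any, starts at entry->vme_next->vme_start */
    }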
485
486/*
487 * Routine: invalid_user_access
488 *
489 * Verifies whether user access is valid.
490 */
491
492boolean_t
493invalid_user_access(map, start, end, prot)
494 vm_map_t map;
495 vm_offset_t start, end;
496 vm_prot_t prot;
497{
498 vm_map_entry_t entry;
499
500 return (map == VM_MAP_NULL((vm_map_t) 0) || map == kernel_map ||
501 !vm_map_lookup_entry(map, start, &entry) ||
502 entry->vme_endlinks.end < end ||
503 (prot & ~(entry->protection)));
504}
505
506
507/*
508 * Routine: vm_map_find_entry
509 * Purpose:
510 * Allocate a range in the specified virtual address map,
511 * returning the entry allocated for that range.
512 * Used by kmem_alloc, etc. Returns wired entries.
513 *
514 * The map must be locked.
515 *
516 * If an entry is allocated, the object/offset fields
517 * are initialized to zero. If an object is supplied,
518 * then an existing entry may be extended.
519 */
520kern_return_t vm_map_find_entry(map, address, size, mask, object, o_entry)
521 vm_map_t map;
522 vm_offset_t *address; /* OUT */
523 vm_size_t size;
524 vm_offset_t mask;
525 vm_object_t object;
526 vm_map_entry_t *o_entry; /* OUT */
527{
528 vm_map_entry_t entry, new_entry;
529 vm_offset_t start;
530 vm_offset_t end;
531
532 /*
533 * Look for the first possible address;
534 * if there's already something at this
535 * address, we have to start after it.
536 */
537
538 if ((entry = map->first_free) == vm_map_to_entry(map)((struct vm_map_entry *) &(map)->hdr.links))
539 start = map->min_offsethdr.links.start;
540 else
541 start = entry->vme_endlinks.end;
542
543 /*
544 * In any case, the "entry" always precedes
545 * the proposed new region throughout the loop:
546 */
547
548 while (TRUE((boolean_t) 1)) {
549 vm_map_entry_t next;
550
551 /*
552 * Find the end of the proposed new region.
553 * Be sure we didn't go beyond the end, or
554 * wrap around the address.
555 */
556
557 if (((start + mask) & ~mask) < start) {
 558			printf_once("no more room for vm_map_find_entry in %p\n", map);
559 return(KERN_NO_SPACE3);
560 }
561 start = ((start + mask) & ~mask);
562 end = start + size;
563
564 if ((end > map->max_offsethdr.links.end) || (end < start)) {
 565			printf_once("no more room for vm_map_find_entry in %p\n", map);
566 return(KERN_NO_SPACE3);
567 }
568
569 /*
570 * If there are no more entries, we must win.
571 */
572
573 next = entry->vme_nextlinks.next;
574 if (next == vm_map_to_entry(map)((struct vm_map_entry *) &(map)->hdr.links))
575 break;
576
577 /*
578 * If there is another entry, it must be
579 * after the end of the potential new region.
580 */
581
582 if (next->vme_startlinks.start >= end)
583 break;
584
585 /*
586 * Didn't fit -- move to the next entry.
587 */
588
589 entry = next;
590 start = entry->vme_endlinks.end;
591 }
592
593 /*
594 * At this point,
595 * "start" and "end" should define the endpoints of the
596 * available new range, and
597 * "entry" should refer to the region before the new
598 * range, and
599 *
600 * the map should be locked.
601 */
602
603 *address = start;
604
605 /*
606 * See whether we can avoid creating a new entry by
607 * extending one of our neighbors. [So far, we only attempt to
608 * extend from below.]
609 */
610
611 if ((object != VM_OBJECT_NULL((vm_object_t) 0)) &&
612 (entry != vm_map_to_entry(map)((struct vm_map_entry *) &(map)->hdr.links)) &&
613 (entry->vme_endlinks.end == start) &&
614 (!entry->is_shared) &&
615 (!entry->is_sub_map) &&
616 (entry->object.vm_object == object) &&
617 (entry->needs_copy == FALSE((boolean_t) 0)) &&
618 (entry->inheritance == VM_INHERIT_DEFAULT((vm_inherit_t) 1)) &&
619 (entry->protection == VM_PROT_DEFAULT(((vm_prot_t) 0x01)|((vm_prot_t) 0x02))) &&
620 (entry->max_protection == VM_PROT_ALL(((vm_prot_t) 0x01)|((vm_prot_t) 0x02)|((vm_prot_t) 0x04))) &&
621 (entry->wired_count == 1) &&
622 (entry->user_wired_count == 0) &&
623 (entry->projected_on == 0)) {
624 /*
625 * Because this is a special case,
626 * we don't need to use vm_object_coalesce.
627 */
628
629 entry->vme_endlinks.end = end;
630 new_entry = entry;
631 } else {
632 new_entry = vm_map_entry_create(map)_vm_map_entry_create(&(map)->hdr);
633
634 new_entry->vme_startlinks.start = start;
635 new_entry->vme_endlinks.end = end;
636
637 new_entry->is_shared = FALSE((boolean_t) 0);
638 new_entry->is_sub_map = FALSE((boolean_t) 0);
639 new_entry->object.vm_object = VM_OBJECT_NULL((vm_object_t) 0);
640 new_entry->offset = (vm_offset_t) 0;
641
642 new_entry->needs_copy = FALSE((boolean_t) 0);
643
644 new_entry->inheritance = VM_INHERIT_DEFAULT((vm_inherit_t) 1);
645 new_entry->protection = VM_PROT_DEFAULT(((vm_prot_t) 0x01)|((vm_prot_t) 0x02));
646 new_entry->max_protection = VM_PROT_ALL(((vm_prot_t) 0x01)|((vm_prot_t) 0x02)|((vm_prot_t) 0x04));
647 new_entry->wired_count = 1;
648 new_entry->user_wired_count = 0;
649
650 new_entry->in_transition = FALSE((boolean_t) 0);
651 new_entry->needs_wakeup = FALSE((boolean_t) 0);
652 new_entry->projected_on = 0;
653
654 /*
655 * Insert the new entry into the list
656 */
657
 658		vm_map_entry_link(map, entry, new_entry);
659 }
660
661 map->size += size;
662
663 /*
664 * Update the free space hint and the lookup hint
665 */
666
667 map->first_free = new_entry;
 668	SAVE_HINT(map, new_entry);
669
670 *o_entry = new_entry;
671 return(KERN_SUCCESS0);
672}
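
Editorial note: a hedged sketch of how a kmem-style caller might use this routine, following the header comment (the map must be locked; object and size are assumptions).

    /* Hedged sketch: allocate a wired kernel range, then attach an object. */
    vm_offset_t addr;
    vm_map_entry_t entry;
    kern_return_t kr;

    vm_map_lock(kernel_map);
    kr = vm_map_find_entry(kernel_map, &addr, size,
                           (vm_offset_t) 0,          /* no alignment mask */
                           VM_OBJECT_NULL, &entry);
    if (kr == KERN_SUCCESS) {
            entry->object.vm_object = object;        /* caller fills these in */
            entry->offset = (vm_offset_t) 0;
    }
    vm_map_unlock(kernel_map);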
673
674boolean_t vm_map_pmap_enter_print = FALSE((boolean_t) 0);
675boolean_t vm_map_pmap_enter_enable = FALSE((boolean_t) 0);
676
677/*
678 * Routine: vm_map_pmap_enter
679 *
680 * Description:
681 * Force pages from the specified object to be entered into
682 * the pmap at the specified address if they are present.
 683 *		The scan ends as soon as a page is not found in the object.
684 *
685 * Returns:
686 * Nothing.
687 *
688 * In/out conditions:
689 * The source map should not be locked on entry.
690 */
691void
692vm_map_pmap_enter(map, addr, end_addr, object, offset, protection)
693 vm_map_t map;
694 vm_offset_t addr;
695 vm_offset_t end_addr;
696 vm_object_t object;
697 vm_offset_t offset;
698 vm_prot_t protection;
699{
700 while (addr < end_addr) {
701 vm_page_t m;
702
703 vm_object_lock(object);
704 vm_object_paging_begin(object)((object)->paging_in_progress++);
705
706 m = vm_page_lookup(object, offset);
707 if (m == VM_PAGE_NULL((vm_page_t) 0) || m->absent) {
 708			vm_object_paging_end(object);
709 vm_object_unlock(object)((void)(&(object)->Lock));
710 return;
711 }
712
713 if (vm_map_pmap_enter_print) {
 714			printf("vm_map_pmap_enter:");
 715			printf("map: %p, addr: %lx, object: %p, offset: %lx\n",
716 map, addr, object, offset);
717 }
718
719 m->busy = TRUE((boolean_t) 1);
720 vm_object_unlock(object)((void)(&(object)->Lock));
721
 722		PMAP_ENTER(map->pmap, addr, m,
 723			   protection, FALSE);
724
725 vm_object_lock(object);
 726		PAGE_WAKEUP_DONE(m);
727 vm_page_lock_queues();
728 if (!m->active && !m->inactive)
729 vm_page_activate(m);
730 vm_page_unlock_queues()((void)(&vm_page_queue_lock));
 731		vm_object_paging_end(object);
732 vm_object_unlock(object)((void)(&(object)->Lock));
733
734 offset += PAGE_SIZE(1 << 12);
735 addr += PAGE_SIZE(1 << 12);
736 }
737}
738
739/*
740 * Routine: vm_map_enter
741 *
742 * Description:
743 * Allocate a range in the specified virtual address map.
744 * The resulting range will refer to memory defined by
745 * the given memory object and offset into that object.
746 *
747 * Arguments are as defined in the vm_map call.
748 */
749kern_return_t vm_map_enter(
750 map,
751 address, size, mask, anywhere,
752 object, offset, needs_copy,
753 cur_protection, max_protection, inheritance)
754 vm_map_t map;
755 vm_offset_t *address; /* IN/OUT */
756 vm_size_t size;
757 vm_offset_t mask;
758 boolean_t anywhere;
759 vm_object_t object;
760 vm_offset_t offset;
761 boolean_t needs_copy;
762 vm_prot_t cur_protection;
763 vm_prot_t max_protection;
764 vm_inherit_t inheritance;
765{
766 vm_map_entry_t entry;
767 vm_offset_t start;
768 vm_offset_t end;
769 kern_return_t result = KERN_SUCCESS0;
770
771#define RETURN(value) { result = value; goto BailOut; }
772
773 if (size == 0)
774 return KERN_INVALID_ARGUMENT4;
775
776 StartAgain: ;
777
778 start = *address;
779
780 if (anywhere) {
781 vm_map_lock(map)({ lock_write(&(map)->lock); (map)->timestamp++; });
782
783 /*
784 * Calculate the first possible address.
785 */
786
787 if (start < map->min_offsethdr.links.start)
788 start = map->min_offsethdr.links.start;
789 if (start > map->max_offsethdr.links.end)
790 RETURN(KERN_NO_SPACE3);
791
792 /*
793 * Look for the first possible address;
794 * if there's already something at this
795 * address, we have to start after it.
796 */
797
798 if (start == map->min_offsethdr.links.start) {
799 if ((entry = map->first_free) != vm_map_to_entry(map)((struct vm_map_entry *) &(map)->hdr.links))
800 start = entry->vme_endlinks.end;
801 } else {
802 vm_map_entry_t tmp_entry;
803 if (vm_map_lookup_entry(map, start, &tmp_entry))
804 start = tmp_entry->vme_endlinks.end;
805 entry = tmp_entry;
806 }
807
808 /*
809 * In any case, the "entry" always precedes
810 * the proposed new region throughout the
811 * loop:
812 */
813
814 while (TRUE((boolean_t) 1)) {
815 vm_map_entry_t next;
816
817 /*
818 * Find the end of the proposed new region.
819 * Be sure we didn't go beyond the end, or
820 * wrap around the address.
821 */
822
823 if (((start + mask) & ~mask) < start) {
 824				printf_once("no more room for vm_map_enter in %p\n", map);
825 RETURN(KERN_NO_SPACE3);
826 }
827 start = ((start + mask) & ~mask);
828 end = start + size;
829
830 if ((end > map->max_offsethdr.links.end) || (end < start)) {
831 if (map->wait_for_space) {
832 if (size <= (map->max_offsethdr.links.end -
833 map->min_offsethdr.links.start)) {
834 assert_wait((event_t) map, TRUE((boolean_t) 1));
835 vm_map_unlock(map)lock_done(&(map)->lock);
836 thread_block((void (*)()) 0);
837 goto StartAgain;
838 }
839 }
840
 841			printf_once("no more room for vm_map_enter in %p\n", map);
842 RETURN(KERN_NO_SPACE3);
843 }
844
845 /*
846 * If there are no more entries, we must win.
847 */
848
849 next = entry->vme_nextlinks.next;
850 if (next == vm_map_to_entry(map)((struct vm_map_entry *) &(map)->hdr.links))
851 break;
852
853 /*
854 * If there is another entry, it must be
855 * after the end of the potential new region.
856 */
857
858 if (next->vme_startlinks.start >= end)
859 break;
860
861 /*
862 * Didn't fit -- move to the next entry.
863 */
864
865 entry = next;
866 start = entry->vme_endlinks.end;
867 }
868 *address = start;
869 } else {
870 vm_map_entry_t temp_entry;
871
872 /*
873 * Verify that:
874 * the address doesn't itself violate
875 * the mask requirement.
876 */
877
878 if ((start & mask) != 0)
879 return(KERN_NO_SPACE3);
880
881 vm_map_lock(map)({ lock_write(&(map)->lock); (map)->timestamp++; });
882
883 /*
884 * ... the address is within bounds
885 */
886
887 end = start + size;
888
889 if ((start < map->min_offsethdr.links.start) ||
890 (end > map->max_offsethdr.links.end) ||
891 (start >= end)) {
892 RETURN(KERN_INVALID_ADDRESS1);
893 }
894
895 /*
896 * ... the starting address isn't allocated
897 */
898
899 if (vm_map_lookup_entry(map, start, &temp_entry))
900 RETURN(KERN_NO_SPACE3);
901
902 entry = temp_entry;
903
904 /*
905 * ... the next region doesn't overlap the
906 * end point.
907 */
908
909 if ((entry->vme_nextlinks.next != vm_map_to_entry(map)((struct vm_map_entry *) &(map)->hdr.links)) &&
910 (entry->vme_nextlinks.next->vme_startlinks.start < end))
911 RETURN(KERN_NO_SPACE3);
912 }
913
914 /*
915 * At this point,
916 * "start" and "end" should define the endpoints of the
917 * available new range, and
918 * "entry" should refer to the region before the new
919 * range, and
920 *
921 * the map should be locked.
922 */
923
924 /*
925 * See whether we can avoid creating a new entry (and object) by
926 * extending one of our neighbors. [So far, we only attempt to
927 * extend from below.]
928 */
929
930 if ((object == VM_OBJECT_NULL((vm_object_t) 0)) &&
931 (entry != vm_map_to_entry(map)((struct vm_map_entry *) &(map)->hdr.links)) &&
932 (entry->vme_endlinks.end == start) &&
933 (!entry->is_shared) &&
934 (!entry->is_sub_map) &&
935 (entry->inheritance == inheritance) &&
936 (entry->protection == cur_protection) &&
937 (entry->max_protection == max_protection) &&
938 (entry->wired_count == 0) && /* implies user_wired_count == 0 */
939 (entry->projected_on == 0)) {
940 if (vm_object_coalesce(entry->object.vm_object,
941 VM_OBJECT_NULL((vm_object_t) 0),
942 entry->offset,
943 (vm_offset_t) 0,
944 (vm_size_t)(entry->vme_endlinks.end - entry->vme_startlinks.start),
945 (vm_size_t)(end - entry->vme_endlinks.end))) {
946
947 /*
948 * Coalesced the two objects - can extend
949 * the previous map entry to include the
950 * new range.
951 */
952 map->size += (end - entry->vme_endlinks.end);
953 entry->vme_endlinks.end = end;
954 RETURN(KERN_SUCCESS0);
955 }
956 }
957
958 /*
959 * Create a new entry
960 */
961
962 /**/ {
963 vm_map_entry_t new_entry;
964
965 new_entry = vm_map_entry_create(map)_vm_map_entry_create(&(map)->hdr);
966
967 new_entry->vme_startlinks.start = start;
968 new_entry->vme_endlinks.end = end;
969
970 new_entry->is_shared = FALSE((boolean_t) 0);
971 new_entry->is_sub_map = FALSE((boolean_t) 0);
972 new_entry->object.vm_object = object;
973 new_entry->offset = offset;
974
975 new_entry->needs_copy = needs_copy;
976
977 new_entry->inheritance = inheritance;
978 new_entry->protection = cur_protection;
979 new_entry->max_protection = max_protection;
980 new_entry->wired_count = 0;
981 new_entry->user_wired_count = 0;
982
983 new_entry->in_transition = FALSE((boolean_t) 0);
984 new_entry->needs_wakeup = FALSE((boolean_t) 0);
985 new_entry->projected_on = 0;
986
987 /*
988 * Insert the new entry into the list
989 */
990
 991	vm_map_entry_link(map, entry, new_entry);
992 map->size += size;
993
994 /*
995 * Update the free space hint and the lookup hint
996 */
997
998 if ((map->first_free == entry) &&
999 ((entry == vm_map_to_entry(map)((struct vm_map_entry *) &(map)->hdr.links) ? map->min_offsethdr.links.start : entry->vme_endlinks.end)
1000 >= new_entry->vme_startlinks.start))
1001 map->first_free = new_entry;
1002
 1003	SAVE_HINT(map, new_entry);
1004
1005 vm_map_unlock(map)lock_done(&(map)->lock);
1006
1007 if ((object != VM_OBJECT_NULL((vm_object_t) 0)) &&
1008 (vm_map_pmap_enter_enable) &&
1009 (!anywhere) &&
1010 (!needs_copy) &&
1011 (size < (128*1024))) {
1012 vm_map_pmap_enter(map, start, end,
1013 object, offset, cur_protection);
1014 }
1015
1016 return(result);
1017 /**/ }
1018
1019 BailOut: ;
1020
1021 vm_map_unlock(map)lock_done(&(map)->lock);
1022 return(result);
1023
1024#undef RETURN
1025}
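
Editorial note: a hedged sketch of a typical "anywhere" call; object, offset and size are assumptions.

    /* Hedged sketch: map 'size' bytes of 'object' at a map-chosen address. */
    vm_offset_t addr = 0;
    kern_return_t kr;

    kr = vm_map_enter(map, &addr, size,
                      (vm_offset_t) 0,      /* mask: no alignment constraint */
                      TRUE,                 /* anywhere */
                      object, (vm_offset_t) 0,
                      FALSE,                /* needs_copy */
                      VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT);
    /* On KERN_SUCCESS, [addr, addr + size) is mapped in 'map'. */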
1026
1027/*
1028 * vm_map_clip_start: [ internal use only ]
1029 *
1030 * Asserts that the given entry begins at or after
1031 * the specified address; if necessary,
1032 * it splits the entry into two.
1033 */
1034#define vm_map_clip_start(map, entry, startaddr) \
 1035	MACRO_BEGIN \
 1036	if ((startaddr) > (entry)->vme_start) \
 1037		_vm_map_clip_start(&(map)->hdr,(entry),(startaddr)); \
 1038	MACRO_END
1039
1040#define vm_map_copy_clip_start(copy, entry, startaddr) \
 1041	MACRO_BEGIN \
 1042	if ((startaddr) > (entry)->vme_start) \
 1043		_vm_map_clip_start(&(copy)->cpy_hdr,(entry),(startaddr)); \
 1044	MACRO_END
1045
1046/*
1047 * This routine is called only when it is known that
1048 * the entry must be split.
1049 */
1050void _vm_map_clip_start(map_header, entry, start)
1051 struct vm_map_header *map_header;
1052 vm_map_entry_t entry;
1053 vm_offset_t start;
1054{
1055 vm_map_entry_t new_entry;
1056
1057 /*
1058 * Split off the front portion --
1059 * note that we must insert the new
1060 * entry BEFORE this one, so that
1061 * this entry has the specified starting
1062 * address.
1063 */
1064
1065 new_entry = _vm_map_entry_create(map_header);
1066 vm_map_entry_copy_full(new_entry, entry)(*(new_entry) = *(entry));
1067
1068 new_entry->vme_endlinks.end = start;
1069 entry->offset += (start - entry->vme_startlinks.start);
1070 entry->vme_startlinks.start = start;
1071
 1072	_vm_map_entry_link(map_header, entry->vme_prev, new_entry);
1073
1074 if (entry->is_sub_map)
1075 vm_map_reference(new_entry->object.sub_map);
1076 else
1077 vm_object_reference(new_entry->object.vm_object);
1078}
1079
1080/*
1081 * vm_map_clip_end: [ internal use only ]
1082 *
1083 * Asserts that the given entry ends at or before
1084 * the specified address; if necessary,
1085 * it splits the entry into two.
1086 */
1087#define vm_map_clip_end(map, entry, endaddr) \
 1088	MACRO_BEGIN \
 1089	if ((endaddr) < (entry)->vme_end) \
 1090		_vm_map_clip_end(&(map)->hdr,(entry),(endaddr)); \
 1091	MACRO_END
1092
1093#define vm_map_copy_clip_end(copy, entry, endaddr) \
 1094	MACRO_BEGIN \
 1095	if ((endaddr) < (entry)->vme_end) \
 1096		_vm_map_clip_end(&(copy)->cpy_hdr,(entry),(endaddr)); \
 1097	MACRO_END
1098
1099/*
1100 * This routine is called only when it is known that
1101 * the entry must be split.
1102 */
1103void _vm_map_clip_end(map_header, entry, end)
1104 struct vm_map_header *map_header;
1105 vm_map_entry_t entry;
1106 vm_offset_t end;
1107{
1108 vm_map_entry_t new_entry;
1109
1110 /*
1111 * Create a new entry and insert it
1112 * AFTER the specified entry
1113 */
1114
1115 new_entry = _vm_map_entry_create(map_header);
1116 vm_map_entry_copy_full(new_entry, entry)(*(new_entry) = *(entry));
1117
1118 new_entry->vme_startlinks.start = entry->vme_endlinks.end = end;
1119 new_entry->offset += (end - entry->vme_startlinks.start);
1120
 1121	_vm_map_entry_link(map_header, entry, new_entry);
1122
1123 if (entry->is_sub_map)
1124 vm_map_reference(new_entry->object.sub_map);
1125 else
1126 vm_object_reference(new_entry->object.vm_object);
1127}
1128
1129/*
1130 * VM_MAP_RANGE_CHECK: [ internal use only ]
1131 *
1132 * Asserts that the starting and ending region
1133 * addresses fall within the valid range of the map.
1134 */
1135#define	VM_MAP_RANGE_CHECK(map, start, end) \
 1136	{ \
 1137	if (start < vm_map_min(map)) \
 1138		start = vm_map_min(map); \
 1139	if (end > vm_map_max(map)) \
 1140		end = vm_map_max(map); \
 1141	if (start > end) \
 1142		start = end; \
 1143	}
1144
1145/*
1146 * vm_map_submap: [ kernel use only ]
1147 *
1148 * Mark the given range as handled by a subordinate map.
1149 *
1150 * This range must have been created with vm_map_find using
1151 * the vm_submap_object, and no other operations may have been
1152 * performed on this range prior to calling vm_map_submap.
1153 *
1154 * Only a limited number of operations can be performed
 1155 *	within this range after calling vm_map_submap:
1156 * vm_fault
1157 * [Don't try vm_map_copyin!]
1158 *
1159 * To remove a submapping, one must first remove the
1160 * range from the superior map, and then destroy the
1161 * submap (if desired). [Better yet, don't try it.]
1162 */
1163kern_return_t vm_map_submap(map, start, end, submap)
1164 vm_map_t map;
1165 vm_offset_t start;
1166 vm_offset_t end;
1167 vm_map_t submap;
1168{
1169 vm_map_entry_t entry;
1170 kern_return_t result = KERN_INVALID_ARGUMENT4;
1171 vm_object_t object;
1172
1173 vm_map_lock(map)({ lock_write(&(map)->lock); (map)->timestamp++; });
1174
 1175	VM_MAP_RANGE_CHECK(map, start, end);
1176
1177 if (vm_map_lookup_entry(map, start, &entry)) {
 1178		vm_map_clip_start(map, entry, start);
1179 }
1180 else
1181 entry = entry->vme_nextlinks.next;
1182
 1183	vm_map_clip_end(map, entry, end);
1184
1185 if ((entry->vme_startlinks.start == start) && (entry->vme_endlinks.end == end) &&
1186 (!entry->is_sub_map) &&
1187 ((object = entry->object.vm_object) == vm_submap_object) &&
1188 (object->resident_page_count == 0) &&
1189 (object->copy == VM_OBJECT_NULL((vm_object_t) 0)) &&
1190 (object->shadow == VM_OBJECT_NULL((vm_object_t) 0)) &&
1191 (!object->pager_created)) {
1192 entry->object.vm_object = VM_OBJECT_NULL((vm_object_t) 0);
1193 vm_object_deallocate(object);
1194 entry->is_sub_map = TRUE((boolean_t) 1);
1195 vm_map_reference(entry->object.sub_map = submap);
1196 result = KERN_SUCCESS0;
1197 }
1198 vm_map_unlock(map)lock_done(&(map)->lock);
1199
1200 return(result);
1201}
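
Editorial note: a hedged sketch of the sequence the header comment prescribes; reserving the range with vm_map_enter() and the placeholder vm_submap_object is an assumption about how callers set this up.

    /* Hedged sketch: reserve [start, end) with the placeholder object,
     * then mark the range as handled by 'submap'. */
    kern_return_t kr;

    kr = vm_map_enter(parent_map, &start, end - start,
                      (vm_offset_t) 0, FALSE,        /* exact address */
                      vm_submap_object, (vm_offset_t) 0, FALSE,
                      VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT);
    if (kr == KERN_SUCCESS)
            kr = vm_map_submap(parent_map, start, end, submap);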
1202
1203/*
1204 * vm_map_protect:
1205 *
1206 * Sets the protection of the specified address
1207 * region in the target map. If "set_max" is
1208 * specified, the maximum protection is to be set;
1209 * otherwise, only the current protection is affected.
1210 */
1211kern_return_t vm_map_protect(map, start, end, new_prot, set_max)
1212 vm_map_t map;
1213 vm_offset_t start;
1214 vm_offset_t end;
1215 vm_prot_t new_prot;
1216 boolean_t set_max;
1217{
1218 vm_map_entry_t current;
1219 vm_map_entry_t entry;
1220
1221 vm_map_lock(map)({ lock_write(&(map)->lock); (map)->timestamp++; });
1222
 1223	VM_MAP_RANGE_CHECK(map, start, end);
1224
1225 if (vm_map_lookup_entry(map, start, &entry)) {
 1226		vm_map_clip_start(map, entry, start);
1227 }
1228 else
1229 entry = entry->vme_nextlinks.next;
1230
1231 /*
1232 * Make a first pass to check for protection
1233 * violations.
1234 */
1235
1236 current = entry;
1237 while ((current != vm_map_to_entry(map)((struct vm_map_entry *) &(map)->hdr.links)) &&
1238 (current->vme_startlinks.start < end)) {
1239
1240 if (current->is_sub_map) {
1241 vm_map_unlock(map)lock_done(&(map)->lock);
1242 return(KERN_INVALID_ARGUMENT4);
1243 }
1244 if ((new_prot & (VM_PROT_NOTIFY((vm_prot_t) 0x10) | current->max_protection))
1245 != new_prot) {
1246 vm_map_unlock(map)lock_done(&(map)->lock);
1247 return(KERN_PROTECTION_FAILURE2);
1248 }
1249
1250 current = current->vme_nextlinks.next;
1251 }
1252
1253 /*
1254 * Go back and fix up protections.
1255 * [Note that clipping is not necessary the second time.]
1256 */
1257
1258 current = entry;
1259
1260 while ((current != vm_map_to_entry(map)((struct vm_map_entry *) &(map)->hdr.links)) &&
1261 (current->vme_startlinks.start < end)) {
1262
1263 vm_prot_t old_prot;
1264
 1265		vm_map_clip_end(map, current, end);
1266
1267 old_prot = current->protection;
1268 if (set_max)
1269 current->protection =
1270 (current->max_protection = new_prot) &
1271 old_prot;
1272 else
1273 current->protection = new_prot;
1274
1275 /*
1276 * Update physical map if necessary.
1277 */
1278
1279 if (current->protection != old_prot) {
1280 pmap_protect(map->pmap, current->vme_startlinks.start,
1281 current->vme_endlinks.end,
1282 current->protection);
1283 }
1284 current = current->vme_nextlinks.next;
1285 }
1286
1287 vm_map_unlock(map)lock_done(&(map)->lock);
1288 return(KERN_SUCCESS0);
1289}
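
Editorial note: a hedged usage sketch; make a range read-only without touching its maximum protection.

    /* Hedged sketch: change current protection only (set_max == FALSE). */
    kern_return_t kr;

    kr = vm_map_protect(map, start, end, VM_PROT_READ, FALSE);
    if (kr == KERN_PROTECTION_FAILURE) {
            /* requested bits exceed some entry's max_protection */
    }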
1290
1291/*
1292 * vm_map_inherit:
1293 *
1294 * Sets the inheritance of the specified address
1295 * range in the target map. Inheritance
1296 * affects how the map will be shared with
1297 * child maps at the time of vm_map_fork.
1298 */
1299kern_return_t vm_map_inherit(map, start, end, new_inheritance)
1300 vm_map_t map;
1301 vm_offset_t start;
1302 vm_offset_t end;
1303 vm_inherit_t new_inheritance;
1304{
1305 vm_map_entry_t entry;
1306 vm_map_entry_t temp_entry;
1307
1308 vm_map_lock(map)({ lock_write(&(map)->lock); (map)->timestamp++; });
1309
 1310	VM_MAP_RANGE_CHECK(map, start, end);
1311
1312 if (vm_map_lookup_entry(map, start, &temp_entry)) {
1313 entry = temp_entry;
 1314		vm_map_clip_start(map, entry, start);
1315 }
1316 else
1317 entry = temp_entry->vme_nextlinks.next;
1318
1319 while ((entry != vm_map_to_entry(map)((struct vm_map_entry *) &(map)->hdr.links)) && (entry->vme_startlinks.start < end)) {
 1320		vm_map_clip_end(map, entry, end);
1321
1322 entry->inheritance = new_inheritance;
1323
1324 entry = entry->vme_nextlinks.next;
1325 }
1326
1327 vm_map_unlock(map)lock_done(&(map)->lock);
1328 return(KERN_SUCCESS0);
1329}
1330
1331/*
1332 * vm_map_pageable_common:
1333 *
1334 * Sets the pageability of the specified address
1335 * range in the target map. Regions specified
1336 * as not pageable require locked-down physical
1337 * memory and physical page maps. access_type indicates
1338 * types of accesses that must not generate page faults.
1339 * This is checked against protection of memory being locked-down.
1340 * access_type of VM_PROT_NONE makes memory pageable.
1341 *
1342 * The map must not be locked, but a reference
1343 * must remain to the map throughout the call.
1344 *
1345 * Callers should use macros in vm/vm_map.h (i.e. vm_map_pageable,
1346 * or vm_map_pageable_user); don't call vm_map_pageable directly.
1347 */
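
Editorial note: a hedged sketch of the wrapper macros the comment above refers to (declared in vm/vm_map.h); their exact spelling there is an assumption.

    /* Hedged sketch: wire a range for read/write access, then unwire it. */
    kern_return_t kr;

    kr = vm_map_pageable(map, start, end, VM_PROT_READ | VM_PROT_WRITE);
    if (kr == KERN_SUCCESS)
            kr = vm_map_pageable(map, start, end, VM_PROT_NONE);   /* unwire */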
1348kern_return_t vm_map_pageable_common(map, start, end, access_type, user_wire)
1349 vm_map_t map;
1350 vm_offset_t start;
1351 vm_offset_t end;
1352 vm_prot_t access_type;
1353 boolean_t user_wire;
1354{
1355 vm_map_entry_t entry;
1356 vm_map_entry_t start_entry;
1357
1358 vm_map_lock(map)({ lock_write(&(map)->lock); (map)->timestamp++; });
1359
 1360	VM_MAP_RANGE_CHECK(map, start, end);
1361
1362 if (vm_map_lookup_entry(map, start, &start_entry)) {
1363 entry = start_entry;
1364 /*
1365 * vm_map_clip_start will be done later.
1366 */
1367 }
1368 else {
1369 /*
1370 * Start address is not in map; this is fatal.
1371 */
1372 vm_map_unlock(map)lock_done(&(map)->lock);
1373 return(KERN_FAILURE5);
1374 }
1375
1376 /*
1377 * Actions are rather different for wiring and unwiring,
1378 * so we have two separate cases.
1379 */
1380
1381 if (access_type == VM_PROT_NONE((vm_prot_t) 0x00)) {
1382
 1383	vm_map_clip_start(map, entry, start);
1384
1385 /*
1386 * Unwiring. First ensure that the range to be
1387 * unwired is really wired down.
1388 */
1389 while ((entry != vm_map_to_entry(map)((struct vm_map_entry *) &(map)->hdr.links)) &&
1390 (entry->vme_startlinks.start < end)) {
1391
1392 if ((entry->wired_count == 0) ||
1393 ((entry->vme_endlinks.end < end) &&
1394 ((entry->vme_nextlinks.next == vm_map_to_entry(map)((struct vm_map_entry *) &(map)->hdr.links)) ||
1395 (entry->vme_nextlinks.next->vme_startlinks.start > entry->vme_endlinks.end))) ||
1396 (user_wire && (entry->user_wired_count == 0))) {
1397 vm_map_unlock(map)lock_done(&(map)->lock);
1398 return(KERN_INVALID_ARGUMENT4);
1399 }
1400 entry = entry->vme_nextlinks.next;
1401 }
1402
1403 /*
1404 * Now decrement the wiring count for each region.
1405 * If a region becomes completely unwired,
1406 * unwire its physical pages and mappings.
1407 */
1408 entry = start_entry;
1409 while ((entry != vm_map_to_entry(map)((struct vm_map_entry *) &(map)->hdr.links)) &&
1410 (entry->vme_startlinks.start < end)) {
 1411		vm_map_clip_end(map, entry, end);
1412
1413 if (user_wire) {
1414 if (--(entry->user_wired_count) == 0)
1415 entry->wired_count--;
1416 }
1417 else {
1418 entry->wired_count--;
1419 }
1420
1421 if (entry->wired_count == 0)
1422 vm_fault_unwire(map, entry);
1423
1424 entry = entry->vme_nextlinks.next;
1425 }
1426 }
1427
1428 else {
1429 /*
1430 * Wiring. We must do this in two passes:
1431 *
1432 * 1. Holding the write lock, we create any shadow
1433 * or zero-fill objects that need to be created.
1434 * Then we clip each map entry to the region to be
1435 * wired and increment its wiring count. We
1436 * create objects before clipping the map entries
1437 * to avoid object proliferation.
1438 *
1439 * 2. We downgrade to a read lock, and call
1440 * vm_fault_wire to fault in the pages for any
1441 * newly wired area (wired_count is 1).
1442 *
1443 * Downgrading to a read lock for vm_fault_wire avoids
1444 * a possible deadlock with another thread that may have
1445 * faulted on one of the pages to be wired (it would mark
1446 * the page busy, blocking us, then in turn block on the
1447 * map lock that we hold). Because of problems in the
1448 * recursive lock package, we cannot upgrade to a write
1449 * lock in vm_map_lookup. Thus, any actions that require
1450 * the write lock must be done beforehand. Because we
1451 * keep the read lock on the map, the copy-on-write
1452 * status of the entries we modify here cannot change.
1453 */
1454
1455 /*
1456 * Pass 1.
1457 */
1458 while ((entry != vm_map_to_entry(map)((struct vm_map_entry *) &(map)->hdr.links)) &&
1459 (entry->vme_startlinks.start < end)) {
1460 vm_map_clip_end(map, entry, end);
1461
1462 if (entry->wired_count == 0) {
1463
1464 /*
1465 * Perform actions of vm_map_lookup that need
1466 * the write lock on the map: create a shadow
1467 * object for a copy-on-write region, or an
1468 * object for a zero-fill region.
1469 */
1470 if (entry->needs_copy &&
1471 ((entry->protection & VM_PROT_WRITE((vm_prot_t) 0x02)) != 0)) {
1472
1473 vm_object_shadow(&entry->object.vm_object,
1474 &entry->offset,
1475 (vm_size_t)(entry->vme_endlinks.end
1476 - entry->vme_startlinks.start));
1477 entry->needs_copy = FALSE((boolean_t) 0);
1478 }
1479 if (entry->object.vm_object == VM_OBJECT_NULL((vm_object_t) 0)) {
1480 entry->object.vm_object =
1481 vm_object_allocate(
1482 (vm_size_t)(entry->vme_endlinks.end
1483 - entry->vme_startlinks.start));
1484 entry->offset = (vm_offset_t)0;
1485 }
1486 }
1487 vm_map_clip_start(map, entry, start);
1488 vm_map_clip_end(map, entry, end);
1489
1490 if (user_wire) {
1491 if ((entry->user_wired_count)++ == 0)
1492 entry->wired_count++;
1493 }
1494 else {
1495 entry->wired_count++;
1496 }
1497
1498 /*
1499 * Check for holes and protection mismatch.
1500 * Holes: Next entry should be contiguous unless
1501 * this is the end of the region.
1502 * Protection: Access requested must be allowed.
1503 */
1504 if (((entry->vme_endlinks.end < end) &&
1505 ((entry->vme_nextlinks.next == vm_map_to_entry(map)((struct vm_map_entry *) &(map)->hdr.links)) ||
1506 (entry->vme_nextlinks.next->vme_startlinks.start > entry->vme_endlinks.end))) ||
1507 ((entry->protection & access_type) != access_type)) {
1508 /*
1509 * Found a hole or protection problem.
1510 * Object creation actions
1511 * do not need to be undone, but the
1512 * wired counts need to be restored.
1513 */
1514 while ((entry != vm_map_to_entry(map)((struct vm_map_entry *) &(map)->hdr.links)) &&
1515 (entry->vme_endlinks.end > start)) {
1516 if (user_wire) {
1517 if (--(entry->user_wired_count) == 0)
1518 entry->wired_count--;
1519 }
1520 else {
1521 entry->wired_count--;
1522 }
1523
1524 entry = entry->vme_prevlinks.prev;
1525 }
1526
1527 vm_map_unlock(map)lock_done(&(map)->lock);
1528 return(KERN_FAILURE5);
1529 }
1530 entry = entry->vme_nextlinks.next;
1531 }
1532
1533 /*
1534 * Pass 2.
1535 */
1536
1537 /*
1538 * HACK HACK HACK HACK
1539 *
1540 * If we are wiring in the kernel map or a submap of it,
1541 * unlock the map to avoid deadlocks. We trust that the
1542 * kernel threads are well-behaved, and therefore will
1543 * not do anything destructive to this region of the map
1544 * while we have it unlocked. We cannot trust user threads
1545 * to do the same.
1546 *
1547 * HACK HACK HACK HACK
1548 */
1549 if (vm_map_pmap(map)((map)->pmap) == kernel_pmap) {
1550 vm_map_unlock(map)lock_done(&(map)->lock); /* trust me ... */
1551 }
1552 else {
1553 vm_map_lock_set_recursive(map)lock_set_recursive(&(map)->lock);
1554 vm_map_lock_write_to_read(map)lock_write_to_read(&(map)->lock);
1555 }
1556
1557 entry = start_entry;
1558 while (entry != vm_map_to_entry(map)((struct vm_map_entry *) &(map)->hdr.links) &&
1559 entry->vme_startlinks.start < end) {
1560 /*
1561 * Wiring cases:
1562 * Kernel: wired == 1 && user_wired == 0
1563 * User: wired == 1 && user_wired == 1
1564 *
1565 * Don't need to wire if either is > 1. wired == 0 &&
1566 * user_wired == 1 can't happen.
1567 */
1568
1569 /*
1570 * XXX This assumes that the faults always succeed.
1571 */
1572 if ((entry->wired_count == 1) &&
1573 (entry->user_wired_count <= 1)) {
1574 vm_fault_wire(map, entry);
1575 }
1576 entry = entry->vme_nextlinks.next;
1577 }
1578
1579 if (vm_map_pmap(map)((map)->pmap) == kernel_pmap) {
1580 vm_map_lock(map)({ lock_write(&(map)->lock); (map)->timestamp++; });
1581 }
1582 else {
1583 vm_map_lock_clear_recursive(map)lock_clear_recursive(&(map)->lock);
1584 }
1585 }
1586
1587 vm_map_unlock(map)lock_done(&(map)->lock);
1588
1589 return(KERN_SUCCESS0);
1590}
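
The wiring protocol is easiest to see from the caller's side. Below is a minimal usage sketch, not part of vm_map.c: it assumes only the vm_map_pageable_common signature shown above and the standard VM_PROT_* constants; real callers would normally go through the vm_map_pageable or vm_map_pageable_user wrappers named in the header comment.

/*
 * Hypothetical sketch: wire [start, end) so that read and write
 * accesses cannot fault, then make the range pageable again.
 * An access_type of VM_PROT_NONE requests unwiring, as documented
 * in the header comment of vm_map_pageable_common.
 */
kern_return_t
example_wire_then_unwire(vm_map_t map, vm_offset_t start, vm_offset_t end)
{
	kern_return_t kr;

	kr = vm_map_pageable_common(map, start, end,
				    VM_PROT_READ | VM_PROT_WRITE,
				    FALSE);	/* kernel-style (not user) wiring */
	if (kr != KERN_SUCCESS)
		return kr;

	/* ... the wired range can now be touched without page faults ... */

	return vm_map_pageable_common(map, start, end, VM_PROT_NONE, FALSE);
}
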
1591
1592/*
1593 * vm_map_entry_delete: [ internal use only ]
1594 *
1595 * Deallocate the given entry from the target map.
1596 */
1597void vm_map_entry_delete(map, entry)
1598 vm_map_t map;
1599 vm_map_entry_t entry;
1600{
1601 vm_offset_t s, e;
1602 vm_object_t object;
1603 extern vm_object_t kernel_object;
1604
1605 s = entry->vme_startlinks.start;
1606 e = entry->vme_endlinks.end;
1607
1608 /*Check if projected buffer*/
1609 if (map != kernel_map && entry->projected_on != 0) {
1610 /*Check if projected kernel entry is persistent;
1611 may only manipulate directly if it is*/
1612 if (entry->projected_on->projected_on == 0)
1613 entry->wired_count = 0; /*Avoid unwire fault*/
1614 else
1615 return;
1616 }
1617
1618 /*
1619 * Get the object. Null objects cannot have pmap entries.
1620 */
1621
1622 if ((object = entry->object.vm_object) != VM_OBJECT_NULL((vm_object_t) 0)) {
1623
1624 /*
1625 * Unwire before removing addresses from the pmap;
1626 * otherwise, unwiring will put the entries back in
1627 * the pmap.
1628 */
1629
1630 if (entry->wired_count != 0) {
1631 vm_fault_unwire(map, entry);
1632 entry->wired_count = 0;
1633 entry->user_wired_count = 0;
1634 }
1635
1636 /*
1637 * If the object is shared, we must remove
1638 * *all* references to this data, since we can't
1639 * find all of the physical maps which are sharing
1640 * it.
1641 */
1642
1643 if (object == kernel_object) {
1644 vm_object_lock(object);
1645 vm_object_page_remove(object, entry->offset,
1646 entry->offset + (e - s));
1647 vm_object_unlock(object)((void)(&(object)->Lock));
1648 } else if (entry->is_shared) {
1649 vm_object_pmap_remove(object,
1650 entry->offset,
1651 entry->offset + (e - s));
1652 }
1653 else {
1654 pmap_remove(map->pmap, s, e);
1655 }
1656 }
1657
1658 /*
1659 * Deallocate the object only after removing all
1660 * pmap entries pointing to its pages.
1661 */
1662
1663 if (entry->is_sub_map)
1664 vm_map_deallocate(entry->object.sub_map);
1665 else
1666 vm_object_deallocate(entry->object.vm_object);
1667
1668 vm_map_entry_unlink(map, entry);
1669 map->size -= e - s;
1670
1671 vm_map_entry_dispose(map, entry)_vm_map_entry_dispose(&(map)->hdr, (entry));
1672}
1673
1674/*
1675 * vm_map_delete: [ internal use only ]
1676 *
1677 * Deallocates the given address range from the target
1678 * map.
1679 */
1680
1681kern_return_t vm_map_delete(map, start, end)
1682 vm_map_t map;
1683 vm_offset_t start;
1684 vm_offset_t end;
1685{
1686 vm_map_entry_t entry;
1687 vm_map_entry_t first_entry;
1688
1689 /*
1690 * Find the start of the region, and clip it
1691 */
1692
1693 if (!vm_map_lookup_entry(map, start, &first_entry))
1694 entry = first_entry->vme_nextlinks.next;
1695 else {
1696 entry = first_entry;
1697 vm_map_clip_start(map, entry, start);
1698
1699 /*
1700 * Fix the lookup hint now, rather than each
1701 * time through the loop.
1702 */
1703
1704 SAVE_HINT(map, entry->vme_prev);
1705 }
1706
1707 /*
1708 * Save the free space hint
1709 */
1710
1711 if (map->first_free->vme_startlinks.start >= start)
1712 map->first_free = entry->vme_prevlinks.prev;
1713
1714 /*
1715 * Step through all entries in this region
1716 */
1717
1718 while ((entry != vm_map_to_entry(map)((struct vm_map_entry *) &(map)->hdr.links)) && (entry->vme_startlinks.start < end)) {
1719 vm_map_entry_t next;
1720
1721 vm_map_clip_end(map, entry, end);
1722
1723 /*
1724 * If the entry is in transition, we must wait
1725 * for it to exit that state. It could be clipped
1726 * while we leave the map unlocked.
1727 */
1728 if(entry->in_transition) {
1729 /*
1730 * Say that we are waiting, and wait for entry.
1731 */
1732 entry->needs_wakeup = TRUE((boolean_t) 1);
1733 vm_map_entry_wait(map, FALSE);
1734 vm_map_lock(map)({ lock_write(&(map)->lock); (map)->timestamp++; });
1735
1736 /*
1737 * The entry could have been clipped or it
1738 * may not exist anymore. Look it up again.
1739 */
1740 if(!vm_map_lookup_entry(map, start, &entry)) {
1741 entry = entry->vme_nextlinks.next;
1742 }
1743 continue;
1744 }
1745
1746 next = entry->vme_nextlinks.next;
1747
1748 vm_map_entry_delete(map, entry);
1749 entry = next;
1750 }
1751
1752 if (map->wait_for_space)
1753 thread_wakeup((event_t) map)thread_wakeup_prim(((event_t) map), ((boolean_t) 0), 0);
1754
1755 return(KERN_SUCCESS0);
1756}
1757
1758/*
1759 * vm_map_remove:
1760 *
1761 * Remove the given address range from the target map.
1762 * This is the exported form of vm_map_delete.
1763 */
1764kern_return_t vm_map_remove(map, start, end)
1765 vm_map_t map;
1766 vm_offset_t start;
1767 vm_offset_t end;
1768{
1769 kern_return_t result;
1770
1771 vm_map_lock(map)({ lock_write(&(map)->lock); (map)->timestamp++; });
1772 VM_MAP_RANGE_CHECK(map, start, end);
1773 result = vm_map_delete(map, start, end);
1774 vm_map_unlock(map)lock_done(&(map)->lock);
1775
1776 return(result);
1777}
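
As the exported form, vm_map_remove is the entry point that deallocation paths call. A minimal sketch, assuming only the signature above (the wrapper name is hypothetical):

/*
 * Hypothetical sketch: tear down a previously established mapping.
 * vm_map_remove takes the map lock, clamps the range with
 * VM_MAP_RANGE_CHECK, and deletes every entry inside [addr, addr + size).
 */
kern_return_t
example_unmap(vm_map_t map, vm_offset_t addr, vm_size_t size)
{
	return vm_map_remove(map, addr, addr + size);
}
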
1778
1779
1780/*
1781 * vm_map_copy_steal_pages:
1782 *
1783 * Steal all the pages from a vm_map_copy page_list by copying ones
1784 * that have not already been stolen.
1785 */
1786void
1787vm_map_copy_steal_pages(copy)
1788vm_map_copy_t copy;
1789{
1790 vm_page_t m, new_m;
1791 int i;
1792 vm_object_t object;
1793
1794 for (i = 0; i < copy->cpy_npagesc_u.c_p.npages; i++) {
1795
1796 /*
1797 * If the page is not tabled, then it's already stolen.
1798 */
1799 m = copy->cpy_page_listc_u.c_p.page_list[i];
1800 if (!m->tabled)
1801 continue;
1802
1803 /*
1804 * Page was not stolen, get a new
1805 * one and do the copy now.
1806 */
1807 while ((new_m = vm_page_grab(FALSE((boolean_t) 0))) == VM_PAGE_NULL((vm_page_t) 0)) {
1808 VM_PAGE_WAIT((void(*)()) 0)vm_page_wait((void(*)()) 0);
1809 }
1810
1811 vm_page_copy(m, new_m);
1812
1813 object = m->object;
1814 vm_object_lock(object);
1815 vm_page_lock_queues();
1816 if (!m->active && !m->inactive)
1817 vm_page_activate(m);
1818 vm_page_unlock_queues()((void)(&vm_page_queue_lock));
1819 PAGE_WAKEUP_DONE(m);
1820 vm_object_paging_end(object);
1821 vm_object_unlock(object)((void)(&(object)->Lock));
1822
1823 copy->cpy_page_listc_u.c_p.page_list[i] = new_m;
1824 }
1825}
1826
1827/*
1828 * vm_map_copy_page_discard:
1829 *
1830 * Get rid of the pages in a page_list copy. If the pages are
1831 * stolen, they are freed. If the pages are not stolen, they
1832 * are unbusied, and associated state is cleaned up.
1833 */
1834void vm_map_copy_page_discard(copy)
1835vm_map_copy_t copy;
1836{
1837 while (copy->cpy_npagesc_u.c_p.npages > 0) {
1838 vm_page_t m;
1839
1840 if((m = copy->cpy_page_listc_u.c_p.page_list[--(copy->cpy_npagesc_u.c_p.npages)]) !=
1841 VM_PAGE_NULL((vm_page_t) 0)) {
1842
1843 /*
1844 * If it's not in the table, then it's
1845 * a stolen page that goes back
1846 * to the free list. Else it belongs
1847 * to some object, and we hold a
1848 * paging reference on that object.
1849 */
1850 if (!m->tabled) {
1851 VM_PAGE_FREE(m)({ ; vm_page_free(m); ((void)(&vm_page_queue_lock)); });
1852 }
1853 else {
1854 vm_object_t object;
1855
1856 object = m->object;
1857
1858 vm_object_lock(object);
1859 vm_page_lock_queues();
1860 if (!m->active && !m->inactive)
1861 vm_page_activate(m);
1862 vm_page_unlock_queues()((void)(&vm_page_queue_lock));
1863
1864 PAGE_WAKEUP_DONE(m);
1865 vm_object_paging_end(object);
1866 vm_object_unlock(object)((void)(&(object)->Lock));
1867 }
1868 }
1869 }
1870}
1871
1872/*
1873 * Routine: vm_map_copy_discard
1874 *
1875 * Description:
1876 * Dispose of a map copy object (returned by
1877 * vm_map_copyin).
1878 */
1879void
1880vm_map_copy_discard(copy)
1881 vm_map_copy_t copy;
1882{
1883free_next_copy:
1884 if (copy == VM_MAP_COPY_NULL((vm_map_copy_t) 0))
1885 return;
1886
1887 switch (copy->type) {
1888 case VM_MAP_COPY_ENTRY_LIST1:
1889 while (vm_map_copy_first_entry(copy)((copy)->c_u.hdr.links.next) !=
1890 vm_map_copy_to_entry(copy)((struct vm_map_entry *) &(copy)->c_u.hdr.links)) {
1891 vm_map_entry_t entry = vm_map_copy_first_entry(copy)((copy)->c_u.hdr.links.next);
1892
1893 vm_map_copy_entry_unlink(copy, entry);
1894 vm_object_deallocate(entry->object.vm_object);
1895 vm_map_copy_entry_dispose(copy, entry)_vm_map_entry_dispose(&(copy)->c_u.hdr, (entry));
1896 }
1897 break;
1898 case VM_MAP_COPY_OBJECT2:
1899 vm_object_deallocate(copy->cpy_objectc_u.c_o.object);
1900 break;
1901 case VM_MAP_COPY_PAGE_LIST3:
1902
1903 /*
1904 * To clean this up, we have to unbusy all the pages
1905 * and release the paging references in their objects.
1906 */
1907 if (copy->cpy_npagesc_u.c_p.npages > 0)
1908 vm_map_copy_page_discard(copy);
1909
1910 /*
1911 * If there's a continuation, abort it. The
1912 * abort routine releases any storage.
1913 */
1914 if (vm_map_copy_has_cont(copy)(((copy)->c_u.c_p.cont) != (kern_return_t (*)()) 0)) {
1915
1916 /*
1917 * Special case: recognize
1918 * vm_map_copy_discard_cont and optimize
1919 * here to avoid tail recursion.
1920 */
1921 if (copy->cpy_contc_u.c_p.cont == vm_map_copy_discard_cont) {
1922 vm_map_copy_t new_copy;
1923
1924 new_copy = (vm_map_copy_t) copy->cpy_cont_argsc_u.c_p.cont_args;
1925 kmem_cache_free(&vm_map_copy_cache, (vm_offset_t) copy);
1926 copy = new_copy;
1927 goto free_next_copy;
1928 }
1929 else {
1930 vm_map_copy_abort_cont(copy);
1931 }
1932 }
1933
1934 break;
1935 }
1936 kmem_cache_free(&vm_map_copy_cache, (vm_offset_t) copy);
1937}
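
The free_next_copy label above is a hand-rolled tail call: when the continuation is vm_map_copy_discard_cont, the routine loops over the chained copy objects instead of recursing through the continuation. The stand-alone sketch below shows the same transformation with illustrative types (not the Mach ones):

#include <stdlib.h>

/* Illustrative stand-in for a chain of copy objects. */
struct copy_link {
	struct copy_link *next;
};

/* Tail-recursive shape that the goto avoids: a long chain grows the stack. */
static void
discard_chain_recursive(struct copy_link *c)
{
	struct copy_link *next;

	if (c == NULL)
		return;
	next = c->next;
	free(c);
	discard_chain_recursive(next);
}

/* Iterative shape, equivalent to the free_next_copy loop above. */
static void
discard_chain_iterative(struct copy_link *c)
{
	while (c != NULL) {
		struct copy_link *next = c->next;
		free(c);
		c = next;
	}
}
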
1938
1939/*
1940 * Routine: vm_map_copy_copy
1941 *
1942 * Description:
1943 * Move the information in a map copy object to
1944 * a new map copy object, leaving the old one
1945 * empty.
1946 *
1947 * This is used by kernel routines that need
1948 * to look at out-of-line data (in copyin form)
1949 * before deciding whether to return SUCCESS.
1950 * If the routine returns FAILURE, the original
1951 * copy object will be deallocated; therefore,
1952 * these routines must make a copy of the copy
1953 * object and leave the original empty so that
1954 * deallocation will not fail.
1955 */
1956vm_map_copy_t
1957vm_map_copy_copy(copy)
1958 vm_map_copy_t copy;
1959{
1960 vm_map_copy_t new_copy;
1961
1962 if (copy == VM_MAP_COPY_NULL((vm_map_copy_t) 0))
1963 return VM_MAP_COPY_NULL((vm_map_copy_t) 0);
1964
1965 /*
1966 * Allocate a new copy object, and copy the information
1967 * from the old one into it.
1968 */
1969
1970 new_copy = (vm_map_copy_t) kmem_cache_alloc(&vm_map_copy_cache);
1971 *new_copy = *copy;
1972
1973 if (copy->type == VM_MAP_COPY_ENTRY_LIST1) {
1974 /*
1975 * The links in the entry chain must be
1976 * changed to point to the new copy object.
1977 */
1978 vm_map_copy_first_entry(copy)((copy)->c_u.hdr.links.next)->vme_prevlinks.prev
1979 = vm_map_copy_to_entry(new_copy)((struct vm_map_entry *) &(new_copy)->c_u.hdr.links);
1980 vm_map_copy_last_entry(copy)((copy)->c_u.hdr.links.prev)->vme_nextlinks.next
1981 = vm_map_copy_to_entry(new_copy)((struct vm_map_entry *) &(new_copy)->c_u.hdr.links);
1982 }
1983
1984 /*
1985 * Change the old copy object into one that contains
1986 * nothing to be deallocated.
1987 */
1988 copy->type = VM_MAP_COPY_OBJECT2;
1989 copy->cpy_objectc_u.c_o.object = VM_OBJECT_NULL((vm_object_t) 0);
1990
1991 /*
1992 * Return the new object.
1993 */
1994 return new_copy;
1995}
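
A sketch of the idiom the description refers to, with a hypothetical validation helper; only vm_map_copy_copy and vm_map_copy_discard from this file are assumed:

/*
 * Hypothetical sketch: inspect out-of-line data before committing.
 * The routine copies the copy object first; if it then fails, the
 * caller's vm_map_copy_discard of the (now empty) original is safe.
 */
extern boolean_t example_validate(vm_map_copy_t copy);	/* hypothetical */

kern_return_t
example_consume_copy(vm_map_copy_t copy)
{
	vm_map_copy_t private_copy;

	private_copy = vm_map_copy_copy(copy);	/* original is left empty */

	if (!example_validate(private_copy)) {
		vm_map_copy_discard(private_copy);
		return KERN_FAILURE;	/* caller discards the empty original */
	}

	/* ... use private_copy, e.g. hand it to vm_map_copyout ... */
	return KERN_SUCCESS;
}
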
1996
1997/*
1998 * Routine: vm_map_copy_discard_cont
1999 *
2000 * Description:
2001 * A version of vm_map_copy_discard that can be called
2002 * as a continuation from a vm_map_copy page list.
2003 */
2004kern_return_t vm_map_copy_discard_cont(cont_args, copy_result)
2005vm_map_copyin_args_t cont_args;
2006vm_map_copy_t *copy_result; /* OUT */
2007{
2008 vm_map_copy_discard((vm_map_copy_t) cont_args);
2009 if (copy_result != (vm_map_copy_t *)0)
2010 *copy_result = VM_MAP_COPY_NULL((vm_map_copy_t) 0);
2011 return(KERN_SUCCESS0);
2012}
2013
2014/*
2015 * Routine: vm_map_copy_overwrite
2016 *
2017 * Description:
2018 * Copy the memory described by the map copy
2019 * object (copy; returned by vm_map_copyin) onto
2020 * the specified destination region (dst_map, dst_addr).
2021 * The destination must be writeable.
2022 *
2023 * Unlike vm_map_copyout, this routine actually
2024 * writes over previously-mapped memory. If the
2025 * previous mapping was to a permanent (user-supplied)
2026 * memory object, it is preserved.
2027 *
2028 * The attributes (protection and inheritance) of the
2029 * destination region are preserved.
2030 *
2031 * If successful, consumes the copy object.
2032 * Otherwise, the caller is responsible for it.
2033 *
2034 * Implementation notes:
2035 * To overwrite temporary virtual memory, it is
2036 * sufficient to remove the previous mapping and insert
2037 * the new copy. This replacement is done either on
2038 * the whole region (if no permanent virtual memory
2039 * objects are embedded in the destination region) or
2040 * in individual map entries.
2041 *
2042 * To overwrite permanent virtual memory, it is
2043 * necessary to copy each page, as the external
2044 * memory management interface currently does not
2045 * provide any optimizations.
2046 *
2047 * Once a page of permanent memory has been overwritten,
2048 * it is impossible to interrupt this function; otherwise,
2049 * the call would be neither atomic nor location-independent.
2050 * The kernel-state portion of a user thread must be
2051 * interruptible.
2052 *
2053 * It may be expensive to forward all requests that might
2054 * overwrite permanent memory (vm_write, vm_copy) to
2055 * uninterruptible kernel threads. This routine may be
2056 * called by interruptible threads; however, success is
2057 * not guaranteed -- if the request cannot be performed
2058 * atomically and interruptibly, an error indication is
2059 * returned.
2060 */
2061kern_return_t vm_map_copy_overwrite(dst_map, dst_addr, copy, interruptible)
2062 vm_map_t dst_map;
2063 vm_offset_t dst_addr;
2064 vm_map_copy_t copy;
2065 boolean_t interruptible;
2066{
2067 vm_size_t size;
2068 vm_offset_t start;
2069 vm_map_entry_t tmp_entry;
2070 vm_map_entry_t entry;
2071
2072 boolean_t contains_permanent_objects = FALSE((boolean_t) 0);
2073
2074 interruptible = FALSE((boolean_t) 0); /* XXX */
2075
2076 /*
2077 * Check for null copy object.
2078 */
2079
2080 if (copy == VM_MAP_COPY_NULL((vm_map_copy_t) 0))
2081 return(KERN_SUCCESS0);
2082
2083 /*
2084 * Only works for entry lists at the moment. Will
2085 * support page lists LATER.
2086 */
2087
2088 assert(copy->type == VM_MAP_COPY_ENTRY_LIST);
2089
2090 /*
2091 * Currently this routine only handles page-aligned
2092 * regions. Eventually, it should handle misalignments
2093 * by actually copying pages.
2094 */
2095
2096 if (!page_aligned(copy->offset) ||
2097     !page_aligned(copy->size) ||
2098     !page_aligned(dst_addr))
2099 return(KERN_INVALID_ARGUMENT4);
2100
2101 size = copy->size;
2102
2103 if (size == 0) {
2104 vm_map_copy_discard(copy);
2105 return(KERN_SUCCESS0);
2106 }
2107
2108 /*
2109 * Verify that the destination is all writeable
2110 * initially.
2111 */
2112start_pass_1:
2113 vm_map_lock(dst_map);
2114 if (!vm_map_lookup_entry(dst_map, dst_addr, &tmp_entry)) {
2115 vm_map_unlock(dst_map)lock_done(&(dst_map)->lock);
2116 return(KERN_INVALID_ADDRESS1);
2117 }
2118 vm_map_clip_start(dst_map, tmp_entry, dst_addr);
2119 for (entry = tmp_entry;;) {
2120 vm_size_t sub_size = (entry->vme_endlinks.end - entry->vme_startlinks.start);
2121 vm_map_entry_t next = entry->vme_nextlinks.next;
2122
2123 if ( ! (entry->protection & VM_PROT_WRITE((vm_prot_t) 0x02))) {
2124 vm_map_unlock(dst_map)lock_done(&(dst_map)->lock);
2125 return(KERN_PROTECTION_FAILURE2);
2126 }
2127
2128 /*
2129 * If the entry is in transition, we must wait
2130 * for it to exit that state. Anything could happen
2131 * when we unlock the map, so start over.
2132 */
2133 if (entry->in_transition) {
2134
2135 /*
2136 * Say that we are waiting, and wait for entry.
2137 */
2138 entry->needs_wakeup = TRUE((boolean_t) 1);
2139 vm_map_entry_wait(dst_map, FALSE);
2140
2141 goto start_pass_1;
2142 }
2143
2144 if (size <= sub_size)
2145 break;
2146
2147 if ((next == vm_map_to_entry(dst_map)((struct vm_map_entry *) &(dst_map)->hdr.links)) ||
2148 (next->vme_startlinks.start != entry->vme_endlinks.end)) {
2149 vm_map_unlock(dst_map)lock_done(&(dst_map)->lock);
2150 return(KERN_INVALID_ADDRESS1);
2151 }
2152
2153
2154 /*
2155 * Check for permanent objects in the destination.
2156 */
2157
2158 if ((entry->object.vm_object != VM_OBJECT_NULL((vm_object_t) 0)) &&
2159 !entry->object.vm_object->temporary)
2160 contains_permanent_objects = TRUE((boolean_t) 1);
2161
2162 size -= sub_size;
2163 entry = next;
2164 }
2165
2166 /*
2167 * If there are permanent objects in the destination, then
2168 * the copy cannot be interrupted.
2169 */
2170
2171 if (interruptible && contains_permanent_objects) {
2172 vm_map_unlock(dst_map)lock_done(&(dst_map)->lock);
2173 return(KERN_FAILURE5); /* XXX */
2174 }
2175
2176 /*
2177 * XXXO If there are no permanent objects in the destination,
2178 * XXXO and the source and destination map entry caches match,
2179 * XXXO and the destination map entry is not shared,
2180 * XXXO then the map entries can be deleted and replaced
2181 * XXXO with those from the copy. The following code is the
2182 * XXXO basic idea of what to do, but there are lots of annoying
2183 * XXXO little details about getting protection and inheritance
2184 * XXXO right. Should add protection, inheritance, and sharing checks
2185 * XXXO to the above pass and make sure that no wiring is involved.
2186 */
2187/*
2188 * if (!contains_permanent_objects &&
2189 * copy->cpy_hdr.entries_pageable == dst_map->hdr.entries_pageable) {
2190 *
2191 * *
2192 * * Run over copy and adjust entries. Steal code
2193 * * from vm_map_copyout() to do this.
2194 * *
2195 *
2196 * tmp_entry = tmp_entry->vme_prev;
2197 * vm_map_delete(dst_map, dst_addr, dst_addr + copy->size);
2198 * vm_map_copy_insert(dst_map, tmp_entry, copy);
2199 *
2200 * vm_map_unlock(dst_map);
2201 * vm_map_copy_discard(copy);
2202 * }
2203 */
2204 /*
2205 *
2206 * Make a second pass, overwriting the data
2207 * At the beginning of each loop iteration,
2208 * the next entry to be overwritten is "tmp_entry"
2209 * (initially, the value returned from the lookup above),
2210 * and the starting address expected in that entry
2211 * is "start".
2212 */
2213
2214 start = dst_addr;
2215
2216 while (vm_map_copy_first_entry(copy)((copy)->c_u.hdr.links.next) != vm_map_copy_to_entry(copy)((struct vm_map_entry *) &(copy)->c_u.hdr.links)) {
2217 vm_map_entry_t copy_entry = vm_map_copy_first_entry(copy)((copy)->c_u.hdr.links.next);
2218 vm_size_t copy_size = (copy_entry->vme_endlinks.end - copy_entry->vme_startlinks.start);
2219 vm_object_t object;
2220
2221 entry = tmp_entry;
2222 size = (entry->vme_endlinks.end - entry->vme_startlinks.start);
2223 /*
2224 * Make sure that no holes popped up in the
2225 * address map, and that the protection is
2226 * still valid, in case the map was unlocked
2227 * earlier.
2228 */
2229
2230 if (entry->vme_startlinks.start != start) {
2231 vm_map_unlock(dst_map)lock_done(&(dst_map)->lock);
2232 return(KERN_INVALID_ADDRESS1);
2233 }
2234 assert(entry != vm_map_to_entry(dst_map));
2235
2236 /*
2237 * Check protection again
2238 */
2239
2240 if ( ! (entry->protection & VM_PROT_WRITE((vm_prot_t) 0x02))) {
2241 vm_map_unlock(dst_map)lock_done(&(dst_map)->lock);
2242 return(KERN_PROTECTION_FAILURE2);
2243 }
2244
2245 /*
2246 * Adjust to source size first
2247 */
2248
2249 if (copy_size < size) {
2250 vm_map_clip_end(dst_map, entry, entry->vme_start + copy_size);
2251 size = copy_size;
2252 }
2253
2254 /*
2255 * Adjust to destination size
2256 */
2257
2258 if (size < copy_size) {
2259 vm_map_copy_clip_end(copy, copy_entry,
2260     copy_entry->vme_start + size);
2261 copy_size = size;
2262 }
2263
2264 assert((entry->vme_end - entry->vme_start) == size);
2265 assert((tmp_entry->vme_end - tmp_entry->vme_start) == size);
2266 assert((copy_entry->vme_end - copy_entry->vme_start) == size);
2267
2268 /*
2269 * If the destination contains temporary unshared memory,
2270 * we can perform the copy by throwing it away and
2271 * installing the source data.
2272 */
2273
2274 object = entry->object.vm_object;
2275 if (!entry->is_shared &&
2276 ((object == VM_OBJECT_NULL((vm_object_t) 0)) || object->temporary)) {
2277 vm_object_t old_object = entry->object.vm_object;
2278 vm_offset_t old_offset = entry->offset;
2279
2280 entry->object = copy_entry->object;
2281 entry->offset = copy_entry->offset;
2282 entry->needs_copy = copy_entry->needs_copy;
2283 entry->wired_count = 0;
2284 entry->user_wired_count = 0;
2285
2286 vm_map_copy_entry_unlink(copy, copy_entry);
2287 vm_map_copy_entry_dispose(copy, copy_entry)_vm_map_entry_dispose(&(copy)->c_u.hdr, (copy_entry));
2288
2289 vm_object_pmap_protect(
2290 old_object,
2291 old_offset,
2292 size,
2293 dst_map->pmap,
2294 tmp_entry->vme_startlinks.start,
2295 VM_PROT_NONE((vm_prot_t) 0x00));
2296
2297 vm_object_deallocate(old_object);
2298
2299 /*
2300 * Set up for the next iteration. The map
2301 * has not been unlocked, so the next
2302 * address should be at the end of this
2303 * entry, and the next map entry should be
2304 * the one following it.
2305 */
2306
2307 start = tmp_entry->vme_endlinks.end;
2308 tmp_entry = tmp_entry->vme_nextlinks.next;
2309 } else {
2310 vm_map_version_t version;
2311 vm_object_t dst_object = entry->object.vm_object;
2312 vm_offset_t dst_offset = entry->offset;
2313 kern_return_t r;
2314
2315 /*
2316 * Take an object reference, and record
2317 * the map version information so that the
2318 * map can be safely unlocked.
2319 */
2320
2321 vm_object_reference(dst_object);
2322
2323 version.main_timestamp = dst_map->timestamp;
2324
2325 vm_map_unlock(dst_map)lock_done(&(dst_map)->lock);
2326
2327 /*
2328 * Copy as much as possible in one pass
2329 */
2330
2331 copy_size = size;
2332 r = vm_fault_copy(
2333 copy_entry->object.vm_object,
2334 copy_entry->offset,
2335 &copy_size,
2336 dst_object,
2337 dst_offset,
2338 dst_map,
2339 &version,
2340 FALSE((boolean_t) 0) /* XXX interruptible */ );
2341
2342 /*
2343 * Release the object reference
2344 */
2345
2346 vm_object_deallocate(dst_object);
2347
2348 /*
2349 * If a hard error occurred, return it now
2350 */
2351
2352 if (r != KERN_SUCCESS0)
2353 return(r);
2354
2355 if (copy_size != 0) {
2356 /*
2357 * Dispose of the copied region
2358 */
2359
2360 vm_map_copy_clip_end(copy, copy_entry,
2361     copy_entry->vme_start + copy_size);
2362 vm_map_copy_entry_unlink(copy, copy_entry);
2363 vm_object_deallocate(copy_entry->object.vm_object);
2364 vm_map_copy_entry_dispose(copy, copy_entry)_vm_map_entry_dispose(&(copy)->c_u.hdr, (copy_entry));
2365 }
2366
2367 /*
2368 * Pick up in the destination map where we left off.
2369 *
2370 * Use the version information to avoid a lookup
2371 * in the normal case.
2372 */
2373
2374 start += copy_size;
2375 vm_map_lock(dst_map);
2376 if ((version.main_timestamp + 1) == dst_map->timestamp) {
2377 /* We can safely use saved tmp_entry value */
2378
2379 vm_map_clip_end(dst_map, tmp_entry, start);
2380 tmp_entry = tmp_entry->vme_nextlinks.next;
2381 } else {
2382 /* Must do lookup of tmp_entry */
2383
2384 if (!vm_map_lookup_entry(dst_map, start, &tmp_entry)) {
2385 vm_map_unlock(dst_map)lock_done(&(dst_map)->lock);
2386 return(KERN_INVALID_ADDRESS1);
2387 }
2388 vm_map_clip_start(dst_map, tmp_entry, start);
2389 }
2390 }
2391
2392 }
2393 vm_map_unlock(dst_map)lock_done(&(dst_map)->lock);
2394
2395 /*
2396 * Throw away the vm_map_copy object
2397 */
2398 vm_map_copy_discard(copy);
2399
2400 return(KERN_SUCCESS0);
2401}
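
A sketch of the vm_write-style path the description has in mind, assuming the classic vm_map_copyin(src_map, src_addr, len, src_destroy, &copy) signature defined later in this file; everything else is shown above:

/*
 * Hypothetical sketch: capture a source region and write it over an
 * existing, writable destination mapping.  On failure the copy object
 * is not consumed and must be discarded by the caller.
 */
kern_return_t
example_overwrite(vm_map_t dst_map, vm_offset_t dst_addr,
		  vm_map_t src_map, vm_offset_t src_addr, vm_size_t size)
{
	vm_map_copy_t copy;
	kern_return_t kr;

	kr = vm_map_copyin(src_map, src_addr, size, FALSE, &copy);
	if (kr != KERN_SUCCESS)
		return kr;

	kr = vm_map_copy_overwrite(dst_map, dst_addr, copy, FALSE);
	if (kr != KERN_SUCCESS)
		vm_map_copy_discard(copy);	/* not consumed on failure */

	return kr;
}
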
2402
2403/*
2404 * Macro: vm_map_copy_insert
2405 *
2406 * Description:
2407 * Link a copy chain ("copy") into a map at the
2408 * specified location (after "where").
2409 * Side effects:
2410 * The copy chain is destroyed.
2411 * Warning:
2412 * The arguments are evaluated multiple times.
2413 */
2414#define vm_map_copy_insert(map, where, copy) \
2415 MACRO_BEGIN \
2416 struct rbtree_node *node, *tmp; \
2417 rbtree_for_each_remove(&(copy)->cpy_hdr.tree, node, tmp) \
2418 	rbtree_insert(&(map)->hdr.tree, node, \
2419 		      vm_map_entry_cmp_insert); \
2420 (((where)->vme_next)->vme_prev = vm_map_copy_last_entry(copy)) \
2421 	->vme_next = ((where)->vme_next); \
2422 ((where)->vme_next = vm_map_copy_first_entry(copy)) \
2423 	->vme_prev = (where); \
2424 (map)->hdr.nentries += (copy)->cpy_hdr.nentries; \
2425 kmem_cache_free(&vm_map_copy_cache, (vm_offset_t) copy); \
2426 MACRO_END
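
The multiple-evaluation warning matters because an argument with side effects would be expanded, and therefore evaluated, several times. A stand-alone illustration (the macro and function below are hypothetical, not from this file):

#define BAD_TWICE(x)	((x) + (x))

int
example_multiple_evaluation(void)
{
	int i = 1;

	/*
	 * Expands to ((i++) + (i++)): 'i' is modified twice without an
	 * intervening sequence point, which is undefined behavior.  The
	 * same hazard applies to vm_map_copy_insert's arguments, which
	 * is why callers pass plain lvalues such as 'last'.
	 */
	return BAD_TWICE(i++);
}
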
2427
2428/*
2429 * Routine: vm_map_copyout
2430 *
2431 * Description:
2432 * Copy out a copy chain ("copy") into newly-allocated
2433 * space in the destination map.
2434 *
2435 * If successful, consumes the copy object.
2436 * Otherwise, the caller is responsible for it.
2437 */
2438kern_return_t vm_map_copyout(dst_map, dst_addr, copy)
2439 vm_map_t dst_map;
2440 vm_offset_t *dst_addr; /* OUT */
2441 vm_map_copy_t copy;
2442{
2443 vm_size_t size;
2444 vm_size_t adjustment;
2445 vm_offset_t start;
2446 vm_offset_t vm_copy_start;
2447 vm_map_entry_t last;
2448 vm_map_entry_t entry;
2449
2450 /*
2451 * Check for null copy object.
2452 */
2453
2454 if (copy == VM_MAP_COPY_NULL((vm_map_copy_t) 0)) {
2455 *dst_addr = 0;
2456 return(KERN_SUCCESS0);
2457 }
2458
2459 /*
2460 * Check for special copy object, created
2461 * by vm_map_copyin_object.
2462 */
2463
2464 if (copy->type == VM_MAP_COPY_OBJECT2) {
2465 vm_object_t object = copy->cpy_objectc_u.c_o.object;
2466 vm_size_t offset = copy->offset;
2467 vm_size_t tmp_size = copy->size;
2468 kern_return_t kr;
2469
2470 *dst_addr = 0;
2471 kr = vm_map_enter(dst_map, dst_addr, tmp_size,
2472 (vm_offset_t) 0, TRUE((boolean_t) 1),
2473 object, offset, FALSE((boolean_t) 0),
2474 VM_PROT_DEFAULT(((vm_prot_t) 0x01)|((vm_prot_t) 0x02)), VM_PROT_ALL(((vm_prot_t) 0x01)|((vm_prot_t) 0x02)|((vm_prot_t) 0x04)),
2475 VM_INHERIT_DEFAULT((vm_inherit_t) 1));
2476 if (kr != KERN_SUCCESS0)
2477 return(kr);
2478 kmem_cache_free(&vm_map_copy_cache, (vm_offset_t) copy);
2479 return(KERN_SUCCESS0);
2480 }
2481
2482 if (copy->type == VM_MAP_COPY_PAGE_LIST3)
2483 return(vm_map_copyout_page_list(dst_map, dst_addr, copy));
2484
2485 /*
2486 * Find space for the data
2487 */
2488
2489 vm_copy_start = trunc_page(copy->offset);
2490 size = round_page(copy->offset + copy->size) - vm_copy_start;
2491
2492 StartAgain: ;
2493
2494 vm_map_lock(dst_map);
2495 start = ((last = dst_map->first_free) == vm_map_to_entry(dst_map)((struct vm_map_entry *) &(dst_map)->hdr.links)) ?
2496 vm_map_min(dst_map)((dst_map)->hdr.links.start) : last->vme_endlinks.end;
2497
2498 while (TRUE((boolean_t) 1)) {
2499 vm_map_entry_t next = last->vme_nextlinks.next;
2500 vm_offset_t end = start + size;
2501
2502 if ((end > dst_map->max_offsethdr.links.end) || (end < start)) {
2503 if (dst_map->wait_for_space) {
2504 if (size <= (dst_map->max_offsethdr.links.end - dst_map->min_offsethdr.links.start)) {
2505 assert_wait((event_t) dst_map, TRUE((boolean_t) 1));
2506 vm_map_unlock(dst_map)lock_done(&(dst_map)->lock);
2507 thread_block((void (*)()) 0);
2508 goto StartAgain;
2509 }
2510 }
2511 vm_map_unlock(dst_map)lock_done(&(dst_map)->lock);
2512 printf_once("no more room for vm_map_copyout in %p\n", dst_map);
2513 return(KERN_NO_SPACE3);
2514 }
2515
2516 if ((next == vm_map_to_entry(dst_map)((struct vm_map_entry *) &(dst_map)->hdr.links)) ||
2517 (next->vme_startlinks.start >= end))
2518 break;
2519
2520 last = next;
2521 start = last->vme_endlinks.end;
2522 }
2523
2524 /*
2525 * Since we're going to just drop the map
2526 * entries from the copy into the destination
2527 * map, they must come from the same pool.
2528 */
2529
2530 if (copy->cpy_hdrc_u.hdr.entries_pageable != dst_map->hdr.entries_pageable) {
2531 /*
2532 * Mismatches occur when dealing with the default
2533 * pager.
2534 */
2535 kmem_cache_t old_cache;
2536 vm_map_entry_t next, new;
2537
2538 /*
2539 * Find the cache that the copies were allocated from
2540 */
2541 old_cache = (copy->cpy_hdrc_u.hdr.entries_pageable)
2542 ? &vm_map_entry_cache
2543 : &vm_map_kentry_cache;
2544 entry = vm_map_copy_first_entry(copy)((copy)->c_u.hdr.links.next);
2545
2546 /*
2547 * Reinitialize the copy so that vm_map_copy_entry_link
2548 * will work.
2549 */
2550 copy->cpy_hdrc_u.hdr.nentries = 0;
2551 copy->cpy_hdrc_u.hdr.entries_pageable = dst_map->hdr.entries_pageable;
2552 vm_map_copy_first_entry(copy)((copy)->c_u.hdr.links.next) =
2553 vm_map_copy_last_entry(copy)((copy)->c_u.hdr.links.prev) =
2554 vm_map_copy_to_entry(copy)((struct vm_map_entry *) &(copy)->c_u.hdr.links);
2555
2556 /*
2557 * Copy each entry.
2558 */
2559 while (entry != vm_map_copy_to_entry(copy)((struct vm_map_entry *) &(copy)->c_u.hdr.links)) {
2560 new = vm_map_copy_entry_create(copy)_vm_map_entry_create(&(copy)->c_u.hdr);
2561 vm_map_entry_copy_full(new, entry)(*(new) = *(entry));
2562 vm_map_copy_entry_link(copy,
2563 	vm_map_copy_last_entry(copy),
2564 	new);
2565 next = entry->vme_nextlinks.next;
2566 kmem_cache_free(old_cache, (vm_offset_t) entry);
2567 entry = next;
2568 }
2569 }
2570
2571 /*
2572 * Adjust the addresses in the copy chain, and
2573 * reset the region attributes.
2574 */
2575
2576 adjustment = start - vm_copy_start;
2577 for (entry = vm_map_copy_first_entry(copy)((copy)->c_u.hdr.links.next);
2578 entry != vm_map_copy_to_entry(copy)((struct vm_map_entry *) &(copy)->c_u.hdr.links);
2579 entry = entry->vme_nextlinks.next) {
2580 entry->vme_startlinks.start += adjustment;
2581 entry->vme_endlinks.end += adjustment;
2582
2583 entry->inheritance = VM_INHERIT_DEFAULT((vm_inherit_t) 1);
2584 entry->protection = VM_PROT_DEFAULT(((vm_prot_t) 0x01)|((vm_prot_t) 0x02));
2585 entry->max_protection = VM_PROT_ALL(((vm_prot_t) 0x01)|((vm_prot_t) 0x02)|((vm_prot_t) 0x04));
2586 entry->projected_on = 0;
2587
2588 /*
2589 * If the entry is now wired,
2590 * map the pages into the destination map.
2591 */
2592 if (entry->wired_count != 0) {
2593 vm_offset_t va;
2594 vm_offset_t offset;
2595 vm_object_t object;
2596
2597 object = entry->object.vm_object;
2598 offset = entry->offset;
2599 va = entry->vme_startlinks.start;
2600
2601 pmap_pageable(dst_map->pmap,
2602 entry->vme_startlinks.start,
2603 entry->vme_endlinks.end,
2604 TRUE((boolean_t) 1));
2605
2606 while (va < entry->vme_endlinks.end) {
2607 vm_page_t m;
2608
2609 /*
2610 * Look up the page in the object.
2611 * Assert that the page will be found in the
2612 * top object:
2613 * either
2614 * the object was newly created by
2615 * vm_object_copy_slowly, and has
2616 * copies of all of the pages from
2617 * the source object
2618 * or
2619 * the object was moved from the old
2620 * map entry; because the old map
2621 * entry was wired, all of the pages
2622 * were in the top-level object.
2623 * (XXX not true if we wire pages for
2624 * reading)
2625 */
2626 vm_object_lock(object);
2627 vm_object_paging_begin(object)((object)->paging_in_progress++);
2628
2629 m = vm_page_lookup(object, offset);
2630 if (m == VM_PAGE_NULL((vm_page_t) 0) || m->wire_count == 0 ||
2631 m->absent)
2632 panic("vm_map_copyout: wiring 0x%x", m);
2633
2634 m->busy = TRUE((boolean_t) 1);
2635 vm_object_unlock(object)((void)(&(object)->Lock));
2636
2637 PMAP_ENTER(dst_map->pmap, va, m,
2638 	   entry->protection, TRUE);
2639
2640 vm_object_lock(object);
2641 PAGE_WAKEUP_DONE(m);
2642 /* the page is wired, so we don't have to activate */
2643 vm_object_paging_end(object);
2644 vm_object_unlock(object)((void)(&(object)->Lock));
2645
2646 offset += PAGE_SIZE(1 << 12);
2647 va += PAGE_SIZE(1 << 12);
2648 }
2649 }
2650
2651
2652 }
2653
2654 /*
2655 * Correct the page alignment for the result
2656 */
2657
2658 *dst_addr = start + (copy->offset - vm_copy_start);
2659
2660 /*
2661 * Update the hints and the map size
2662 */
2663
2664 if (dst_map->first_free == last)
2665 dst_map->first_free = vm_map_copy_last_entry(copy)((copy)->c_u.hdr.links.prev);
2666 SAVE_HINT(dst_map, vm_map_copy_last_entry(copy));
2667
2668 dst_map->size += size;
2669
2670 /*
2671 * Link in the copy
2672 */
2673
2674 vm_map_copy_insert(dst_map, last, copy);
2675
2676 vm_map_unlock(dst_map)lock_done(&(dst_map)->lock);
2677
2678 /*
2679 * XXX If wiring_required, call vm_map_pageable
2680 */
2681
2682 return(KERN_SUCCESS0);
2683}
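
A sketch of vm_map_copyout from the caller's side, for example when placing data received out-of-line into a task's map; only the functions shown in this file are assumed:

/*
 * Hypothetical sketch: let the kernel pick an address and map the copy
 * object there.  On success the copy object is consumed and *addr
 * names the new mapping; on failure it still belongs to the caller.
 */
kern_return_t
example_copyout(vm_map_t task_map, vm_map_copy_t copy, vm_offset_t *addr)
{
	kern_return_t kr;

	kr = vm_map_copyout(task_map, addr, copy);
	if (kr != KERN_SUCCESS)
		vm_map_copy_discard(copy);
	return kr;
}
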
2684
2685/*
2686 *
2687 * vm_map_copyout_page_list:
2688 *
2689 * Version of vm_map_copyout() for page list vm map copies.
2690 *
2691 */
2692kern_return_t vm_map_copyout_page_list(dst_map, dst_addr, copy)
2693 vm_map_t dst_map;
2694 vm_offset_t *dst_addr; /* OUT */
2695 vm_map_copy_t copy;
2696{
2697 vm_size_t size;
2698 vm_offset_t start;
2699 vm_offset_t end;
2700 vm_offset_t offset;
2701 vm_map_entry_t last;
2702 vm_object_t object;
2703 vm_page_t *page_list, m;
2704 vm_map_entry_t entry;
2705 vm_offset_t old_last_offset;
2706 boolean_t cont_invoked, needs_wakeup = FALSE((boolean_t) 0);
2707 kern_return_t result = KERN_SUCCESS0;
2708 vm_map_copy_t orig_copy;
2709 vm_offset_t dst_offset;
2710 boolean_t must_wire;
2711
2712 /*
2713 * Make sure the pages are stolen, because we are
2714 * going to put them in a new object. Assume that
2715 * all pages are identical to first in this regard.
2716 */
2717
2718 page_list = &copy->cpy_page_listc_u.c_p.page_list[0];
2719 if ((*page_list)->tabled)
2720 vm_map_copy_steal_pages(copy);
2721
2722 /*
2723 * Find space for the data
2724 */
2725
2726 size = round_page(copy->offset + copy->size) -
2727 	trunc_page(copy->offset);
2728StartAgain:
2729 vm_map_lock(dst_map);
2730 must_wire = dst_map->wiring_required;
2731
2732 last = dst_map->first_free;
2733 if (last == vm_map_to_entry(dst_map)((struct vm_map_entry *) &(dst_map)->hdr.links)) {
2734 start = vm_map_min(dst_map)((dst_map)->hdr.links.start);
2735 } else {
2736 start = last->vme_endlinks.end;
2737 }
2738
2739 while (TRUE((boolean_t) 1)) {
2740 vm_map_entry_t next = last->vme_nextlinks.next;
2741 end = start + size;
2742
2743 if ((end > dst_map->max_offsethdr.links.end) || (end < start)) {
2744 if (dst_map->wait_for_space) {
2745 if (size <= (dst_map->max_offsethdr.links.end -
2746 dst_map->min_offsethdr.links.start)) {
2747 assert_wait((event_t) dst_map, TRUE((boolean_t) 1));
2748 vm_map_unlock(dst_map)lock_done(&(dst_map)->lock);
2749 thread_block((void (*)()) 0);
2750 goto StartAgain;
2751 }
2752 }
2753 vm_map_unlock(dst_map)lock_done(&(dst_map)->lock);
2754 printf_once("no more room for vm_map_copyout_page_list in %p\n", dst_map);
2755 return(KERN_NO_SPACE3);
2756 }
2757
2758 if ((next == vm_map_to_entry(dst_map)((struct vm_map_entry *) &(dst_map)->hdr.links)) ||
2759 (next->vme_startlinks.start >= end)) {
2760 break;
2761 }
2762
2763 last = next;
2764 start = last->vme_endlinks.end;
2765 }
2766
2767 /*
2768 * See whether we can avoid creating a new entry (and object) by
2769 * extending one of our neighbors. [So far, we only attempt to
2770 * extend from below.]
2771 *
2772 * The code path below here is a bit twisted. If any of the
2773 * extension checks fails, we branch to create_object. If
2774 * it all works, we fall out the bottom and goto insert_pages.
2775 */
2776 if (last == vm_map_to_entry(dst_map)((struct vm_map_entry *) &(dst_map)->hdr.links) ||
2777 last->vme_endlinks.end != start ||
2778 last->is_shared != FALSE((boolean_t) 0) ||
2779 last->is_sub_map != FALSE((boolean_t) 0) ||
2780 last->inheritance != VM_INHERIT_DEFAULT((vm_inherit_t) 1) ||
2781 last->protection != VM_PROT_DEFAULT(((vm_prot_t) 0x01)|((vm_prot_t) 0x02)) ||
2782 last->max_protection != VM_PROT_ALL(((vm_prot_t) 0x01)|((vm_prot_t) 0x02)|((vm_prot_t) 0x04)) ||
2783 (must_wire ? (last->wired_count != 1 ||
2784 last->user_wired_count != 1) :
2785 (last->wired_count != 0))) {
2786 goto create_object;
2787 }
2788
2789 /*
2790 * If this entry needs an object, make one.
2791 */
2792 if (last->object.vm_object == VM_OBJECT_NULL((vm_object_t) 0)) {
2793 object = vm_object_allocate(
2794 (vm_size_t)(last->vme_endlinks.end - last->vme_startlinks.start + size));
2795 last->object.vm_object = object;
2796 last->offset = 0;
2797 vm_object_lock(object);
2798 }
2799 else {
2800 vm_offset_t prev_offset = last->offset;
2801 vm_size_t prev_size = start - last->vme_startlinks.start;
2802 vm_size_t new_size;
2803
2804 /*
2805 * This is basically vm_object_coalesce.
2806 */
2807
2808 object = last->object.vm_object;
2809 vm_object_lock(object);
2810
2811 /*
2812 * Try to collapse the object first
2813 */
2814 vm_object_collapse(object);
2815
2816 /*
2817 * Can't coalesce if pages not mapped to
2818 * last may be in use anyway:
2819 * . more than one reference
2820 * . paged out
2821 * . shadows another object
2822 * . has a copy elsewhere
2823 * . paging references (pages might be in page-list)
2824 */
2825
2826 if ((object->ref_count > 1) ||
2827 object->pager_created ||
2828 (object->shadow != VM_OBJECT_NULL((vm_object_t) 0)) ||
2829 (object->copy != VM_OBJECT_NULL((vm_object_t) 0)) ||
2830 (object->paging_in_progress != 0)) {
2831 vm_object_unlock(object)((void)(&(object)->Lock));
2832 goto create_object;
2833 }
2834
2835 /*
2836 * Extend the object if necessary. Don't have to call
2837 * vm_object_page_remove because the pages aren't mapped,
2838 * and vm_page_replace will free up any old ones it encounters.
2839 */
2840 new_size = prev_offset + prev_size + size;
2841 if (new_size > object->size)
2842 object->size = new_size;
2843 }
2844
2845 /*
2846 * Coalesced the two objects - can extend
2847 * the previous map entry to include the
2848 * new range.
2849 */
2850 dst_map->size += size;
2851 last->vme_endlinks.end = end;
2852
2853 SAVE_HINT(dst_map, last);
2854
2855 goto insert_pages;
2856
2857create_object:
2858
2859 /*
2860 * Create object
2861 */
2862 object = vm_object_allocate(size);
2863
2864 /*
2865 * Create entry
2866 */
2867
2868 entry = vm_map_entry_create(dst_map)_vm_map_entry_create(&(dst_map)->hdr);
2869
2870 entry->object.vm_object = object;
2871 entry->offset = 0;
2872
2873 entry->is_shared = FALSE((boolean_t) 0);
2874 entry->is_sub_map = FALSE((boolean_t) 0);
2875 entry->needs_copy = FALSE((boolean_t) 0);
2876
2877 if (must_wire) {
2878 entry->wired_count = 1;
2879 entry->user_wired_count = 1;
2880 } else {
2881 entry->wired_count = 0;
2882 entry->user_wired_count = 0;
2883 }
2884
2885 entry->in_transition = TRUE((boolean_t) 1);
2886 entry->needs_wakeup = FALSE((boolean_t) 0);
2887
2888 entry->vme_startlinks.start = start;
2889 entry->vme_endlinks.end = start + size;
2890
2891 entry->inheritance = VM_INHERIT_DEFAULT((vm_inherit_t) 1);
2892 entry->protection = VM_PROT_DEFAULT(((vm_prot_t) 0x01)|((vm_prot_t) 0x02));
2893 entry->max_protection = VM_PROT_ALL(((vm_prot_t) 0x01)|((vm_prot_t) 0x02)|((vm_prot_t) 0x04));
2894 entry->projected_on = 0;
2895
2896 vm_object_lock(object);
2897
2898 /*
2899 * Update the hints and the map size
2900 */
2901 if (dst_map->first_free == last) {
2902 dst_map->first_free = entry;
2903 }
2904 SAVE_HINT(dst_map, entry);
2905 dst_map->size += size;
2906
2907 /*
2908 * Link in the entry
2909 */
2910 vm_map_entry_link(dst_map, last, entry);
2911 last = entry;
2912
2913 /*
2914 * Transfer pages into new object.
2915 * Scan page list in vm_map_copy.
2916 */
2917insert_pages:
2918 dst_offset = copy->offset & PAGE_MASK((1 << 12)-1);
2919 cont_invoked = FALSE((boolean_t) 0);
2920 orig_copy = copy;
2921 last->in_transition = TRUE((boolean_t) 1);
2922 old_last_offset = last->offset
2923 + (start - last->vme_startlinks.start);
2924
2925 vm_page_lock_queues();
2926
2927 for (offset = 0; offset < size; offset += PAGE_SIZE(1 << 12)) {
2928 m = *page_list;
2929 assert(m && !m->tabled)({ if (!(m && !m->tabled)) Assert("m && !m->tabled"
, "../vm/vm_map.c", 2929); })
;
2930
2931 /*
2932 * Must clear busy bit in page before inserting it.
2933 * Ok to skip wakeup logic because nobody else
2934 * can possibly know about this page.
2935 * The page is dirty in its new object.
2936 */
2937
2938 assert(!m->wanted)({ if (!(!m->wanted)) Assert("!m->wanted", "../vm/vm_map.c"
, 2938); })
;
2939
2940 m->busy = FALSE((boolean_t) 0);
2941 m->dirty = TRUE((boolean_t) 1);
2942 vm_page_replace(m, object, old_last_offset + offset);
2943 if (must_wire) {
2944 vm_page_wire(m);
2945 PMAP_ENTER(dst_map->pmap,({ pmap_enter( (dst_map->pmap), (last->links.start + m->
offset - last->offset), (m)->phys_addr, (last->protection
) & ~(m)->page_lock, (((boolean_t) 1)) ); })
2946 last->vme_start + m->offset - last->offset,({ pmap_enter( (dst_map->pmap), (last->links.start + m->
offset - last->offset), (m)->phys_addr, (last->protection
) & ~(m)->page_lock, (((boolean_t) 1)) ); })
2947 m, last->protection, TRUE)({ pmap_enter( (dst_map->pmap), (last->links.start + m->
offset - last->offset), (m)->phys_addr, (last->protection
) & ~(m)->page_lock, (((boolean_t) 1)) ); })
;
2948 } else {
2949 vm_page_activate(m);
2950 }
2951
2952 *page_list++ = VM_PAGE_NULL((vm_page_t) 0);
2953 if (--(copy->cpy_npagesc_u.c_p.npages) == 0 &&
2954 vm_map_copy_has_cont(copy)(((copy)->c_u.c_p.cont) != (kern_return_t (*)()) 0)) {
2955 vm_map_copy_t new_copy;
2956
2957 /*
2958 * Ok to unlock map because entry is
2959 * marked in_transition.
2960 */
2961 cont_invoked = TRUE((boolean_t) 1);
2962 vm_page_unlock_queues()((void)(&vm_page_queue_lock));
2963 vm_object_unlock(object)((void)(&(object)->Lock));
2964 vm_map_unlock(dst_map)lock_done(&(dst_map)->lock);
2965 vm_map_copy_invoke_cont(copy, &new_copy, &result)({ vm_map_copy_page_discard(copy); *&result = (*((copy)->
c_u.c_p.cont))((copy)->c_u.c_p.cont_args, &new_copy); (
copy)->c_u.c_p.cont = (kern_return_t (*)()) 0; })
;
2966
2967 if (result == KERN_SUCCESS0) {
2968
2969 /*
2970 * If we got back a copy with real pages,
2971 * steal them now. Either all of the
2972 * pages in the list are tabled or none
2973 * of them are; mixtures are not possible.
2974 *
2975 * Save original copy for consume on
2976 * success logic at end of routine.
2977 */
2978 if (copy != orig_copy)
2979 vm_map_copy_discard(copy);
2980
2981 if ((copy = new_copy) != VM_MAP_COPY_NULL((vm_map_copy_t) 0)) {
2982 page_list = &copy->cpy_page_listc_u.c_p.page_list[0];
2983 if ((*page_list)->tabled)
2984 vm_map_copy_steal_pages(copy);
2985 }
2986 }
2987 else {
2988 /*
2989 * Continuation failed.
2990 */
2991 vm_map_lock(dst_map)({ lock_write(&(dst_map)->lock); (dst_map)->timestamp
++; })
;
2992 goto error;
2993 }
2994
2995 vm_map_lock(dst_map)({ lock_write(&(dst_map)->lock); (dst_map)->timestamp
++; })
;
2996 vm_object_lock(object);
2997 vm_page_lock_queues();
2998 }
2999 }
3000
3001 vm_page_unlock_queues()((void)(&vm_page_queue_lock));
3002 vm_object_unlock(object)((void)(&(object)->Lock));
3003
3004 *dst_addr = start + dst_offset;
3005
3006 /*
3007 * Clear the in transition bits. This is easy if we
3008 * didn't have a continuation.
3009 */
3010error:
3011 if (!cont_invoked) {
3012 /*
3013 * We didn't unlock the map, so nobody could
3014 * be waiting.
3015 */
3016 last->in_transition = FALSE((boolean_t) 0);
3017 assert(!last->needs_wakeup)({ if (!(!last->needs_wakeup)) Assert("!last->needs_wakeup"
, "../vm/vm_map.c", 3017); })
;
3018 needs_wakeup = FALSE((boolean_t) 0);
3019 }
3020 else {
3021 if (!vm_map_lookup_entry(dst_map, start, &entry))
3022 panic("vm_map_copyout_page_list: missing entry");
3023
3024 /*
3025 * Clear transition bit for all constituent entries that
3026 * were in the original entry. Also check for waiters.
3027 */
3028 while((entry != vm_map_to_entry(dst_map)((struct vm_map_entry *) &(dst_map)->hdr.links)) &&
3029 (entry->vme_startlinks.start < end)) {
3030 assert(entry->in_transition)({ if (!(entry->in_transition)) Assert("entry->in_transition"
, "../vm/vm_map.c", 3030); })
;
3031 entry->in_transition = FALSE((boolean_t) 0);
3032 if(entry->needs_wakeup) {
3033 entry->needs_wakeup = FALSE((boolean_t) 0);
3034 needs_wakeup = TRUE((boolean_t) 1);
3035 }
3036 entry = entry->vme_nextlinks.next;
3037 }
3038 }
3039
3040 if (result != KERN_SUCCESS0)
3041 vm_map_delete(dst_map, start, end);
3042
3043 vm_map_unlock(dst_map)lock_done(&(dst_map)->lock);
3044
3045 if (needs_wakeup)
3046 vm_map_entry_wakeup(dst_map)thread_wakeup_prim(((event_t)&(dst_map)->hdr), ((boolean_t
) 0), 0)
;
3047
3048 /*
3049 * Consume on success logic.
3050 */
3051 if (copy != orig_copy) {
3052 kmem_cache_free(&vm_map_copy_cache, (vm_offset_t) copy);
3053 }
3054 if (result == KERN_SUCCESS0) {
3055 kmem_cache_free(&vm_map_copy_cache, (vm_offset_t) orig_copy);
3056 }
3057
3058 return(result);
3059}
3060
3061/*
3062 * Routine: vm_map_copyin
3063 *
3064 * Description:
3065 * Copy the specified region (src_addr, len) from the
3066 * source address space (src_map), possibly removing
3067 * the region from the source address space (src_destroy).
3068 *
3069 * Returns:
3070 * A vm_map_copy_t object (copy_result), suitable for
3071 * insertion into another address space (using vm_map_copyout),
3072 * copying over another address space region (using
3073 * vm_map_copy_overwrite). If the copy is unused, it
3074 * should be destroyed (using vm_map_copy_discard).
3075 *
3076 * In/out conditions:
3077 * The source map should not be locked on entry.
3078 */
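For orientation, a typical client pairs this routine with vm_map_copyout, as the description above suggests. The fragment below is only a sketch; src_task, dst_task, addr and size are illustrative names, not identifiers from this file.

	vm_map_copy_t copy;
	vm_offset_t dst_addr;
	kern_return_t kr;

	kr = vm_map_copyin(src_task->map, addr, size, FALSE, &copy);
	if (kr == KERN_SUCCESS) {
		kr = vm_map_copyout(dst_task->map, &dst_addr, copy);
		if (kr != KERN_SUCCESS)
			vm_map_copy_discard(copy);	/* copy was unused */
	}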
3079kern_return_t vm_map_copyin(src_map, src_addr, len, src_destroy, copy_result)
3080 vm_map_t src_map;
3081 vm_offset_t src_addr;
3082 vm_size_t len;
3083 boolean_t src_destroy;
3084 vm_map_copy_t *copy_result; /* OUT */
3085{
3086 vm_map_entry_t tmp_entry; /* Result of last map lookup --
3087 * in multi-level lookup, this
3088 * entry contains the actual
3089 * vm_object/offset.
3090 */
3091
3092 vm_offset_t src_start; /* Start of current entry --
3093 * where copy is taking place now
3094 */
3095 vm_offset_t src_end; /* End of entire region to be
3096 * copied */
3097
3098 vm_map_copy_t copy; /* Resulting copy */
3099
3100 /*
3101 * Check for copies of zero bytes.
3102 */
3103
3104 if (len == 0) {
3105 *copy_result = VM_MAP_COPY_NULL((vm_map_copy_t) 0);
3106 return(KERN_SUCCESS0);
3107 }
3108
3109 /*
3110 * Compute start and end of region
3111 */
3112
3113 src_start = trunc_page(src_addr)((vm_offset_t)(((vm_offset_t)(src_addr)) & ~((1 << 12
)-1)))
;
3114 src_end = round_page(src_addr + len)((vm_offset_t)((((vm_offset_t)(src_addr + len)) + ((1 <<
12)-1)) & ~((1 << 12)-1)))
;
3115
3116 /*
3117 * Check that the end address doesn't overflow
3118 */
3119
3120 if (src_end <= src_start)
3121 if ((src_end < src_start) || (src_start != 0))
3122 return(KERN_INVALID_ADDRESS1);
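	/*
	 *	The nested test above rejects ranges whose rounded end has
	 *	wrapped around the address space, but still admits the one
	 *	legitimate wrap: src_start == 0 with src_end == 0, a region
	 *	that begins in the first page and runs to the very top of
	 *	the address space, where round_page wraps to zero.
	 */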
3123
3124 /*
3125 * Allocate a header element for the list.
3126 *
3127 * Use the start and end in the header to
3128 * remember the endpoints prior to rounding.
3129 */
3130
3131 copy = (vm_map_copy_t) kmem_cache_alloc(&vm_map_copy_cache);
3132 vm_map_copy_first_entry(copy)((copy)->c_u.hdr.links.next) =
3133 vm_map_copy_last_entry(copy)((copy)->c_u.hdr.links.prev) = vm_map_copy_to_entry(copy)((struct vm_map_entry *) &(copy)->c_u.hdr.links);
3134 copy->type = VM_MAP_COPY_ENTRY_LIST1;
3135 copy->cpy_hdrc_u.hdr.nentries = 0;
3136 copy->cpy_hdrc_u.hdr.entries_pageable = TRUE((boolean_t) 1);
3137 rbtree_init(&copy->cpy_hdrc_u.hdr.tree);
3138
3139 copy->offset = src_addr;
3140 copy->size = len;
3141
3142#define RETURN(x) \
3143 MACRO_BEGIN({ \
3144 vm_map_unlock(src_map)lock_done(&(src_map)->lock); \
3145 vm_map_copy_discard(copy); \
3146 MACRO_RETURNif (((boolean_t) 1)) return(x); \
3147 MACRO_END})
3148
3149 /*
3150 * Find the beginning of the region.
3151 */
3152
3153 vm_map_lock(src_map)({ lock_write(&(src_map)->lock); (src_map)->timestamp
++; })
;
3154
3155 if (!vm_map_lookup_entry(src_map, src_start, &tmp_entry))
3156 RETURN(KERN_INVALID_ADDRESS1);
3157 vm_map_clip_start(src_map, tmp_entry, src_start)({ if ((src_start) > (tmp_entry)->links.start) _vm_map_clip_start
(&(src_map)->hdr,(tmp_entry),(src_start)); })
;
3158
3159 /*
3160 * Go through entries until we get to the end.
3161 */
3162
3163 while (TRUE((boolean_t) 1)) {
3164 vm_map_entry_t src_entry = tmp_entry; /* Top-level entry */
3165 vm_size_t src_size; /* Size of source
3166 * map entry (in both
3167 * maps)
3168 */
3169
3170 vm_object_t src_object; /* Object to copy */
3171 vm_offset_t src_offset;
3172
3173 boolean_t src_needs_copy; /* Should source map
3174 * be made read-only
3175 * for copy-on-write?
3176 */
3177
3178 vm_map_entry_t new_entry; /* Map entry for copy */
3179 boolean_t new_entry_needs_copy; /* Will new entry be COW? */
3180
3181 boolean_t was_wired; /* Was source wired? */
3182 vm_map_version_t version; /* Version before locks
3183 * dropped to make copy
3184 */
3185
3186 /*
3187 * Verify that the region can be read.
3188 */
3189
3190 if (! (src_entry->protection & VM_PROT_READ((vm_prot_t) 0x01)))
3191 RETURN(KERN_PROTECTION_FAILURE2);
3192
3193 /*
3194 * Clip against the endpoints of the entire region.
3195 */
3196
3197 vm_map_clip_end(src_map, src_entry, src_end)({ if ((src_end) < (src_entry)->links.end) _vm_map_clip_end
(&(src_map)->hdr,(src_entry),(src_end)); })
;
3198
3199 src_size = src_entry->vme_endlinks.end - src_start;
3200 src_object = src_entry->object.vm_object;
3201 src_offset = src_entry->offset;
3202 was_wired = (src_entry->wired_count != 0);
3203
3204 /*
3205 * Create a new address map entry to
3206 * hold the result. Fill in the fields from
3207 * the appropriate source entries.
3208 */
3209
3210 new_entry = vm_map_copy_entry_create(copy)_vm_map_entry_create(&(copy)->c_u.hdr);
3211 vm_map_entry_copy(new_entry, src_entry)({ *(new_entry) = *(src_entry); (new_entry)->is_shared = (
(boolean_t) 0); (new_entry)->needs_wakeup = ((boolean_t) 0
); (new_entry)->in_transition = ((boolean_t) 0); (new_entry
)->wired_count = 0; (new_entry)->user_wired_count = 0; }
)
;
3212
3213 /*
3214 * Attempt non-blocking copy-on-write optimizations.
3215 */
3216
3217 if (src_destroy &&
3218 (src_object == VM_OBJECT_NULL((vm_object_t) 0) ||
3219 (src_object->temporary && !src_object->use_shared_copy)))
3220 {
3221 /*
3222 * If we are destroying the source, and the object
3223 * is temporary, and not shared writable,
3224 * we can move the object reference
3225 * from the source to the copy. The copy is
3226 * copy-on-write only if the source is.
3227 * We make another reference to the object, because
3228 * destroying the source entry will deallocate it.
3229 */
3230 vm_object_reference(src_object);
3231
3232 /*
3233 * Copy is always unwired; vm_map_entry_copy above has
3234 * already set its wired count to zero.
3235 */
3236
3237 goto CopySuccessful;
3238 }
3239
3240 if (!was_wired &&
3241 vm_object_copy_temporary(
3242 &new_entry->object.vm_object,
3243 &new_entry->offset,
3244 &src_needs_copy,
3245 &new_entry_needs_copy)) {
3246
3247 new_entry->needs_copy = new_entry_needs_copy;
3248
3249 /*
3250 * Handle copy-on-write obligations
3251 */
3252
3253 if (src_needs_copy && !tmp_entry->needs_copy) {
3254 vm_object_pmap_protect(
3255 src_object,
3256 src_offset,
3257 src_size,
3258 (src_entry->is_shared ? PMAP_NULL((pmap_t) 0)
3259 : src_map->pmap),
3260 src_entry->vme_startlinks.start,
3261 src_entry->protection &
3262 ~VM_PROT_WRITE((vm_prot_t) 0x02));
3263
3264 tmp_entry->needs_copy = TRUE((boolean_t) 1);
3265 }
3266
3267 /*
3268 * The map has never been unlocked, so it's safe to
3269 * move to the next entry rather than doing another
3270 * lookup.
3271 */
3272
3273 goto CopySuccessful;
3274 }
3275
3276 new_entry->needs_copy = FALSE((boolean_t) 0);
3277
3278 /*
3279 * Take an object reference, so that we may
3280 * release the map lock(s).
3281 */
3282
3283 assert(src_object != VM_OBJECT_NULL)({ if (!(src_object != ((vm_object_t) 0))) Assert("src_object != VM_OBJECT_NULL"
, "../vm/vm_map.c", 3283); })
;
3284 vm_object_reference(src_object);
3285
3286 /*
3287 * Record the timestamp for later verification.
3288 * Unlock the map.
3289 */
3290
3291 version.main_timestamp = src_map->timestamp;
3292 vm_map_unlock(src_map)lock_done(&(src_map)->lock);
3293
3294 /*
3295 * Perform the copy
3296 */
3297
3298 if (was_wired) {
3299 vm_object_lock(src_object);
3300 (void) vm_object_copy_slowly(
3301 src_object,
3302 src_offset,
3303 src_size,
3304 FALSE((boolean_t) 0),
3305 &new_entry->object.vm_object);
3306 new_entry->offset = 0;
3307 new_entry->needs_copy = FALSE((boolean_t) 0);
3308 } else {
3309 kern_return_t result;
3310
3311 result = vm_object_copy_strategically(src_object,
3312 src_offset,
3313 src_size,
3314 &new_entry->object.vm_object,
3315 &new_entry->offset,
3316 &new_entry_needs_copy);
3317
3318 new_entry->needs_copy = new_entry_needs_copy;
3319
3320
3321 if (result != KERN_SUCCESS0) {
3322 vm_map_copy_entry_dispose(copy, new_entry)_vm_map_entry_dispose(&(copy)->c_u.hdr, (new_entry));
3323
3324 vm_map_lock(src_map)({ lock_write(&(src_map)->lock); (src_map)->timestamp
++; })
;
3325 RETURN(result);
3326 }
3327
3328 }
3329
3330 /*
3331 * Throw away the extra reference
3332 */
3333
3334 vm_object_deallocate(src_object);
3335
3336 /*
3337 * Verify that the map has not substantially
3338 * changed while the copy was being made.
3339 */
3340
3341 vm_map_lock(src_map)({ lock_write(&(src_map)->lock); (src_map)->timestamp
++; })
; /* Increments timestamp once! */
3342
3343 if ((version.main_timestamp + 1) == src_map->timestamp)
3344 goto CopySuccessful;
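	/*
	 *	vm_map_lock bumps the timestamp exactly once, so equality
	 *	with main_timestamp + 1 means no other thread write-locked
	 *	src_map while it was unlocked for the copy above, and the
	 *	re-lookup below can be skipped.
	 */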
3345
3346 /*
3347 * Simple version comparison failed.
3348 *
3349 * Retry the lookup and verify that the
3350 * same object/offset are still present.
3351 *
3352 * [Note: a memory manager that colludes with
3353 * the calling task can detect that we have
3354 * cheated. While the map was unlocked, the
3355 * mapping could have been changed and restored.]
3356 */
3357
3358 if (!vm_map_lookup_entry(src_map, src_start, &tmp_entry)) {
3359 vm_map_copy_entry_dispose(copy, new_entry)_vm_map_entry_dispose(&(copy)->c_u.hdr, (new_entry));
3360 RETURN(KERN_INVALID_ADDRESS1);
3361 }
3362
3363 src_entry = tmp_entry;
3364 vm_map_clip_start(src_map, src_entry, src_start)({ if ((src_start) > (src_entry)->links.start) _vm_map_clip_start
(&(src_map)->hdr,(src_entry),(src_start)); })
;
3365
3366 if ((src_entry->protection & VM_PROT_READ((vm_prot_t) 0x01)) == VM_PROT_NONE((vm_prot_t) 0x00))
3367 goto VerificationFailed;
3368
3369 if (src_entry->vme_endlinks.end < new_entry->vme_endlinks.end)
3370 src_size = (new_entry->vme_endlinks.end = src_entry->vme_endlinks.end) - src_start;
Value stored to 'src_size' is never read
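This dead store is harmless: src_size is only read earlier in the iteration (by vm_object_pmap_protect and the two object-copy calls), and the next pass of the loop recomputes it from the re-clipped entry at line 3199. A minimal sketch of a cleanup, assuming no later reader of src_size is ever added, is to clip the copy entry without updating the local (shown with the pre-expansion field name vme_end):

		if (src_entry->vme_end < new_entry->vme_end)
			new_entry->vme_end = src_entry->vme_end;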
3371
3372 if ((src_entry->object.vm_object != src_object) ||
3373 (src_entry->offset != src_offset) ) {
3374
3375 /*
3376 * Verification failed.
3377 *
3378 * Start over with this top-level entry.
3379 */
3380
3381 VerificationFailed: ;
3382
3383 vm_object_deallocate(new_entry->object.vm_object);
3384 vm_map_copy_entry_dispose(copy, new_entry)_vm_map_entry_dispose(&(copy)->c_u.hdr, (new_entry));
3385 tmp_entry = src_entry;
3386 continue;
3387 }
3388
3389 /*
3390 * Verification succeeded.
3391 */
3392
3393 CopySuccessful: ;
3394
3395 /*
3396 * Link in the new copy entry.
3397 */
3398
3399 vm_map_copy_entry_link(copy, vm_map_copy_last_entry(copy),({ (&(copy)->c_u.hdr)->nentries++; (new_entry)->
links.prev = (((copy)->c_u.hdr.links.prev)); (new_entry)->
links.next = (((copy)->c_u.hdr.links.prev))->links.next
; (new_entry)->links.prev->links.next = (new_entry)->
links.next->links.prev = (new_entry); ({ struct rbtree_node
*___cur, *___prev; int ___diff, ___index; ___prev = ((void *
) 0); ___index = -1; ___cur = (&(&(copy)->c_u.hdr)
->tree)->root; while (___cur != ((void *) 0)) { ___diff
= vm_map_entry_cmp_insert(&(new_entry)->tree_node, ___cur
); ({ if (!(___diff != 0)) Assert("___diff != 0", "../vm/vm_map.c"
, 3400); }); ___prev = ___cur; ___index = rbtree_d2i(___diff)
; ___cur = ___cur->children[___index]; } rbtree_insert_rebalance
(&(&(copy)->c_u.hdr)->tree, ___prev, ___index, &
(new_entry)->tree_node); }); })
3400 new_entry)({ (&(copy)->c_u.hdr)->nentries++; (new_entry)->
links.prev = (((copy)->c_u.hdr.links.prev)); (new_entry)->
links.next = (((copy)->c_u.hdr.links.prev))->links.next
; (new_entry)->links.prev->links.next = (new_entry)->
links.next->links.prev = (new_entry); ({ struct rbtree_node
*___cur, *___prev; int ___diff, ___index; ___prev = ((void *
) 0); ___index = -1; ___cur = (&(&(copy)->c_u.hdr)
->tree)->root; while (___cur != ((void *) 0)) { ___diff
= vm_map_entry_cmp_insert(&(new_entry)->tree_node, ___cur
); ({ if (!(___diff != 0)) Assert("___diff != 0", "../vm/vm_map.c"
, 3400); }); ___prev = ___cur; ___index = rbtree_d2i(___diff)
; ___cur = ___cur->children[___index]; } rbtree_insert_rebalance
(&(&(copy)->c_u.hdr)->tree, ___prev, ___index, &
(new_entry)->tree_node); }); })
;
3401
3402 /*
3403 * Determine whether the entire region
3404 * has been copied.
3405 */
3406 src_start = new_entry->vme_endlinks.end;
3407 if ((src_start >= src_end) && (src_end != 0))
3408 break;
3409
3410 /*
3411 * Verify that there are no gaps in the region
3412 */
3413
3414 tmp_entry = src_entry->vme_nextlinks.next;
3415 if (tmp_entry->vme_startlinks.start != src_start)
3416 RETURN(KERN_INVALID_ADDRESS1);
3417 }
3418
3419 /*
3420 * If the source should be destroyed, do it now, since the
3421 * copy was successful.
3422 */
3423 if (src_destroy)
3424 (void) vm_map_delete(src_map, trunc_page(src_addr)((vm_offset_t)(((vm_offset_t)(src_addr)) & ~((1 << 12
)-1)))
, src_end);
3425
3426 vm_map_unlock(src_map)lock_done(&(src_map)->lock);
3427
3428 *copy_result = copy;
3429 return(KERN_SUCCESS0);
3430
3431#undef RETURN
3432}
3433
3434/*
3435 * vm_map_copyin_object:
3436 *
3437 * Create a copy object from an object.
3438 * Our caller donates an object reference.
3439 */
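A hypothetical use, following the comment above: wrap a freshly allocated object, whose reference is donated, so that it can later be handed to vm_map_copyout. The size variable is illustrative.

	vm_object_t obj = vm_object_allocate(size);
	vm_map_copy_t copy;

	(void) vm_map_copyin_object(obj, 0, size, &copy);	/* always KERN_SUCCESS per the body below */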
3440
3441kern_return_t vm_map_copyin_object(object, offset, size, copy_result)
3442 vm_object_t object;
3443 vm_offset_t offset; /* offset of region in object */
3444 vm_size_t size; /* size of region in object */
3445 vm_map_copy_t *copy_result; /* OUT */
3446{
3447 vm_map_copy_t copy; /* Resulting copy */
3448
3449 /*
3450 * We drop the object into a special copy object
3451 * that contains the object directly. These copy objects
3452 * are distinguished by entries_pageable == FALSE
3453 * and null links.
3454 */
3455
3456 copy = (vm_map_copy_t) kmem_cache_alloc(&vm_map_copy_cache);
3457 vm_map_copy_first_entry(copy)((copy)->c_u.hdr.links.next) =
3458 vm_map_copy_last_entry(copy)((copy)->c_u.hdr.links.prev) = VM_MAP_ENTRY_NULL((vm_map_entry_t) 0);
3459 copy->type = VM_MAP_COPY_OBJECT2;
3460 copy->cpy_objectc_u.c_o.object = object;
3461 copy->offset = offset;
3462 copy->size = size;
3463
3464 *copy_result = copy;
3465 return(KERN_SUCCESS0);
3466}
3467
3468/*
3469 * vm_map_copyin_page_list_cont:
3470 *
3471 * Continuation routine for vm_map_copyin_page_list.
3472 *
3473 * If vm_map_copyin_page_list can't fit the entire vm range
3474 * into a single page list object, it creates a continuation.
3475 * When the target of the operation has used the pages in the
3476 * initial page list, it invokes the continuation, which calls
3477 * this routine. If an error happens, the continuation is aborted
3478 * (abort arg to this routine is TRUE). To avoid deadlocks, the
3479 * pages are discarded from the initial page list before invoking
3480 * the continuation.
3481 *
3482 * NOTE: This is not the same sort of continuation used by
3483 * the scheduler.
3484 */
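As a rough illustration of the protocol described above, a recipient might drain a page-list copy and its chain of continuations as sketched below. consume_pages is a hypothetical stand-in for whatever the recipient does with the busy pages; a real consumer (such as vm_map_copyout_page_list earlier in this file) also has to juggle map and object locks and discard superseded copy objects.

	kern_return_t kr = KERN_SUCCESS;
	vm_map_copy_t next;

	while (copy != VM_MAP_COPY_NULL) {
		consume_pages(copy);			/* hypothetical */
		if (!vm_map_copy_has_cont(copy))
			break;
		vm_map_copy_invoke_cont(copy, &next, &kr);
		if (kr != KERN_SUCCESS)
			break;
		copy = next;
	}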
3485
3486kern_return_t vm_map_copyin_page_list_cont(cont_args, copy_result)
3487vm_map_copyin_args_t cont_args;
3488vm_map_copy_t *copy_result; /* OUT */
3489{
3490 kern_return_t result = 0; /* '=0' to quiet gcc warnings */
3491 boolean_t do_abort, src_destroy, src_destroy_only;
3492
3493 /*
3494 * Check for cases that only require memory destruction.
3495 */
3496 do_abort = (copy_result == (vm_map_copy_t *) 0);
3497 src_destroy = (cont_args->destroy_len != (vm_size_t) 0);
3498 src_destroy_only = (cont_args->src_len == (vm_size_t) 0);
3499
3500 if (do_abort || src_destroy_only) {
3501 if (src_destroy)
3502 result = vm_map_remove(cont_args->map,
3503 cont_args->destroy_addr,
3504 cont_args->destroy_addr + cont_args->destroy_len);
3505 if (!do_abort)
3506 *copy_result = VM_MAP_COPY_NULL((vm_map_copy_t) 0);
3507 }
3508 else {
3509 result = vm_map_copyin_page_list(cont_args->map,
3510 cont_args->src_addr, cont_args->src_len, src_destroy,
3511 cont_args->steal_pages, copy_result, TRUE((boolean_t) 1));
3512
3513 if (src_destroy && !cont_args->steal_pages &&
3514 vm_map_copy_has_cont(*copy_result)(((*copy_result)->c_u.c_p.cont) != (kern_return_t (*)()) 0
)
) {
3515 vm_map_copyin_args_t new_args;
3516 /*
3517 * Transfer old destroy info.
3518 */
3519 new_args = (vm_map_copyin_args_t)
3520 (*copy_result)->cpy_cont_argsc_u.c_p.cont_args;
3521 new_args->destroy_addr = cont_args->destroy_addr;
3522 new_args->destroy_len = cont_args->destroy_len;
3523 }
3524 }
3525
3526 vm_map_deallocate(cont_args->map);
3527 kfree((vm_offset_t)cont_args, sizeof(vm_map_copyin_args_data_t));
3528
3529 return(result);
3530}
3531
3532/*
3533 * vm_map_copyin_page_list:
3534 *
3535 * This is a variant of vm_map_copyin that copies in a list of pages.
3536 * If steal_pages is TRUE, the pages are only in the returned list.
3537 * If steal_pages is FALSE, the pages are busy and still in their
3538 * objects. A continuation may be returned if not all the pages fit:
3539 * the recipient of this copy_result must be prepared to deal with it.
3540 */
3541
3542kern_return_t vm_map_copyin_page_list(src_map, src_addr, len, src_destroy,
3543 steal_pages, copy_result, is_cont)
3544 vm_map_t src_map;
3545 vm_offset_t src_addr;
3546 vm_size_t len;
3547 boolean_t src_destroy;
3548 boolean_t steal_pages;
3549 vm_map_copy_t *copy_result; /* OUT */
3550 boolean_t is_cont;
3551{
3552 vm_map_entry_t src_entry;
3553 vm_page_t m;
3554 vm_offset_t src_start;
3555 vm_offset_t src_end;
3556 vm_size_t src_size;
3557 vm_object_t src_object;
3558 vm_offset_t src_offset;
3559 vm_offset_t src_last_offset;
3560 vm_map_copy_t copy; /* Resulting copy */
3561 kern_return_t result = KERN_SUCCESS0;
3562 boolean_t need_map_lookup;
3563 vm_map_copyin_args_t cont_args;
3564
3565 /*
3566 * If steal_pages is FALSE, this leaves busy pages in
3567 * the object. A continuation must be used if src_destroy
3568 * is true in this case (!steal_pages && src_destroy).
3569 *
3570 * XXX Still have a more general problem of what happens
3571 * XXX if the same page occurs twice in a list. Deadlock
3572 * XXX can happen if vm_fault_page was called. A
3573 * XXX possible solution is to use a continuation if vm_fault_page
3574 * XXX is called and we cross a map entry boundary.
3575 */
3576
3577 /*
3578 * Check for copies of zero bytes.
3579 */
3580
3581 if (len == 0) {
3582 *copy_result = VM_MAP_COPY_NULL((vm_map_copy_t) 0);
3583 return(KERN_SUCCESS0);
3584 }
3585
3586 /*
3587 * Compute start and end of region
3588 */
3589
3590 src_start = trunc_page(src_addr)((vm_offset_t)(((vm_offset_t)(src_addr)) & ~((1 << 12
)-1)))
;
3591 src_end = round_page(src_addr + len)((vm_offset_t)((((vm_offset_t)(src_addr + len)) + ((1 <<
12)-1)) & ~((1 << 12)-1)))
;
3592
3593 /*
3594 * Check that the end address doesn't overflow
3595 */
3596
3597 if (src_end <= src_start && (src_end < src_start || src_start != 0)) {
3598 return KERN_INVALID_ADDRESS1;
3599 }
3600
3601 /*
3602 * Allocate a header element for the page list.
3603 *
3604 * Record original offset and size, as caller may not
3605 * be page-aligned.
3606 */
3607
3608 copy = (vm_map_copy_t) kmem_cache_alloc(&vm_map_copy_cache);
3609 copy->type = VM_MAP_COPY_PAGE_LIST3;
3610 copy->cpy_npagesc_u.c_p.npages = 0;
3611 copy->offset = src_addr;
3612 copy->size = len;
3613 copy->cpy_contc_u.c_p.cont = ((kern_return_t (*)()) 0);
3614 copy->cpy_cont_argsc_u.c_p.cont_args = (char *) VM_MAP_COPYIN_ARGS_NULL((vm_map_copyin_args_t) 0);
3615
3616 /*
3617 * Find the beginning of the region.
3618 */
3619
3620do_map_lookup:
3621
3622 vm_map_lock(src_map)({ lock_write(&(src_map)->lock); (src_map)->timestamp
++; })
;
3623
3624 if (!vm_map_lookup_entry(src_map, src_start, &src_entry)) {
3625 result = KERN_INVALID_ADDRESS1;
3626 goto error;
3627 }
3628 need_map_lookup = FALSE((boolean_t) 0);
3629
3630 /*
3631 * Go through entries until we get to the end.
3632 */
3633
3634 while (TRUE((boolean_t) 1)) {
3635
3636 if (! (src_entry->protection & VM_PROT_READ((vm_prot_t) 0x01))) {
3637 result = KERN_PROTECTION_FAILURE2;
3638 goto error;
3639 }
3640
3641 if (src_end > src_entry->vme_endlinks.end)
3642 src_size = src_entry->vme_endlinks.end - src_start;
3643 else
3644 src_size = src_end - src_start;
3645
3646 src_object = src_entry->object.vm_object;
3647 src_offset = src_entry->offset +
3648 (src_start - src_entry->vme_startlinks.start);
3649
3650 /*
3651 * If src_object is NULL, allocate it now;
3652 * we're going to fault on it shortly.
3653 */
3654 if (src_object == VM_OBJECT_NULL((vm_object_t) 0)) {
3655 src_object = vm_object_allocate((vm_size_t)
3656 src_entry->vme_endlinks.end -
3657 src_entry->vme_startlinks.start);
3658 src_entry->object.vm_object = src_object;
3659 }
3660
3661 /*
3662 * Iterate over pages. Fault in ones that aren't present.
3663 */
3664 src_last_offset = src_offset + src_size;
3665 for (; (src_offset < src_last_offset && !need_map_lookup);
3666 src_offset += PAGE_SIZE(1 << 12), src_start += PAGE_SIZE(1 << 12)) {
3667
3668 if (copy->cpy_npagesc_u.c_p.npages == VM_MAP_COPY_PAGE_LIST_MAX64) {
3669make_continuation:
3670 /*
3671 * At this point we have the max number of
3672 * pages busy for this thread that we're
3673 * willing to allow. Stop here and record
3674 * arguments for the remainder. Note:
3675 * this means that this routine isn't atomic,
3676 * but that's the breaks. Note that only
3677 * the first vm_map_copy_t that comes back
3678 * from this routine has the right offset
3679 * and size; those from continuations are
3680 * page rounded, and short by the amount
3681 * already done.
3682 *
3683 * Reset src_end so the src_destroy
3684 * code at the bottom doesn't do
3685 * something stupid.
3686 */
3687
3688 cont_args = (vm_map_copyin_args_t)
3689 kalloc(sizeof(vm_map_copyin_args_data_t));
3690 cont_args->map = src_map;
3691 vm_map_reference(src_map);
3692 cont_args->src_addr = src_start;
3693 cont_args->src_len = len - (src_start - src_addr);
3694 if (src_destroy) {
3695 cont_args->destroy_addr = cont_args->src_addr;
3696 cont_args->destroy_len = cont_args->src_len;
3697 }
3698 else {
3699 cont_args->destroy_addr = (vm_offset_t) 0;
3700 cont_args->destroy_len = (vm_offset_t) 0;
3701 }
3702 cont_args->steal_pages = steal_pages;
3703
3704 copy->cpy_cont_argsc_u.c_p.cont_args = (char *) cont_args;
3705 copy->cpy_contc_u.c_p.cont = vm_map_copyin_page_list_cont;
3706
3707 src_end = src_start;
3708 vm_map_clip_end(src_map, src_entry, src_end)({ if ((src_end) < (src_entry)->links.end) _vm_map_clip_end
(&(src_map)->hdr,(src_entry),(src_end)); })
;
3709 break;
3710 }
3711
3712 /*
3713 * Try to find the page of data.
3714 */
3715 vm_object_lock(src_object);
3716 vm_object_paging_begin(src_object)((src_object)->paging_in_progress++);
3717 if (((m = vm_page_lookup(src_object, src_offset)) !=
3718 VM_PAGE_NULL((vm_page_t) 0)) && !m->busy && !m->fictitious &&
3719 !m->absent && !m->error) {
3720
3721 /*
3722 * This is the page. Mark it busy
3723 * and keep the paging reference on
3724 * the object whilst we do our thing.
3725 */
3726 m->busy = TRUE((boolean_t) 1);
3727
3728 /*
3729 * Also write-protect the page, so
3730 * that the map`s owner cannot change
3731 * the data. The busy bit will prevent
3732 * faults on the page from succeeding
3733 * until the copy is released; after
3734 * that, the page can be re-entered
3735 * as writable, since we didn`t alter
3736 * the map entry. This scheme is a
3737 * cheap copy-on-write.
3738 *
3739 * Don`t forget the protection and
3740 * the page_lock value!
3741 *
3742 * If the source is being destroyed
3743 * AND not shared writable, we don`t
3744 * have to protect the page, since
3745 * we will destroy the (only)
3746 * writable mapping later.
3747 */
3748 if (!src_destroy ||
3749 src_object->use_shared_copy)
3750 {
3751 pmap_page_protect(m->phys_addr,
3752 src_entry->protection
3753 & ~m->page_lock
3754 & ~VM_PROT_WRITE((vm_prot_t) 0x02));
3755 }
3756
3757 }
3758 else {
3759 vm_prot_t result_prot;
3760 vm_page_t top_page;
3761 kern_return_t kr;
3762
3763 /*
3764 * Have to fault the page in; must
3765 * unlock the map to do so. While
3766 * the map is unlocked, anything
3767 * can happen, we must lookup the
3768 * map entry before continuing.
3769 */
3770 vm_map_unlock(src_map)lock_done(&(src_map)->lock);
3771 need_map_lookup = TRUE((boolean_t) 1);
3772retry:
3773 result_prot = VM_PROT_READ((vm_prot_t) 0x01);
3774
3775 kr = vm_fault_page(src_object, src_offset,
3776 VM_PROT_READ((vm_prot_t) 0x01), FALSE((boolean_t) 0), FALSE((boolean_t) 0),
3777 &result_prot, &m, &top_page,
3778 FALSE((boolean_t) 0), (void (*)()) 0);
3779 /*
3780 * Cope with what happened.
3781 */
3782 switch (kr) {
3783 case VM_FAULT_SUCCESS0:
3784 break;
3785 case VM_FAULT_INTERRUPTED2: /* ??? */
3786 case VM_FAULT_RETRY1:
3787 vm_object_lock(src_object);
3788 vm_object_paging_begin(src_object)((src_object)->paging_in_progress++);
3789 goto retry;
3790 case VM_FAULT_MEMORY_SHORTAGE3:
3791 VM_PAGE_WAIT((void (*)()) 0)vm_page_wait((void (*)()) 0);
3792 vm_object_lock(src_object);
3793 vm_object_paging_begin(src_object)((src_object)->paging_in_progress++);
3794 goto retry;
3795 case VM_FAULT_FICTITIOUS_SHORTAGE4:
3796 vm_page_more_fictitious();
3797 vm_object_lock(src_object);
3798 vm_object_paging_begin(src_object)((src_object)->paging_in_progress++);
3799 goto retry;
3800 case VM_FAULT_MEMORY_ERROR5:
3801 /*
3802 * Something broke. If this
3803 * is a continuation, return
3804 * a partial result if possible,
3805 * else fail the whole thing.
3806 * In the continuation case, the
3807 * next continuation call will
3808 * get this error if it persists.
3809 */
3810 vm_map_lock(src_map)({ lock_write(&(src_map)->lock); (src_map)->timestamp
++; })
;
3811 if (is_cont &&
3812 copy->cpy_npagesc_u.c_p.npages != 0)
3813 goto make_continuation;
3814
3815 result = KERN_MEMORY_ERROR10;
3816 goto error;
3817 }
3818
3819 if (top_page != VM_PAGE_NULL((vm_page_t) 0)) {
3820 vm_object_lock(src_object);
3821 VM_PAGE_FREE(top_page)({ ; vm_page_free(top_page); ((void)(&vm_page_queue_lock)
); })
;
3822 vm_object_paging_end(src_object)({ ({ if (!((src_object)->paging_in_progress != 0)) Assert
("(src_object)->paging_in_progress != 0", "../vm/vm_map.c"
, 3822); }); if (--(src_object)->paging_in_progress == 0) {
({ if ((src_object)->all_wanted & (1 << (2))) thread_wakeup_prim
(((event_t)(((vm_offset_t) src_object) + (2))), ((boolean_t) 0
), 0); (src_object)->all_wanted &= ~(1 << (2)); }
); } })
;
3823 vm_object_unlock(src_object)((void)(&(src_object)->Lock));
3824 }
3825
3826 /*
3827 * We do not need to write-protect
3828 * the page, since it cannot have
3829 * been in the pmap (and we did not
3830 * enter it above). The busy bit
3831 * will protect the page from being
3832 * entered as writable until it is
3833 * unlocked.
3834 */
3835
3836 }
3837
3838 /*
3839 * The page is busy, its object is locked, and
3840 * we have a paging reference on it. Either
3841 * the map is locked, or need_map_lookup is
3842 * TRUE.
3843 *
3844 * Put the page in the page list.
3845 */
3846 copy->cpy_page_listc_u.c_p.page_list[copy->cpy_npagesc_u.c_p.npages++] = m;
3847 vm_object_unlock(m->object)((void)(&(m->object)->Lock));
3848 }
3849
3850 /*
3851 * Determine whether the entire region
3852 * has been copied.
3853 */
3854 if (src_start >= src_end && src_end != 0) {
3855 if (need_map_lookup)
3856 vm_map_lock(src_map)({ lock_write(&(src_map)->lock); (src_map)->timestamp
++; })
;
3857 break;
3858 }
3859
3860 /*
3861 * If need_map_lookup is TRUE, have to start over with
3862 * another map lookup. Note that we dropped the map
3863 * lock (to call vm_fault_page) above only in this case.
3864 */
3865 if (need_map_lookup)
3866 goto do_map_lookup;
3867
3868 /*
3869 * Verify that there are no gaps in the region
3870 */
3871
3872 src_start = src_entry->vme_endlinks.end;
3873 src_entry = src_entry->vme_nextlinks.next;
3874 if (src_entry->vme_startlinks.start != src_start) {
3875 result = KERN_INVALID_ADDRESS1;
3876 goto error;
3877 }
3878 }
3879
3880 /*
3881 * If steal_pages is true, make sure all
3882 * pages in the copy are not in any object
3883 * We try to remove them from the original
3884 * object, but we may have to copy them.
3885 *
3886 * At this point every page in the list is busy
3887 * and holds a paging reference to its object.
3888 * When we're done stealing, every page is busy,
3889 * and in no object (m->tabled == FALSE).
3890 */
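	/*
	 *	In the steal_pages case below, each page is handled in one
	 *	of two ways: a page that meets the conditions described
	 *	above is removed from its object in place; as soon as one
	 *	page cannot be stolen, the loop gives up and falls back to
	 *	vm_map_copy_steal_pages, which copies the pages instead.
	 */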
3891 src_start = trunc_page(src_addr)((vm_offset_t)(((vm_offset_t)(src_addr)) & ~((1 << 12
)-1)))
;
3892 if (steal_pages) {
3893 int i;
3894 vm_offset_t unwire_end;
3895
3896 unwire_end = src_start;
3897 for (i = 0; i < copy->cpy_npagesc_u.c_p.npages; i++) {
3898
3899 /*
3900 * Remove the page from its object if it
3901 * can be stolen. It can be stolen if:
3902 *
3903 * (1) The source is being destroyed,
3904 * the object is temporary, and
3905 * not shared.
3906 * (2) The page is not precious.
3907 *
3908 * The not shared check consists of two
3909 * parts: (a) there are no objects that
3910 * shadow this object. (b) it is not the
3911 * object in any shared map entries (i.e.,
3912 * use_shared_copy is not set).
3913 *
3914 * The first check (a) means that we can't
3915 * steal pages from objects that are not
3916 * at the top of their shadow chains. This
3917 * should not be a frequent occurrence.
3918 *
3919 * Stealing wired pages requires telling the
3920 * pmap module to let go of them.
3921 *
3922 * NOTE: stealing clean pages from objects
3923 * whose mappings survive requires a call to
3924 * the pmap module. Maybe later.
3925 */
3926 m = copy->cpy_page_listc_u.c_p.page_list[i];
3927 src_object = m->object;
3928 vm_object_lock(src_object);
3929
3930 if (src_destroy &&
3931 src_object->temporary &&
3932 (!src_object->shadowed) &&
3933 (!src_object->use_shared_copy) &&
3934 !m->precious) {
3935 vm_offset_t page_vaddr;
3936
3937 page_vaddr = src_start + (i * PAGE_SIZE(1 << 12));
3938 if (m->wire_count > 0) {
3939
3940 assert(m->wire_count == 1)({ if (!(m->wire_count == 1)) Assert("m->wire_count == 1"
, "../vm/vm_map.c", 3940); })
;
3941 /*
3942 * In order to steal a wired
3943 * page, we have to unwire it
3944 * first. We do this inline
3945 * here because we have the page.
3946 *
3947 * Step 1: Unwire the map entry.
3948 * Also tell the pmap module
3949 * that this piece of the
3950 * pmap is pageable.
3951 */
3952 vm_object_unlock(src_object)((void)(&(src_object)->Lock));
3953 if (page_vaddr >= unwire_end) {
3954 if (!vm_map_lookup_entry(src_map,
3955 page_vaddr, &src_entry))
3956 panic("vm_map_copyin_page_list: missing wired map entry");
3957
3958 vm_map_clip_start(src_map, src_entry,({ if ((page_vaddr) > (src_entry)->links.start) _vm_map_clip_start
(&(src_map)->hdr,(src_entry),(page_vaddr)); })
3959 page_vaddr)({ if ((page_vaddr) > (src_entry)->links.start) _vm_map_clip_start
(&(src_map)->hdr,(src_entry),(page_vaddr)); })
;
3960 vm_map_clip_end(src_map, src_entry,({ if ((src_start + src_size) < (src_entry)->links.end)
_vm_map_clip_end(&(src_map)->hdr,(src_entry),(src_start
+ src_size)); })
3961 src_start + src_size)({ if ((src_start + src_size) < (src_entry)->links.end)
_vm_map_clip_end(&(src_map)->hdr,(src_entry),(src_start
+ src_size)); })
;
3962
3963 assert(src_entry->wired_count > 0)({ if (!(src_entry->wired_count > 0)) Assert("src_entry->wired_count > 0"
, "../vm/vm_map.c", 3963); })
;
3964 src_entry->wired_count = 0;
3965 src_entry->user_wired_count = 0;
3966 unwire_end = src_entry->vme_endlinks.end;
3967 pmap_pageable(vm_map_pmap(src_map)((src_map)->pmap),
3968 page_vaddr, unwire_end, TRUE((boolean_t) 1));
3969 }
3970
3971 /*
3972 * Step 2: Unwire the page.
3973 * pmap_remove handles this for us.
3974 */
3975 vm_object_lock(src_object);
3976 }
3977
3978 /*
3979 * Don't need to remove the mapping;
3980 * vm_map_delete will handle it.
3981 *
3982 * Steal the page. Setting the wire count
3983 * to zero is vm_page_unwire without
3984 * activating the page.
3985 */
3986 vm_page_lock_queues();
3987 vm_page_remove(m);
3988 if (m->wire_count > 0) {
3989 m->wire_count = 0;
3990 vm_page_wire_count--;
3991 } else {
3992 VM_PAGE_QUEUES_REMOVE(m)({ if (m->active) { { queue_entry_t next, prev; next = (m)
->pageq.next; prev = (m)->pageq.prev; if ((&vm_page_queue_active
) == next) (&vm_page_queue_active)->prev = prev; else (
(vm_page_t)next)->pageq.prev = prev; if ((&vm_page_queue_active
) == prev) (&vm_page_queue_active)->next = next; else (
(vm_page_t)prev)->pageq.next = next; }; m->active = ((boolean_t
) 0); vm_page_active_count--; } if (m->inactive) { { queue_entry_t
next, prev; next = (m)->pageq.next; prev = (m)->pageq.
prev; if ((&vm_page_queue_inactive) == next) (&vm_page_queue_inactive
)->prev = prev; else ((vm_page_t)next)->pageq.prev = prev
; if ((&vm_page_queue_inactive) == prev) (&vm_page_queue_inactive
)->next = next; else ((vm_page_t)prev)->pageq.next = next
; }; m->inactive = ((boolean_t) 0); vm_page_inactive_count
--; } })
;
3993 }
3994 vm_page_unlock_queues()((void)(&vm_page_queue_lock));
3995 }
3996 else {
3997 /*
3998 * Have to copy this page: that requires
3999 * unlocking the map while copying, and
4000 * once the map has been unlocked no
4001 * further pages can be stolen in place.
4002 * So give up stealing here and just
4003 * copy all the remaining pages.
4004 */
4005 vm_object_unlock(src_object)((void)(&(src_object)->Lock));
4006 vm_map_unlock(src_map)lock_done(&(src_map)->lock);
4007
4008 vm_map_copy_steal_pages(copy);
4009
4010 vm_map_lock(src_map)({ lock_write(&(src_map)->lock); (src_map)->timestamp
++; })
;
4011 break;
4012 }
4013
4014 vm_object_paging_end(src_object)({ ({ if (!((src_object)->paging_in_progress != 0)) Assert
("(src_object)->paging_in_progress != 0", "../vm/vm_map.c"
, 4014); }); if (--(src_object)->paging_in_progress == 0) {
({ if ((src_object)->all_wanted & (1 << (2))) thread_wakeup_prim
(((event_t)(((vm_offset_t) src_object) + (2))), ((boolean_t) 0
), 0); (src_object)->all_wanted &= ~(1 << (2)); }
); } })
;
4015 vm_object_unlock(src_object)((void)(&(src_object)->Lock));
4016 }
4017
4018 /*
4019 * If the source should be destroyed, do it now, since the
4020 * copy was successful.
4021 */
4022
4023 if (src_destroy) {
4024 (void) vm_map_delete(src_map, src_start, src_end);
4025 }
4026 }
4027 else {
4028 /*
4029 * !steal_pages leaves busy pages in the map.
4030 * This will cause src_destroy to hang. Use
4031 * a continuation to prevent this.
4032 */
4033 if (src_destroy && !vm_map_copy_has_cont(copy)(((copy)->c_u.c_p.cont) != (kern_return_t (*)()) 0)) {
4034 cont_args = (vm_map_copyin_args_t)
4035 kalloc(sizeof(vm_map_copyin_args_data_t));
4036 vm_map_reference(src_map);
4037 cont_args->map = src_map;
4038 cont_args->src_addr = (vm_offset_t) 0;
4039 cont_args->src_len = (vm_size_t) 0;
4040 cont_args->destroy_addr = src_start;
4041 cont_args->destroy_len = src_end - src_start;
4042 cont_args->steal_pages = FALSE((boolean_t) 0);
4043
4044 copy->cpy_cont_argsc_u.c_p.cont_args = (char *) cont_args;
4045 copy->cpy_contc_u.c_p.cont = vm_map_copyin_page_list_cont;
4046 }
4047
4048 }
4049
4050 vm_map_unlock(src_map)lock_done(&(src_map)->lock);
4051
4052 *copy_result = copy;
4053 return(result);
4054
4055error:
4056 vm_map_unlock(src_map)lock_done(&(src_map)->lock);
4057 vm_map_copy_discard(copy);
4058 return(result);
4059}
4060
4061/*
4062 * vm_map_fork:
4063 *
4064 * Create and return a new map based on the old
4065 * map, according to the inheritance values on the
4066 * regions in that map.
4067 *
4068 * The source map must not be locked.
4069 */
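A hypothetical caller, for orientation: a fork-style task-creation path derives the child's address space from the parent's; parent_task is an illustrative name. Entries marked VM_INHERIT_NONE are skipped, VM_INHERIT_SHARE entries end up referencing the same object with delayed copy, and VM_INHERIT_COPY entries get a copy-on-write snapshot, as the cases below show.

	vm_map_t child_map;

	child_map = vm_map_fork(parent_task->map);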
4070vm_map_t vm_map_fork(old_map)
4071 vm_map_t old_map;
4072{
4073 vm_map_t new_map;
4074 vm_map_entry_t old_entry;
4075 vm_map_entry_t new_entry;
4076 pmap_t new_pmap = pmap_create((vm_size_t) 0);
4077 vm_size_t new_size = 0;
4078 vm_size_t entry_size;
4079 vm_object_t object;
4080
4081 vm_map_lock(old_map)({ lock_write(&(old_map)->lock); (old_map)->timestamp
++; })
;
4082
4083 new_map = vm_map_create(new_pmap,
4084 old_map->min_offsethdr.links.start,
4085 old_map->max_offsethdr.links.end,
4086 old_map->hdr.entries_pageable);
4087
4088 for (
4089 old_entry = vm_map_first_entry(old_map)((old_map)->hdr.links.next);
4090 old_entry != vm_map_to_entry(old_map)((struct vm_map_entry *) &(old_map)->hdr.links);
4091 ) {
4092 if (old_entry->is_sub_map)
4093 panic("vm_map_fork: encountered a submap");
4094
4095 entry_size = (old_entry->vme_endlinks.end - old_entry->vme_startlinks.start);
4096
4097 switch (old_entry->inheritance) {
4098 case VM_INHERIT_NONE((vm_inherit_t) 2):
4099 break;
4100
4101 case VM_INHERIT_SHARE((vm_inherit_t) 0):
4102 /*
4103 * New sharing code. New map entry
4104 * references the original object. Temporary
4105 * objects use the asynchronous copy algorithm for
4106 * future copies. First make sure we have
4107 * the right object. If we need a shadow,
4108 * or someone else already has one, then
4109 * make a new shadow and share it.
4110 */
4111
4112 object = old_entry->object.vm_object;
4113 if (object == VM_OBJECT_NULL((vm_object_t) 0)) {
4114 object = vm_object_allocate(
4115 (vm_size_t)(old_entry->vme_endlinks.end -
4116 old_entry->vme_startlinks.start));
4117 old_entry->offset = 0;
4118 old_entry->object.vm_object = object;
4119 assert(!old_entry->needs_copy)({ if (!(!old_entry->needs_copy)) Assert("!old_entry->needs_copy"
, "../vm/vm_map.c", 4119); })
;
4120 }
4121 else if (old_entry->needs_copy || object->shadowed ||
4122 (object->temporary && !old_entry->is_shared &&
4123 object->size > (vm_size_t)(old_entry->vme_endlinks.end -
4124 old_entry->vme_startlinks.start))) {
4125
4126 assert(object->temporary)({ if (!(object->temporary)) Assert("object->temporary"
, "../vm/vm_map.c", 4126); })
;
4127 assert(!(object->shadowed && old_entry->is_shared))({ if (!(!(object->shadowed && old_entry->is_shared
))) Assert("!(object->shadowed && old_entry->is_shared)"
, "../vm/vm_map.c", 4127); })
;
4128 vm_object_shadow(
4129 &old_entry->object.vm_object,
4130 &old_entry->offset,
4131 (vm_size_t) (old_entry->vme_endlinks.end -
4132 old_entry->vme_startlinks.start));
4133
4134 /*
4135 * If we're making a shadow for other than
4136 * copy on write reasons, then we have
4137 * to remove write permission.
4138 */
4139
4140 if (!old_entry->needs_copy &&
4141 (old_entry->protection & VM_PROT_WRITE((vm_prot_t) 0x02))) {
4142 pmap_protect(vm_map_pmap(old_map)((old_map)->pmap),
4143 old_entry->vme_startlinks.start,
4144 old_entry->vme_endlinks.end,
4145 old_entry->protection &
4146 ~VM_PROT_WRITE((vm_prot_t) 0x02));
4147 }
4148 old_entry->needs_copy = FALSE((boolean_t) 0);
4149 object = old_entry->object.vm_object;
4150 }
4151
4152 /*
4153 * Set use_shared_copy to indicate that
4154 * object must use shared (delayed) copy-on
4155 * write. This is ignored for permanent objects.
4156 * Bump the reference count for the new entry
4157 */
4158
4159 vm_object_lock(object);
4160 object->use_shared_copy = TRUE((boolean_t) 1);
4161 object->ref_count++;
4162 vm_object_unlock(object)((void)(&(object)->Lock));
4163
4164 new_entry = vm_map_entry_create(new_map)_vm_map_entry_create(&(new_map)->hdr);
4165
4166 if (old_entry->projected_on != 0) {
4167 /*
4168 * If entry is projected buffer, clone the
4169 * entry exactly.
4170 */
4171
4172 vm_map_entry_copy_full(new_entry, old_entry)(*(new_entry) = *(old_entry));
4173
4174 } else {
4175 /*
4176 * Clone the entry, using object ref from above.
4177 * Mark both entries as shared.
4178 */
4179
4180 vm_map_entry_copy(new_entry, old_entry)({ *(new_entry) = *(old_entry); (new_entry)->is_shared = (
(boolean_t) 0); (new_entry)->needs_wakeup = ((boolean_t) 0
); (new_entry)->in_transition = ((boolean_t) 0); (new_entry
)->wired_count = 0; (new_entry)->user_wired_count = 0; }
)
;
4181 old_entry->is_shared = TRUE((boolean_t) 1);
4182 new_entry->is_shared = TRUE((boolean_t) 1);
4183 }
4184
4185 /*
4186 * Insert the entry into the new map -- we
4187 * know we're inserting at the end of the new
4188 * map.
4189 */
4190
4191 vm_map_entry_link(({ (&(new_map)->hdr)->nentries++; (new_entry)->links
.prev = (((new_map)->hdr.links.prev)); (new_entry)->links
.next = (((new_map)->hdr.links.prev))->links.next; (new_entry
)->links.prev->links.next = (new_entry)->links.next->
links.prev = (new_entry); ({ struct rbtree_node *___cur, *___prev
; int ___diff, ___index; ___prev = ((void *) 0); ___index = -
1; ___cur = (&(&(new_map)->hdr)->tree)->root
; while (___cur != ((void *) 0)) { ___diff = vm_map_entry_cmp_insert
(&(new_entry)->tree_node, ___cur); ({ if (!(___diff !=
0)) Assert("___diff != 0", "../vm/vm_map.c", 4194); }); ___prev
= ___cur; ___index = rbtree_d2i(___diff); ___cur = ___cur->
children[___index]; } rbtree_insert_rebalance(&(&(new_map
)->hdr)->tree, ___prev, ___index, &(new_entry)->
tree_node); }); })
4192 new_map,({ (&(new_map)->hdr)->nentries++; (new_entry)->links
.prev = (((new_map)->hdr.links.prev)); (new_entry)->links
.next = (((new_map)->hdr.links.prev))->links.next; (new_entry
)->links.prev->links.next = (new_entry)->links.next->
links.prev = (new_entry); ({ struct rbtree_node *___cur, *___prev
; int ___diff, ___index; ___prev = ((void *) 0); ___index = -
1; ___cur = (&(&(new_map)->hdr)->tree)->root
; while (___cur != ((void *) 0)) { ___diff = vm_map_entry_cmp_insert
(&(new_entry)->tree_node, ___cur); ({ if (!(___diff !=
0)) Assert("___diff != 0", "../vm/vm_map.c", 4194); }); ___prev
= ___cur; ___index = rbtree_d2i(___diff); ___cur = ___cur->
children[___index]; } rbtree_insert_rebalance(&(&(new_map
)->hdr)->tree, ___prev, ___index, &(new_entry)->
tree_node); }); })
4193 vm_map_last_entry(new_map),({ (&(new_map)->hdr)->nentries++; (new_entry)->links
.prev = (((new_map)->hdr.links.prev)); (new_entry)->links
.next = (((new_map)->hdr.links.prev))->links.next; (new_entry
)->links.prev->links.next = (new_entry)->links.next->
links.prev = (new_entry); ({ struct rbtree_node *___cur, *___prev
; int ___diff, ___index; ___prev = ((void *) 0); ___index = -
1; ___cur = (&(&(new_map)->hdr)->tree)->root
; while (___cur != ((void *) 0)) { ___diff = vm_map_entry_cmp_insert
(&(new_entry)->tree_node, ___cur); ({ if (!(___diff !=
0)) Assert("___diff != 0", "../vm/vm_map.c", 4194); }); ___prev
= ___cur; ___index = rbtree_d2i(___diff); ___cur = ___cur->
children[___index]; } rbtree_insert_rebalance(&(&(new_map
)->hdr)->tree, ___prev, ___index, &(new_entry)->
tree_node); }); })
4194 new_entry)({ (&(new_map)->hdr)->nentries++; (new_entry)->links
.prev = (((new_map)->hdr.links.prev)); (new_entry)->links
.next = (((new_map)->hdr.links.prev))->links.next; (new_entry
)->links.prev->links.next = (new_entry)->links.next->
links.prev = (new_entry); ({ struct rbtree_node *___cur, *___prev
; int ___diff, ___index; ___prev = ((void *) 0); ___index = -
1; ___cur = (&(&(new_map)->hdr)->tree)->root
; while (___cur != ((void *) 0)) { ___diff = vm_map_entry_cmp_insert
(&(new_entry)->tree_node, ___cur); ({ if (!(___diff !=
0)) Assert("___diff != 0", "../vm/vm_map.c", 4194); }); ___prev
= ___cur; ___index = rbtree_d2i(___diff); ___cur = ___cur->
children[___index]; } rbtree_insert_rebalance(&(&(new_map
)->hdr)->tree, ___prev, ___index, &(new_entry)->
tree_node); }); })
;
4195
4196 /*
4197 * Update the physical map
4198 */
4199
4200 pmap_copy(new_map->pmap, old_map->pmap,
4201 new_entry->vme_start,
4202 entry_size,
4203 old_entry->vme_start);
4204
4205 new_size += entry_size;
4206 break;
4207
4208 case VM_INHERIT_COPY((vm_inherit_t) 1):
4209 if (old_entry->wired_count == 0) {
4210 boolean_t src_needs_copy;
4211 boolean_t new_entry_needs_copy;
4212
4213 new_entry = vm_map_entry_create(new_map)_vm_map_entry_create(&(new_map)->hdr);
4214 vm_map_entry_copy(new_entry, old_entry)({ *(new_entry) = *(old_entry); (new_entry)->is_shared = (
(boolean_t) 0); (new_entry)->needs_wakeup = ((boolean_t) 0
); (new_entry)->in_transition = ((boolean_t) 0); (new_entry
)->wired_count = 0; (new_entry)->user_wired_count = 0; }
)
;
4215
4216 if (vm_object_copy_temporary(
4217 &new_entry->object.vm_object,
4218 &new_entry->offset,
4219 &src_needs_copy,
4220 &new_entry_needs_copy)) {
4221
4222 /*
4223 * Handle copy-on-write obligations
4224 */
4225
4226 if (src_needs_copy && !old_entry->needs_copy) {
4227 vm_object_pmap_protect(
4228 old_entry->object.vm_object,
4229 old_entry->offset,
4230 entry_size,
4231 (old_entry->is_shared ?
4232 PMAP_NULL((pmap_t) 0) :
4233 old_map->pmap),
4234 old_entry->vme_startlinks.start,
4235 old_entry->protection &
4236 ~VM_PROT_WRITE((vm_prot_t) 0x02));
4237
4238 old_entry->needs_copy = TRUE((boolean_t) 1);
4239 }
4240
4241 new_entry->needs_copy = new_entry_needs_copy;
4242
4243 /*
4244 * Insert the entry at the end
4245 * of the map.
4246 */
4247
4248 vm_map_entry_link(new_map,({ (&(new_map)->hdr)->nentries++; (new_entry)->links
.prev = (((new_map)->hdr.links.prev)); (new_entry)->links
.next = (((new_map)->hdr.links.prev))->links.next; (new_entry
)->links.prev->links.next = (new_entry)->links.next->
links.prev = (new_entry); ({ struct rbtree_node *___cur, *___prev
; int ___diff, ___index; ___prev = ((void *) 0); ___index = -
1; ___cur = (&(&(new_map)->hdr)->tree)->root
; while (___cur != ((void *) 0)) { ___diff = vm_map_entry_cmp_insert
(&(new_entry)->tree_node, ___cur); ({ if (!(___diff !=
0)) Assert("___diff != 0", "../vm/vm_map.c", 4250); }); ___prev
= ___cur; ___index = rbtree_d2i(___diff); ___cur = ___cur->
children[___index]; } rbtree_insert_rebalance(&(&(new_map
)->hdr)->tree, ___prev, ___index, &(new_entry)->
tree_node); }); })
4249 vm_map_last_entry(new_map),({ (&(new_map)->hdr)->nentries++; (new_entry)->links
.prev = (((new_map)->hdr.links.prev)); (new_entry)->links
.next = (((new_map)->hdr.links.prev))->links.next; (new_entry
)->links.prev->links.next = (new_entry)->links.next->
links.prev = (new_entry); ({ struct rbtree_node *___cur, *___prev
; int ___diff, ___index; ___prev = ((void *) 0); ___index = -
1; ___cur = (&(&(new_map)->hdr)->tree)->root
; while (___cur != ((void *) 0)) { ___diff = vm_map_entry_cmp_insert
(&(new_entry)->tree_node, ___cur); ({ if (!(___diff !=
0)) Assert("___diff != 0", "../vm/vm_map.c", 4250); }); ___prev
= ___cur; ___index = rbtree_d2i(___diff); ___cur = ___cur->
children[___index]; } rbtree_insert_rebalance(&(&(new_map
)->hdr)->tree, ___prev, ___index, &(new_entry)->
tree_node); }); })
4250 new_entry)({ (&(new_map)->hdr)->nentries++; (new_entry)->links
.prev = (((new_map)->hdr.links.prev)); (new_entry)->links
.next = (((new_map)->hdr.links.prev))->links.next; (new_entry
)->links.prev->links.next = (new_entry)->links.next->
links.prev = (new_entry); ({ struct rbtree_node *___cur, *___prev
; int ___diff, ___index; ___prev = ((void *) 0); ___index = -
1; ___cur = (&(&(new_map)->hdr)->tree)->root
; while (___cur != ((void *) 0)) { ___diff = vm_map_entry_cmp_insert
(&(new_entry)->tree_node, ___cur); ({ if (!(___diff !=
0)) Assert("___diff != 0", "../vm/vm_map.c", 4250); }); ___prev
= ___cur; ___index = rbtree_d2i(___diff); ___cur = ___cur->
children[___index]; } rbtree_insert_rebalance(&(&(new_map
)->hdr)->tree, ___prev, ___index, &(new_entry)->
tree_node); }); })
;
4251
4252
4253 new_size += entry_size;
4254 break;
4255 }
4256
4257 vm_map_entry_dispose(new_map, new_entry)_vm_map_entry_dispose(&(new_map)->hdr, (new_entry));
4258 }
4259
4260 /* INNER BLOCK (copy cannot be optimized) */ {
4261
4262 vm_offset_t start = old_entry->vme_startlinks.start;
4263 vm_map_copy_t copy;
4264 vm_map_entry_t last = vm_map_last_entry(new_map)((new_map)->hdr.links.prev);
4265
4266 vm_map_unlock(old_map)lock_done(&(old_map)->lock);
4267 if (vm_map_copyin(old_map,
4268 start,
4269 entry_size,
4270 FALSE((boolean_t) 0),
4271 &copy)
4272 != KERN_SUCCESS0) {
4273 vm_map_lock(old_map)({ lock_write(&(old_map)->lock); (old_map)->timestamp
++; })
;
4274 if (!vm_map_lookup_entry(old_map, start, &last))
4275 last = last->vme_nextlinks.next;
4276 old_entry = last;
4277 /*
4278 * For some error returns, want to
4279 * skip to the next element.
4280 */
4281
4282 continue;
4283 }
4284
4285 /*
4286 * Insert the copy into the new map
4287 */
4288
4289 vm_map_copy_insert(new_map, last, copy)({ struct rbtree_node *node, *tmp; for (node = rbtree_postwalk_deepest
(&(copy)->c_u.hdr.tree), tmp = rbtree_postwalk_unlink(
node); node != ((void *) 0); node = tmp, tmp = rbtree_postwalk_unlink
(node)) ({ struct rbtree_node *___cur, *___prev; int ___diff,
___index; ___prev = ((void *) 0); ___index = -1; ___cur = (&
(new_map)->hdr.tree)->root; while (___cur != ((void *) 0
)) { ___diff = vm_map_entry_cmp_insert(node, ___cur); ({ if (
!(___diff != 0)) Assert("___diff != 0", "../vm/vm_map.c", 4289
); }); ___prev = ___cur; ___index = rbtree_d2i(___diff); ___cur
= ___cur->children[___index]; } rbtree_insert_rebalance(&
(new_map)->hdr.tree, ___prev, ___index, node); }); (((last
)->links.next)->links.prev = ((copy)->c_u.hdr.links.
prev)) ->links.next = ((last)->links.next); ((last)->
links.next = ((copy)->c_u.hdr.links.next)) ->links.prev
= (last); (new_map)->hdr.nentries += (copy)->c_u.hdr.nentries
; kmem_cache_free(&vm_map_copy_cache, (vm_offset_t) copy)
; })
;
4290 new_size += entry_size;
4291
4292 /*
4293 * Pick up the traversal at the end of
4294 * the copied region.
4295 */
4296
4297 vm_map_lock(old_map)({ lock_write(&(old_map)->lock); (old_map)->timestamp
++; })
;
4298 start += entry_size;
4299 if (!vm_map_lookup_entry(old_map, start, &last))
4300 last = last->vme_nextlinks.next;
4301 else
4302 vm_map_clip_start(old_map, last, start)({ if ((start) > (last)->links.start) _vm_map_clip_start
(&(old_map)->hdr,(last),(start)); })
;
4303 old_entry = last;
4304
4305 continue;
4306 /* INNER BLOCK (copy cannot be optimized) */ }
4307 }
4308 old_entry = old_entry->vme_nextlinks.next;
4309 }
4310
4311 new_map->size = new_size;
4312 vm_map_unlock(old_map)lock_done(&(old_map)->lock);
4313
4314 return(new_map);
4315}
4316
4317/*
4318 * vm_map_lookup:
4319 *
4320 * Finds the VM object, offset, and
4321 * protection for a given virtual address in the
4322 * specified map, assuming a page fault of the
4323 * type specified.
4324 *
4325 * Returns the (object, offset, protection) for
4326 * this address, whether it is wired down, and whether
4327 * this map has the only reference to the data in question.
4328 * In order to later verify this lookup, a "version"
4329 * is returned.
4330 *
4331 * The map should not be locked; it will not be
4332 * locked on exit. In order to guarantee the
4333 * existence of the returned object, it is returned
4334 * locked.
4335 *
4336 * If a lookup is requested with "write protection"
4337 * specified, the map may be changed to perform virtual
4338 * copying operations, although the data referenced will
4339 * remain the same.
4340 */
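For orientation, the fault-handling pattern that this contract supports looks roughly like the sketch below; the page-resolution step and the retry path are elided, and the local names are illustrative.

	vm_map_t map = faulting_map;		/* illustrative */
	vm_map_version_t version;
	vm_object_t object;
	vm_offset_t offset;
	vm_prot_t prot;
	boolean_t wired;
	kern_return_t kr;

	kr = vm_map_lookup(&map, vaddr, fault_type, &version,
			   &object, &offset, &prot, &wired);
	if (kr != KERN_SUCCESS)
		return (kr);
	/* ... find or fault in the page at (object, offset); object is locked ... */
	if (!vm_map_verify(map, &version)) {
		/* The map changed while unlocked: retry the lookup. */
	}
	/* ... enter the page in the pmap ... */
	vm_map_verify_done(map, &version);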
4341kern_return_t vm_map_lookup(var_map, vaddr, fault_type, out_version,
4342 object, offset, out_prot, wired)
4343 vm_map_t *var_map; /* IN/OUT */
4344 vm_offset_t vaddr;
4345 vm_prot_t fault_type;
4346
4347 vm_map_version_t *out_version; /* OUT */
4348 vm_object_t *object; /* OUT */
4349 vm_offset_t *offset; /* OUT */
4350 vm_prot_t *out_prot; /* OUT */
4351 boolean_t *wired; /* OUT */
4352{
4353 vm_map_entry_t entry;
4354 vm_map_t map = *var_map;
4355 vm_prot_t prot;
4356
4357 RetryLookup: ;
4358
4359 /*
4360 * Lookup the faulting address.
4361 */
4362
4363 vm_map_lock_read(map)lock_read(&(map)->lock);
4364
4365#define RETURN(why) \
4366 { \
4367 vm_map_unlock_read(map)lock_done(&(map)->lock); \
4368 return(why); \
4369 }
4370
4371 /*
4372 * If the map has an interesting hint, try it before calling
4373 * full blown lookup routine.
4374 */
4375
4376 simple_lock(&map->hint_lock);
4377 entry = map->hint;
4378 simple_unlock(&map->hint_lock)((void)(&map->hint_lock));
4379
4380 if ((entry == vm_map_to_entry(map)((struct vm_map_entry *) &(map)->hdr.links)) ||
4381 (vaddr < entry->vme_startlinks.start) || (vaddr >= entry->vme_endlinks.end)) {
4382 vm_map_entry_t tmp_entry;
4383
4384 /*
4385 * Entry was either not a valid hint, or the vaddr
4386 * was not contained in the entry, so do a full lookup.
4387 */
4388 if (!vm_map_lookup_entry(map, vaddr, &tmp_entry))
4389 RETURN(KERN_INVALID_ADDRESS1);
4390
4391 entry = tmp_entry;
4392 }
4393
4394 /*
4395 * Handle submaps.
4396 */
4397
4398 if (entry->is_sub_map) {
4399 vm_map_t old_map = map;
4400
4401 *var_map = map = entry->object.sub_map;
4402 vm_map_unlock_read(old_map);
4403 goto RetryLookup;
4404 }
4405
4406 /*
4407 * Check whether this task is allowed to have
4408 * this page.
4409 */
4410
4411 prot = entry->protection;
4412
4413 if ((fault_type & (prot)) != fault_type) {
4414 if ((prot & VM_PROT_NOTIFY) && (fault_type & VM_PROT_WRITE)) {
4415 RETURN(KERN_WRITE_PROTECTION_FAILURE);
4416 } else {
4417 RETURN(KERN_PROTECTION_FAILURE);
4418 }
4419 }
4420
4421 /*
4422 * If this page is not pageable, we have to get
4423 * it for all possible accesses.
4424 */
4425
4426 if ((*wired = (entry->wired_count != 0)))
4427 prot = fault_type = entry->protection;
4428
4429 /*
4430 * If the entry was copy-on-write, we either ...
4431 */
4432
4433 if (entry->needs_copy) {
4434 /*
4435 * If we want to write the page, we may as well
4436 * handle that now since we've got the map locked.
4437 *
4438 * If we don't need to write the page, we just
4439 * demote the permissions allowed.
4440 */
4441
4442 if (fault_type & VM_PROT_WRITE) {
4443 /*
4444 * Make a new object, and place it in the
4445 * object chain. Note that no new references
4446 * have appeared -- one just moved from the
4447 * map to the new object.
4448 */
4449
4450 if (vm_map_lock_read_to_write(map)) {
4451 goto RetryLookup;
4452 }
4453 map->timestamp++;
4454
4455 vm_object_shadow(
4456 &entry->object.vm_object,
4457 &entry->offset,
4458 (vm_size_t) (entry->vme_end - entry->vme_start));
4459
4460 entry->needs_copy = FALSE;
4461
4462 vm_map_lock_write_to_read(map);
4463 }
4464 else {
4465 /*
4466 * We're attempting to read a copy-on-write
4467 * page -- don't allow writes.
4468 */
4469
4470 prot &= (~VM_PROT_WRITE);
4471 }
4472 }
4473
4474 /*
4475 * Create an object if necessary.
4476 */
4477 if (entry->object.vm_object == VM_OBJECT_NULL) {
4478
4479 if (vm_map_lock_read_to_write(map)) {
4480 goto RetryLookup;
4481 }
4482
4483 entry->object.vm_object = vm_object_allocate(
4484 (vm_size_t)(entry->vme_end - entry->vme_start));
4485 entry->offset = 0;
4486 vm_map_lock_write_to_read(map);
4487 }
4488
4489 /*
4490 * Return the object/offset from this entry. If the entry
4491 * was copy-on-write or empty, it has been fixed up. Also
4492 * return the protection.
4493 */
4494
4495 *offset = (vaddr - entry->vme_start) + entry->offset;
4496 *object = entry->object.vm_object;
4497 *out_prot = prot;
4498
4499 /*
4500 * Lock the object to prevent it from disappearing
4501 */
4502
4503 vm_object_lock(*object);
4504
4505 /*
4506 * Save the version number and unlock the map.
4507 */
4508
4509 out_version->main_timestamp = map->timestamp;
4510
4511 RETURN(KERN_SUCCESS);
4512
4513#undef RETURN
4514}
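
As a usage illustration of the contract described above (object returned locked, map left unlocked, version saved for a later vm_map_verify), here is a hedged sketch of the calling pattern a fault handler would follow; the function name and the elided page-in step are assumptions, not code from this module.

/* Illustrative sketch -- not part of vm_map.c. */
kern_return_t example_fault_lookup(vm_map_t map, vm_offset_t vaddr,
				   vm_prot_t fault_type)
{
	vm_map_version_t version;
	vm_object_t object;
	vm_offset_t offset;
	vm_prot_t prot;
	boolean_t wired;
	kern_return_t kr;

	kr = vm_map_lookup(&map, vaddr, fault_type, &version,
			   &object, &offset, &prot, &wired);
	if (kr != KERN_SUCCESS)
		return kr;

	/*
	 * 'object' comes back locked and 'map' may have been replaced by a
	 * submap.  The caller resolves the page from (object, offset) here,
	 * then re-checks 'version' with vm_map_verify before installing it.
	 */
	vm_object_unlock(object);
	return KERN_SUCCESS;
}
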
4515
4516/*
4517 * vm_map_verify:
4518 *
4519 * Verifies that the map in question has not changed
4520 * since the given version. If successful, the map
4521 * will not change until vm_map_verify_done() is called.
4522 */
4523boolean_t vm_map_verify(map, version)
4524 vm_map_t map;
4525 vm_map_version_t *version; /* REF */
4526{
4527 boolean_t result;
4528
4529 vm_map_lock_read(map);
4530 result = (map->timestamp == version->main_timestamp);
4531
4532 if (!result)
4533 vm_map_unlock_read(map);
4534
4535 return(result);
4536}
4537
4538/*
4539 * vm_map_verify_done:
4540 *
4541 * Releases locks acquired by a vm_map_verify.
4542 *
4543 * This is now a macro in vm/vm_map.h. It does a
4544 * vm_map_unlock_read on the map.
4545 */
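
Taken together with vm_map_lookup, the verify pair is used roughly as follows; this is an illustrative fragment only, and the RetryFault label stands in for whatever retry path the caller has.

/* Illustrative fragment -- not part of vm_map.c. */
	if (!vm_map_verify(map, &version)) {
		/* Map changed since the lookup: redo vm_map_lookup. */
		goto RetryFault;		/* label assumed */
	}
	/* Map is read-locked here; safe to enter the page into the pmap. */
	vm_map_verify_done(map, &version);	/* releases the read lock, per the note above */
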
4546
4547/*
4548 * vm_region:
4549 *
4550 * User call to obtain information about a region in
4551 * a task's address map.
4552 */
4553
4554kern_return_t vm_region(map, address, size,
4555 protection, max_protection,
4556 inheritance, is_shared,
4557 object_name, offset_in_object)
4558 vm_map_t map;
4559 vm_offset_t *address; /* IN/OUT */
4560 vm_size_t *size; /* OUT */
4561 vm_prot_t *protection; /* OUT */
4562 vm_prot_t *max_protection; /* OUT */
4563 vm_inherit_t *inheritance; /* OUT */
4564 boolean_t *is_shared; /* OUT */
4565 ipc_port_t *object_name; /* OUT */
4566 vm_offset_t *offset_in_object; /* OUT */
4567{
4568 vm_map_entry_t tmp_entry;
4569 vm_map_entry_t entry;
4570 vm_offset_t tmp_offset;
4571 vm_offset_t start;
4572
4573 if (map == VM_MAP_NULL)
4574 return(KERN_INVALID_ARGUMENT);
4575
4576 start = *address;
4577
4578 vm_map_lock_read(map);
4579 if (!vm_map_lookup_entry(map, start, &tmp_entry)) {
4580 if ((entry = tmp_entry->vme_next) == vm_map_to_entry(map)) {
4581 vm_map_unlock_read(map);
4582 return(KERN_NO_SPACE);
4583 }
4584 } else {
4585 entry = tmp_entry;
4586 }
4587
4588 start = entry->vme_start;
4589 *protection = entry->protection;
4590 *max_protection = entry->max_protection;
4591 *inheritance = entry->inheritance;
4592 *address = start;
4593 *size = (entry->vme_end - start);
4594
4595 tmp_offset = entry->offset;
4596
4597
4598 if (entry->is_sub_map) {
4599 *is_shared = FALSE;
4600 *object_name = IP_NULL;
4601 *offset_in_object = tmp_offset;
4602 } else {
4603 *is_shared = entry->is_shared;
4604 *object_name = vm_object_name(entry->object.vm_object);
4605 *offset_in_object = tmp_offset;
4606 }
4607
4608 vm_map_unlock_read(map);
4609
4610 return(KERN_SUCCESS);
4611}
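
A hedged sketch of how the in-kernel routine above can be used to walk an address space region by region (user-level callers go through the corresponding RPC instead); the helper name is an assumption.

/* Illustrative sketch -- not part of vm_map.c. */
void example_walk_regions(vm_map_t map)
{
	vm_offset_t addr = 0, offset;
	vm_size_t size;
	vm_prot_t prot, max_prot;
	vm_inherit_t inh;
	boolean_t shared;
	ipc_port_t name;

	while (vm_region(map, &addr, &size, &prot, &max_prot,
			 &inh, &shared, &name, &offset) == KERN_SUCCESS) {
		/* [addr, addr + size) is one mapped region; move past it. */
		addr += size;
	}
	/* The loop ends when vm_region returns KERN_NO_SPACE past the last entry. */
}
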
4612
4613/*
4614 * Routine: vm_map_simplify
4615 *
4616 * Description:
4617 * Attempt to simplify the map representation in
4618 * the vicinity of the given starting address.
4619 * Note:
4620 * This routine is intended primarily to keep the
4621 * kernel maps more compact -- they generally don't
4622 * benefit from the "expand a map entry" technology
4623 * at allocation time because the adjacent entry
4624 * is often wired down.
4625 */
4626void vm_map_simplify(map, start)
4627 vm_map_t map;
4628 vm_offset_t start;
4629{
4630 vm_map_entry_t this_entry;
4631 vm_map_entry_t prev_entry;
4632
4633 vm_map_lock(map);
4634 if (
4635 (vm_map_lookup_entry(map, start, &this_entry)) &&
4636 ((prev_entry = this_entry->vme_prev) != vm_map_to_entry(map)) &&
4637
4638 (prev_entry->vme_end == start) &&
4639
4640 (prev_entry->is_shared == FALSE) &&
4641 (prev_entry->is_sub_map == FALSE) &&
4642
4643 (this_entry->is_shared == FALSE) &&
4644 (this_entry->is_sub_map == FALSE) &&
4645
4646 (prev_entry->inheritance == this_entry->inheritance) &&
4647 (prev_entry->protection == this_entry->protection) &&
4648 (prev_entry->max_protection == this_entry->max_protection) &&
4649 (prev_entry->wired_count == this_entry->wired_count) &&
4650 (prev_entry->user_wired_count == this_entry->user_wired_count) &&
4651
4652 (prev_entry->needs_copy == this_entry->needs_copy) &&
4653
4654 (prev_entry->object.vm_object == this_entry->object.vm_object) &&
4655 ((prev_entry->offset + (prev_entry->vme_end - prev_entry->vme_start))
4656 == this_entry->offset) &&
4657 (prev_entry->projected_on == 0) &&
4658 (this_entry->projected_on == 0)
4659 ) {
4660 if (map->first_free == this_entry)
4661 map->first_free = prev_entry;
4662
4663 SAVE_HINT(map, prev_entry);
4664 vm_map_entry_unlink(map, this_entry);
4665 prev_entry->vme_end = this_entry->vme_end;
4666 vm_object_deallocate(this_entry->object.vm_object);
4667 vm_map_entry_dispose(map, this_entry);
4668 }
4669 vm_map_unlock(map);
4670}
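
To make the effect concrete, a worked example (addresses invented for illustration): suppose two adjacent entries were created by back-to-back allocations and ended up with identical attributes and a contiguous object range,

	[0x1000, 0x3000) -> object O at offset 0x0
	[0x3000, 0x5000) -> object O at offset 0x2000

then a call at the shared boundary,

	vm_map_simplify(map, 0x3000);

coalesces them into a single entry [0x1000, 0x5000) -> (O, 0x0). If any of the checks above fails -- differing protection or wiring, a submap, a shared entry, or a non-contiguous object offset -- the map is left unchanged.
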
4671
4672
4673/*
4674 * Routine: vm_map_machine_attribute
4675 * Purpose:
4676 * Provide machine-specific attributes to mappings,
4677 * such as cachability etc. for machines that provide
4678 * them. NUMA architectures and machines with big/strange
4679 * caches will use this.
4680 * Note:
4681 * Responsibilities for locking and checking are handled here,
4682 * everything else in the pmap module. If any non-volatile
4683 * information must be kept, the pmap module should handle
4684 * it itself. [This assumes that attributes do not
4685 * need to be inherited, which seems ok to me]
4686 */
4687kern_return_t vm_map_machine_attribute(map, address, size, attribute, value)
4688 vm_map_t map;
4689 vm_offset_t address;
4690 vm_size_t size;
4691 vm_machine_attribute_t attribute;
4692 vm_machine_attribute_val_t* value; /* IN/OUT */
4693{
4694 kern_return_t ret;
4695
4696 if (address < vm_map_min(map) ||
4697 (address + size) > vm_map_max(map))
4698 return KERN_INVALID_ARGUMENT;
4699
4700 vm_map_lock(map);
4701
4702 ret = pmap_attribute(map->pmap, address, size, attribute, value);
4703
4704 vm_map_unlock(map);
4705
4706 return ret;
4707}
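
As a usage illustration, a hedged sketch of requesting a cache flush over a mapped range: MATTR_CACHE and MATTR_VAL_CACHE_FLUSH are the generic names from <mach/vm_attributes.h>, and whether they have any effect is entirely up to the machine-dependent pmap_attribute (which, in the configuration analyzed here, appears to be stubbed out).

/* Illustrative sketch -- not part of vm_map.c. */
kern_return_t example_flush_range(vm_map_t map, vm_offset_t addr, vm_size_t len)
{
	vm_machine_attribute_val_t value = MATTR_VAL_CACHE_FLUSH;

	return vm_map_machine_attribute(map, addr, len, MATTR_CACHE, &value);
}
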
4708
4709
4710#if MACH_KDB
4711
4712#define printf kdbprintf
4713
4714/*
4715 * vm_map_print: [ debug ]
4716 */
4717void vm_map_print(map)
4718 vm_map_t map;
4719{
4720 vm_map_entry_t entry;
4721
4722 iprintf("Task map 0x%X: pmap=0x%X,",
4723 (vm_offset_t) map, (vm_offset_t) (map->pmap));
4724 printf("ref=%d,nentries=%d,", map->ref_count, map->hdr.nentries);
4725 printf("version=%d\n", map->timestamp);
4726 indent += 2;
4727 for (entry = vm_map_first_entry(map);
4728 entry != vm_map_to_entry(map);
4729 entry = entry->vme_next) {
4730 static char *inheritance_name[3] = { "share", "copy", "none"};
4731
4732 iprintf("map entry 0x%X: ", (vm_offset_t) entry);
4733 printf("start=0x%X, end=0x%X, ",
4734 (vm_offset_t) entry->vme_start, (vm_offset_t) entry->vme_end);
4735 printf("prot=%X/%X/%s, ",
4736 entry->protection,
4737 entry->max_protection,
4738 inheritance_name[entry->inheritance]);
4739 if (entry->wired_count != 0) {
4740 printf("wired(");
4741 if (entry->user_wired_count != 0)
4742 printf("u");
4743 if (entry->wired_count >
4744 ((entry->user_wired_count == 0) ? 0 : 1))
4745 printf("k");
4746 printf(") ");
4747 }
4748 if (entry->in_transition) {
4749 printf("in transition");
4750 if (entry->needs_wakeup)
4751 printf("(wake request)");
4752 printf(", ");
4753 }
4754 if (entry->is_sub_map) {
4755 printf("submap=0x%X, offset=0x%X\n",
4756 (vm_offset_t) entry->object.sub_map,
4757 (vm_offset_t) entry->offset);
4758 } else {
4759 printf("object=0x%X, offset=0x%X",
4760 (vm_offset_t) entry->object.vm_object,
4761 (vm_offset_t) entry->offset);
4762 if (entry->is_shared)
4763 printf(", shared");
4764 if (entry->needs_copy)
4765 printf(", copy needed");
4766 printf("\n");
4767
4768 if ((entry->vme_prev == vm_map_to_entry(map)) ||
4769 (entry->vme_prev->object.vm_object != entry->object.vm_object)) {
4770 indent += 2;
4771 vm_object_print(entry->object.vm_object);
4772 indent -= 2;
4773 }
4774 }
4775 }
4776 indent -= 2;
4777}
4778
4779/*
4780 * Routine: vm_map_copy_print
4781 * Purpose:
4782 * Pretty-print a copy object for ddb.
4783 */
4784
4785void vm_map_copy_print(copy)
4786 const vm_map_copy_t copy;
4787{
4788 int i, npages;
4789
4790 printf("copy object 0x%x\n", copy);
4791
4792 indent += 2;
4793
4794 iprintf("type=%d", copy->type);
4795 switch (copy->type) {
4796 case VM_MAP_COPY_ENTRY_LIST:
4797 printf("[entry_list]");
4798 break;
4799
4800 case VM_MAP_COPY_OBJECT:
4801 printf("[object]");
4802 break;
4803
4804 case VM_MAP_COPY_PAGE_LIST:
4805 printf("[page_list]");
4806 break;
4807
4808 default:
4809 printf("[bad type]");
4810 break;
4811 }
4812 printf(", offset=0x%x", copy->offset);
4813 printf(", size=0x%x\n", copy->size);
4814
4815 switch (copy->type) {
4816 case VM_MAP_COPY_ENTRY_LIST:
4817 /* XXX add stuff here */
4818 break;
4819
4820 case VM_MAP_COPY_OBJECT:
4821 iprintf("object=0x%x\n", copy->cpy_object);
4822 break;
4823
4824 case VM_MAP_COPY_PAGE_LIST:
4825 iprintf("npages=%d", copy->cpy_npages);
4826 printf(", cont=%x", copy->cpy_cont);
4827 printf(", cont_args=%x\n", copy->cpy_cont_args);
4828 if (copy->cpy_npages < 0) {
4829 npages = 0;
4830 } else if (copy->cpy_npages > VM_MAP_COPY_PAGE_LIST_MAX) {
4831 npages = VM_MAP_COPY_PAGE_LIST_MAX;
4832 } else {
4833 npages = copy->cpy_npages;
4834 }
4835 iprintf("copy->cpy_page_list[0..%d] = {", npages);
4836 for (i = 0; i < npages - 1; i++) {
4837 printf("0x%x, ", copy->cpy_page_list[i]);
4838 }
4839 if (npages > 0) {
4840 printf("0x%x", copy->cpy_page_list[npages - 1]);
4841 }
4842 printfdb_printf("}\n");
4843 break;
4844 }
4845
4846 indent -= 2;
4847}
4848#endif /* MACH_KDB */