File:        obj-scan-build/../vm/vm_map.c
Location:    line 3374, column 4
Description: Value stored to 'src_size' is never read
1 | /* |
2 | * Mach Operating System |
3 | * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University. |
4 | * Copyright (c) 1993,1994 The University of Utah and |
5 | * the Computer Systems Laboratory (CSL). |
6 | * All rights reserved. |
7 | * |
8 | * Permission to use, copy, modify and distribute this software and its |
9 | * documentation is hereby granted, provided that both the copyright |
10 | * notice and this permission notice appear in all copies of the |
11 | * software, derivative works or modified versions, and any portions |
12 | * thereof, and that both notices appear in supporting documentation. |
13 | * |
14 | * CARNEGIE MELLON, THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF |
15 | * THIS SOFTWARE IN ITS "AS IS" CONDITION, AND DISCLAIM ANY LIABILITY |
16 | * OF ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF |
17 | * THIS SOFTWARE. |
18 | * |
19 | * Carnegie Mellon requests users of this software to return to |
20 | * |
21 | * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU |
22 | * School of Computer Science |
23 | * Carnegie Mellon University |
24 | * Pittsburgh PA 15213-3890 |
25 | * |
26 | * any improvements or extensions that they make and grant Carnegie Mellon |
27 | * the rights to redistribute these changes. |
28 | */ |
29 | /* |
30 | * File: vm/vm_map.c |
31 | * Author: Avadis Tevanian, Jr., Michael Wayne Young |
32 | * Date: 1985 |
33 | * |
34 | * Virtual memory mapping module. |
35 | */ |
36 | |
37 | #include <kern/printf.h> |
38 | #include <mach/kern_return.h> |
39 | #include <mach/port.h> |
40 | #include <mach/vm_attributes.h> |
41 | #include <mach/vm_param.h> |
42 | #include <kern/assert.h> |
43 | #include <kern/debug.h> |
44 | #include <kern/kalloc.h> |
45 | #include <kern/rbtree.h> |
46 | #include <kern/slab.h> |
47 | #include <vm/pmap.h> |
48 | #include <vm/vm_fault.h> |
49 | #include <vm/vm_map.h> |
50 | #include <vm/vm_object.h> |
51 | #include <vm/vm_page.h> |
52 | #include <vm/vm_resident.h> |
53 | #include <vm/vm_kern.h> |
54 | #include <ipc/ipc_port.h> |
55 | |
56 | #if MACH_KDB
57 | #include <ddb/db_output.h> |
58 | #include <vm/vm_print.h> |
59 | #endif /* MACH_KDB */ |
60 | |
61 | /* |
62 | * Macros to copy a vm_map_entry. We must be careful to correctly |
63 | * manage the wired page count. vm_map_entry_copy() creates a new |
64 | * map entry to the same memory - the wired count in the new entry |
65 | * must be set to zero. vm_map_entry_copy_full() creates a new |
66 | * entry that is identical to the old entry. This preserves the |
67 | * wire count; it's used for map splitting and cache changing in |
68 | * vm_map_copyout. |
69 | */ |
70 | #define vm_map_entry_copy(NEW,OLD) \
71 | MACRO_BEGIN \
72 | *(NEW) = *(OLD); \
73 | (NEW)->is_shared = FALSE; \
74 | (NEW)->needs_wakeup = FALSE; \
75 | (NEW)->in_transition = FALSE; \
76 | (NEW)->wired_count = 0; \
77 | (NEW)->user_wired_count = 0; \
78 | MACRO_END
79 | 
80 | #define vm_map_entry_copy_full(NEW,OLD) (*(NEW) = *(OLD))
81 | |
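
The distinction between these two copy macros is worth illustrating. A minimal sketch of how a hypothetical caller might duplicate an entry into another map while sharing the underlying object (the names new_map, insert_after and old are assumptions, not taken from this file):

    vm_map_entry_t new = vm_map_entry_create(new_map);
    vm_map_entry_copy(new, old);               /* shares the object, clears wiring */
    vm_map_entry_link(new_map, insert_after, new);

vm_map_entry_copy_full(), by contrast, carries the wired count over; it is what the clipping routines later in this file use when splitting an entry in place.
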
82 | /* |
83 | * Virtual memory maps provide for the mapping, protection, |
84 | * and sharing of virtual memory objects. In addition, |
85 | * this module provides for an efficient virtual copy of |
86 | * memory from one map to another. |
87 | * |
88 | * Synchronization is required prior to most operations. |
89 | * |
90 | * Maps consist of an ordered doubly-linked list of simple |
91 | * entries; a hint and a red-black tree are used to speed up lookups. |
92 | * |
93 | * Sharing maps have been deleted from this version of Mach. |
94 | * All shared objects are now mapped directly into the respective |
95 | * maps. This requires a change in the copy on write strategy; |
96 | * the asymmetric (delayed) strategy is used for shared temporary |
97 | * objects instead of the symmetric (shadow) strategy. This is |
98 | * selected by the (new) use_shared_copy bit in the object. See |
99 | * vm_object_copy_temporary in vm_object.c for details. All maps |
100 | * are now "top level" maps (either task map, kernel map or submap |
101 | * of the kernel map). |
102 | * |
103 | * Since portions of maps are specified by start/end addresses,
104 | * which may not align with existing map entries, all |
105 | * routines merely "clip" entries to these start/end values. |
106 | * [That is, an entry is split into two, bordering at a |
107 | * start or end value.] Note that these clippings may not |
108 | * always be necessary (as the two resulting entries are then |
109 | * not changed); however, the clipping is done for convenience. |
110 | * No attempt is currently made to "glue back together" two |
111 | * abutting entries. |
112 | * |
113 | * The symmetric (shadow) copy strategy implements virtual copy |
114 | * by copying VM object references from one map to |
115 | * another, and then marking both regions as copy-on-write. |
116 | * It is important to note that only one writeable reference |
117 | * to a VM object region exists in any map when this strategy |
118 | * is used -- this means that shadow object creation can be |
119 | * delayed until a write operation occurs. The asymmetric (delayed) |
120 | * strategy allows multiple maps to have writeable references to |
121 | * the same region of a vm object, and hence cannot delay creating |
122 | * its copy objects. See vm_object_copy_temporary() in vm_object.c. |
123 | * Copying of permanent objects is completely different; see |
124 | * vm_object_copy_strategically() in vm_object.c. |
125 | */ |
126 | |
127 | struct kmem_cache vm_map_cache; /* cache for vm_map structures */ |
128 | struct kmem_cache vm_map_entry_cache; /* cache for vm_map_entry structures */ |
129 | struct kmem_cache vm_map_kentry_cache; /* cache for kernel entry structures */ |
130 | struct kmem_cache vm_map_copy_cache; /* cache for vm_map_copy structures */ |
131 | |
132 | /* |
133 | * Placeholder object for submap operations. This object is dropped |
134 | * into the range by a call to vm_map_find, and removed when |
135 | * vm_map_submap creates the submap. |
136 | */ |
137 | |
138 | static struct vm_object vm_submap_object_store; |
139 | vm_object_t vm_submap_object = &vm_submap_object_store; |
140 | |
141 | /* |
142 | * vm_map_init: |
143 | * |
144 | * Initialize the vm_map module. Must be called before |
145 | * any other vm_map routines. |
146 | * |
147 | * Map and entry structures are allocated from caches -- we must |
148 | * initialize those caches. |
149 | * |
150 | * There are three caches of interest: |
151 | * |
152 | * vm_map_cache: used to allocate maps. |
153 | * vm_map_entry_cache: used to allocate map entries. |
154 | * vm_map_kentry_cache: used to allocate map entries for the kernel. |
155 | * |
156 | * Kernel map entries are allocated from a special cache, using a custom |
157 | * page allocation function to avoid recursion. It would be difficult |
158 | * (perhaps impossible) for the kernel to allocate more memory to an entry |
159 | * cache when it became empty since the very act of allocating memory |
160 | * implies the creation of a new entry. |
161 | */ |
162 | |
163 | vm_offset_t kentry_data; |
164 | vm_size_t kentry_data_size = KENTRY_DATA_SIZE;
165 | |
166 | static vm_offset_t kentry_pagealloc(vm_size_t size) |
167 | { |
168 | vm_offset_t result; |
169 | |
170 | if (size > kentry_data_size) |
171 | panic("vm_map: kentry memory exhausted"); |
172 | |
173 | result = kentry_data; |
174 | kentry_data += size; |
175 | kentry_data_size -= size; |
176 | return result; |
177 | } |
178 | |
179 | void vm_map_init(void) |
180 | { |
181 | kmem_cache_init(&vm_map_cache, "vm_map", sizeof(struct vm_map), 0,
182 | NULL, NULL, NULL, 0);
183 | kmem_cache_init(&vm_map_entry_cache, "vm_map_entry",
184 | sizeof(struct vm_map_entry), 0, NULL, NULL, NULL, 0);
185 | kmem_cache_init(&vm_map_kentry_cache, "vm_map_kentry",
186 | sizeof(struct vm_map_entry), 0, NULL, kentry_pagealloc,
187 | NULL, KMEM_CACHE_NOCPUPOOL | KMEM_CACHE_NOOFFSLAB
188 | | KMEM_CACHE_NORECLAIM);
189 | kmem_cache_init(&vm_map_copy_cache, "vm_map_copy",
190 | sizeof(struct vm_map_copy), 0, NULL, NULL, NULL, 0);
191 | |
192 | /* |
193 | * Submap object is initialized by vm_object_init. |
194 | */ |
195 | } |
196 | |
197 | void vm_map_setup(map, pmap, min, max, pageable) |
198 | vm_map_t map; |
199 | pmap_t pmap; |
200 | vm_offset_t min, max; |
201 | boolean_t pageable; |
202 | { |
203 | vm_map_first_entry(map) = vm_map_to_entry(map);
204 | vm_map_last_entry(map) = vm_map_to_entry(map);
205 | map->hdr.nentries = 0; |
206 | map->hdr.entries_pageable = pageable; |
207 | rbtree_init(&map->hdr.tree); |
208 | |
209 | map->size = 0; |
210 | map->ref_count = 1; |
211 | map->pmap = pmap; |
212 | map->min_offset = min;
213 | map->max_offset = max;
214 | map->wiring_required = FALSE;
215 | map->wait_for_space = FALSE;
216 | map->first_free = vm_map_to_entry(map);
217 | map->hint = vm_map_to_entry(map);
218 | vm_map_lock_init(map);
219 | simple_lock_init(&map->ref_lock); |
220 | simple_lock_init(&map->hint_lock); |
221 | } |
222 | |
223 | /* |
224 | * vm_map_create: |
225 | * |
226 | * Creates and returns a new empty VM map with |
227 | * the given physical map structure, and having |
228 | * the given lower and upper address bounds. |
229 | */ |
230 | vm_map_t vm_map_create(pmap, min, max, pageable) |
231 | pmap_t pmap; |
232 | vm_offset_t min, max; |
233 | boolean_t pageable; |
234 | { |
235 | vm_map_t result; |
236 | |
237 | result = (vm_map_t) kmem_cache_alloc(&vm_map_cache); |
238 | if (result == VM_MAP_NULL)
239 | panic("vm_map_create"); |
240 | |
241 | vm_map_setup(result, pmap, min, max, pageable); |
242 | |
243 | return(result); |
244 | } |
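
A hedged usage sketch (not from this file): a task-creation path would typically pair a freshly created physical map with a new, pageable address map; min_addr and max_addr below are placeholders for the platform's user address bounds:

    vm_map_t user_map;

    user_map = vm_map_create(pmap_create(0), min_addr, max_addr, TRUE);

Kernel maps, by contrast, are set up with pageable == FALSE, so their entries come from the non-paged kentry cache described above.
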
245 | |
246 | /* |
247 | * vm_map_entry_create: [ internal use only ] |
248 | * |
249 | * Allocates a VM map entry for insertion in the |
250 | * given map (or map copy). No fields are filled. |
251 | */ |
252 | #define vm_map_entry_create(map) \
253 | _vm_map_entry_create(&(map)->hdr)
254 | 
255 | #define vm_map_copy_entry_create(copy) \
256 | _vm_map_entry_create(&(copy)->cpy_hdr)
257 | |
258 | vm_map_entry_t _vm_map_entry_create(map_header) |
259 | struct vm_map_header *map_header; |
260 | { |
261 | kmem_cache_t cache; |
262 | vm_map_entry_t entry; |
263 | |
264 | if (map_header->entries_pageable) |
265 | cache = &vm_map_entry_cache; |
266 | else |
267 | cache = &vm_map_kentry_cache; |
268 | |
269 | entry = (vm_map_entry_t) kmem_cache_alloc(cache); |
270 | if (entry == VM_MAP_ENTRY_NULL)
271 | panic("vm_map_entry_create"); |
272 | |
273 | return(entry); |
274 | } |
275 | |
276 | /* |
277 | * vm_map_entry_dispose: [ internal use only ] |
278 | * |
279 | * Inverse of vm_map_entry_create. |
280 | */ |
281 | #define vm_map_entry_dispose(map, entry) \
282 | _vm_map_entry_dispose(&(map)->hdr, (entry))
283 | 
284 | #define vm_map_copy_entry_dispose(map, entry) \
285 | _vm_map_entry_dispose(&(copy)->cpy_hdr, (entry))
286 | |
287 | void _vm_map_entry_dispose(map_header, entry) |
288 | struct vm_map_header *map_header; |
289 | vm_map_entry_t entry; |
290 | { |
291 | kmem_cache_t cache; |
292 | |
293 | if (map_header->entries_pageable) |
294 | cache = &vm_map_entry_cache; |
295 | else |
296 | cache = &vm_map_kentry_cache; |
297 | |
298 | kmem_cache_free(cache, (vm_offset_t) entry); |
299 | } |
300 | |
301 | /* |
302 | * Red-black tree lookup/insert comparison functions |
303 | */ |
304 | static inline int vm_map_entry_cmp_lookup(vm_offset_t addr, |
305 | const struct rbtree_node *node) |
306 | { |
307 | struct vm_map_entry *entry; |
308 | |
309 | entry = rbtree_entry(node, struct vm_map_entry, tree_node);
310 | 
311 | if (addr < entry->vme_start)
312 | return -1;
313 | else if (addr < entry->vme_end)
314 | return 0; |
315 | else |
316 | return 1; |
317 | } |
318 | |
319 | static inline int vm_map_entry_cmp_insert(const struct rbtree_node *a, |
320 | const struct rbtree_node *b) |
321 | { |
322 | struct vm_map_entry *entry; |
323 | |
324 | entry = rbtree_entry(a, struct vm_map_entry, tree_node);
325 | return vm_map_entry_cmp_lookup(entry->vme_start, b);
326 | } |
327 | |
328 | /* |
329 | * vm_map_entry_{un,}link: |
330 | * |
331 | * Insert/remove entries from maps (or map copies). |
332 | * |
333 | * The start and end addresses of the entries must be properly set |
334 | * before using these macros. |
335 | */ |
336 | #define vm_map_entry_link(map, after_where, entry) \
337 | _vm_map_entry_link(&(map)->hdr, after_where, entry)
338 | 
339 | #define vm_map_copy_entry_link(copy, after_where, entry) \
340 | _vm_map_entry_link(&(copy)->cpy_hdr, after_where, entry)
341 | 
342 | #define _vm_map_entry_link(hdr, after_where, entry) \
343 | MACRO_BEGIN \
344 | (hdr)->nentries++; \
345 | (entry)->vme_prev = (after_where); \
346 | (entry)->vme_next = (after_where)->vme_next; \
347 | (entry)->vme_prev->vme_next = \
348 | (entry)->vme_next->vme_prev = (entry); \
349 | rbtree_insert(&(hdr)->tree, &(entry)->tree_node, \
350 | vm_map_entry_cmp_insert); \
351 | MACRO_END
352 | 
353 | #define vm_map_entry_unlink(map, entry) \
354 | _vm_map_entry_unlink(&(map)->hdr, entry)
355 | 
356 | #define vm_map_copy_entry_unlink(copy, entry) \
357 | _vm_map_entry_unlink(&(copy)->cpy_hdr, entry)
358 | 
359 | #define _vm_map_entry_unlink(hdr, entry) \
360 | MACRO_BEGIN \
361 | (hdr)->nentries--; \
362 | (entry)->vme_next->vme_prev = (entry)->vme_prev; \
363 | (entry)->vme_prev->vme_next = (entry)->vme_next; \
364 | rbtree_remove(&(hdr)->tree, &(entry)->tree_node); \
365 | MACRO_END
366 | |
367 | /* |
368 | * vm_map_reference: |
369 | * |
370 | * Creates another valid reference to the given map. |
371 | * |
372 | */ |
373 | void vm_map_reference(map) |
374 | vm_map_t map; |
375 | { |
376 | if (map == VM_MAP_NULL)
377 | return; |
378 | |
379 | simple_lock(&map->ref_lock); |
380 | map->ref_count++; |
381 | simple_unlock(&map->ref_lock); |
382 | } |
383 | |
384 | /* |
385 | * vm_map_deallocate: |
386 | * |
387 | * Removes a reference from the specified map, |
388 | * destroying it if no references remain. |
389 | * The map should not be locked. |
390 | */ |
391 | void vm_map_deallocate(map) |
392 | vm_map_t map; |
393 | { |
394 | int c; |
395 | |
396 | if (map == VM_MAP_NULL)
397 | return; |
398 | |
399 | simple_lock(&map->ref_lock); |
400 | c = --map->ref_count; |
401 | simple_unlock(&map->ref_lock); |
402 | |
403 | if (c > 0) { |
404 | return; |
405 | } |
406 | |
407 | projected_buffer_collect(map); |
408 | (void) vm_map_delete(map, map->min_offset, map->max_offset);
409 | |
410 | pmap_destroy(map->pmap); |
411 | |
412 | kmem_cache_free(&vm_map_cache, (vm_offset_t) map); |
413 | } |
414 | |
415 | /* |
416 | * SAVE_HINT: |
417 | * |
418 | * Saves the specified entry as the hint for |
419 | * future lookups. Performs necessary interlocks. |
420 | */ |
421 | #define SAVE_HINT(map,value) \
422 | simple_lock(&(map)->hint_lock); \ |
423 | (map)->hint = (value); \ |
424 | simple_unlock(&(map)->hint_lock); |
425 | |
426 | /* |
427 | * vm_map_lookup_entry: [ internal use only ] |
428 | * |
429 | * Finds the map entry containing (or |
430 | * immediately preceding) the specified address |
431 | * in the given map; the entry is returned |
432 | * in the "entry" parameter. The boolean |
433 | * result indicates whether the address is |
434 | * actually contained in the map. |
435 | */ |
436 | boolean_t vm_map_lookup_entry(map, address, entry) |
437 | vm_map_t map; |
438 | vm_offset_t address; |
439 | vm_map_entry_t *entry; /* OUT */ |
440 | { |
441 | struct rbtree_node *node; |
442 | vm_map_entry_t hint; |
443 | |
444 | /* |
445 | * First, make a quick check to see if we are already |
446 | * looking at the entry we want (which is often the case). |
447 | */ |
448 | |
449 | simple_lock(&map->hint_lock); |
450 | hint = map->hint; |
451 | simple_unlock(&map->hint_lock); |
452 | |
453 | if ((hint != vm_map_to_entry(map)) && (address >= hint->vme_start)) {
454 | if (address < hint->vme_end) {
455 | *entry = hint;
456 | return(TRUE);
457 | } else {
458 | vm_map_entry_t next = hint->vme_next;
459 | 
460 | if ((next == vm_map_to_entry(map))
461 | || (address < next->vme_start)) {
462 | *entry = hint;
463 | return(FALSE);
464 | } |
465 | } |
466 | } |
467 | |
468 | /* |
469 | * If the hint didn't help, use the red-black tree. |
470 | */ |
471 | |
472 | node = rbtree_lookup_nearest(&map->hdr.tree, address,
473 | vm_map_entry_cmp_lookup, RBTREE_LEFT);
474 | 
475 | if (node == NULL) {
476 | *entry = vm_map_to_entry(map);
477 | SAVE_HINT(map, *entry);
478 | return(FALSE);
479 | } else {
480 | *entry = rbtree_entry(node, struct vm_map_entry, tree_node);
481 | SAVE_HINT(map, *entry);
482 | return((address < (*entry)->vme_end) ? TRUE : FALSE);
483 | } |
484 | } |
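
The usual pattern in this module is to pair the lookup with clipping, so that the first affected entry begins exactly at the requested address; vm_map_submap() and vm_map_protect() below both do exactly this:

    if (vm_map_lookup_entry(map, start, &entry))
        vm_map_clip_start(map, entry, start);
    else
        entry = entry->vme_next;

If the address falls in a hole, the entry returned is the one preceding the hole, so stepping to vme_next yields the first entry at or after the address.
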
485 | |
486 | /* |
487 | * Routine: invalid_user_access |
488 | * |
489 | * Verifies whether user access is valid. |
490 | */ |
491 | |
492 | boolean_t |
493 | invalid_user_access(map, start, end, prot) |
494 | vm_map_t map; |
495 | vm_offset_t start, end; |
496 | vm_prot_t prot; |
497 | { |
498 | vm_map_entry_t entry; |
499 | |
500 | return (map == VM_MAP_NULL || map == kernel_map ||
501 | !vm_map_lookup_entry(map, start, &entry) ||
502 | entry->vme_end < end ||
503 | (prot & ~(entry->protection))); |
504 | } |
505 | |
506 | |
507 | /* |
508 | * Routine: vm_map_find_entry |
509 | * Purpose: |
510 | * Allocate a range in the specified virtual address map, |
511 | * returning the entry allocated for that range. |
512 | * Used by kmem_alloc, etc. Returns wired entries. |
513 | * |
514 | * The map must be locked. |
515 | * |
516 | * If an entry is allocated, the object/offset fields |
517 | * are initialized to zero. If an object is supplied, |
518 | * then an existing entry may be extended. |
519 | */ |
520 | kern_return_t vm_map_find_entry(map, address, size, mask, object, o_entry) |
521 | vm_map_t map; |
522 | vm_offset_t *address; /* OUT */ |
523 | vm_size_t size; |
524 | vm_offset_t mask; |
525 | vm_object_t object; |
526 | vm_map_entry_t *o_entry; /* OUT */ |
527 | { |
528 | vm_map_entry_t entry, new_entry; |
529 | vm_offset_t start; |
530 | vm_offset_t end; |
531 | |
532 | /* |
533 | * Look for the first possible address; |
534 | * if there's already something at this |
535 | * address, we have to start after it. |
536 | */ |
537 | |
538 | if ((entry = map->first_free) == vm_map_to_entry(map))
539 | start = map->min_offset;
540 | else
541 | start = entry->vme_end;
542 | |
543 | /* |
544 | * In any case, the "entry" always precedes |
545 | * the proposed new region throughout the loop: |
546 | */ |
547 | |
548 | while (TRUE) {
549 | vm_map_entry_t next; |
550 | |
551 | /* |
552 | * Find the end of the proposed new region. |
553 | * Be sure we didn't go beyond the end, or |
554 | * wrap around the address. |
555 | */ |
556 | |
557 | if (((start + mask) & ~mask) < start) { |
558 | printf_once("no more room for vm_map_find_entry in %p\n", map);
559 | return(KERN_NO_SPACE);
560 | }
561 | start = ((start + mask) & ~mask);
562 | end = start + size;
563 | 
564 | if ((end > map->max_offset) || (end < start)) {
565 | printf_once("no more room for vm_map_find_entry in %p\n", map);
566 | return(KERN_NO_SPACE);
567 | } |
568 | |
569 | /* |
570 | * If there are no more entries, we must win. |
571 | */ |
572 | |
573 | next = entry->vme_next;
574 | if (next == vm_map_to_entry(map))
575 | break; |
576 | |
577 | /* |
578 | * If there is another entry, it must be |
579 | * after the end of the potential new region. |
580 | */ |
581 | |
582 | if (next->vme_start >= end)
583 | break; |
584 | |
585 | /* |
586 | * Didn't fit -- move to the next entry. |
587 | */ |
588 | |
589 | entry = next; |
590 | start = entry->vme_end;
591 | } |
592 | |
593 | /* |
594 | * At this point, |
595 | * "start" and "end" should define the endpoints of the |
596 | * available new range, and |
597 | * "entry" should refer to the region before the new |
598 | * range, and |
599 | * |
600 | * the map should be locked. |
601 | */ |
602 | |
603 | *address = start; |
604 | |
605 | /* |
606 | * See whether we can avoid creating a new entry by |
607 | * extending one of our neighbors. [So far, we only attempt to |
608 | * extend from below.] |
609 | */ |
610 | |
611 | if ((object != VM_OBJECT_NULL) &&
612 | (entry != vm_map_to_entry(map)) &&
613 | (entry->vme_end == start) &&
614 | (!entry->is_shared) &&
615 | (!entry->is_sub_map) &&
616 | (entry->object.vm_object == object) &&
617 | (entry->needs_copy == FALSE) &&
618 | (entry->inheritance == VM_INHERIT_DEFAULT) &&
619 | (entry->protection == VM_PROT_DEFAULT) &&
620 | (entry->max_protection == VM_PROT_ALL) &&
621 | (entry->wired_count == 1) &&
622 | (entry->user_wired_count == 0) &&
623 | (entry->projected_on == 0)) {
624 | /* |
625 | * Because this is a special case, |
626 | * we don't need to use vm_object_coalesce. |
627 | */ |
628 | |
629 | entry->vme_end = end;
630 | new_entry = entry;
631 | } else {
632 | new_entry = vm_map_entry_create(map);
633 | 
634 | new_entry->vme_start = start;
635 | new_entry->vme_end = end;
636 | 
637 | new_entry->is_shared = FALSE;
638 | new_entry->is_sub_map = FALSE;
639 | new_entry->object.vm_object = VM_OBJECT_NULL;
640 | new_entry->offset = (vm_offset_t) 0;
641 | 
642 | new_entry->needs_copy = FALSE;
643 | 
644 | new_entry->inheritance = VM_INHERIT_DEFAULT;
645 | new_entry->protection = VM_PROT_DEFAULT;
646 | new_entry->max_protection = VM_PROT_ALL;
647 | new_entry->wired_count = 1;
648 | new_entry->user_wired_count = 0;
649 | 
650 | new_entry->in_transition = FALSE;
651 | new_entry->needs_wakeup = FALSE;
652 | new_entry->projected_on = 0;
653 | |
654 | /* |
655 | * Insert the new entry into the list |
656 | */ |
657 | |
658 | vm_map_entry_link(map, entry, new_entry);
659 | } |
660 | |
661 | map->size += size; |
662 | |
663 | /* |
664 | * Update the free space hint and the lookup hint |
665 | */ |
666 | |
667 | map->first_free = new_entry; |
668 | SAVE_HINT(map, new_entry);
669 | 
670 | *o_entry = new_entry;
671 | return(KERN_SUCCESS);
672 | } |
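
A hedged sketch of a caller in the style of kmem_alloc (simplified; the creation of the backing object is omitted and `object` is an assumption). The map must already be locked, and the returned entry's object/offset fields still have to be filled in by the caller:

    vm_map_lock(map);
    kr = vm_map_find_entry(map, &addr, size, (vm_offset_t) 0,
                           VM_OBJECT_NULL, &entry);
    if (kr != KERN_SUCCESS) {
        vm_map_unlock(map);
        return kr;
    }
    entry->object.vm_object = object;
    entry->offset = 0;
    vm_map_unlock(map);
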
673 | |
674 | int vm_map_pmap_enter_print = FALSE;
675 | int vm_map_pmap_enter_enable = FALSE;
676 | |
677 | /* |
678 | * Routine: vm_map_pmap_enter |
679 | * |
680 | * Description: |
681 | * Force pages from the specified object to be entered into |
682 | * the pmap at the specified address if they are present. |
683 | * The scan ends as soon as a page is not found in the object.
684 | * |
685 | * Returns: |
686 | * Nothing. |
687 | * |
688 | * In/out conditions: |
689 | * The source map should not be locked on entry. |
690 | */ |
691 | void |
692 | vm_map_pmap_enter(map, addr, end_addr, object, offset, protection) |
693 | vm_map_t map; |
694 | vm_offset_t addr; |
695 | vm_offset_t end_addr; |
696 | vm_object_t object; |
697 | vm_offset_t offset; |
698 | vm_prot_t protection; |
699 | { |
700 | while (addr < end_addr) { |
701 | vm_page_t m; |
702 | |
703 | vm_object_lock(object); |
704 | vm_object_paging_begin(object);
705 | 
706 | m = vm_page_lookup(object, offset);
707 | if (m == VM_PAGE_NULL || m->absent) {
708 | vm_object_paging_end(object);
709 | vm_object_unlock(object); |
710 | return; |
711 | } |
712 | |
713 | if (vm_map_pmap_enter_print) { |
714 | printf("vm_map_pmap_enter:"); |
715 | printf("map: %p, addr: %lx, object: %p, offset: %lx\n", |
716 | map, addr, object, offset); |
717 | } |
718 | |
719 | m->busy = TRUE;
720 | vm_object_unlock(object);
721 | 
722 | PMAP_ENTER(map->pmap, addr, m,
723 | protection, FALSE);
724 | 
725 | vm_object_lock(object);
726 | PAGE_WAKEUP_DONE(m);
727 | vm_page_lock_queues(); |
728 | if (!m->active && !m->inactive) |
729 | vm_page_activate(m); |
730 | vm_page_unlock_queues(); |
731 | vm_object_paging_end(object);
732 | vm_object_unlock(object);
733 | 
734 | offset += PAGE_SIZE;
735 | addr += PAGE_SIZE;
736 | } |
737 | } |
738 | |
739 | /* |
740 | * Routine: vm_map_enter |
741 | * |
742 | * Description: |
743 | * Allocate a range in the specified virtual address map. |
744 | * The resulting range will refer to memory defined by |
745 | * the given memory object and offset into that object. |
746 | * |
747 | * Arguments are as defined in the vm_map call. |
748 | */ |
749 | kern_return_t vm_map_enter( |
750 | map, |
751 | address, size, mask, anywhere, |
752 | object, offset, needs_copy, |
753 | cur_protection, max_protection, inheritance) |
754 | vm_map_t map; |
755 | vm_offset_t *address; /* IN/OUT */ |
756 | vm_size_t size; |
757 | vm_offset_t mask; |
758 | boolean_t anywhere; |
759 | vm_object_t object; |
760 | vm_offset_t offset; |
761 | boolean_t needs_copy; |
762 | vm_prot_t cur_protection; |
763 | vm_prot_t max_protection; |
764 | vm_inherit_t inheritance; |
765 | { |
766 | vm_map_entry_t entry; |
767 | vm_offset_t start; |
768 | vm_offset_t end; |
769 | kern_return_t result = KERN_SUCCESS;
770 | |
771 | #define RETURN(value) { result = value; goto BailOut; } |
772 | |
773 | if (size == 0) |
774 | return KERN_INVALID_ARGUMENT;
775 | |
776 | StartAgain: ; |
777 | |
778 | start = *address; |
779 | |
780 | if (anywhere) { |
781 | vm_map_lock(map);
782 | |
783 | /* |
784 | * Calculate the first possible address. |
785 | */ |
786 | |
787 | if (start < map->min_offset)
788 | start = map->min_offset;
789 | if (start > map->max_offset)
790 | RETURN(KERN_NO_SPACE);
791 | |
792 | /* |
793 | * Look for the first possible address; |
794 | * if there's already something at this |
795 | * address, we have to start after it. |
796 | */ |
797 | |
798 | if (start == map->min_offset) {
799 | if ((entry = map->first_free) != vm_map_to_entry(map))
800 | start = entry->vme_end;
801 | } else {
802 | vm_map_entry_t tmp_entry;
803 | if (vm_map_lookup_entry(map, start, &tmp_entry))
804 | start = tmp_entry->vme_end;
805 | entry = tmp_entry; |
806 | } |
807 | |
808 | /* |
809 | * In any case, the "entry" always precedes |
810 | * the proposed new region throughout the |
811 | * loop: |
812 | */ |
813 | |
814 | while (TRUE) {
815 | vm_map_entry_t next; |
816 | |
817 | /* |
818 | * Find the end of the proposed new region. |
819 | * Be sure we didn't go beyond the end, or |
820 | * wrap around the address. |
821 | */ |
822 | |
823 | if (((start + mask) & ~mask) < start) { |
824 | printf_once("no more room for vm_map_enter in %p\n", map);
825 | RETURN(KERN_NO_SPACE);
826 | } |
827 | start = ((start + mask) & ~mask); |
828 | end = start + size; |
829 | |
830 | if ((end > map->max_offset) || (end < start)) {
831 | if (map->wait_for_space) {
832 | if (size <= (map->max_offset -
833 | map->min_offset)) {
834 | assert_wait((event_t) map, TRUE);
835 | vm_map_unlock(map);
836 | thread_block((void (*)()) 0);
837 | goto StartAgain;
838 | }
839 | }
840 | 
841 | printf_once("no more room for vm_map_enter in %p\n", map);
842 | RETURN(KERN_NO_SPACE);
843 | } |
844 | |
845 | /* |
846 | * If there are no more entries, we must win. |
847 | */ |
848 | |
849 | next = entry->vme_next;
850 | if (next == vm_map_to_entry(map))
851 | break; |
852 | |
853 | /* |
854 | * If there is another entry, it must be |
855 | * after the end of the potential new region. |
856 | */ |
857 | |
858 | if (next->vme_start >= end)
859 | break; |
860 | |
861 | /* |
862 | * Didn't fit -- move to the next entry. |
863 | */ |
864 | |
865 | entry = next; |
866 | start = entry->vme_end;
867 | } |
868 | *address = start; |
869 | } else { |
870 | vm_map_entry_t temp_entry; |
871 | |
872 | /* |
873 | * Verify that: |
874 | * the address doesn't itself violate |
875 | * the mask requirement. |
876 | */ |
877 | |
878 | if ((start & mask) != 0) |
879 | return(KERN_NO_SPACE);
880 | 
881 | vm_map_lock(map);
882 | |
883 | /* |
884 | * ... the address is within bounds |
885 | */ |
886 | |
887 | end = start + size; |
888 | |
889 | if ((start < map->min_offset) ||
890 | (end > map->max_offset) ||
891 | (start >= end)) {
892 | RETURN(KERN_INVALID_ADDRESS);
893 | } |
894 | |
895 | /* |
896 | * ... the starting address isn't allocated |
897 | */ |
898 | |
899 | if (vm_map_lookup_entry(map, start, &temp_entry)) |
900 | RETURN(KERN_NO_SPACE);
901 | |
902 | entry = temp_entry; |
903 | |
904 | /* |
905 | * ... the next region doesn't overlap the |
906 | * end point. |
907 | */ |
908 | |
909 | if ((entry->vme_next != vm_map_to_entry(map)) &&
910 | (entry->vme_next->vme_start < end))
911 | RETURN(KERN_NO_SPACE);
912 | } |
913 | |
914 | /* |
915 | * At this point, |
916 | * "start" and "end" should define the endpoints of the |
917 | * available new range, and |
918 | * "entry" should refer to the region before the new |
919 | * range, and |
920 | * |
921 | * the map should be locked. |
922 | */ |
923 | |
924 | /* |
925 | * See whether we can avoid creating a new entry (and object) by |
926 | * extending one of our neighbors. [So far, we only attempt to |
927 | * extend from below.] |
928 | */ |
929 | |
930 | if ((object == VM_OBJECT_NULL) &&
931 | (entry != vm_map_to_entry(map)) &&
932 | (entry->vme_end == start) &&
933 | (!entry->is_shared) && |
934 | (!entry->is_sub_map) && |
935 | (entry->inheritance == inheritance) && |
936 | (entry->protection == cur_protection) && |
937 | (entry->max_protection == max_protection) && |
938 | (entry->wired_count == 0) && /* implies user_wired_count == 0 */ |
939 | (entry->projected_on == 0)) { |
940 | if (vm_object_coalesce(entry->object.vm_object, |
941 | VM_OBJECT_NULL,
942 | entry->offset,
943 | (vm_offset_t) 0,
944 | (vm_size_t)(entry->vme_end - entry->vme_start),
945 | (vm_size_t)(end - entry->vme_end))) {
946 | |
947 | /* |
948 | * Coalesced the two objects - can extend |
949 | * the previous map entry to include the |
950 | * new range. |
951 | */ |
952 | map->size += (end - entry->vme_end);
953 | entry->vme_end = end;
954 | RETURN(KERN_SUCCESS);
955 | } |
956 | } |
957 | |
958 | /* |
959 | * Create a new entry |
960 | */ |
961 | |
962 | /**/ { |
963 | vm_map_entry_t new_entry; |
964 | |
965 | new_entry = vm_map_entry_create(map);
966 | 
967 | new_entry->vme_start = start;
968 | new_entry->vme_end = end;
969 | 
970 | new_entry->is_shared = FALSE;
971 | new_entry->is_sub_map = FALSE;
972 | new_entry->object.vm_object = object;
973 | new_entry->offset = offset;
974 | 
975 | new_entry->needs_copy = needs_copy;
976 | 
977 | new_entry->inheritance = inheritance;
978 | new_entry->protection = cur_protection;
979 | new_entry->max_protection = max_protection;
980 | new_entry->wired_count = 0;
981 | new_entry->user_wired_count = 0;
982 | 
983 | new_entry->in_transition = FALSE;
984 | new_entry->needs_wakeup = FALSE;
985 | new_entry->projected_on = 0; |
986 | |
987 | /* |
988 | * Insert the new entry into the list |
989 | */ |
990 | |
991 | vm_map_entry_link(map, entry, new_entry);
992 | map->size += size; |
993 | |
994 | /* |
995 | * Update the free space hint and the lookup hint |
996 | */ |
997 | |
998 | if ((map->first_free == entry) && |
999 | ((entry == vm_map_to_entry(map) ? map->min_offset : entry->vme_end)
1000 | >= new_entry->vme_start))
1001 | map->first_free = new_entry; |
1002 | |
1003 | SAVE_HINT(map, new_entry);
1004 | 
1005 | vm_map_unlock(map);
1006 | |
1007 | if ((object != VM_OBJECT_NULL) &&
1008 | (vm_map_pmap_enter_enable) && |
1009 | (!anywhere) && |
1010 | (!needs_copy) && |
1011 | (size < (128*1024))) { |
1012 | vm_map_pmap_enter(map, start, end, |
1013 | object, offset, cur_protection); |
1014 | } |
1015 | |
1016 | return(result); |
1017 | /**/ } |
1018 | |
1019 | BailOut: ; |
1020 | |
1021 | vm_map_unlock(map);
1022 | return(result); |
1023 | |
1024 | #undef RETURN |
1025 | } |
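
A hedged example of a typical caller, in the style of the vm_allocate path (simplified): anonymous zero-fill memory placed anywhere in the map, with object creation deferred until the first fault:

    kr = vm_map_enter(map, &addr, round_page(size), (vm_offset_t) 0, TRUE,
                      VM_OBJECT_NULL, (vm_offset_t) 0, FALSE,
                      VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT);

Passing a real object and offset instead maps existing memory, and anywhere == FALSE turns *address into a hard requirement checked against the existing entries.
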
1026 | |
1027 | /* |
1028 | * vm_map_clip_start: [ internal use only ] |
1029 | * |
1030 | * Asserts that the given entry begins at or after |
1031 | * the specified address; if necessary, |
1032 | * it splits the entry into two. |
1033 | */ |
1034 | void _vm_map_clip_start(); |
1035 | #define vm_map_clip_start(map, entry, startaddr) \
1036 | MACRO_BEGIN \
1037 | if ((startaddr) > (entry)->vme_start) \
1038 | _vm_map_clip_start(&(map)->hdr,(entry),(startaddr)); \
1039 | MACRO_END
1040 | 
1041 | void _vm_map_copy_clip_start();
1042 | #define vm_map_copy_clip_start(copy, entry, startaddr) \
1043 | MACRO_BEGIN \
1044 | if ((startaddr) > (entry)->vme_start) \
1045 | _vm_map_clip_start(&(copy)->cpy_hdr,(entry),(startaddr)); \
1046 | MACRO_END
1047 | |
1048 | /* |
1049 | * This routine is called only when it is known that |
1050 | * the entry must be split. |
1051 | */ |
1052 | void _vm_map_clip_start(map_header, entry, start) |
1053 | struct vm_map_header *map_header; |
1054 | vm_map_entry_t entry; |
1055 | vm_offset_t start; |
1056 | { |
1057 | vm_map_entry_t new_entry; |
1058 | |
1059 | /* |
1060 | * Split off the front portion -- |
1061 | * note that we must insert the new |
1062 | * entry BEFORE this one, so that |
1063 | * this entry has the specified starting |
1064 | * address. |
1065 | */ |
1066 | |
1067 | new_entry = _vm_map_entry_create(map_header); |
1068 | vm_map_entry_copy_full(new_entry, entry);
1069 | 
1070 | new_entry->vme_end = start;
1071 | entry->offset += (start - entry->vme_start);
1072 | entry->vme_start = start;
1073 | 
1074 | _vm_map_entry_link(map_header, entry->vme_prev, new_entry);
1075 | |
1076 | if (entry->is_sub_map) |
1077 | vm_map_reference(new_entry->object.sub_map); |
1078 | else |
1079 | vm_object_reference(new_entry->object.vm_object); |
1080 | } |
1081 | |
1082 | /* |
1083 | * vm_map_clip_end: [ internal use only ] |
1084 | * |
1085 | * Asserts that the given entry ends at or before |
1086 | * the specified address; if necessary, |
1087 | * it splits the entry into two. |
1088 | */ |
1089 | void _vm_map_clip_end(); |
1090 | #define vm_map_clip_end(map, entry, endaddr) \
1091 | MACRO_BEGIN \
1092 | if ((endaddr) < (entry)->vme_end) \
1093 | _vm_map_clip_end(&(map)->hdr,(entry),(endaddr)); \
1094 | MACRO_END
1095 | 
1096 | void _vm_map_copy_clip_end();
1097 | #define vm_map_copy_clip_end(copy, entry, endaddr) \
1098 | MACRO_BEGIN \
1099 | if ((endaddr) < (entry)->vme_end) \
1100 | _vm_map_clip_end(&(copy)->cpy_hdr,(entry),(endaddr)); \
1101 | MACRO_END
1102 | |
1103 | /* |
1104 | * This routine is called only when it is known that |
1105 | * the entry must be split. |
1106 | */ |
1107 | void _vm_map_clip_end(map_header, entry, end) |
1108 | struct vm_map_header *map_header; |
1109 | vm_map_entry_t entry; |
1110 | vm_offset_t end; |
1111 | { |
1112 | vm_map_entry_t new_entry; |
1113 | |
1114 | /* |
1115 | * Create a new entry and insert it |
1116 | * AFTER the specified entry |
1117 | */ |
1118 | |
1119 | new_entry = _vm_map_entry_create(map_header); |
1120 | vm_map_entry_copy_full(new_entry, entry);
1121 | 
1122 | new_entry->vme_start = entry->vme_end = end;
1123 | new_entry->offset += (end - entry->vme_start);
1124 | 
1125 | _vm_map_entry_link(map_header, entry, new_entry);
1126 | |
1127 | if (entry->is_sub_map) |
1128 | vm_map_reference(new_entry->object.sub_map); |
1129 | else |
1130 | vm_object_reference(new_entry->object.vm_object); |
1131 | } |
1132 | |
1133 | /* |
1134 | * VM_MAP_RANGE_CHECK: [ internal use only ] |
1135 | * |
1136 | * Asserts that the starting and ending region |
1137 | * addresses fall within the valid range of the map. |
1138 | */ |
1139 | #define VM_MAP_RANGE_CHECK(map, start, end) \
1140 | { \
1141 | if (start < vm_map_min(map)) \
1142 | start = vm_map_min(map); \
1143 | if (end > vm_map_max(map)) \
1144 | end = vm_map_max(map); \
1145 | if (start > end) \ |
1146 | start = end; \ |
1147 | } |
1148 | |
1149 | /* |
1150 | * vm_map_submap: [ kernel use only ] |
1151 | * |
1152 | * Mark the given range as handled by a subordinate map. |
1153 | * |
1154 | * This range must have been created with vm_map_find using |
1155 | * the vm_submap_object, and no other operations may have been |
1156 | * performed on this range prior to calling vm_map_submap. |
1157 | * |
1158 | * Only a limited number of operations can be performed |
1159 | * within this range after calling vm_map_submap:
1160 | * vm_fault |
1161 | * [Don't try vm_map_copyin!] |
1162 | * |
1163 | * To remove a submapping, one must first remove the |
1164 | * range from the superior map, and then destroy the |
1165 | * submap (if desired). [Better yet, don't try it.] |
1166 | */ |
1167 | kern_return_t vm_map_submap(map, start, end, submap) |
1168 | vm_map_t map; |
1169 | vm_offset_t start; |
1170 | vm_offset_t end; |
1171 | vm_map_t submap; |
1172 | { |
1173 | vm_map_entry_t entry; |
1174 | kern_return_t result = KERN_INVALID_ARGUMENT;
1175 | vm_object_t object; |
1176 | |
1177 | vm_map_lock(map);
1178 | 
1179 | VM_MAP_RANGE_CHECK(map, start, end);
1180 | 
1181 | if (vm_map_lookup_entry(map, start, &entry)) {
1182 | vm_map_clip_start(map, entry, start);
1183 | }
1184 | else
1185 | entry = entry->vme_next;
1186 | 
1187 | vm_map_clip_end(map, entry, end);
1188 | |
1189 | if ((entry->vme_start == start) && (entry->vme_end == end) &&
1190 | (!entry->is_sub_map) &&
1191 | ((object = entry->object.vm_object) == vm_submap_object) &&
1192 | (object->resident_page_count == 0) &&
1193 | (object->copy == VM_OBJECT_NULL) &&
1194 | (object->shadow == VM_OBJECT_NULL) &&
1195 | (!object->pager_created)) { |
1196 | entry->object.vm_object = VM_OBJECT_NULL;
1197 | vm_object_deallocate(object);
1198 | entry->is_sub_map = TRUE;
1199 | vm_map_reference(entry->object.sub_map = submap);
1200 | result = KERN_SUCCESS;
1201 | }
1202 | vm_map_unlock(map);
1203 | |
1204 | return(result); |
1205 | } |
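
Per the rules stated above, a hedged sketch of how a kernel subsystem might carve out a submap (in the style of kmem_suballoc; simplified, and the identifiers parent, child_map, addr and size are assumptions):

    vm_object_reference(vm_submap_object);
    kr = vm_map_enter(parent, &addr, size, (vm_offset_t) 0, TRUE,
                      vm_submap_object, (vm_offset_t) 0, FALSE,
                      VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT);
    /* ... create child_map covering [addr, addr + size) ... */
    kr = vm_map_submap(parent, addr, addr + size, child_map);
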
1206 | |
1207 | /* |
1208 | * vm_map_protect: |
1209 | * |
1210 | * Sets the protection of the specified address |
1211 | * region in the target map. If "set_max" is |
1212 | * specified, the maximum protection is to be set; |
1213 | * otherwise, only the current protection is affected. |
1214 | */ |
1215 | kern_return_t vm_map_protect(map, start, end, new_prot, set_max) |
1216 | vm_map_t map; |
1217 | vm_offset_t start; |
1218 | vm_offset_t end; |
1219 | vm_prot_t new_prot; |
1220 | boolean_t set_max; |
1221 | { |
1222 | vm_map_entry_t current; |
1223 | vm_map_entry_t entry; |
1224 | |
1225 | vm_map_lock(map);
1226 | 
1227 | VM_MAP_RANGE_CHECK(map, start, end);
1228 | 
1229 | if (vm_map_lookup_entry(map, start, &entry)) {
1230 | vm_map_clip_start(map, entry, start);
1231 | }
1232 | else
1233 | entry = entry->vme_next;
1234 | |
1235 | /* |
1236 | * Make a first pass to check for protection |
1237 | * violations. |
1238 | */ |
1239 | |
1240 | current = entry; |
1241 | while ((current != vm_map_to_entry(map)) &&
1242 | (current->vme_start < end)) {
1243 | 
1244 | if (current->is_sub_map) {
1245 | vm_map_unlock(map);
1246 | return(KERN_INVALID_ARGUMENT);
1247 | }
1248 | if ((new_prot & (VM_PROT_NOTIFY | current->max_protection))
1249 | != new_prot) {
1250 | vm_map_unlock(map);
1251 | return(KERN_PROTECTION_FAILURE);
1252 | }
1253 | 
1254 | current = current->vme_next;
1255 | } |
1256 | |
1257 | /* |
1258 | * Go back and fix up protections. |
1259 | * [Note that clipping is not necessary the second time.] |
1260 | */ |
1261 | |
1262 | current = entry; |
1263 | |
1264 | while ((current != vm_map_to_entry(map)) &&
1265 | (current->vme_start < end)) {
1266 | 
1267 | vm_prot_t old_prot;
1268 | 
1269 | vm_map_clip_end(map, current, end);
1270 | |
1271 | old_prot = current->protection; |
1272 | if (set_max) |
1273 | current->protection = |
1274 | (current->max_protection = new_prot) & |
1275 | old_prot; |
1276 | else |
1277 | current->protection = new_prot; |
1278 | |
1279 | /* |
1280 | * Update physical map if necessary. |
1281 | */ |
1282 | |
1283 | if (current->protection != old_prot) { |
1284 | pmap_protect(map->pmap, current->vme_start,
1285 | current->vme_end,
1286 | current->protection);
1287 | }
1288 | current = current->vme_next;
1289 | } |
1290 | |
1291 | vm_map_unlock(map);
1292 | return(KERN_SUCCESS);
1293 | } |
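
A hedged usage example in the style of the vm_protect user call (simplified): make a page-aligned range read-only without touching the maximum protection:

    kr = vm_map_protect(map, trunc_page(start), round_page(start + size),
                        VM_PROT_READ, FALSE);

With set_max == TRUE the same call would also lower max_protection; the first-pass check above prevents it from ever being raised again.
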
1294 | |
1295 | /* |
1296 | * vm_map_inherit: |
1297 | * |
1298 | * Sets the inheritance of the specified address |
1299 | * range in the target map. Inheritance |
1300 | * affects how the map will be shared with |
1301 | * child maps at the time of vm_map_fork. |
1302 | */ |
1303 | kern_return_t vm_map_inherit(map, start, end, new_inheritance) |
1304 | vm_map_t map; |
1305 | vm_offset_t start; |
1306 | vm_offset_t end; |
1307 | vm_inherit_t new_inheritance; |
1308 | { |
1309 | vm_map_entry_t entry; |
1310 | vm_map_entry_t temp_entry; |
1311 | |
1312 | vm_map_lock(map)({ lock_write(&(map)->lock); (map)->timestamp++; }); |
1313 | |
1314 | VM_MAP_RANGE_CHECK(map, start, end){ if (start < ((map)->hdr.links.start)) start = ((map)-> hdr.links.start); if (end > ((map)->hdr.links.end)) end = ((map)->hdr.links.end); if (start > end) start = end ; }; |
1315 | |
1316 | if (vm_map_lookup_entry(map, start, &temp_entry)) { |
1317 | entry = temp_entry; |
1318 | vm_map_clip_start(map, entry, start)({ if ((start) > (entry)->links.start) _vm_map_clip_start (&(map)->hdr,(entry),(start)); }); |
1319 | } |
1320 | else |
1321 | entry = temp_entry->vme_nextlinks.next; |
1322 | |
1323 | while ((entry != vm_map_to_entry(map)((struct vm_map_entry *) &(map)->hdr.links)) && (entry->vme_startlinks.start < end)) { |
1324 | vm_map_clip_end(map, entry, end)({ if ((end) < (entry)->links.end) _vm_map_clip_end(& (map)->hdr,(entry),(end)); }); |
1325 | |
1326 | entry->inheritance = new_inheritance; |
1327 | |
1328 | entry = entry->vme_nextlinks.next; |
1329 | } |
1330 | |
1331 | vm_map_unlock(map)lock_done(&(map)->lock); |
1332 | return(KERN_SUCCESS0); |
1333 | } |
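
/*
 * Illustrative sketch, not part of the original source: a hypothetical
 * caller that resets the inheritance of a range before a fork.  Only
 * VM_INHERIT_DEFAULT is used because it is the value visible in this
 * file; the other vm_inherit_t values are applied the same way.
 */
static kern_return_t
example_set_fork_policy(vm_map_t map, vm_offset_t start, vm_offset_t end)
{
	/*
	 * Every entry overlapping [start, end) is clipped to the range
	 * and given the new inheritance, which vm_map_fork consults
	 * when building the child map.
	 */
	return vm_map_inherit(map, start, end, VM_INHERIT_DEFAULT);
}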
1334 | |
1335 | /* |
1336 | * vm_map_pageable_common: |
1337 | * |
1338 | * Sets the pageability of the specified address |
1339 | * range in the target map. Regions specified |
1340 | * as not pageable require locked-down physical |
1341 | * memory and physical page maps. access_type indicates the
1342 | * types of accesses that must not generate page faults;
1343 | * it is checked against the protection of the memory being locked down.
1344 | * An access_type of VM_PROT_NONE makes the memory pageable.
1345 | * |
1346 | * The map must not be locked, but a reference |
1347 | * must remain to the map throughout the call. |
1348 | * |
1349 | * Callers should use the macros in vm/vm_map.h (i.e. vm_map_pageable
1350 | * or vm_map_pageable_user); don't call vm_map_pageable_common directly.
1351 | */ |
1352 | kern_return_t vm_map_pageable_common(map, start, end, access_type, user_wire) |
1353 | vm_map_t map; |
1354 | vm_offset_t start; |
1355 | vm_offset_t end; |
1356 | vm_prot_t access_type; |
1357 | boolean_t user_wire; |
1358 | { |
1359 | vm_map_entry_t entry; |
1360 | vm_map_entry_t start_entry; |
1361 | |
1362 | vm_map_lock(map)({ lock_write(&(map)->lock); (map)->timestamp++; }); |
1363 | |
1364 | VM_MAP_RANGE_CHECK(map, start, end){ if (start < ((map)->hdr.links.start)) start = ((map)-> hdr.links.start); if (end > ((map)->hdr.links.end)) end = ((map)->hdr.links.end); if (start > end) start = end ; }; |
1365 | |
1366 | if (vm_map_lookup_entry(map, start, &start_entry)) { |
1367 | entry = start_entry; |
1368 | /* |
1369 | * vm_map_clip_start will be done later. |
1370 | */ |
1371 | } |
1372 | else { |
1373 | /* |
1374 | * Start address is not in map; this is fatal. |
1375 | */ |
1376 | vm_map_unlock(map)lock_done(&(map)->lock); |
1377 | return(KERN_FAILURE5); |
1378 | } |
1379 | |
1380 | /* |
1381 | * Actions are rather different for wiring and unwiring, |
1382 | * so we have two separate cases. |
1383 | */ |
1384 | |
1385 | if (access_type == VM_PROT_NONE((vm_prot_t) 0x00)) { |
1386 | |
1387 | vm_map_clip_start(map, entry, start)({ if ((start) > (entry)->links.start) _vm_map_clip_start (&(map)->hdr,(entry),(start)); }); |
1388 | |
1389 | /* |
1390 | * Unwiring. First ensure that the range to be |
1391 | * unwired is really wired down. |
1392 | */ |
1393 | while ((entry != vm_map_to_entry(map)((struct vm_map_entry *) &(map)->hdr.links)) && |
1394 | (entry->vme_startlinks.start < end)) { |
1395 | |
1396 | if ((entry->wired_count == 0) || |
1397 | ((entry->vme_endlinks.end < end) && |
1398 | ((entry->vme_nextlinks.next == vm_map_to_entry(map)((struct vm_map_entry *) &(map)->hdr.links)) || |
1399 | (entry->vme_nextlinks.next->vme_startlinks.start > entry->vme_endlinks.end))) || |
1400 | (user_wire && (entry->user_wired_count == 0))) { |
1401 | vm_map_unlock(map)lock_done(&(map)->lock); |
1402 | return(KERN_INVALID_ARGUMENT4); |
1403 | } |
1404 | entry = entry->vme_nextlinks.next; |
1405 | } |
1406 | |
1407 | /* |
1408 | * Now decrement the wiring count for each region. |
1409 | * If a region becomes completely unwired, |
1410 | * unwire its physical pages and mappings. |
1411 | */ |
1412 | entry = start_entry; |
1413 | while ((entry != vm_map_to_entry(map)((struct vm_map_entry *) &(map)->hdr.links)) && |
1414 | (entry->vme_startlinks.start < end)) { |
1415 | vm_map_clip_end(map, entry, end)({ if ((end) < (entry)->links.end) _vm_map_clip_end(& (map)->hdr,(entry),(end)); }); |
1416 | |
1417 | if (user_wire) { |
1418 | if (--(entry->user_wired_count) == 0) |
1419 | entry->wired_count--; |
1420 | } |
1421 | else { |
1422 | entry->wired_count--; |
1423 | } |
1424 | |
1425 | if (entry->wired_count == 0) |
1426 | vm_fault_unwire(map, entry); |
1427 | |
1428 | entry = entry->vme_nextlinks.next; |
1429 | } |
1430 | } |
1431 | |
1432 | else { |
1433 | /* |
1434 | * Wiring. We must do this in two passes: |
1435 | * |
1436 | * 1. Holding the write lock, we create any shadow |
1437 | * or zero-fill objects that need to be created. |
1438 | * Then we clip each map entry to the region to be |
1439 | * wired and increment its wiring count. We |
1440 | * create objects before clipping the map entries |
1441 | * to avoid object proliferation. |
1442 | * |
1443 | * 2. We downgrade to a read lock, and call |
1444 | * vm_fault_wire to fault in the pages for any |
1445 | * newly wired area (wired_count is 1). |
1446 | * |
1447 | * Downgrading to a read lock for vm_fault_wire avoids |
1448 | * a possible deadlock with another thread that may have |
1449 | * faulted on one of the pages to be wired (it would mark |
1450 | * the page busy, blocking us, and then in turn block on the
1451 | * map lock that we hold). Because of problems in the |
1452 | * recursive lock package, we cannot upgrade to a write |
1453 | * lock in vm_map_lookup. Thus, any actions that require |
1454 | * the write lock must be done beforehand. Because we |
1455 | * keep the read lock on the map, the copy-on-write |
1456 | * status of the entries we modify here cannot change. |
1457 | */ |
1458 | |
1459 | /* |
1460 | * Pass 1. |
1461 | */ |
1462 | while ((entry != vm_map_to_entry(map)((struct vm_map_entry *) &(map)->hdr.links)) && |
1463 | (entry->vme_startlinks.start < end)) { |
1464 | vm_map_clip_end(map, entry, end)({ if ((end) < (entry)->links.end) _vm_map_clip_end(& (map)->hdr,(entry),(end)); }); |
1465 | |
1466 | if (entry->wired_count == 0) { |
1467 | |
1468 | /* |
1469 | * Perform actions of vm_map_lookup that need |
1470 | * the write lock on the map: create a shadow |
1471 | * object for a copy-on-write region, or an |
1472 | * object for a zero-fill region. |
1473 | */ |
1474 | if (entry->needs_copy && |
1475 | ((entry->protection & VM_PROT_WRITE((vm_prot_t) 0x02)) != 0)) { |
1476 | |
1477 | vm_object_shadow(&entry->object.vm_object, |
1478 | &entry->offset, |
1479 | (vm_size_t)(entry->vme_endlinks.end |
1480 | - entry->vme_startlinks.start)); |
1481 | entry->needs_copy = FALSE((boolean_t) 0); |
1482 | } |
1483 | if (entry->object.vm_object == VM_OBJECT_NULL((vm_object_t) 0)) { |
1484 | entry->object.vm_object = |
1485 | vm_object_allocate( |
1486 | (vm_size_t)(entry->vme_endlinks.end |
1487 | - entry->vme_startlinks.start)); |
1488 | entry->offset = (vm_offset_t)0; |
1489 | } |
1490 | } |
1491 | vm_map_clip_start(map, entry, start)({ if ((start) > (entry)->links.start) _vm_map_clip_start (&(map)->hdr,(entry),(start)); }); |
1492 | vm_map_clip_end(map, entry, end)({ if ((end) < (entry)->links.end) _vm_map_clip_end(& (map)->hdr,(entry),(end)); }); |
1493 | |
1494 | if (user_wire) { |
1495 | if ((entry->user_wired_count)++ == 0) |
1496 | entry->wired_count++; |
1497 | } |
1498 | else { |
1499 | entry->wired_count++; |
1500 | } |
1501 | |
1502 | /* |
1503 | * Check for holes and protection mismatch. |
1504 | * Holes: Next entry should be contiguous unless |
1505 | * this is the end of the region. |
1506 | * Protection: Access requested must be allowed. |
1507 | */ |
1508 | if (((entry->vme_endlinks.end < end) && |
1509 | ((entry->vme_nextlinks.next == vm_map_to_entry(map)((struct vm_map_entry *) &(map)->hdr.links)) || |
1510 | (entry->vme_nextlinks.next->vme_startlinks.start > entry->vme_endlinks.end))) || |
1511 | ((entry->protection & access_type) != access_type)) { |
1512 | /* |
1513 | * Found a hole or protection problem. |
1514 | * Object creation actions |
1515 | * do not need to be undone, but the |
1516 | * wired counts need to be restored. |
1517 | */ |
1518 | while ((entry != vm_map_to_entry(map)((struct vm_map_entry *) &(map)->hdr.links)) && |
1519 | (entry->vme_endlinks.end > start)) { |
1520 | if (user_wire) { |
1521 | if (--(entry->user_wired_count) == 0) |
1522 | entry->wired_count--; |
1523 | } |
1524 | else { |
1525 | entry->wired_count--; |
1526 | } |
1527 | |
1528 | entry = entry->vme_prevlinks.prev; |
1529 | } |
1530 | |
1531 | vm_map_unlock(map)lock_done(&(map)->lock); |
1532 | return(KERN_FAILURE5); |
1533 | } |
1534 | entry = entry->vme_nextlinks.next; |
1535 | } |
1536 | |
1537 | /* |
1538 | * Pass 2. |
1539 | */ |
1540 | |
1541 | /* |
1542 | * HACK HACK HACK HACK |
1543 | * |
1544 | * If we are wiring in the kernel map or a submap of it, |
1545 | * unlock the map to avoid deadlocks. We trust that the |
1546 | * kernel threads are well-behaved, and therefore will |
1547 | * not do anything destructive to this region of the map |
1548 | * while we have it unlocked. We cannot trust user threads |
1549 | * to do the same. |
1550 | * |
1551 | * HACK HACK HACK HACK |
1552 | */ |
1553 | if (vm_map_pmap(map)((map)->pmap) == kernel_pmap) { |
1554 | vm_map_unlock(map)lock_done(&(map)->lock); /* trust me ... */ |
1555 | } |
1556 | else { |
1557 | vm_map_lock_set_recursive(map)lock_set_recursive(&(map)->lock); |
1558 | vm_map_lock_write_to_read(map)lock_write_to_read(&(map)->lock); |
1559 | } |
1560 | |
1561 | entry = start_entry; |
1562 | while (entry != vm_map_to_entry(map)((struct vm_map_entry *) &(map)->hdr.links) && |
1563 | entry->vme_startlinks.start < end) { |
1564 | /* |
1565 | * Wiring cases: |
1566 | * Kernel: wired == 1 && user_wired == 0 |
1567 | * User: wired == 1 && user_wired == 1 |
1568 | * |
1569 | * Don't need to wire if either is > 1. wired == 0 &&
1570 | * user_wired == 1 can't happen. |
1571 | */ |
1572 | |
1573 | /* |
1574 | * XXX This assumes that the faults always succeed. |
1575 | */ |
1576 | if ((entry->wired_count == 1) && |
1577 | (entry->user_wired_count <= 1)) { |
1578 | vm_fault_wire(map, entry); |
1579 | } |
1580 | entry = entry->vme_nextlinks.next; |
1581 | } |
1582 | |
1583 | if (vm_map_pmap(map)((map)->pmap) == kernel_pmap) { |
1584 | vm_map_lock(map)({ lock_write(&(map)->lock); (map)->timestamp++; }); |
1585 | } |
1586 | else { |
1587 | vm_map_lock_clear_recursive(map)lock_clear_recursive(&(map)->lock); |
1588 | } |
1589 | } |
1590 | |
1591 | vm_map_unlock(map)lock_done(&(map)->lock); |
1592 | |
1593 | return(KERN_SUCCESS0); |
1594 | } |
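
/*
 * Illustrative sketch, not part of the original source: a hypothetical
 * kernel-initiated wire and unwire of a buffer.  It calls
 * vm_map_pageable_common directly only so that every parameter is
 * visible; real callers go through the vm_map_pageable or
 * vm_map_pageable_user macros in vm/vm_map.h, as noted above.
 */
static kern_return_t
example_wire_buffer(vm_map_t map, vm_offset_t start, vm_offset_t end)
{
	kern_return_t kr;

	/* Wire for read/write: faults for these accesses must not occur. */
	kr = vm_map_pageable_common(map, start, end,
				    VM_PROT_READ | VM_PROT_WRITE, FALSE);
	if (kr != KERN_SUCCESS)
		return kr;

	/* ... operate on the wired range ... */

	/* An access_type of VM_PROT_NONE makes the range pageable again. */
	return vm_map_pageable_common(map, start, end, VM_PROT_NONE, FALSE);
}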
1595 | |
1596 | /* |
1597 | * vm_map_entry_delete: [ internal use only ] |
1598 | * |
1599 | * Deallocate the given entry from the target map. |
1600 | */ |
1601 | void vm_map_entry_delete(map, entry) |
1602 | vm_map_t map; |
1603 | vm_map_entry_t entry; |
1604 | { |
1605 | vm_offset_t s, e; |
1606 | vm_object_t object; |
1607 | extern vm_object_t kernel_object; |
1608 | |
1609 | s = entry->vme_startlinks.start; |
1610 | e = entry->vme_endlinks.end; |
1611 | |
1612 | /*Check if projected buffer*/ |
1613 | if (map != kernel_map && entry->projected_on != 0) { |
1614 | /*Check if projected kernel entry is persistent; |
1615 | may only manipulate directly if it is*/ |
1616 | if (entry->projected_on->projected_on == 0) |
1617 | entry->wired_count = 0; /*Avoid unwire fault*/ |
1618 | else |
1619 | return; |
1620 | } |
1621 | |
1622 | /* |
1623 | * Get the object. Null objects cannot have pmap entries. |
1624 | */ |
1625 | |
1626 | if ((object = entry->object.vm_object) != VM_OBJECT_NULL((vm_object_t) 0)) { |
1627 | |
1628 | /* |
1629 | * Unwire before removing addresses from the pmap; |
1630 | * otherwise, unwiring will put the entries back in |
1631 | * the pmap. |
1632 | */ |
1633 | |
1634 | if (entry->wired_count != 0) { |
1635 | vm_fault_unwire(map, entry); |
1636 | entry->wired_count = 0; |
1637 | entry->user_wired_count = 0; |
1638 | } |
1639 | |
1640 | /* |
1641 | * If the object is shared, we must remove |
1642 | * *all* references to this data, since we can't |
1643 | * find all of the physical maps which are sharing |
1644 | * it. |
1645 | */ |
1646 | |
1647 | if (object == kernel_object) { |
1648 | vm_object_lock(object); |
1649 | vm_object_page_remove(object, entry->offset, |
1650 | entry->offset + (e - s)); |
1651 | vm_object_unlock(object); |
1652 | } else if (entry->is_shared) { |
1653 | vm_object_pmap_remove(object, |
1654 | entry->offset, |
1655 | entry->offset + (e - s)); |
1656 | } |
1657 | else { |
1658 | pmap_remove(map->pmap, s, e); |
1659 | } |
1660 | } |
1661 | |
1662 | /* |
1663 | * Deallocate the object only after removing all |
1664 | * pmap entries pointing to its pages. |
1665 | */ |
1666 | |
1667 | if (entry->is_sub_map) |
1668 | vm_map_deallocate(entry->object.sub_map); |
1669 | else |
1670 | vm_object_deallocate(entry->object.vm_object); |
1671 | |
1672 | vm_map_entry_unlink(map, entry)({ (&(map)->hdr)->nentries--; (entry)->links.next ->links.prev = (entry)->links.prev; (entry)->links.prev ->links.next = (entry)->links.next; rbtree_remove(& (&(map)->hdr)->tree, &(entry)->tree_node); } ); |
1673 | map->size -= e - s; |
1674 | |
1675 | vm_map_entry_dispose(map, entry)_vm_map_entry_dispose(&(map)->hdr, (entry)); |
1676 | } |
1677 | |
1678 | /* |
1679 | * vm_map_delete: [ internal use only ] |
1680 | * |
1681 | * Deallocates the given address range from the target |
1682 | * map. |
1683 | */ |
1684 | |
1685 | kern_return_t vm_map_delete(map, start, end) |
1686 | vm_map_t map; |
1687 | vm_offset_t start; |
1688 | vm_offset_t end; |
1689 | { |
1690 | vm_map_entry_t entry; |
1691 | vm_map_entry_t first_entry; |
1692 | |
1693 | /* |
1694 | * Find the start of the region, and clip it |
1695 | */ |
1696 | |
1697 | if (!vm_map_lookup_entry(map, start, &first_entry)) |
1698 | entry = first_entry->vme_nextlinks.next; |
1699 | else { |
1700 | entry = first_entry; |
1701 | vm_map_clip_start(map, entry, start)({ if ((start) > (entry)->links.start) _vm_map_clip_start (&(map)->hdr,(entry),(start)); }); |
1702 | |
1703 | /* |
1704 | * Fix the lookup hint now, rather than each |
1705 | * time though the loop. |
1706 | */ |
1707 | |
1708 | SAVE_HINT(map, entry->vme_prev); (map)->hint = (entry->links.prev); ;; |
1709 | } |
1710 | |
1711 | /* |
1712 | * Save the free space hint |
1713 | */ |
1714 | |
1715 | if (map->first_free->vme_startlinks.start >= start) |
1716 | map->first_free = entry->vme_prevlinks.prev; |
1717 | |
1718 | /* |
1719 | * Step through all entries in this region |
1720 | */ |
1721 | |
1722 | while ((entry != vm_map_to_entry(map)((struct vm_map_entry *) &(map)->hdr.links)) && (entry->vme_startlinks.start < end)) { |
1723 | vm_map_entry_t next; |
1724 | |
1725 | vm_map_clip_end(map, entry, end)({ if ((end) < (entry)->links.end) _vm_map_clip_end(& (map)->hdr,(entry),(end)); }); |
1726 | |
1727 | /* |
1728 | * If the entry is in transition, we must wait |
1729 | * for it to exit that state. It could be clipped |
1730 | * while we leave the map unlocked. |
1731 | */ |
1732 | if(entry->in_transition) { |
1733 | /* |
1734 | * Say that we are waiting, and wait for entry. |
1735 | */ |
1736 | entry->needs_wakeup = TRUE((boolean_t) 1); |
1737 | vm_map_entry_wait(map, FALSE)({ assert_wait((event_t)&(map)->hdr, ((boolean_t) 0)); lock_done(&(map)->lock); thread_block((void (*)()) 0) ; }); |
1738 | vm_map_lock(map)({ lock_write(&(map)->lock); (map)->timestamp++; }); |
1739 | |
1740 | /* |
1741 | * The entry could have been clipped or it |
1742 | * may not exist anymore. look it up again. |
1743 | */ |
1744 | if(!vm_map_lookup_entry(map, start, &entry)) { |
1745 | entry = entry->vme_nextlinks.next; |
1746 | } |
1747 | continue; |
1748 | } |
1749 | |
1750 | next = entry->vme_nextlinks.next; |
1751 | |
1752 | vm_map_entry_delete(map, entry); |
1753 | entry = next; |
1754 | } |
1755 | |
1756 | if (map->wait_for_space) |
1757 | thread_wakeup((event_t) map)thread_wakeup_prim(((event_t) map), ((boolean_t) 0), 0); |
1758 | |
1759 | return(KERN_SUCCESS0); |
1760 | } |
1761 | |
1762 | /* |
1763 | * vm_map_remove: |
1764 | * |
1765 | * Remove the given address range from the target map. |
1766 | * This is the exported form of vm_map_delete. |
1767 | */ |
1768 | kern_return_t vm_map_remove(map, start, end) |
1769 | vm_map_t map; |
1770 | vm_offset_t start; |
1771 | vm_offset_t end; |
1772 | { |
1773 | kern_return_t result; |
1774 | |
1775 | vm_map_lock(map)({ lock_write(&(map)->lock); (map)->timestamp++; }); |
1776 | VM_MAP_RANGE_CHECK(map, start, end){ if (start < ((map)->hdr.links.start)) start = ((map)-> hdr.links.start); if (end > ((map)->hdr.links.end)) end = ((map)->hdr.links.end); if (start > end) start = end ; }; |
1777 | result = vm_map_delete(map, start, end); |
1778 | vm_map_unlock(map)lock_done(&(map)->lock); |
1779 | |
1780 | return(result); |
1781 | } |
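
/*
 * Illustrative sketch, not part of the original source: a hypothetical
 * caller tearing down a mapping it created earlier.  The helper name
 * and the size parameter are invented for the example.
 */
static void
example_release_range(vm_map_t map, vm_offset_t start, vm_size_t size)
{
	/*
	 * vm_map_remove takes the map lock itself, clips the boundary
	 * entries to [start, start + size) and deletes everything in
	 * between, unwiring pages as needed.
	 */
	(void) vm_map_remove(map, start, start + size);
}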
1782 | |
1783 | |
1784 | /* |
1785 | * vm_map_copy_steal_pages: |
1786 | * |
1787 | * Steal all the pages from a vm_map_copy page_list by copying ones |
1788 | * that have not already been stolen. |
1789 | */ |
1790 | void |
1791 | vm_map_copy_steal_pages(copy) |
1792 | vm_map_copy_t copy; |
1793 | { |
1794 | vm_page_t m, new_m; |
1795 | int i; |
1796 | vm_object_t object; |
1797 | |
1798 | for (i = 0; i < copy->cpy_npagesc_u.c_p.npages; i++) { |
1799 | |
1800 | /* |
1801 | * If the page is not tabled, then it's already stolen. |
1802 | */ |
1803 | m = copy->cpy_page_listc_u.c_p.page_list[i]; |
1804 | if (!m->tabled) |
1805 | continue; |
1806 | |
1807 | /* |
1808 | * Page was not stolen; get a new
1809 | * one and do the copy now. |
1810 | */ |
1811 | while ((new_m = vm_page_grab(FALSE((boolean_t) 0))) == VM_PAGE_NULL((vm_page_t) 0)) { |
1812 | VM_PAGE_WAIT((void(*)()) 0)vm_page_wait((void(*)()) 0); |
1813 | } |
1814 | |
1815 | vm_page_copy(m, new_m); |
1816 | |
1817 | object = m->object; |
1818 | vm_object_lock(object); |
1819 | vm_page_lock_queues(); |
1820 | if (!m->active && !m->inactive) |
1821 | vm_page_activate(m); |
1822 | vm_page_unlock_queues(); |
1823 | PAGE_WAKEUP_DONE(m)({ (m)->busy = ((boolean_t) 0); if ((m)->wanted) { (m)-> wanted = ((boolean_t) 0); thread_wakeup_prim((((event_t) m)), ((boolean_t) 0), 0); } }); |
1824 | vm_object_paging_end(object)({ ({ if (!((object)->paging_in_progress != 0)) Assert("(object)->paging_in_progress != 0" , "../vm/vm_map.c", 1824); }); if (--(object)->paging_in_progress == 0) { ({ if ((object)->all_wanted & (1 << (2) )) thread_wakeup_prim(((event_t)(((vm_offset_t) object) + (2) )), ((boolean_t) 0), 0); (object)->all_wanted &= ~(1 << (2)); }); } }); |
1825 | vm_object_unlock(object); |
1826 | |
1827 | copy->cpy_page_listc_u.c_p.page_list[i] = new_m; |
1828 | } |
1829 | } |
1830 | |
1831 | /* |
1832 | * vm_map_copy_page_discard: |
1833 | * |
1834 | * Get rid of the pages in a page_list copy. If the pages are |
1835 | * stolen, they are freed. If the pages are not stolen, they |
1836 | * are unbusied, and associated state is cleaned up. |
1837 | */ |
1838 | void vm_map_copy_page_discard(copy) |
1839 | vm_map_copy_t copy; |
1840 | { |
1841 | while (copy->cpy_npagesc_u.c_p.npages > 0) { |
1842 | vm_page_t m; |
1843 | |
1844 | if((m = copy->cpy_page_listc_u.c_p.page_list[--(copy->cpy_npagesc_u.c_p.npages)]) != |
1845 | VM_PAGE_NULL((vm_page_t) 0)) { |
1846 | |
1847 | /* |
1848 | * If it's not in the table, then it's |
1849 | * a stolen page that goes back |
1850 | * to the free list. Else it belongs |
1851 | * to some object, and we hold a |
1852 | * paging reference on that object. |
1853 | */ |
1854 | if (!m->tabled) { |
1855 | VM_PAGE_FREE(m)({ ; vm_page_free(m); ; }); |
1856 | } |
1857 | else { |
1858 | vm_object_t object; |
1859 | |
1860 | object = m->object; |
1861 | |
1862 | vm_object_lock(object); |
1863 | vm_page_lock_queues(); |
1864 | if (!m->active && !m->inactive) |
1865 | vm_page_activate(m); |
1866 | vm_page_unlock_queues(); |
1867 | |
1868 | PAGE_WAKEUP_DONE(m)({ (m)->busy = ((boolean_t) 0); if ((m)->wanted) { (m)-> wanted = ((boolean_t) 0); thread_wakeup_prim((((event_t) m)), ((boolean_t) 0), 0); } }); |
1869 | vm_object_paging_end(object)({ ({ if (!((object)->paging_in_progress != 0)) Assert("(object)->paging_in_progress != 0" , "../vm/vm_map.c", 1869); }); if (--(object)->paging_in_progress == 0) { ({ if ((object)->all_wanted & (1 << (2) )) thread_wakeup_prim(((event_t)(((vm_offset_t) object) + (2) )), ((boolean_t) 0), 0); (object)->all_wanted &= ~(1 << (2)); }); } }); |
1870 | vm_object_unlock(object); |
1871 | } |
1872 | } |
1873 | } |
1874 | } |
1875 | |
1876 | /* |
1877 | * Routine: vm_map_copy_discard |
1878 | * |
1879 | * Description: |
1880 | * Dispose of a map copy object (returned by |
1881 | * vm_map_copyin). |
1882 | */ |
1883 | void |
1884 | vm_map_copy_discard(copy) |
1885 | vm_map_copy_t copy; |
1886 | { |
1887 | free_next_copy: |
1888 | if (copy == VM_MAP_COPY_NULL((vm_map_copy_t) 0)) |
1889 | return; |
1890 | |
1891 | switch (copy->type) { |
1892 | case VM_MAP_COPY_ENTRY_LIST1: |
1893 | while (vm_map_copy_first_entry(copy)((copy)->c_u.hdr.links.next) != |
1894 | vm_map_copy_to_entry(copy)((struct vm_map_entry *) &(copy)->c_u.hdr.links)) { |
1895 | vm_map_entry_t entry = vm_map_copy_first_entry(copy)((copy)->c_u.hdr.links.next); |
1896 | |
1897 | vm_map_copy_entry_unlink(copy, entry)({ (&(copy)->c_u.hdr)->nentries--; (entry)->links .next->links.prev = (entry)->links.prev; (entry)->links .prev->links.next = (entry)->links.next; rbtree_remove( &(&(copy)->c_u.hdr)->tree, &(entry)->tree_node ); }); |
1898 | vm_object_deallocate(entry->object.vm_object); |
1899 | vm_map_copy_entry_dispose(copy, entry)_vm_map_entry_dispose(&(copy)->c_u.hdr, (entry)); |
1900 | } |
1901 | break; |
1902 | case VM_MAP_COPY_OBJECT2: |
1903 | vm_object_deallocate(copy->cpy_objectc_u.c_o.object); |
1904 | break; |
1905 | case VM_MAP_COPY_PAGE_LIST3: |
1906 | |
1907 | /* |
1908 | * To clean this up, we have to unbusy all the pages |
1909 | * and release the paging references in their objects. |
1910 | */ |
1911 | if (copy->cpy_npagesc_u.c_p.npages > 0) |
1912 | vm_map_copy_page_discard(copy); |
1913 | |
1914 | /* |
1915 | * If there's a continuation, abort it. The |
1916 | * abort routine releases any storage. |
1917 | */ |
1918 | if (vm_map_copy_has_cont(copy)(((copy)->c_u.c_p.cont) != (kern_return_t (*)()) 0)) { |
1919 | |
1920 | /* |
1921 | * Special case: recognize |
1922 | * vm_map_copy_discard_cont and optimize |
1923 | * here to avoid tail recursion. |
1924 | */ |
1925 | if (copy->cpy_contc_u.c_p.cont == vm_map_copy_discard_cont) { |
1926 | vm_map_copy_t new_copy; |
1927 | |
1928 | new_copy = (vm_map_copy_t) copy->cpy_cont_argsc_u.c_p.cont_args; |
1929 | kmem_cache_free(&vm_map_copy_cache, (vm_offset_t) copy); |
1930 | copy = new_copy; |
1931 | goto free_next_copy; |
1932 | } |
1933 | else { |
1934 | vm_map_copy_abort_cont(copy)({ vm_map_copy_page_discard(copy); (*((copy)->c_u.c_p.cont ))((copy)->c_u.c_p.cont_args, (vm_map_copy_t *) 0); (copy) ->c_u.c_p.cont = (kern_return_t (*)()) 0; (copy)->c_u.c_p .cont_args = (char *) 0; }); |
1935 | } |
1936 | } |
1937 | |
1938 | break; |
1939 | } |
1940 | kmem_cache_free(&vm_map_copy_cache, (vm_offset_t) copy); |
1941 | } |
1942 | |
1943 | /* |
1944 | * Routine: vm_map_copy_copy |
1945 | * |
1946 | * Description: |
1947 | * Move the information in a map copy object to |
1948 | * a new map copy object, leaving the old one |
1949 | * empty. |
1950 | * |
1951 | * This is used by kernel routines that need |
1952 | * to look at out-of-line data (in copyin form) |
1953 | * before deciding whether to return SUCCESS. |
1954 | * If the routine returns FAILURE, the original |
1955 | * copy object will be deallocated; therefore, |
1956 | * these routines must make a copy of the copy |
1957 | * object and leave the original empty so that |
1958 | * deallocation will not fail. |
1959 | */ |
1960 | vm_map_copy_t |
1961 | vm_map_copy_copy(copy) |
1962 | vm_map_copy_t copy; |
1963 | { |
1964 | vm_map_copy_t new_copy; |
1965 | |
1966 | if (copy == VM_MAP_COPY_NULL((vm_map_copy_t) 0)) |
1967 | return VM_MAP_COPY_NULL((vm_map_copy_t) 0); |
1968 | |
1969 | /* |
1970 | * Allocate a new copy object, and copy the information |
1971 | * from the old one into it. |
1972 | */ |
1973 | |
1974 | new_copy = (vm_map_copy_t) kmem_cache_alloc(&vm_map_copy_cache); |
1975 | *new_copy = *copy; |
1976 | |
1977 | if (copy->type == VM_MAP_COPY_ENTRY_LIST1) { |
1978 | /* |
1979 | * The links in the entry chain must be |
1980 | * changed to point to the new copy object. |
1981 | */ |
1982 | vm_map_copy_first_entry(copy)((copy)->c_u.hdr.links.next)->vme_prevlinks.prev |
1983 | = vm_map_copy_to_entry(new_copy)((struct vm_map_entry *) &(new_copy)->c_u.hdr.links); |
1984 | vm_map_copy_last_entry(copy)((copy)->c_u.hdr.links.prev)->vme_nextlinks.next |
1985 | = vm_map_copy_to_entry(new_copy)((struct vm_map_entry *) &(new_copy)->c_u.hdr.links); |
1986 | } |
1987 | |
1988 | /* |
1989 | * Change the old copy object into one that contains |
1990 | * nothing to be deallocated. |
1991 | */ |
1992 | copy->type = VM_MAP_COPY_OBJECT2; |
1993 | copy->cpy_objectc_u.c_o.object = VM_OBJECT_NULL((vm_object_t) 0); |
1994 | |
1995 | /* |
1996 | * Return the new object. |
1997 | */ |
1998 | return new_copy; |
1999 | } |
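
/*
 * Illustrative sketch, not part of the original source: a hypothetical
 * kernel routine following the pattern described above.  It inspects
 * out-of-line data before deciding whether to succeed; example_inspect
 * is an invented placeholder for that check.
 */
extern boolean_t example_inspect(vm_map_copy_t copy);	/* hypothetical */

static kern_return_t
example_examine_out_of_line(vm_map_copy_t copy)
{
	vm_map_copy_t private_copy;

	/*
	 * Move the contents into a private copy object.  The caller's
	 * object is now empty, so the deallocation the caller performs
	 * if we return failure has nothing left to release.
	 */
	private_copy = vm_map_copy_copy(copy);

	if (!example_inspect(private_copy)) {
		/* We own the moved contents and must discard them. */
		vm_map_copy_discard(private_copy);
		return KERN_FAILURE;
	}

	/* ... pass private_copy along, e.g. to vm_map_copyout ... */
	return KERN_SUCCESS;
}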
2000 | |
2001 | /* |
2002 | * Routine: vm_map_copy_discard_cont |
2003 | * |
2004 | * Description: |
2005 | * A version of vm_map_copy_discard that can be called |
2006 | * as a continuation from a vm_map_copy page list. |
2007 | */ |
2008 | kern_return_t vm_map_copy_discard_cont(cont_args, copy_result) |
2009 | vm_map_copyin_args_t cont_args; |
2010 | vm_map_copy_t *copy_result; /* OUT */ |
2011 | { |
2012 | vm_map_copy_discard((vm_map_copy_t) cont_args); |
2013 | if (copy_result != (vm_map_copy_t *)0) |
2014 | *copy_result = VM_MAP_COPY_NULL((vm_map_copy_t) 0); |
2015 | return(KERN_SUCCESS0); |
2016 | } |
2017 | |
2018 | /* |
2019 | * Routine: vm_map_copy_overwrite |
2020 | * |
2021 | * Description: |
2022 | * Copy the memory described by the map copy |
2023 | * object (copy; returned by vm_map_copyin) onto |
2024 | * the specified destination region (dst_map, dst_addr). |
2025 | * The destination must be writeable. |
2026 | * |
2027 | * Unlike vm_map_copyout, this routine actually |
2028 | * writes over previously-mapped memory. If the |
2029 | * previous mapping was to a permanent (user-supplied) |
2030 | * memory object, it is preserved. |
2031 | * |
2032 | * The attributes (protection and inheritance) of the |
2033 | * destination region are preserved. |
2034 | * |
2035 | * If successful, consumes the copy object. |
2036 | * Otherwise, the caller is responsible for it. |
2037 | * |
2038 | * Implementation notes: |
2039 | * To overwrite temporary virtual memory, it is |
2040 | * sufficient to remove the previous mapping and insert |
2041 | * the new copy. This replacement is done either on |
2042 | * the whole region (if no permanent virtual memory |
2043 | * objects are embedded in the destination region) or |
2044 | * in individual map entries. |
2045 | * |
2046 | * To overwrite permanent virtual memory, it is |
2047 | * necessary to copy each page, as the external |
2048 | * memory management interface currently does not |
2049 | * provide any optimizations. |
2050 | * |
2051 | * Once a page of permanent memory has been overwritten, |
2052 | * it is impossible to interrupt this function; otherwise, |
2053 | * the call would be neither atomic nor location-independent. |
2054 | * The kernel-state portion of a user thread must be |
2055 | * interruptible. |
2056 | * |
2057 | * It may be expensive to forward all requests that might |
2058 | * overwrite permanent memory (vm_write, vm_copy) to |
2059 | * uninterruptible kernel threads. This routine may be |
2060 | * called by interruptible threads; however, success is |
2061 | * not guaranteed -- if the request cannot be performed |
2062 | * atomically and interruptibly, an error indication is |
2063 | * returned. |
2064 | */ |
2065 | kern_return_t vm_map_copy_overwrite(dst_map, dst_addr, copy, interruptible) |
2066 | vm_map_t dst_map; |
2067 | vm_offset_t dst_addr; |
2068 | vm_map_copy_t copy; |
2069 | boolean_t interruptible; |
2070 | { |
2071 | vm_size_t size; |
2072 | vm_offset_t start; |
2073 | vm_map_entry_t tmp_entry; |
2074 | vm_map_entry_t entry; |
2075 | |
2076 | boolean_t contains_permanent_objects = FALSE((boolean_t) 0); |
2077 | |
2078 | interruptible = FALSE((boolean_t) 0); /* XXX */ |
2079 | |
2080 | /* |
2081 | * Check for null copy object. |
2082 | */ |
2083 | |
2084 | if (copy == VM_MAP_COPY_NULL((vm_map_copy_t) 0)) |
2085 | return(KERN_SUCCESS0); |
2086 | |
2087 | /* |
2088 | * Only works for entry lists at the moment. Will |
2089 | * support page lists LATER. |
2090 | */ |
2091 | |
2092 | assert(copy->type == VM_MAP_COPY_ENTRY_LIST)({ if (!(copy->type == 1)) Assert("copy->type == VM_MAP_COPY_ENTRY_LIST" , "../vm/vm_map.c", 2092); }); |
2093 | |
2094 | /* |
2095 | * Currently this routine only handles page-aligned |
2096 | * regions. Eventually, it should handle misalignments |
2097 | * by actually copying pages. |
2098 | */ |
2099 | |
2100 | if (!page_aligned(copy->offset)((((vm_offset_t) (copy->offset)) & ((1 << 12)-1) ) == 0) || |
2101 | !page_aligned(copy->size)((((vm_offset_t) (copy->size)) & ((1 << 12)-1)) == 0) || |
2102 | !page_aligned(dst_addr)((((vm_offset_t) (dst_addr)) & ((1 << 12)-1)) == 0)) |
2103 | return(KERN_INVALID_ARGUMENT4); |
2104 | |
2105 | size = copy->size; |
2106 | |
2107 | if (size == 0) { |
2108 | vm_map_copy_discard(copy); |
2109 | return(KERN_SUCCESS0); |
2110 | } |
2111 | |
2112 | /* |
2113 | * Verify that the destination is all writeable |
2114 | * initially. |
2115 | */ |
2116 | start_pass_1: |
2117 | vm_map_lock(dst_map)({ lock_write(&(dst_map)->lock); (dst_map)->timestamp ++; }); |
2118 | if (!vm_map_lookup_entry(dst_map, dst_addr, &tmp_entry)) { |
2119 | vm_map_unlock(dst_map)lock_done(&(dst_map)->lock); |
2120 | return(KERN_INVALID_ADDRESS1); |
2121 | } |
2122 | vm_map_clip_start(dst_map, tmp_entry, dst_addr)({ if ((dst_addr) > (tmp_entry)->links.start) _vm_map_clip_start (&(dst_map)->hdr,(tmp_entry),(dst_addr)); }); |
2123 | for (entry = tmp_entry;;) { |
2124 | vm_size_t sub_size = (entry->vme_endlinks.end - entry->vme_startlinks.start); |
2125 | vm_map_entry_t next = entry->vme_nextlinks.next; |
2126 | |
2127 | if ( ! (entry->protection & VM_PROT_WRITE((vm_prot_t) 0x02))) { |
2128 | vm_map_unlock(dst_map)lock_done(&(dst_map)->lock); |
2129 | return(KERN_PROTECTION_FAILURE2); |
2130 | } |
2131 | |
2132 | /* |
2133 | * If the entry is in transition, we must wait |
2134 | * for it to exit that state. Anything could happen |
2135 | * when we unlock the map, so start over. |
2136 | */ |
2137 | if (entry->in_transition) { |
2138 | |
2139 | /* |
2140 | * Say that we are waiting, and wait for entry. |
2141 | */ |
2142 | entry->needs_wakeup = TRUE((boolean_t) 1); |
2143 | vm_map_entry_wait(dst_map, FALSE)({ assert_wait((event_t)&(dst_map)->hdr, ((boolean_t) 0 )); lock_done(&(dst_map)->lock); thread_block((void (* )()) 0); }); |
2144 | |
2145 | goto start_pass_1; |
2146 | } |
2147 | |
2148 | if (size <= sub_size) |
2149 | break; |
2150 | |
2151 | if ((next == vm_map_to_entry(dst_map)((struct vm_map_entry *) &(dst_map)->hdr.links)) || |
2152 | (next->vme_startlinks.start != entry->vme_endlinks.end)) { |
2153 | vm_map_unlock(dst_map)lock_done(&(dst_map)->lock); |
2154 | return(KERN_INVALID_ADDRESS1); |
2155 | } |
2156 | |
2157 | |
2158 | /* |
2159 | * Check for permanent objects in the destination. |
2160 | */ |
2161 | |
2162 | if ((entry->object.vm_object != VM_OBJECT_NULL((vm_object_t) 0)) && |
2163 | !entry->object.vm_object->temporary) |
2164 | contains_permanent_objects = TRUE((boolean_t) 1); |
2165 | |
2166 | size -= sub_size; |
2167 | entry = next; |
2168 | } |
2169 | |
2170 | /* |
2171 | * If there are permanent objects in the destination, then |
2172 | * the copy cannot be interrupted. |
2173 | */ |
2174 | |
2175 | if (interruptible && contains_permanent_objects) { |
2176 | vm_map_unlock(dst_map)lock_done(&(dst_map)->lock); |
2177 | return(KERN_FAILURE5); /* XXX */ |
2178 | } |
2179 | |
2180 | /* |
2181 | * XXXO If there are no permanent objects in the destination, |
2182 | * XXXO and the source and destination map entry caches match, |
2183 | * XXXO and the destination map entry is not shared, |
2184 | * XXXO then the map entries can be deleted and replaced |
2185 | * XXXO with those from the copy. The following code is the |
2186 | * XXXO basic idea of what to do, but there are lots of annoying |
2187 | * XXXO little details about getting protection and inheritance |
2188 | * XXXO right. Should add protection, inheritance, and sharing checks |
2189 | * XXXO to the above pass and make sure that no wiring is involved. |
2190 | */ |
2191 | /* |
2192 | * if (!contains_permanent_objects && |
2193 | * copy->cpy_hdr.entries_pageable == dst_map->hdr.entries_pageable) { |
2194 | * |
2195 | * * |
2196 | * * Run over copy and adjust entries. Steal code |
2197 | * * from vm_map_copyout() to do this. |
2198 | * * |
2199 | * |
2200 | * tmp_entry = tmp_entry->vme_prev; |
2201 | * vm_map_delete(dst_map, dst_addr, dst_addr + copy->size); |
2202 | * vm_map_copy_insert(dst_map, tmp_entry, copy); |
2203 | * |
2204 | * vm_map_unlock(dst_map); |
2205 | * vm_map_copy_discard(copy); |
2206 | * } |
2207 | */ |
2208 | /* |
2209 | * |
2210 | * Make a second pass, overwriting the data.
2211 | * At the beginning of each loop iteration, |
2212 | * the next entry to be overwritten is "tmp_entry" |
2213 | * (initially, the value returned from the lookup above), |
2214 | * and the starting address expected in that entry |
2215 | * is "start". |
2216 | */ |
2217 | |
2218 | start = dst_addr; |
2219 | |
2220 | while (vm_map_copy_first_entry(copy)((copy)->c_u.hdr.links.next) != vm_map_copy_to_entry(copy)((struct vm_map_entry *) &(copy)->c_u.hdr.links)) { |
2221 | vm_map_entry_t copy_entry = vm_map_copy_first_entry(copy)((copy)->c_u.hdr.links.next); |
2222 | vm_size_t copy_size = (copy_entry->vme_endlinks.end - copy_entry->vme_startlinks.start); |
2223 | vm_object_t object; |
2224 | |
2225 | entry = tmp_entry; |
2226 | size = (entry->vme_endlinks.end - entry->vme_startlinks.start); |
2227 | /* |
2228 | * Make sure that no holes popped up in the |
2229 | * address map, and that the protection is |
2230 | * still valid, in case the map was unlocked |
2231 | * earlier. |
2232 | */ |
2233 | |
2234 | if (entry->vme_startlinks.start != start) { |
2235 | vm_map_unlock(dst_map)lock_done(&(dst_map)->lock); |
2236 | return(KERN_INVALID_ADDRESS1); |
2237 | } |
2238 | assert(entry != vm_map_to_entry(dst_map))({ if (!(entry != ((struct vm_map_entry *) &(dst_map)-> hdr.links))) Assert("entry != vm_map_to_entry(dst_map)", "../vm/vm_map.c" , 2238); }); |
2239 | |
2240 | /* |
2241 | * Check protection again |
2242 | */ |
2243 | |
2244 | if ( ! (entry->protection & VM_PROT_WRITE((vm_prot_t) 0x02))) { |
2245 | vm_map_unlock(dst_map)lock_done(&(dst_map)->lock); |
2246 | return(KERN_PROTECTION_FAILURE2); |
2247 | } |
2248 | |
2249 | /* |
2250 | * Adjust to source size first |
2251 | */ |
2252 | |
2253 | if (copy_size < size) { |
2254 | vm_map_clip_end(dst_map, entry, entry->vme_start + copy_size)({ if ((entry->links.start + copy_size) < (entry)->links .end) _vm_map_clip_end(&(dst_map)->hdr,(entry),(entry-> links.start + copy_size)); }); |
2255 | size = copy_size; |
2256 | } |
2257 | |
2258 | /* |
2259 | * Adjust to destination size |
2260 | */ |
2261 | |
2262 | if (size < copy_size) { |
2263 | vm_map_copy_clip_end(copy, copy_entry,({ if ((copy_entry->links.start + size) < (copy_entry)-> links.end) _vm_map_clip_end(&(copy)->c_u.hdr,(copy_entry ),(copy_entry->links.start + size)); }) |
2264 | copy_entry->vme_start + size)({ if ((copy_entry->links.start + size) < (copy_entry)-> links.end) _vm_map_clip_end(&(copy)->c_u.hdr,(copy_entry ),(copy_entry->links.start + size)); }); |
2265 | copy_size = size; |
2266 | } |
2267 | |
2268 | assert((entry->vme_end - entry->vme_start) == size)({ if (!((entry->links.end - entry->links.start) == size )) Assert("(entry->vme_end - entry->vme_start) == size" , "../vm/vm_map.c", 2268); }); |
2269 | assert((tmp_entry->vme_end - tmp_entry->vme_start) == size)({ if (!((tmp_entry->links.end - tmp_entry->links.start ) == size)) Assert("(tmp_entry->vme_end - tmp_entry->vme_start) == size" , "../vm/vm_map.c", 2269); }); |
2270 | assert((copy_entry->vme_end - copy_entry->vme_start) == size)({ if (!((copy_entry->links.end - copy_entry->links.start ) == size)) Assert("(copy_entry->vme_end - copy_entry->vme_start) == size" , "../vm/vm_map.c", 2270); }); |
2271 | |
2272 | /* |
2273 | * If the destination contains temporary unshared memory, |
2274 | * we can perform the copy by throwing it away and |
2275 | * installing the source data. |
2276 | */ |
2277 | |
2278 | object = entry->object.vm_object; |
2279 | if (!entry->is_shared && |
2280 | ((object == VM_OBJECT_NULL((vm_object_t) 0)) || object->temporary)) { |
2281 | vm_object_t old_object = entry->object.vm_object; |
2282 | vm_offset_t old_offset = entry->offset; |
2283 | |
2284 | entry->object = copy_entry->object; |
2285 | entry->offset = copy_entry->offset; |
2286 | entry->needs_copy = copy_entry->needs_copy; |
2287 | entry->wired_count = 0; |
2288 | entry->user_wired_count = 0; |
2289 | |
2290 | vm_map_copy_entry_unlink(copy, copy_entry)({ (&(copy)->c_u.hdr)->nentries--; (copy_entry)-> links.next->links.prev = (copy_entry)->links.prev; (copy_entry )->links.prev->links.next = (copy_entry)->links.next ; rbtree_remove(&(&(copy)->c_u.hdr)->tree, & (copy_entry)->tree_node); }); |
2291 | vm_map_copy_entry_dispose(copy, copy_entry)_vm_map_entry_dispose(&(copy)->c_u.hdr, (copy_entry)); |
2292 | |
2293 | vm_object_pmap_protect( |
2294 | old_object, |
2295 | old_offset, |
2296 | size, |
2297 | dst_map->pmap, |
2298 | tmp_entry->vme_startlinks.start, |
2299 | VM_PROT_NONE((vm_prot_t) 0x00)); |
2300 | |
2301 | vm_object_deallocate(old_object); |
2302 | |
2303 | /* |
2304 | * Set up for the next iteration. The map |
2305 | * has not been unlocked, so the next |
2306 | * address should be at the end of this |
2307 | * entry, and the next map entry should be |
2308 | * the one following it. |
2309 | */ |
2310 | |
2311 | start = tmp_entry->vme_endlinks.end; |
2312 | tmp_entry = tmp_entry->vme_nextlinks.next; |
2313 | } else { |
2314 | vm_map_version_t version; |
2315 | vm_object_t dst_object = entry->object.vm_object; |
2316 | vm_offset_t dst_offset = entry->offset; |
2317 | kern_return_t r; |
2318 | |
2319 | /* |
2320 | * Take an object reference, and record |
2321 | * the map version information so that the |
2322 | * map can be safely unlocked. |
2323 | */ |
2324 | |
2325 | vm_object_reference(dst_object); |
2326 | |
2327 | version.main_timestamp = dst_map->timestamp; |
2328 | |
2329 | vm_map_unlock(dst_map)lock_done(&(dst_map)->lock); |
2330 | |
2331 | /* |
2332 | * Copy as much as possible in one pass |
2333 | */ |
2334 | |
2335 | copy_size = size; |
2336 | r = vm_fault_copy( |
2337 | copy_entry->object.vm_object, |
2338 | copy_entry->offset, |
2339 | ©_size, |
2340 | dst_object, |
2341 | dst_offset, |
2342 | dst_map, |
2343 | &version, |
2344 | FALSE((boolean_t) 0) /* XXX interruptible */ ); |
2345 | |
2346 | /* |
2347 | * Release the object reference |
2348 | */ |
2349 | |
2350 | vm_object_deallocate(dst_object); |
2351 | |
2352 | /* |
2353 | * If a hard error occurred, return it now |
2354 | */ |
2355 | |
2356 | if (r != KERN_SUCCESS0) |
2357 | return(r); |
2358 | |
2359 | if (copy_size != 0) { |
2360 | /* |
2361 | * Dispose of the copied region |
2362 | */ |
2363 | |
2364 | vm_map_copy_clip_end(copy, copy_entry,({ if ((copy_entry->links.start + copy_size) < (copy_entry )->links.end) _vm_map_clip_end(&(copy)->c_u.hdr,(copy_entry ),(copy_entry->links.start + copy_size)); }) |
2365 | copy_entry->vme_start + copy_size)({ if ((copy_entry->links.start + copy_size) < (copy_entry )->links.end) _vm_map_clip_end(&(copy)->c_u.hdr,(copy_entry ),(copy_entry->links.start + copy_size)); }); |
2366 | vm_map_copy_entry_unlink(copy, copy_entry)({ (&(copy)->c_u.hdr)->nentries--; (copy_entry)-> links.next->links.prev = (copy_entry)->links.prev; (copy_entry )->links.prev->links.next = (copy_entry)->links.next ; rbtree_remove(&(&(copy)->c_u.hdr)->tree, & (copy_entry)->tree_node); }); |
2367 | vm_object_deallocate(copy_entry->object.vm_object); |
2368 | vm_map_copy_entry_dispose(copy, copy_entry)_vm_map_entry_dispose(&(copy)->c_u.hdr, (copy_entry)); |
2369 | } |
2370 | |
2371 | /* |
2372 | * Pick up in the destination map where we left off. |
2373 | * |
2374 | * Use the version information to avoid a lookup |
2375 | * in the normal case. |
2376 | */ |
2377 | |
2378 | start += copy_size; |
2379 | vm_map_lock(dst_map)({ lock_write(&(dst_map)->lock); (dst_map)->timestamp ++; }); |
2380 | if ((version.main_timestamp + 1) == dst_map->timestamp) { |
2381 | /* We can safely use saved tmp_entry value */ |
2382 | |
2383 | vm_map_clip_end(dst_map, tmp_entry, start)({ if ((start) < (tmp_entry)->links.end) _vm_map_clip_end (&(dst_map)->hdr,(tmp_entry),(start)); }); |
2384 | tmp_entry = tmp_entry->vme_nextlinks.next; |
2385 | } else { |
2386 | /* Must do lookup of tmp_entry */ |
2387 | |
2388 | if (!vm_map_lookup_entry(dst_map, start, &tmp_entry)) { |
2389 | vm_map_unlock(dst_map)lock_done(&(dst_map)->lock); |
2390 | return(KERN_INVALID_ADDRESS1); |
2391 | } |
2392 | vm_map_clip_start(dst_map, tmp_entry, start)({ if ((start) > (tmp_entry)->links.start) _vm_map_clip_start (&(dst_map)->hdr,(tmp_entry),(start)); }); |
2393 | } |
2394 | } |
2395 | |
2396 | } |
2397 | vm_map_unlock(dst_map)lock_done(&(dst_map)->lock); |
2398 | |
2399 | /* |
2400 | * Throw away the vm_map_copy object |
2401 | */ |
2402 | vm_map_copy_discard(copy); |
2403 | |
2404 | return(KERN_SUCCESS0); |
2405 | } |
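
/*
 * Illustrative sketch, not part of the original source: the core of a
 * hypothetical vm_write-style operation that overwrites an existing,
 * writable destination range with previously copied-in data.
 */
static kern_return_t
example_overwrite_buffer(vm_map_t dst_map, vm_offset_t dst_addr,
			 vm_map_copy_t copy)
{
	kern_return_t kr;

	/* Not interruptible; the copy object is consumed on success. */
	kr = vm_map_copy_overwrite(dst_map, dst_addr, copy, FALSE);
	if (kr != KERN_SUCCESS) {
		/* On failure the copy object is still ours to discard. */
		vm_map_copy_discard(copy);
	}
	return kr;
}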
2406 | |
2407 | /* |
2408 | * Macro: vm_map_copy_insert |
2409 | * |
2410 | * Description: |
2411 | * Link a copy chain ("copy") into a map at the |
2412 | * specified location (after "where"). |
2413 | * Side effects: |
2414 | * The copy chain is destroyed. |
2415 | * Warning: |
2416 | * The arguments are evaluated multiple times. |
2417 | */ |
2418 | #define vm_map_copy_insert(map, where, copy)({ struct rbtree_node *node, *tmp; for (node = rbtree_postwalk_deepest (&(copy)->c_u.hdr.tree), tmp = rbtree_postwalk_unlink( node); node != ((void *) 0); node = tmp, tmp = rbtree_postwalk_unlink (node)) ({ struct rbtree_node *___cur, *___prev; int ___diff, ___index; ___prev = ((void *) 0); ___index = -1; ___cur = (& (map)->hdr.tree)->root; while (___cur != ((void *) 0)) { ___diff = vm_map_entry_cmp_insert(node, ___cur); ({ if (!(___diff != 0)) Assert("___diff != 0", "../vm/vm_map.c", 2418); }); ___prev = ___cur; ___index = rbtree_d2i(___diff); ___cur = ___cur-> children[___index]; } rbtree_insert_rebalance(&(map)-> hdr.tree, ___prev, ___index, node); }); (((where)->links.next )->links.prev = ((copy)->c_u.hdr.links.prev)) ->links .next = ((where)->links.next); ((where)->links.next = ( (copy)->c_u.hdr.links.next)) ->links.prev = (where); (map )->hdr.nentries += (copy)->c_u.hdr.nentries; kmem_cache_free (&vm_map_copy_cache, (vm_offset_t) copy); }) \ |
2419 | MACRO_BEGIN({ \ |
2420 | struct rbtree_node *node, *tmp; \ |
2421 | rbtree_for_each_remove(&(copy)->cpy_hdr.tree, node, tmp)for (node = rbtree_postwalk_deepest(&(copy)->c_u.hdr.tree ), tmp = rbtree_postwalk_unlink(node); node != ((void *) 0); node = tmp, tmp = rbtree_postwalk_unlink(node)) \ |
2422 | rbtree_insert(&(map)->hdr.tree, node, \({ struct rbtree_node *___cur, *___prev; int ___diff, ___index ; ___prev = ((void *) 0); ___index = -1; ___cur = (&(map) ->hdr.tree)->root; while (___cur != ((void *) 0)) { ___diff = vm_map_entry_cmp_insert(node, ___cur); ({ if (!(___diff != 0)) Assert("___diff != 0", "../vm/vm_map.c", 2423); }); ___prev = ___cur; ___index = rbtree_d2i(___diff); ___cur = ___cur-> children[___index]; } rbtree_insert_rebalance(&(map)-> hdr.tree, ___prev, ___index, node); }) |
2423 | vm_map_entry_cmp_insert)({ struct rbtree_node *___cur, *___prev; int ___diff, ___index ; ___prev = ((void *) 0); ___index = -1; ___cur = (&(map) ->hdr.tree)->root; while (___cur != ((void *) 0)) { ___diff = vm_map_entry_cmp_insert(node, ___cur); ({ if (!(___diff != 0)) Assert("___diff != 0", "../vm/vm_map.c", 2423); }); ___prev = ___cur; ___index = rbtree_d2i(___diff); ___cur = ___cur-> children[___index]; } rbtree_insert_rebalance(&(map)-> hdr.tree, ___prev, ___index, node); }); \ |
2424 | (((where)->vme_nextlinks.next)->vme_prevlinks.prev = vm_map_copy_last_entry(copy)((copy)->c_u.hdr.links.prev)) \ |
2425 | ->vme_nextlinks.next = ((where)->vme_nextlinks.next); \ |
2426 | ((where)->vme_nextlinks.next = vm_map_copy_first_entry(copy)((copy)->c_u.hdr.links.next)) \ |
2427 | ->vme_prevlinks.prev = (where); \ |
2428 | (map)->hdr.nentries += (copy)->cpy_hdrc_u.hdr.nentries; \ |
2429 | kmem_cache_free(&vm_map_copy_cache, (vm_offset_t) copy); \ |
2430 | MACRO_END}) |
2431 | |
2432 | /* |
2433 | * Routine: vm_map_copyout |
2434 | * |
2435 | * Description: |
2436 | * Copy out a copy chain ("copy") into newly-allocated |
2437 | * space in the destination map. |
2438 | * |
2439 | * If successful, consumes the copy object. |
2440 | * Otherwise, the caller is responsible for it. |
2441 | */ |
2442 | kern_return_t vm_map_copyout(dst_map, dst_addr, copy) |
2443 | vm_map_t dst_map; |
2444 | vm_offset_t *dst_addr; /* OUT */ |
2445 | vm_map_copy_t copy; |
2446 | { |
2447 | vm_size_t size; |
2448 | vm_size_t adjustment; |
2449 | vm_offset_t start; |
2450 | vm_offset_t vm_copy_start; |
2451 | vm_map_entry_t last; |
2452 | vm_map_entry_t entry; |
2453 | |
2454 | /* |
2455 | * Check for null copy object. |
2456 | */ |
2457 | |
2458 | if (copy == VM_MAP_COPY_NULL((vm_map_copy_t) 0)) { |
2459 | *dst_addr = 0; |
2460 | return(KERN_SUCCESS0); |
2461 | } |
2462 | |
2463 | /* |
2464 | * Check for special copy object, created |
2465 | * by vm_map_copyin_object. |
2466 | */ |
2467 | |
2468 | if (copy->type == VM_MAP_COPY_OBJECT2) { |
2469 | vm_object_t object = copy->cpy_objectc_u.c_o.object; |
2470 | vm_size_t offset = copy->offset; |
2471 | vm_size_t tmp_size = copy->size; |
2472 | kern_return_t kr; |
2473 | |
2474 | *dst_addr = 0; |
2475 | kr = vm_map_enter(dst_map, dst_addr, tmp_size, |
2476 | (vm_offset_t) 0, TRUE((boolean_t) 1), |
2477 | object, offset, FALSE((boolean_t) 0), |
2478 | VM_PROT_DEFAULT(((vm_prot_t) 0x01)|((vm_prot_t) 0x02)), VM_PROT_ALL(((vm_prot_t) 0x01)|((vm_prot_t) 0x02)|((vm_prot_t) 0x04)), |
2479 | VM_INHERIT_DEFAULT((vm_inherit_t) 1)); |
2480 | if (kr != KERN_SUCCESS0) |
2481 | return(kr); |
2482 | kmem_cache_free(&vm_map_copy_cache, (vm_offset_t) copy); |
2483 | return(KERN_SUCCESS0); |
2484 | } |
2485 | |
2486 | if (copy->type == VM_MAP_COPY_PAGE_LIST3) |
2487 | return(vm_map_copyout_page_list(dst_map, dst_addr, copy)); |
2488 | |
2489 | /* |
2490 | * Find space for the data |
2491 | */ |
2492 | |
2493 | vm_copy_start = trunc_page(copy->offset)((vm_offset_t)(((vm_offset_t)(copy->offset)) & ~((1 << 12)-1))); |
2494 | size = round_page(copy->offset + copy->size)((vm_offset_t)((((vm_offset_t)(copy->offset + copy->size )) + ((1 << 12)-1)) & ~((1 << 12)-1))) - vm_copy_start; |
2495 | |
2496 | StartAgain: ; |
2497 | |
2498 | vm_map_lock(dst_map)({ lock_write(&(dst_map)->lock); (dst_map)->timestamp ++; }); |
2499 | start = ((last = dst_map->first_free) == vm_map_to_entry(dst_map)((struct vm_map_entry *) &(dst_map)->hdr.links)) ? |
2500 | vm_map_min(dst_map)((dst_map)->hdr.links.start) : last->vme_endlinks.end; |
2501 | |
2502 | while (TRUE((boolean_t) 1)) { |
2503 | vm_map_entry_t next = last->vme_nextlinks.next; |
2504 | vm_offset_t end = start + size; |
2505 | |
2506 | if ((end > dst_map->max_offsethdr.links.end) || (end < start)) { |
2507 | if (dst_map->wait_for_space) { |
2508 | if (size <= (dst_map->max_offsethdr.links.end - dst_map->min_offsethdr.links.start)) { |
2509 | assert_wait((event_t) dst_map, TRUE((boolean_t) 1)); |
2510 | vm_map_unlock(dst_map)lock_done(&(dst_map)->lock); |
2511 | thread_block((void (*)()) 0); |
2512 | goto StartAgain; |
2513 | } |
2514 | } |
2515 | vm_map_unlock(dst_map)lock_done(&(dst_map)->lock); |
2516 | printf_once("no more room for vm_map_copyout in %p\n", dst_map)({ static int __once = 0; if (!__once) { printf("no more room for vm_map_copyout in %p\n" , dst_map); __once = 1; } }); |
2517 | return(KERN_NO_SPACE3); |
2518 | } |
2519 | |
2520 | if ((next == vm_map_to_entry(dst_map)((struct vm_map_entry *) &(dst_map)->hdr.links)) || |
2521 | (next->vme_startlinks.start >= end)) |
2522 | break; |
2523 | |
2524 | last = next; |
2525 | start = last->vme_endlinks.end; |
2526 | } |
2527 | |
2528 | /* |
2529 | * Since we're going to just drop the map |
2530 | * entries from the copy into the destination |
2531 | * map, they must come from the same pool. |
2532 | */ |
2533 | |
2534 | if (copy->cpy_hdrc_u.hdr.entries_pageable != dst_map->hdr.entries_pageable) { |
2535 | /* |
2536 | * Mismatches occur when dealing with the default |
2537 | * pager. |
2538 | */ |
2539 | kmem_cache_t old_cache; |
2540 | vm_map_entry_t next, new; |
2541 | |
2542 | /* |
2543 | * Find the cache that the copies were allocated from |
2544 | */ |
2545 | old_cache = (copy->cpy_hdrc_u.hdr.entries_pageable) |
2546 | ? &vm_map_entry_cache |
2547 | : &vm_map_kentry_cache; |
2548 | entry = vm_map_copy_first_entry(copy)((copy)->c_u.hdr.links.next); |
2549 | |
2550 | /* |
2551 | * Reinitialize the copy so that vm_map_copy_entry_link |
2552 | * will work. |
2553 | */ |
2554 | copy->cpy_hdrc_u.hdr.nentries = 0; |
2555 | copy->cpy_hdrc_u.hdr.entries_pageable = dst_map->hdr.entries_pageable; |
2556 | vm_map_copy_first_entry(copy)((copy)->c_u.hdr.links.next) = |
2557 | vm_map_copy_last_entry(copy)((copy)->c_u.hdr.links.prev) = |
2558 | vm_map_copy_to_entry(copy)((struct vm_map_entry *) &(copy)->c_u.hdr.links); |
2559 | |
2560 | /* |
2561 | * Copy each entry. |
2562 | */ |
2563 | while (entry != vm_map_copy_to_entry(copy)((struct vm_map_entry *) &(copy)->c_u.hdr.links)) { |
2564 | new = vm_map_copy_entry_create(copy)_vm_map_entry_create(&(copy)->c_u.hdr); |
2565 | vm_map_entry_copy_full(new, entry)(*(new) = *(entry)); |
2566 | vm_map_copy_entry_link(copy,({ (&(copy)->c_u.hdr)->nentries++; (new)->links. prev = (((copy)->c_u.hdr.links.prev)); (new)->links.next = (((copy)->c_u.hdr.links.prev))->links.next; (new)-> links.prev->links.next = (new)->links.next->links.prev = (new); ({ struct rbtree_node *___cur, *___prev; int ___diff , ___index; ___prev = ((void *) 0); ___index = -1; ___cur = ( &(&(copy)->c_u.hdr)->tree)->root; while (___cur != ((void *) 0)) { ___diff = vm_map_entry_cmp_insert(&(new )->tree_node, ___cur); ({ if (!(___diff != 0)) Assert("___diff != 0" , "../vm/vm_map.c", 2568); }); ___prev = ___cur; ___index = rbtree_d2i (___diff); ___cur = ___cur->children[___index]; } rbtree_insert_rebalance (&(&(copy)->c_u.hdr)->tree, ___prev, ___index, & (new)->tree_node); }); }) |
2567 | vm_map_copy_last_entry(copy),({ (&(copy)->c_u.hdr)->nentries++; (new)->links. prev = (((copy)->c_u.hdr.links.prev)); (new)->links.next = (((copy)->c_u.hdr.links.prev))->links.next; (new)-> links.prev->links.next = (new)->links.next->links.prev = (new); ({ struct rbtree_node *___cur, *___prev; int ___diff , ___index; ___prev = ((void *) 0); ___index = -1; ___cur = ( &(&(copy)->c_u.hdr)->tree)->root; while (___cur != ((void *) 0)) { ___diff = vm_map_entry_cmp_insert(&(new )->tree_node, ___cur); ({ if (!(___diff != 0)) Assert("___diff != 0" , "../vm/vm_map.c", 2568); }); ___prev = ___cur; ___index = rbtree_d2i (___diff); ___cur = ___cur->children[___index]; } rbtree_insert_rebalance (&(&(copy)->c_u.hdr)->tree, ___prev, ___index, & (new)->tree_node); }); }) |
2568 | new)({ (&(copy)->c_u.hdr)->nentries++; (new)->links. prev = (((copy)->c_u.hdr.links.prev)); (new)->links.next = (((copy)->c_u.hdr.links.prev))->links.next; (new)-> links.prev->links.next = (new)->links.next->links.prev = (new); ({ struct rbtree_node *___cur, *___prev; int ___diff , ___index; ___prev = ((void *) 0); ___index = -1; ___cur = ( &(&(copy)->c_u.hdr)->tree)->root; while (___cur != ((void *) 0)) { ___diff = vm_map_entry_cmp_insert(&(new )->tree_node, ___cur); ({ if (!(___diff != 0)) Assert("___diff != 0" , "../vm/vm_map.c", 2568); }); ___prev = ___cur; ___index = rbtree_d2i (___diff); ___cur = ___cur->children[___index]; } rbtree_insert_rebalance (&(&(copy)->c_u.hdr)->tree, ___prev, ___index, & (new)->tree_node); }); }); |
2569 | next = entry->vme_nextlinks.next; |
2570 | kmem_cache_free(old_cache, (vm_offset_t) entry); |
2571 | entry = next; |
2572 | } |
2573 | } |
2574 | |
2575 | /* |
2576 | * Adjust the addresses in the copy chain, and |
2577 | * reset the region attributes. |
2578 | */ |
2579 | |
2580 | adjustment = start - vm_copy_start; |
2581 | for (entry = vm_map_copy_first_entry(copy)((copy)->c_u.hdr.links.next); |
2582 | entry != vm_map_copy_to_entry(copy)((struct vm_map_entry *) &(copy)->c_u.hdr.links); |
2583 | entry = entry->vme_nextlinks.next) { |
2584 | entry->vme_startlinks.start += adjustment; |
2585 | entry->vme_endlinks.end += adjustment; |
2586 | |
2587 | entry->inheritance = VM_INHERIT_DEFAULT((vm_inherit_t) 1); |
2588 | entry->protection = VM_PROT_DEFAULT(((vm_prot_t) 0x01)|((vm_prot_t) 0x02)); |
2589 | entry->max_protection = VM_PROT_ALL(((vm_prot_t) 0x01)|((vm_prot_t) 0x02)|((vm_prot_t) 0x04)); |
2590 | entry->projected_on = 0; |
2591 | |
2592 | /* |
2593 | * If the entry is now wired, |
2594 | * map the pages into the destination map. |
2595 | */ |
2596 | if (entry->wired_count != 0) { |
2597 | vm_offset_t va; |
2598 | vm_offset_t offset; |
2599 | vm_object_t object; |
2600 | |
2601 | object = entry->object.vm_object; |
2602 | offset = entry->offset; |
2603 | va = entry->vme_startlinks.start; |
2604 | |
2605 | pmap_pageable(dst_map->pmap, |
2606 | entry->vme_startlinks.start, |
2607 | entry->vme_endlinks.end, |
2608 | TRUE((boolean_t) 1)); |
2609 | |
2610 | while (va < entry->vme_endlinks.end) { |
2611 | vm_page_t m; |
2612 | |
2613 | /* |
2614 | * Look up the page in the object. |
2615 | * Assert that the page will be found in the |
2616 | * top object: |
2617 | * either |
2618 | * the object was newly created by |
2619 | * vm_object_copy_slowly, and has |
2620 | * copies of all of the pages from |
2621 | * the source object |
2622 | * or |
2623 | * the object was moved from the old |
2624 | * map entry; because the old map |
2625 | * entry was wired, all of the pages |
2626 | * were in the top-level object. |
2627 | * (XXX not true if we wire pages for |
2628 | * reading) |
2629 | */ |
2630 | vm_object_lock(object); |
2631 | vm_object_paging_begin(object)((object)->paging_in_progress++); |
2632 | |
2633 | m = vm_page_lookup(object, offset); |
2634 | if (m == VM_PAGE_NULL((vm_page_t) 0) || m->wire_count == 0 || |
2635 | m->absent) |
2636 | panic("vm_map_copyout: wiring 0x%x", m); |
2637 | |
2638 | m->busy = TRUE((boolean_t) 1); |
2639 | vm_object_unlock(object); |
2640 | |
2641 | PMAP_ENTER(dst_map->pmap, va, m,({ pmap_enter( (dst_map->pmap), (va), (m)->phys_addr, ( entry->protection) & ~(m)->page_lock, (((boolean_t) 1)) ); }) |
2642 | entry->protection, TRUE)({ pmap_enter( (dst_map->pmap), (va), (m)->phys_addr, ( entry->protection) & ~(m)->page_lock, (((boolean_t) 1)) ); }); |
2643 | |
2644 | vm_object_lock(object); |
2645 | PAGE_WAKEUP_DONE(m)({ (m)->busy = ((boolean_t) 0); if ((m)->wanted) { (m)-> wanted = ((boolean_t) 0); thread_wakeup_prim((((event_t) m)), ((boolean_t) 0), 0); } }); |
2646 | /* the page is wired, so we don't have to activate */ |
2647 | vm_object_paging_end(object)({ ({ if (!((object)->paging_in_progress != 0)) Assert("(object)->paging_in_progress != 0" , "../vm/vm_map.c", 2647); }); if (--(object)->paging_in_progress == 0) { ({ if ((object)->all_wanted & (1 << (2) )) thread_wakeup_prim(((event_t)(((vm_offset_t) object) + (2) )), ((boolean_t) 0), 0); (object)->all_wanted &= ~(1 << (2)); }); } }); |
2648 | vm_object_unlock(object); |
2649 | |
2650 | offset += PAGE_SIZE(1 << 12); |
2651 | va += PAGE_SIZE(1 << 12); |
2652 | } |
2653 | } |
2654 | |
2655 | |
2656 | } |
2657 | |
2658 | /* |
2659 | * Correct the page alignment for the result |
2660 | */ |
2661 | |
2662 | *dst_addr = start + (copy->offset - vm_copy_start); |
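(Editor's note, worked example with made-up numbers: if the original copy->offset was 0x2345, so that the page-truncated vm_copy_start computed earlier in this routine is 0x2000, and the copied pages were placed at start = 0x80000000, the caller gets back *dst_addr = 0x80000000 + (0x2345 - 0x2000) = 0x80000345, i.e. the sub-page offset of the source data is restored.)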
2663 | |
2664 | /* |
2665 | * Update the hints and the map size |
2666 | */ |
2667 | |
2668 | if (dst_map->first_free == last) |
2669 | dst_map->first_free = vm_map_copy_last_entry(copy)((copy)->c_u.hdr.links.prev); |
2670 | SAVE_HINT(dst_map, vm_map_copy_last_entry(copy)); (dst_map)->hint = (((copy)->c_u.hdr.links.prev)); ;; |
2671 | |
2672 | dst_map->size += size; |
2673 | |
2674 | /* |
2675 | * Link in the copy |
2676 | */ |
2677 | |
2678 | vm_map_copy_insert(dst_map, last, copy)({ struct rbtree_node *node, *tmp; for (node = rbtree_postwalk_deepest (&(copy)->c_u.hdr.tree), tmp = rbtree_postwalk_unlink( node); node != ((void *) 0); node = tmp, tmp = rbtree_postwalk_unlink (node)) ({ struct rbtree_node *___cur, *___prev; int ___diff, ___index; ___prev = ((void *) 0); ___index = -1; ___cur = (& (dst_map)->hdr.tree)->root; while (___cur != ((void *) 0 )) { ___diff = vm_map_entry_cmp_insert(node, ___cur); ({ if ( !(___diff != 0)) Assert("___diff != 0", "../vm/vm_map.c", 2678 ); }); ___prev = ___cur; ___index = rbtree_d2i(___diff); ___cur = ___cur->children[___index]; } rbtree_insert_rebalance(& (dst_map)->hdr.tree, ___prev, ___index, node); }); (((last )->links.next)->links.prev = ((copy)->c_u.hdr.links. prev)) ->links.next = ((last)->links.next); ((last)-> links.next = ((copy)->c_u.hdr.links.next)) ->links.prev = (last); (dst_map)->hdr.nentries += (copy)->c_u.hdr.nentries ; kmem_cache_free(&vm_map_copy_cache, (vm_offset_t) copy) ; }); |
2679 | |
2680 | vm_map_unlock(dst_map)lock_done(&(dst_map)->lock); |
2681 | |
2682 | /* |
2683 | * XXX If wiring_required, call vm_map_pageable |
2684 | */ |
2685 | |
2686 | return(KERN_SUCCESS0); |
2687 | } |
2688 | |
2689 | /* |
2690 | * |
2691 | * vm_map_copyout_page_list: |
2692 | * |
2693 | * Version of vm_map_copyout() for page list vm map copies. |
2694 | * |
2695 | */ |
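(Editor's note: entry to this routine is normally indirect. The generic vm_map_copyout presumably dispatches on the copy type along the lines of the paraphrased check below, so ordinary callers never name the page-list variant themselves; this is an assumption about code outside this excerpt, not a quote from it.)

        if (copy->type == VM_MAP_COPY_PAGE_LIST)
                return vm_map_copyout_page_list(dst_map, dst_addr, copy);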
2696 | kern_return_t vm_map_copyout_page_list(dst_map, dst_addr, copy) |
2697 | vm_map_t dst_map; |
2698 | vm_offset_t *dst_addr; /* OUT */ |
2699 | vm_map_copy_t copy; |
2700 | { |
2701 | vm_size_t size; |
2702 | vm_offset_t start; |
2703 | vm_offset_t end; |
2704 | vm_offset_t offset; |
2705 | vm_map_entry_t last; |
2706 | vm_object_t object; |
2707 | vm_page_t *page_list, m; |
2708 | vm_map_entry_t entry; |
2709 | vm_offset_t old_last_offset; |
2710 | boolean_t cont_invoked, needs_wakeup = FALSE((boolean_t) 0); |
2711 | kern_return_t result = KERN_SUCCESS0; |
2712 | vm_map_copy_t orig_copy; |
2713 | vm_offset_t dst_offset; |
2714 | boolean_t must_wire; |
2715 | |
2716 | /* |
2717 | * Make sure the pages are stolen, because we are |
2718 | * going to put them in a new object. Assume that |
 2719 |  *	all pages are identical to the first in this regard.
2720 | */ |
2721 | |
2722 | page_list = ©->cpy_page_listc_u.c_p.page_list[0]; |
2723 | if ((*page_list)->tabled) |
2724 | vm_map_copy_steal_pages(copy); |
2725 | |
2726 | /* |
2727 | * Find space for the data |
2728 | */ |
2729 | |
2730 | size = round_page(copy->offset + copy->size)((vm_offset_t)((((vm_offset_t)(copy->offset + copy->size )) + ((1 << 12)-1)) & ~((1 << 12)-1))) - |
2731 | trunc_page(copy->offset)((vm_offset_t)(((vm_offset_t)(copy->offset)) & ~((1 << 12)-1))); |
2732 | StartAgain: |
2733 | vm_map_lock(dst_map)({ lock_write(&(dst_map)->lock); (dst_map)->timestamp ++; }); |
2734 | must_wire = dst_map->wiring_required; |
2735 | |
2736 | last = dst_map->first_free; |
2737 | if (last == vm_map_to_entry(dst_map)((struct vm_map_entry *) &(dst_map)->hdr.links)) { |
2738 | start = vm_map_min(dst_map)((dst_map)->hdr.links.start); |
2739 | } else { |
2740 | start = last->vme_endlinks.end; |
2741 | } |
2742 | |
2743 | while (TRUE((boolean_t) 1)) { |
2744 | vm_map_entry_t next = last->vme_nextlinks.next; |
2745 | end = start + size; |
2746 | |
2747 | if ((end > dst_map->max_offsethdr.links.end) || (end < start)) { |
2748 | if (dst_map->wait_for_space) { |
2749 | if (size <= (dst_map->max_offsethdr.links.end - |
2750 | dst_map->min_offsethdr.links.start)) { |
2751 | assert_wait((event_t) dst_map, TRUE((boolean_t) 1)); |
2752 | vm_map_unlock(dst_map)lock_done(&(dst_map)->lock); |
2753 | thread_block((void (*)()) 0); |
2754 | goto StartAgain; |
2755 | } |
2756 | } |
2757 | vm_map_unlock(dst_map)lock_done(&(dst_map)->lock); |
2758 | printf_once("no more room for vm_map_copyout_page_list in %p\n", dst_map)({ static int __once = 0; if (!__once) { printf("no more room for vm_map_copyout_page_list in %p\n" , dst_map); __once = 1; } }); |
2759 | return(KERN_NO_SPACE3); |
2760 | } |
2761 | |
2762 | if ((next == vm_map_to_entry(dst_map)((struct vm_map_entry *) &(dst_map)->hdr.links)) || |
2763 | (next->vme_startlinks.start >= end)) { |
2764 | break; |
2765 | } |
2766 | |
2767 | last = next; |
2768 | start = last->vme_endlinks.end; |
2769 | } |
2770 | |
2771 | /* |
2772 | * See whether we can avoid creating a new entry (and object) by |
2773 | * extending one of our neighbors. [So far, we only attempt to |
2774 | * extend from below.] |
2775 | * |
2776 | * The code path below here is a bit twisted. If any of the |
2777 | * extension checks fails, we branch to create_object. If |
2778 | * it all works, we fall out the bottom and goto insert_pages. |
2779 | */ |
2780 | if (last == vm_map_to_entry(dst_map)((struct vm_map_entry *) &(dst_map)->hdr.links) || |
2781 | last->vme_endlinks.end != start || |
2782 | last->is_shared != FALSE((boolean_t) 0) || |
2783 | last->is_sub_map != FALSE((boolean_t) 0) || |
2784 | last->inheritance != VM_INHERIT_DEFAULT((vm_inherit_t) 1) || |
2785 | last->protection != VM_PROT_DEFAULT(((vm_prot_t) 0x01)|((vm_prot_t) 0x02)) || |
2786 | last->max_protection != VM_PROT_ALL(((vm_prot_t) 0x01)|((vm_prot_t) 0x02)|((vm_prot_t) 0x04)) || |
2787 | (must_wire ? (last->wired_count != 1 || |
2788 | last->user_wired_count != 1) : |
2789 | (last->wired_count != 0))) { |
2790 | goto create_object; |
2791 | } |
2792 | |
2793 | /* |
2794 | * If this entry needs an object, make one. |
2795 | */ |
2796 | if (last->object.vm_object == VM_OBJECT_NULL((vm_object_t) 0)) { |
2797 | object = vm_object_allocate( |
2798 | (vm_size_t)(last->vme_endlinks.end - last->vme_startlinks.start + size)); |
2799 | last->object.vm_object = object; |
2800 | last->offset = 0; |
2801 | vm_object_lock(object); |
2802 | } |
2803 | else { |
2804 | vm_offset_t prev_offset = last->offset; |
2805 | vm_size_t prev_size = start - last->vme_startlinks.start; |
2806 | vm_size_t new_size; |
2807 | |
2808 | /* |
2809 | * This is basically vm_object_coalesce. |
2810 | */ |
2811 | |
2812 | object = last->object.vm_object; |
2813 | vm_object_lock(object); |
2814 | |
2815 | /* |
2816 | * Try to collapse the object first |
2817 | */ |
2818 | vm_object_collapse(object); |
2819 | |
2820 | /* |
 2821 |  *	Can't coalesce if pages of the object that are not
 2822 |  *	mapped by `last' may be in use anyway:
2823 | * . more than one reference |
2824 | * . paged out |
2825 | * . shadows another object |
2826 | * . has a copy elsewhere |
2827 | * . paging references (pages might be in page-list) |
2828 | */ |
2829 | |
2830 | if ((object->ref_count > 1) || |
2831 | object->pager_created || |
2832 | (object->shadow != VM_OBJECT_NULL((vm_object_t) 0)) || |
2833 | (object->copy != VM_OBJECT_NULL((vm_object_t) 0)) || |
2834 | (object->paging_in_progress != 0)) { |
2835 | vm_object_unlock(object); |
2836 | goto create_object; |
2837 | } |
2838 | |
2839 | /* |
2840 | * Extend the object if necessary. Don't have to call |
2841 | * vm_object_page_remove because the pages aren't mapped, |
2842 | * and vm_page_replace will free up any old ones it encounters. |
2843 | */ |
2844 | new_size = prev_offset + prev_size + size; |
2845 | if (new_size > object->size) |
2846 | object->size = new_size; |
2847 | } |
2848 | |
2849 | /* |
2850 | * Coalesced the two objects - can extend |
2851 | * the previous map entry to include the |
2852 | * new range. |
2853 | */ |
2854 | dst_map->size += size; |
2855 | last->vme_endlinks.end = end; |
2856 | |
2857 | SAVE_HINT(dst_map, last); (dst_map)->hint = (last); ;; |
2858 | |
2859 | goto insert_pages; |
2860 | |
2861 | create_object: |
2862 | |
2863 | /* |
2864 | * Create object |
2865 | */ |
2866 | object = vm_object_allocate(size); |
2867 | |
2868 | /* |
2869 | * Create entry |
2870 | */ |
2871 | |
2872 | entry = vm_map_entry_create(dst_map)_vm_map_entry_create(&(dst_map)->hdr); |
2873 | |
2874 | entry->object.vm_object = object; |
2875 | entry->offset = 0; |
2876 | |
2877 | entry->is_shared = FALSE((boolean_t) 0); |
2878 | entry->is_sub_map = FALSE((boolean_t) 0); |
2879 | entry->needs_copy = FALSE((boolean_t) 0); |
2880 | |
2881 | if (must_wire) { |
2882 | entry->wired_count = 1; |
2883 | entry->user_wired_count = 1; |
2884 | } else { |
2885 | entry->wired_count = 0; |
2886 | entry->user_wired_count = 0; |
2887 | } |
2888 | |
2889 | entry->in_transition = TRUE((boolean_t) 1); |
2890 | entry->needs_wakeup = FALSE((boolean_t) 0); |
2891 | |
2892 | entry->vme_startlinks.start = start; |
2893 | entry->vme_endlinks.end = start + size; |
2894 | |
2895 | entry->inheritance = VM_INHERIT_DEFAULT((vm_inherit_t) 1); |
2896 | entry->protection = VM_PROT_DEFAULT(((vm_prot_t) 0x01)|((vm_prot_t) 0x02)); |
2897 | entry->max_protection = VM_PROT_ALL(((vm_prot_t) 0x01)|((vm_prot_t) 0x02)|((vm_prot_t) 0x04)); |
2898 | entry->projected_on = 0; |
2899 | |
2900 | vm_object_lock(object); |
2901 | |
2902 | /* |
2903 | * Update the hints and the map size |
2904 | */ |
2905 | if (dst_map->first_free == last) { |
2906 | dst_map->first_free = entry; |
2907 | } |
2908 | SAVE_HINT(dst_map, entry); (dst_map)->hint = (entry); ;; |
2909 | dst_map->size += size; |
2910 | |
2911 | /* |
2912 | * Link in the entry |
2913 | */ |
2914 | vm_map_entry_link(dst_map, last, entry)({ (&(dst_map)->hdr)->nentries++; (entry)->links .prev = (last); (entry)->links.next = (last)->links.next ; (entry)->links.prev->links.next = (entry)->links.next ->links.prev = (entry); ({ struct rbtree_node *___cur, *___prev ; int ___diff, ___index; ___prev = ((void *) 0); ___index = - 1; ___cur = (&(&(dst_map)->hdr)->tree)->root ; while (___cur != ((void *) 0)) { ___diff = vm_map_entry_cmp_insert (&(entry)->tree_node, ___cur); ({ if (!(___diff != 0)) Assert("___diff != 0", "../vm/vm_map.c", 2914); }); ___prev = ___cur; ___index = rbtree_d2i(___diff); ___cur = ___cur-> children[___index]; } rbtree_insert_rebalance(&(&(dst_map )->hdr)->tree, ___prev, ___index, &(entry)->tree_node ); }); }); |
2915 | last = entry; |
2916 | |
2917 | /* |
2918 | * Transfer pages into new object. |
2919 | * Scan page list in vm_map_copy. |
2920 | */ |
2921 | insert_pages: |
2922 | dst_offset = copy->offset & PAGE_MASK((1 << 12)-1); |
2923 | cont_invoked = FALSE((boolean_t) 0); |
2924 | orig_copy = copy; |
2925 | last->in_transition = TRUE((boolean_t) 1); |
2926 | old_last_offset = last->offset |
2927 | + (start - last->vme_startlinks.start); |
2928 | |
2929 | vm_page_lock_queues(); |
2930 | |
2931 | for (offset = 0; offset < size; offset += PAGE_SIZE(1 << 12)) { |
2932 | m = *page_list; |
2933 | assert(m && !m->tabled)({ if (!(m && !m->tabled)) Assert("m && !m->tabled" , "../vm/vm_map.c", 2933); }); |
2934 | |
2935 | /* |
2936 | * Must clear busy bit in page before inserting it. |
2937 | * Ok to skip wakeup logic because nobody else |
2938 | * can possibly know about this page. |
2939 | * The page is dirty in its new object. |
2940 | */ |
2941 | |
2942 | assert(!m->wanted)({ if (!(!m->wanted)) Assert("!m->wanted", "../vm/vm_map.c" , 2942); }); |
2943 | |
2944 | m->busy = FALSE((boolean_t) 0); |
2945 | m->dirty = TRUE((boolean_t) 1); |
2946 | vm_page_replace(m, object, old_last_offset + offset); |
2947 | if (must_wire) { |
2948 | vm_page_wire(m); |
2949 | PMAP_ENTER(dst_map->pmap,({ pmap_enter( (dst_map->pmap), (last->links.start + m-> offset - last->offset), (m)->phys_addr, (last->protection ) & ~(m)->page_lock, (((boolean_t) 1)) ); }) |
2950 | last->vme_start + m->offset - last->offset,({ pmap_enter( (dst_map->pmap), (last->links.start + m-> offset - last->offset), (m)->phys_addr, (last->protection ) & ~(m)->page_lock, (((boolean_t) 1)) ); }) |
2951 | m, last->protection, TRUE)({ pmap_enter( (dst_map->pmap), (last->links.start + m-> offset - last->offset), (m)->phys_addr, (last->protection ) & ~(m)->page_lock, (((boolean_t) 1)) ); }); |
2952 | } else { |
2953 | vm_page_activate(m); |
2954 | } |
2955 | |
2956 | *page_list++ = VM_PAGE_NULL((vm_page_t) 0); |
2957 | if (--(copy->cpy_npagesc_u.c_p.npages) == 0 && |
2958 | vm_map_copy_has_cont(copy)(((copy)->c_u.c_p.cont) != (kern_return_t (*)()) 0)) { |
2959 | vm_map_copy_t new_copy; |
2960 | |
2961 | /* |
2962 | * Ok to unlock map because entry is |
2963 | * marked in_transition. |
2964 | */ |
2965 | cont_invoked = TRUE((boolean_t) 1); |
2966 | vm_page_unlock_queues(); |
2967 | vm_object_unlock(object); |
2968 | vm_map_unlock(dst_map)lock_done(&(dst_map)->lock); |
2969 | vm_map_copy_invoke_cont(copy, &new_copy, &result)({ vm_map_copy_page_discard(copy); *&result = (*((copy)-> c_u.c_p.cont))((copy)->c_u.c_p.cont_args, &new_copy); ( copy)->c_u.c_p.cont = (kern_return_t (*)()) 0; }); |
2970 | |
2971 | if (result == KERN_SUCCESS0) { |
2972 | |
2973 | /* |
2974 | * If we got back a copy with real pages, |
2975 | * steal them now. Either all of the |
2976 | * pages in the list are tabled or none |
2977 | * of them are; mixtures are not possible. |
2978 | * |
2979 | * Save original copy for consume on |
2980 | * success logic at end of routine. |
2981 | */ |
2982 | if (copy != orig_copy) |
2983 | vm_map_copy_discard(copy); |
2984 | |
2985 | if ((copy = new_copy) != VM_MAP_COPY_NULL((vm_map_copy_t) 0)) { |
2986 | page_list = ©->cpy_page_listc_u.c_p.page_list[0]; |
2987 | if ((*page_list)->tabled) |
2988 | vm_map_copy_steal_pages(copy); |
2989 | } |
2990 | } |
2991 | else { |
2992 | /* |
2993 | * Continuation failed. |
2994 | */ |
2995 | vm_map_lock(dst_map)({ lock_write(&(dst_map)->lock); (dst_map)->timestamp ++; }); |
2996 | goto error; |
2997 | } |
2998 | |
2999 | vm_map_lock(dst_map)({ lock_write(&(dst_map)->lock); (dst_map)->timestamp ++; }); |
3000 | vm_object_lock(object); |
3001 | vm_page_lock_queues(); |
3002 | } |
3003 | } |
3004 | |
3005 | vm_page_unlock_queues(); |
3006 | vm_object_unlock(object); |
3007 | |
3008 | *dst_addr = start + dst_offset; |
3009 | |
3010 | /* |
3011 | * Clear the in transition bits. This is easy if we |
3012 | * didn't have a continuation. |
3013 | */ |
3014 | error: |
3015 | if (!cont_invoked) { |
3016 | /* |
3017 | * We didn't unlock the map, so nobody could |
3018 | * be waiting. |
3019 | */ |
3020 | last->in_transition = FALSE((boolean_t) 0); |
3021 | assert(!last->needs_wakeup)({ if (!(!last->needs_wakeup)) Assert("!last->needs_wakeup" , "../vm/vm_map.c", 3021); }); |
3022 | needs_wakeup = FALSE((boolean_t) 0); |
3023 | } |
3024 | else { |
3025 | if (!vm_map_lookup_entry(dst_map, start, &entry)) |
3026 | panic("vm_map_copyout_page_list: missing entry"); |
3027 | |
3028 | /* |
3029 | * Clear transition bit for all constituent entries that |
3030 | * were in the original entry. Also check for waiters. |
3031 | */ |
3032 | while((entry != vm_map_to_entry(dst_map)((struct vm_map_entry *) &(dst_map)->hdr.links)) && |
3033 | (entry->vme_startlinks.start < end)) { |
3034 | assert(entry->in_transition)({ if (!(entry->in_transition)) Assert("entry->in_transition" , "../vm/vm_map.c", 3034); }); |
3035 | entry->in_transition = FALSE((boolean_t) 0); |
3036 | if(entry->needs_wakeup) { |
3037 | entry->needs_wakeup = FALSE((boolean_t) 0); |
3038 | needs_wakeup = TRUE((boolean_t) 1); |
3039 | } |
3040 | entry = entry->vme_nextlinks.next; |
3041 | } |
3042 | } |
3043 | |
3044 | if (result != KERN_SUCCESS0) |
3045 | vm_map_delete(dst_map, start, end); |
3046 | |
3047 | vm_map_unlock(dst_map)lock_done(&(dst_map)->lock); |
3048 | |
3049 | if (needs_wakeup) |
3050 | vm_map_entry_wakeup(dst_map)thread_wakeup_prim(((event_t)&(dst_map)->hdr), ((boolean_t ) 0), 0); |
3051 | |
3052 | /* |
3053 | * Consume on success logic. |
3054 | */ |
3055 | if (copy != orig_copy) { |
3056 | kmem_cache_free(&vm_map_copy_cache, (vm_offset_t) copy); |
3057 | } |
3058 | if (result == KERN_SUCCESS0) { |
3059 | kmem_cache_free(&vm_map_copy_cache, (vm_offset_t) orig_copy); |
3060 | } |
3061 | |
3062 | return(result); |
3063 | } |
3064 | |
3065 | /* |
3066 | * Routine: vm_map_copyin |
3067 | * |
3068 | * Description: |
3069 | * Copy the specified region (src_addr, len) from the |
3070 | * source address space (src_map), possibly removing |
3071 | * the region from the source address space (src_destroy). |
3072 | * |
3073 | * Returns: |
3074 | * A vm_map_copy_t object (copy_result), suitable for |
3075 | * insertion into another address space (using vm_map_copyout), |
3076 | * copying over another address space region (using |
3077 | * vm_map_copy_overwrite). If the copy is unused, it |
3078 | * should be destroyed (using vm_map_copy_discard). |
3079 | * |
3080 | * In/out conditions: |
3081 | * The source map should not be locked on entry. |
3082 | */ |
3083 | kern_return_t vm_map_copyin(src_map, src_addr, len, src_destroy, copy_result) |
3084 | vm_map_t src_map; |
3085 | vm_offset_t src_addr; |
3086 | vm_size_t len; |
3087 | boolean_t src_destroy; |
3088 | vm_map_copy_t *copy_result; /* OUT */ |
3089 | { |
3090 | vm_map_entry_t tmp_entry; /* Result of last map lookup -- |
3091 | * in multi-level lookup, this |
3092 | * entry contains the actual |
3093 | * vm_object/offset. |
3094 | */ |
3095 | |
3096 | vm_offset_t src_start; /* Start of current entry -- |
3097 | * where copy is taking place now |
3098 | */ |
3099 | vm_offset_t src_end; /* End of entire region to be |
3100 | * copied */ |
3101 | |
3102 | vm_map_copy_t copy; /* Resulting copy */ |
3103 | |
3104 | /* |
3105 | * Check for copies of zero bytes. |
3106 | */ |
3107 | |
3108 | if (len == 0) { |
3109 | *copy_result = VM_MAP_COPY_NULL((vm_map_copy_t) 0); |
3110 | return(KERN_SUCCESS0); |
3111 | } |
3112 | |
3113 | /* |
3114 | * Compute start and end of region |
3115 | */ |
3116 | |
3117 | src_start = trunc_page(src_addr)((vm_offset_t)(((vm_offset_t)(src_addr)) & ~((1 << 12 )-1))); |
3118 | src_end = round_page(src_addr + len)((vm_offset_t)((((vm_offset_t)(src_addr + len)) + ((1 << 12)-1)) & ~((1 << 12)-1))); |
3119 | |
3120 | /* |
3121 | * Check that the end address doesn't overflow |
3122 | */ |
3123 | |
3124 | if (src_end <= src_start) |
3125 | if ((src_end < src_start) || (src_start != 0)) |
3126 | return(KERN_INVALID_ADDRESS1); |
3127 | |
3128 | /* |
3129 | * Allocate a header element for the list. |
3130 | * |
3131 | * Use the start and end in the header to |
3132 | * remember the endpoints prior to rounding. |
3133 | */ |
3134 | |
3135 | copy = (vm_map_copy_t) kmem_cache_alloc(&vm_map_copy_cache); |
3136 | vm_map_copy_first_entry(copy)((copy)->c_u.hdr.links.next) = |
3137 | vm_map_copy_last_entry(copy)((copy)->c_u.hdr.links.prev) = vm_map_copy_to_entry(copy)((struct vm_map_entry *) &(copy)->c_u.hdr.links); |
3138 | copy->type = VM_MAP_COPY_ENTRY_LIST1; |
3139 | copy->cpy_hdrc_u.hdr.nentries = 0; |
3140 | copy->cpy_hdrc_u.hdr.entries_pageable = TRUE((boolean_t) 1); |
3141 | rbtree_init(©->cpy_hdrc_u.hdr.tree); |
3142 | |
3143 | copy->offset = src_addr; |
3144 | copy->size = len; |
3145 | |
3146 | #define RETURN(x) \ |
3147 | MACRO_BEGIN({ \ |
3148 | vm_map_unlock(src_map)lock_done(&(src_map)->lock); \ |
3149 | vm_map_copy_discard(copy); \ |
3150 | MACRO_RETURNif (((boolean_t) 1)) return(x); \ |
3151 | MACRO_END}) |
3152 | |
3153 | /* |
3154 | * Find the beginning of the region. |
3155 | */ |
3156 | |
3157 | vm_map_lock(src_map)({ lock_write(&(src_map)->lock); (src_map)->timestamp ++; }); |
3158 | |
3159 | if (!vm_map_lookup_entry(src_map, src_start, &tmp_entry)) |
3160 | RETURN(KERN_INVALID_ADDRESS1); |
3161 | vm_map_clip_start(src_map, tmp_entry, src_start)({ if ((src_start) > (tmp_entry)->links.start) _vm_map_clip_start (&(src_map)->hdr,(tmp_entry),(src_start)); }); |
3162 | |
3163 | /* |
3164 | * Go through entries until we get to the end. |
3165 | */ |
3166 | |
3167 | while (TRUE((boolean_t) 1)) { |
3168 | vm_map_entry_t src_entry = tmp_entry; /* Top-level entry */ |
3169 | vm_size_t src_size; /* Size of source |
3170 | * map entry (in both |
3171 | * maps) |
3172 | */ |
3173 | |
3174 | vm_object_t src_object; /* Object to copy */ |
3175 | vm_offset_t src_offset; |
3176 | |
3177 | boolean_t src_needs_copy; /* Should source map |
3178 | * be made read-only |
3179 | * for copy-on-write? |
3180 | */ |
3181 | |
3182 | vm_map_entry_t new_entry; /* Map entry for copy */ |
3183 | boolean_t new_entry_needs_copy; /* Will new entry be COW? */ |
3184 | |
3185 | boolean_t was_wired; /* Was source wired? */ |
3186 | vm_map_version_t version; /* Version before locks |
3187 | * dropped to make copy |
3188 | */ |
3189 | |
3190 | /* |
3191 | * Verify that the region can be read. |
3192 | */ |
3193 | |
3194 | if (! (src_entry->protection & VM_PROT_READ((vm_prot_t) 0x01))) |
3195 | RETURN(KERN_PROTECTION_FAILURE2); |
3196 | |
3197 | /* |
3198 | * Clip against the endpoints of the entire region. |
3199 | */ |
3200 | |
3201 | vm_map_clip_end(src_map, src_entry, src_end)({ if ((src_end) < (src_entry)->links.end) _vm_map_clip_end (&(src_map)->hdr,(src_entry),(src_end)); }); |
3202 | |
3203 | src_size = src_entry->vme_endlinks.end - src_start; |
3204 | src_object = src_entry->object.vm_object; |
3205 | src_offset = src_entry->offset; |
3206 | was_wired = (src_entry->wired_count != 0); |
3207 | |
3208 | /* |
3209 | * Create a new address map entry to |
3210 | * hold the result. Fill in the fields from |
3211 | * the appropriate source entries. |
3212 | */ |
3213 | |
3214 | new_entry = vm_map_copy_entry_create(copy)_vm_map_entry_create(&(copy)->c_u.hdr); |
3215 | vm_map_entry_copy(new_entry, src_entry)({ *(new_entry) = *(src_entry); (new_entry)->is_shared = ( (boolean_t) 0); (new_entry)->needs_wakeup = ((boolean_t) 0 ); (new_entry)->in_transition = ((boolean_t) 0); (new_entry )->wired_count = 0; (new_entry)->user_wired_count = 0; } ); |
3216 | |
3217 | /* |
3218 | * Attempt non-blocking copy-on-write optimizations. |
3219 | */ |
3220 | |
3221 | if (src_destroy && |
3222 | (src_object == VM_OBJECT_NULL((vm_object_t) 0) || |
3223 | (src_object->temporary && !src_object->use_shared_copy))) |
3224 | { |
3225 | /* |
3226 | * If we are destroying the source, and the object |
3227 | * is temporary, and not shared writable, |
3228 | * we can move the object reference |
3229 | * from the source to the copy. The copy is |
3230 | * copy-on-write only if the source is. |
3231 | * We make another reference to the object, because |
3232 | * destroying the source entry will deallocate it. |
3233 | */ |
3234 | vm_object_reference(src_object); |
3235 | |
3236 | /* |
 3237 |  *	Copy is always unwired.  vm_map_entry_copy above
 3238 |  *	set its wired count to zero.
3239 | */ |
3240 | |
3241 | goto CopySuccessful; |
3242 | } |
3243 | |
3244 | if (!was_wired && |
3245 | vm_object_copy_temporary( |
3246 | &new_entry->object.vm_object, |
3247 | &new_entry->offset, |
3248 | &src_needs_copy, |
3249 | &new_entry_needs_copy)) { |
3250 | |
3251 | new_entry->needs_copy = new_entry_needs_copy; |
3252 | |
3253 | /* |
3254 | * Handle copy-on-write obligations |
3255 | */ |
3256 | |
3257 | if (src_needs_copy && !tmp_entry->needs_copy) { |
3258 | vm_object_pmap_protect( |
3259 | src_object, |
3260 | src_offset, |
3261 | src_size, |
3262 | (src_entry->is_shared ? PMAP_NULL((pmap_t) 0) |
3263 | : src_map->pmap), |
3264 | src_entry->vme_startlinks.start, |
3265 | src_entry->protection & |
3266 | ~VM_PROT_WRITE((vm_prot_t) 0x02)); |
3267 | |
3268 | tmp_entry->needs_copy = TRUE((boolean_t) 1); |
3269 | } |
3270 | |
3271 | /* |
3272 | * The map has never been unlocked, so it's safe to |
3273 | * move to the next entry rather than doing another |
3274 | * lookup. |
3275 | */ |
3276 | |
3277 | goto CopySuccessful; |
3278 | } |
3279 | |
3280 | new_entry->needs_copy = FALSE((boolean_t) 0); |
3281 | |
3282 | /* |
3283 | * Take an object reference, so that we may |
3284 | * release the map lock(s). |
3285 | */ |
3286 | |
3287 | assert(src_object != VM_OBJECT_NULL)({ if (!(src_object != ((vm_object_t) 0))) Assert("src_object != VM_OBJECT_NULL" , "../vm/vm_map.c", 3287); }); |
3288 | vm_object_reference(src_object); |
3289 | |
3290 | /* |
3291 | * Record the timestamp for later verification. |
3292 | * Unlock the map. |
3293 | */ |
3294 | |
3295 | version.main_timestamp = src_map->timestamp; |
3296 | vm_map_unlock(src_map)lock_done(&(src_map)->lock); |
3297 | |
3298 | /* |
3299 | * Perform the copy |
3300 | */ |
3301 | |
3302 | if (was_wired) { |
3303 | vm_object_lock(src_object); |
3304 | (void) vm_object_copy_slowly( |
3305 | src_object, |
3306 | src_offset, |
3307 | src_size, |
3308 | FALSE((boolean_t) 0), |
3309 | &new_entry->object.vm_object); |
3310 | new_entry->offset = 0; |
3311 | new_entry->needs_copy = FALSE((boolean_t) 0); |
3312 | } else { |
3313 | kern_return_t result; |
3314 | |
3315 | result = vm_object_copy_strategically(src_object, |
3316 | src_offset, |
3317 | src_size, |
3318 | &new_entry->object.vm_object, |
3319 | &new_entry->offset, |
3320 | &new_entry_needs_copy); |
3321 | |
3322 | new_entry->needs_copy = new_entry_needs_copy; |
3323 | |
3324 | |
3325 | if (result != KERN_SUCCESS0) { |
3326 | vm_map_copy_entry_dispose(copy, new_entry)_vm_map_entry_dispose(&(copy)->c_u.hdr, (new_entry)); |
3327 | |
3328 | vm_map_lock(src_map)({ lock_write(&(src_map)->lock); (src_map)->timestamp ++; }); |
3329 | RETURN(result); |
3330 | } |
3331 | |
3332 | } |
3333 | |
3334 | /* |
3335 | * Throw away the extra reference |
3336 | */ |
3337 | |
3338 | vm_object_deallocate(src_object); |
3339 | |
3340 | /* |
3341 | * Verify that the map has not substantially |
3342 | * changed while the copy was being made. |
3343 | */ |
3344 | |
3345 | vm_map_lock(src_map)({ lock_write(&(src_map)->lock); (src_map)->timestamp ++; }); /* Increments timestamp once! */ |
3346 | |
3347 | if ((version.main_timestamp + 1) == src_map->timestamp) |
3348 | goto CopySuccessful; |
3349 | |
3350 | /* |
3351 | * Simple version comparison failed. |
3352 | * |
3353 | * Retry the lookup and verify that the |
3354 | * same object/offset are still present. |
3355 | * |
3356 | * [Note: a memory manager that colludes with |
3357 | * the calling task can detect that we have |
3358 | * cheated. While the map was unlocked, the |
3359 | * mapping could have been changed and restored.] |
3360 | */ |
3361 | |
3362 | if (!vm_map_lookup_entry(src_map, src_start, &tmp_entry)) { |
3363 | vm_map_copy_entry_dispose(copy, new_entry)_vm_map_entry_dispose(&(copy)->c_u.hdr, (new_entry)); |
3364 | RETURN(KERN_INVALID_ADDRESS1); |
3365 | } |
3366 | |
3367 | src_entry = tmp_entry; |
3368 | vm_map_clip_start(src_map, src_entry, src_start)({ if ((src_start) > (src_entry)->links.start) _vm_map_clip_start (&(src_map)->hdr,(src_entry),(src_start)); }); |
3369 | |
3370 | if ((src_entry->protection & VM_PROT_READ((vm_prot_t) 0x01)) == VM_PROT_NONE((vm_prot_t) 0x00)) |
3371 | goto VerificationFailed; |
3372 | |
3373 | if (src_entry->vme_endlinks.end < new_entry->vme_endlinks.end) |
3374 | src_size = (new_entry->vme_endlinks.end = src_entry->vme_endlinks.end) - src_start; |
Value stored to 'src_size' is never read | |
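(Editor's note on the finding: src_size is reassigned at the top of every pass through this loop, on source line 3203, before it is next read, and nothing after this point consumes it, so the value computed on line 3374 is indeed dead; only the clipping of new_entry survives as a useful side effect. A minimal cleanup, assuming no later reader of src_size is introduced, keeps the clip and drops the store. Field names are shown unexpanded, i.e. vme_end rather than the macro-expanded links.end used in this listing.)

        if (src_entry->vme_end < new_entry->vme_end)
                /* clip the copy entry to the re-verified source entry;
                 * src_size is recomputed at the top of the next iteration */
                new_entry->vme_end = src_entry->vme_end;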
3375 | |
3376 | if ((src_entry->object.vm_object != src_object) || |
3377 | (src_entry->offset != src_offset) ) { |
3378 | |
3379 | /* |
3380 | * Verification failed. |
3381 | * |
3382 | * Start over with this top-level entry. |
3383 | */ |
3384 | |
3385 | VerificationFailed: ; |
3386 | |
3387 | vm_object_deallocate(new_entry->object.vm_object); |
3388 | vm_map_copy_entry_dispose(copy, new_entry)_vm_map_entry_dispose(&(copy)->c_u.hdr, (new_entry)); |
3389 | tmp_entry = src_entry; |
3390 | continue; |
3391 | } |
3392 | |
3393 | /* |
3394 | * Verification succeeded. |
3395 | */ |
3396 | |
3397 | CopySuccessful: ; |
3398 | |
3399 | /* |
3400 | * Link in the new copy entry. |
3401 | */ |
3402 | |
3403 | vm_map_copy_entry_link(copy, vm_map_copy_last_entry(copy),({ (&(copy)->c_u.hdr)->nentries++; (new_entry)-> links.prev = (((copy)->c_u.hdr.links.prev)); (new_entry)-> links.next = (((copy)->c_u.hdr.links.prev))->links.next ; (new_entry)->links.prev->links.next = (new_entry)-> links.next->links.prev = (new_entry); ({ struct rbtree_node *___cur, *___prev; int ___diff, ___index; ___prev = ((void * ) 0); ___index = -1; ___cur = (&(&(copy)->c_u.hdr) ->tree)->root; while (___cur != ((void *) 0)) { ___diff = vm_map_entry_cmp_insert(&(new_entry)->tree_node, ___cur ); ({ if (!(___diff != 0)) Assert("___diff != 0", "../vm/vm_map.c" , 3404); }); ___prev = ___cur; ___index = rbtree_d2i(___diff) ; ___cur = ___cur->children[___index]; } rbtree_insert_rebalance (&(&(copy)->c_u.hdr)->tree, ___prev, ___index, & (new_entry)->tree_node); }); }) |
3404 | new_entry)({ (&(copy)->c_u.hdr)->nentries++; (new_entry)-> links.prev = (((copy)->c_u.hdr.links.prev)); (new_entry)-> links.next = (((copy)->c_u.hdr.links.prev))->links.next ; (new_entry)->links.prev->links.next = (new_entry)-> links.next->links.prev = (new_entry); ({ struct rbtree_node *___cur, *___prev; int ___diff, ___index; ___prev = ((void * ) 0); ___index = -1; ___cur = (&(&(copy)->c_u.hdr) ->tree)->root; while (___cur != ((void *) 0)) { ___diff = vm_map_entry_cmp_insert(&(new_entry)->tree_node, ___cur ); ({ if (!(___diff != 0)) Assert("___diff != 0", "../vm/vm_map.c" , 3404); }); ___prev = ___cur; ___index = rbtree_d2i(___diff) ; ___cur = ___cur->children[___index]; } rbtree_insert_rebalance (&(&(copy)->c_u.hdr)->tree, ___prev, ___index, & (new_entry)->tree_node); }); }); |
3405 | |
3406 | /* |
3407 | * Determine whether the entire region |
3408 | * has been copied. |
3409 | */ |
3410 | src_start = new_entry->vme_endlinks.end; |
3411 | if ((src_start >= src_end) && (src_end != 0)) |
3412 | break; |
3413 | |
3414 | /* |
3415 | * Verify that there are no gaps in the region |
3416 | */ |
3417 | |
3418 | tmp_entry = src_entry->vme_nextlinks.next; |
3419 | if (tmp_entry->vme_startlinks.start != src_start) |
3420 | RETURN(KERN_INVALID_ADDRESS1); |
3421 | } |
3422 | |
3423 | /* |
3424 | * If the source should be destroyed, do it now, since the |
3425 | * copy was successful. |
3426 | */ |
3427 | if (src_destroy) |
3428 | (void) vm_map_delete(src_map, trunc_page(src_addr)((vm_offset_t)(((vm_offset_t)(src_addr)) & ~((1 << 12 )-1))), src_end); |
3429 | |
3430 | vm_map_unlock(src_map)lock_done(&(src_map)->lock); |
3431 | |
3432 | *copy_result = copy; |
3433 | return(KERN_SUCCESS0); |
3434 | |
3435 | #undef RETURN |
3436 | } |
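(Editor's note: to make the contract in the block comment at the head of vm_map_copyin concrete, here is a minimal, hypothetical kernel-side sketch of the intended round trip. transfer_region is not a routine in this file, and error handling is abbreviated; the copy produced by vm_map_copyin is either injected into another map with vm_map_copyout or destroyed with vm_map_copy_discard, as the comment prescribes.)

        kern_return_t
        transfer_region(vm_map_t src_map, vm_map_t dst_map,
                        vm_offset_t src_addr, vm_size_t len,
                        vm_offset_t *dst_addr)          /* OUT */
        {
                vm_map_copy_t   copy;
                kern_return_t   kr;

                /* capture the source region; FALSE leaves the source mapping intact */
                kr = vm_map_copyin(src_map, src_addr, len, FALSE, &copy);
                if (kr != KERN_SUCCESS)
                        return kr;

                /* insert it into the destination; an unused copy must be destroyed */
                kr = vm_map_copyout(dst_map, dst_addr, copy);
                if (kr != KERN_SUCCESS)
                        vm_map_copy_discard(copy);
                return kr;
        }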
3437 | |
3438 | /* |
3439 | * vm_map_copyin_object: |
3440 | * |
3441 | * Create a copy object from an object. |
3442 | * Our caller donates an object reference. |
3443 | */ |
3444 | |
3445 | kern_return_t vm_map_copyin_object(object, offset, size, copy_result) |
3446 | vm_object_t object; |
3447 | vm_offset_t offset; /* offset of region in object */ |
3448 | vm_size_t size; /* size of region in object */ |
3449 | vm_map_copy_t *copy_result; /* OUT */ |
3450 | { |
3451 | vm_map_copy_t copy; /* Resulting copy */ |
3452 | |
3453 | /* |
3454 | * We drop the object into a special copy object |
3455 | * that contains the object directly. These copy objects |
3456 | * are distinguished by entries_pageable == FALSE |
3457 | * and null links. |
3458 | */ |
3459 | |
3460 | copy = (vm_map_copy_t) kmem_cache_alloc(&vm_map_copy_cache); |
3461 | vm_map_copy_first_entry(copy)((copy)->c_u.hdr.links.next) = |
3462 | vm_map_copy_last_entry(copy)((copy)->c_u.hdr.links.prev) = VM_MAP_ENTRY_NULL((vm_map_entry_t) 0); |
3463 | copy->type = VM_MAP_COPY_OBJECT2; |
3464 | copy->cpy_objectc_u.c_o.object = object; |
3465 | copy->offset = offset; |
3466 | copy->size = size; |
3467 | |
3468 | *copy_result = copy; |
3469 | return(KERN_SUCCESS0); |
3470 | } |
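(Editor's note: a hedged illustration of the "caller donates an object reference" convention; the object and size below are made up, not taken from any caller in this file.)

        vm_object_t     obj;
        vm_map_copy_t   copy;

        obj = vm_object_allocate(4 * PAGE_SIZE);        /* we hold the only reference */
        /* the reference moves into the copy object; do not deallocate obj afterwards */
        (void) vm_map_copyin_object(obj, 0, 4 * PAGE_SIZE, &copy);
        /* copy->type is VM_MAP_COPY_OBJECT; if the copy goes unused,
         * vm_map_copy_discard() releases the donated reference */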
3471 | |
3472 | /* |
3473 | * vm_map_copyin_page_list_cont: |
3474 | * |
3475 | * Continuation routine for vm_map_copyin_page_list. |
3476 | * |
3477 | * If vm_map_copyin_page_list can't fit the entire vm range |
3478 | * into a single page list object, it creates a continuation. |
3479 | * When the target of the operation has used the pages in the |
3480 | * initial page list, it invokes the continuation, which calls |
3481 | * this routine. If an error happens, the continuation is aborted |
3482 | * (abort arg to this routine is TRUE). To avoid deadlocks, the |
3483 | * pages are discarded from the initial page list before invoking |
3484 | * the continuation. |
3485 | * |
3486 | * NOTE: This is not the same sort of continuation used by |
3487 | * the scheduler. |
3488 | */ |
3489 | |
3490 | kern_return_t vm_map_copyin_page_list_cont(cont_args, copy_result) |
3491 | vm_map_copyin_args_t cont_args; |
3492 | vm_map_copy_t *copy_result; /* OUT */ |
3493 | { |
3494 | kern_return_t result = 0; /* '=0' to quiet gcc warnings */ |
3495 | boolean_t do_abort, src_destroy, src_destroy_only; |
3496 | |
3497 | /* |
3498 | * Check for cases that only require memory destruction. |
3499 | */ |
3500 | do_abort = (copy_result == (vm_map_copy_t *) 0); |
3501 | src_destroy = (cont_args->destroy_len != (vm_size_t) 0); |
3502 | src_destroy_only = (cont_args->src_len == (vm_size_t) 0); |
3503 | |
3504 | if (do_abort || src_destroy_only) { |
3505 | if (src_destroy) |
3506 | result = vm_map_remove(cont_args->map, |
3507 | cont_args->destroy_addr, |
3508 | cont_args->destroy_addr + cont_args->destroy_len); |
3509 | if (!do_abort) |
3510 | *copy_result = VM_MAP_COPY_NULL((vm_map_copy_t) 0); |
3511 | } |
3512 | else { |
3513 | result = vm_map_copyin_page_list(cont_args->map, |
3514 | cont_args->src_addr, cont_args->src_len, src_destroy, |
3515 | cont_args->steal_pages, copy_result, TRUE((boolean_t) 1)); |
3516 | |
3517 | if (src_destroy && !cont_args->steal_pages && |
3518 | vm_map_copy_has_cont(*copy_result)(((*copy_result)->c_u.c_p.cont) != (kern_return_t (*)()) 0 )) { |
3519 | vm_map_copyin_args_t new_args; |
3520 | /* |
3521 | * Transfer old destroy info. |
3522 | */ |
3523 | new_args = (vm_map_copyin_args_t) |
3524 | (*copy_result)->cpy_cont_argsc_u.c_p.cont_args; |
3525 | new_args->destroy_addr = cont_args->destroy_addr; |
3526 | new_args->destroy_len = cont_args->destroy_len; |
3527 | } |
3528 | } |
3529 | |
3530 | vm_map_deallocate(cont_args->map); |
3531 | kfree((vm_offset_t)cont_args, sizeof(vm_map_copyin_args_data_t)); |
3532 | |
3533 | return(result); |
3534 | } |
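(Editor's note: the consuming side of this protocol mirrors the continuation handling in vm_map_copyout_page_list earlier in this file. The following is a simplified, hypothetical sketch; consume_page_list is a placeholder, not a routine in this file, and copy is assumed to have come from vm_map_copyin_page_list.)

        vm_map_copy_t   orig_copy, new_copy;
        kern_return_t   result;

        orig_copy = copy;
        for (;;) {
                consume_page_list(copy);        /* use cpy_page_list[0 .. cpy_npages-1] */
                if (!vm_map_copy_has_cont(copy))
                        break;
                /* drops any pages left in this batch, then calls the continuation
                 * (typically the routine above) to produce the next batch */
                vm_map_copy_invoke_cont(copy, &new_copy, &result);
                if (result != KERN_SUCCESS)
                        break;
                if (copy != orig_copy)
                        vm_map_copy_discard(copy);      /* keep orig_copy for consume-on-success */
                if ((copy = new_copy) == VM_MAP_COPY_NULL)
                        break;
        }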
3535 | |
3536 | /* |
3537 | * vm_map_copyin_page_list: |
3538 | * |
3539 | * This is a variant of vm_map_copyin that copies in a list of pages. |
3540 | * If steal_pages is TRUE, the pages are only in the returned list. |
3541 | * If steal_pages is FALSE, the pages are busy and still in their |
3542 | * objects. A continuation may be returned if not all the pages fit: |
3543 | * the recipient of this copy_result must be prepared to deal with it. |
3544 | */ |
3545 | |
3546 | kern_return_t vm_map_copyin_page_list(src_map, src_addr, len, src_destroy, |
3547 | steal_pages, copy_result, is_cont) |
3548 | vm_map_t src_map; |
3549 | vm_offset_t src_addr; |
3550 | vm_size_t len; |
3551 | boolean_t src_destroy; |
3552 | boolean_t steal_pages; |
3553 | vm_map_copy_t *copy_result; /* OUT */ |
3554 | boolean_t is_cont; |
3555 | { |
3556 | vm_map_entry_t src_entry; |
3557 | vm_page_t m; |
3558 | vm_offset_t src_start; |
3559 | vm_offset_t src_end; |
3560 | vm_size_t src_size; |
3561 | vm_object_t src_object; |
3562 | vm_offset_t src_offset; |
3563 | vm_offset_t src_last_offset; |
3564 | vm_map_copy_t copy; /* Resulting copy */ |
3565 | kern_return_t result = KERN_SUCCESS0; |
3566 | boolean_t need_map_lookup; |
3567 | vm_map_copyin_args_t cont_args; |
3568 | |
3569 | /* |
3570 | * If steal_pages is FALSE, this leaves busy pages in |
3571 | * the object. A continuation must be used if src_destroy |
3572 | * is true in this case (!steal_pages && src_destroy). |
3573 | * |
3574 | * XXX Still have a more general problem of what happens |
3575 | * XXX if the same page occurs twice in a list. Deadlock |
3576 | * XXX can happen if vm_fault_page was called. A |
3577 | * XXX possible solution is to use a continuation if vm_fault_page |
3578 | * XXX is called and we cross a map entry boundary. |
3579 | */ |
3580 | |
3581 | /* |
3582 | * Check for copies of zero bytes. |
3583 | */ |
3584 | |
3585 | if (len == 0) { |
3586 | *copy_result = VM_MAP_COPY_NULL((vm_map_copy_t) 0); |
3587 | return(KERN_SUCCESS0); |
3588 | } |
3589 | |
3590 | /* |
3591 | * Compute start and end of region |
3592 | */ |
3593 | |
3594 | src_start = trunc_page(src_addr)((vm_offset_t)(((vm_offset_t)(src_addr)) & ~((1 << 12 )-1))); |
3595 | src_end = round_page(src_addr + len)((vm_offset_t)((((vm_offset_t)(src_addr + len)) + ((1 << 12)-1)) & ~((1 << 12)-1))); |
3596 | |
3597 | /* |
3598 | * Check that the end address doesn't overflow |
3599 | */ |
3600 | |
3601 | if (src_end <= src_start && (src_end < src_start || src_start != 0)) { |
3602 | return KERN_INVALID_ADDRESS1; |
3603 | } |
3604 | |
3605 | /* |
3606 | * Allocate a header element for the page list. |
3607 | * |
 3608 |  *	Record the original offset and size, since the
 3609 |  *	caller's region may not be page-aligned.
3610 | */ |
3611 | |
3612 | copy = (vm_map_copy_t) kmem_cache_alloc(&vm_map_copy_cache); |
3613 | copy->type = VM_MAP_COPY_PAGE_LIST3; |
3614 | copy->cpy_npagesc_u.c_p.npages = 0; |
3615 | copy->offset = src_addr; |
3616 | copy->size = len; |
3617 | copy->cpy_contc_u.c_p.cont = ((kern_return_t (*)()) 0); |
3618 | copy->cpy_cont_argsc_u.c_p.cont_args = (char *) VM_MAP_COPYIN_ARGS_NULL((vm_map_copyin_args_t) 0); |
3619 | |
3620 | /* |
3621 | * Find the beginning of the region. |
3622 | */ |
3623 | |
3624 | do_map_lookup: |
3625 | |
3626 | vm_map_lock(src_map)({ lock_write(&(src_map)->lock); (src_map)->timestamp ++; }); |
3627 | |
3628 | if (!vm_map_lookup_entry(src_map, src_start, &src_entry)) { |
3629 | result = KERN_INVALID_ADDRESS1; |
3630 | goto error; |
3631 | } |
3632 | need_map_lookup = FALSE((boolean_t) 0); |
3633 | |
3634 | /* |
3635 | * Go through entries until we get to the end. |
3636 | */ |
3637 | |
3638 | while (TRUE((boolean_t) 1)) { |
3639 | |
3640 | if (! (src_entry->protection & VM_PROT_READ((vm_prot_t) 0x01))) { |
3641 | result = KERN_PROTECTION_FAILURE2; |
3642 | goto error; |
3643 | } |
3644 | |
3645 | if (src_end > src_entry->vme_endlinks.end) |
3646 | src_size = src_entry->vme_endlinks.end - src_start; |
3647 | else |
3648 | src_size = src_end - src_start; |
3649 | |
3650 | src_object = src_entry->object.vm_object; |
3651 | src_offset = src_entry->offset + |
3652 | (src_start - src_entry->vme_startlinks.start); |
3653 | |
3654 | /* |
3655 | * If src_object is NULL, allocate it now; |
3656 | * we're going to fault on it shortly. |
3657 | */ |
3658 | if (src_object == VM_OBJECT_NULL((vm_object_t) 0)) { |
3659 | src_object = vm_object_allocate((vm_size_t) |
3660 | src_entry->vme_endlinks.end - |
3661 | src_entry->vme_startlinks.start); |
3662 | src_entry->object.vm_object = src_object; |
3663 | } |
3664 | |
3665 | /* |
3666 | * Iterate over pages. Fault in ones that aren't present. |
3667 | */ |
3668 | src_last_offset = src_offset + src_size; |
3669 | for (; (src_offset < src_last_offset && !need_map_lookup); |
3670 | src_offset += PAGE_SIZE(1 << 12), src_start += PAGE_SIZE(1 << 12)) { |
3671 | |
3672 | if (copy->cpy_npagesc_u.c_p.npages == VM_MAP_COPY_PAGE_LIST_MAX64) { |
3673 | make_continuation: |
3674 | /* |
3675 | * At this point we have the max number of |
3676 | * pages busy for this thread that we're |
3677 | * willing to allow. Stop here and record |
3678 | * arguments for the remainder. Note: |
3679 | * this means that this routine isn't atomic, |
3680 | * but that's the breaks. Note that only |
3681 | * the first vm_map_copy_t that comes back |
3682 | * from this routine has the right offset |
3683 | * and size; those from continuations are |
3684 | * page rounded, and short by the amount |
3685 | * already done. |
3686 | * |
3687 | * Reset src_end so the src_destroy |
3688 | * code at the bottom doesn't do |
3689 | * something stupid. |
3690 | */ |
3691 | |
3692 | cont_args = (vm_map_copyin_args_t) |
3693 | kalloc(sizeof(vm_map_copyin_args_data_t)); |
3694 | cont_args->map = src_map; |
3695 | vm_map_reference(src_map); |
3696 | cont_args->src_addr = src_start; |
3697 | cont_args->src_len = len - (src_start - src_addr); |
3698 | if (src_destroy) { |
3699 | cont_args->destroy_addr = cont_args->src_addr; |
3700 | cont_args->destroy_len = cont_args->src_len; |
3701 | } |
3702 | else { |
3703 | cont_args->destroy_addr = (vm_offset_t) 0; |
3704 | cont_args->destroy_len = (vm_offset_t) 0; |
3705 | } |
3706 | cont_args->steal_pages = steal_pages; |
3707 | |
3708 | copy->cpy_cont_argsc_u.c_p.cont_args = (char *) cont_args; |
3709 | copy->cpy_contc_u.c_p.cont = vm_map_copyin_page_list_cont; |
3710 | |
3711 | src_end = src_start; |
3712 | vm_map_clip_end(src_map, src_entry, src_end)({ if ((src_end) < (src_entry)->links.end) _vm_map_clip_end (&(src_map)->hdr,(src_entry),(src_end)); }); |
3713 | break; |
3714 | } |
3715 | |
3716 | /* |
3717 | * Try to find the page of data. |
3718 | */ |
3719 | vm_object_lock(src_object); |
3720 | vm_object_paging_begin(src_object)((src_object)->paging_in_progress++); |
3721 | if (((m = vm_page_lookup(src_object, src_offset)) != |
3722 | VM_PAGE_NULL((vm_page_t) 0)) && !m->busy && !m->fictitious && |
3723 | !m->absent && !m->error) { |
3724 | |
3725 | /* |
3726 | * This is the page. Mark it busy |
3727 | * and keep the paging reference on |
3728 | * the object whilst we do our thing. |
3729 | */ |
3730 | m->busy = TRUE((boolean_t) 1); |
3731 | |
3732 | /* |
3733 | * Also write-protect the page, so |
3734 | * that the map`s owner cannot change |
3735 | * the data. The busy bit will prevent |
3736 | * faults on the page from succeeding |
3737 | * until the copy is released; after |
3738 | * that, the page can be re-entered |
3739 | * as writable, since we didn`t alter |
3740 | * the map entry. This scheme is a |
3741 | * cheap copy-on-write. |
3742 | * |
3743 | * Don`t forget the protection and |
3744 | * the page_lock value! |
3745 | * |
3746 | * If the source is being destroyed |
3747 | * AND not shared writable, we don`t |
3748 | * have to protect the page, since |
3749 | * we will destroy the (only) |
3750 | * writable mapping later. |
3751 | */ |
3752 | if (!src_destroy || |
3753 | src_object->use_shared_copy) |
3754 | { |
3755 | pmap_page_protect(m->phys_addr, |
3756 | src_entry->protection |
3757 | & ~m->page_lock |
3758 | & ~VM_PROT_WRITE((vm_prot_t) 0x02)); |
3759 | } |
3760 | |
3761 | } |
3762 | else { |
3763 | vm_prot_t result_prot; |
3764 | vm_page_t top_page; |
3765 | kern_return_t kr; |
3766 | |
3767 | /* |
3768 | * Have to fault the page in; must |
3769 | * unlock the map to do so. While |
3770 | * the map is unlocked, anything |
3771 | * can happen, we must lookup the |
3772 | * map entry before continuing. |
3773 | */ |
3774 | vm_map_unlock(src_map)lock_done(&(src_map)->lock); |
3775 | need_map_lookup = TRUE((boolean_t) 1); |
3776 | retry: |
3777 | result_prot = VM_PROT_READ((vm_prot_t) 0x01); |
3778 | |
3779 | kr = vm_fault_page(src_object, src_offset, |
3780 | VM_PROT_READ((vm_prot_t) 0x01), FALSE((boolean_t) 0), FALSE((boolean_t) 0), |
3781 | &result_prot, &m, &top_page, |
3782 | FALSE((boolean_t) 0), (void (*)()) 0); |
3783 | /* |
3784 | * Cope with what happened. |
3785 | */ |
3786 | switch (kr) { |
3787 | case VM_FAULT_SUCCESS0: |
3788 | break; |
3789 | case VM_FAULT_INTERRUPTED2: /* ??? */ |
3790 | case VM_FAULT_RETRY1: |
3791 | vm_object_lock(src_object); |
3792 | vm_object_paging_begin(src_object)((src_object)->paging_in_progress++); |
3793 | goto retry; |
3794 | case VM_FAULT_MEMORY_SHORTAGE3: |
3795 | VM_PAGE_WAIT((void (*)()) 0)vm_page_wait((void (*)()) 0); |
3796 | vm_object_lock(src_object); |
3797 | vm_object_paging_begin(src_object)((src_object)->paging_in_progress++); |
3798 | goto retry; |
3799 | case VM_FAULT_FICTITIOUS_SHORTAGE4: |
3800 | vm_page_more_fictitious(); |
3801 | vm_object_lock(src_object); |
3802 | vm_object_paging_begin(src_object)((src_object)->paging_in_progress++); |
3803 | goto retry; |
3804 | case VM_FAULT_MEMORY_ERROR5: |
3805 | /* |
3806 | * Something broke. If this |
3807 | * is a continuation, return |
3808 | * a partial result if possible, |
3809 | * else fail the whole thing. |
3810 | * In the continuation case, the |
3811 | * next continuation call will |
3812 | * get this error if it persists. |
3813 | */ |
3814 | vm_map_lock(src_map)({ lock_write(&(src_map)->lock); (src_map)->timestamp ++; }); |
3815 | if (is_cont && |
3816 | copy->cpy_npagesc_u.c_p.npages != 0) |
3817 | goto make_continuation; |
3818 | |
3819 | result = KERN_MEMORY_ERROR10; |
3820 | goto error; |
3821 | } |
3822 | |
3823 | if (top_page != VM_PAGE_NULL((vm_page_t) 0)) { |
3824 | vm_object_lock(src_object); |
3825 | VM_PAGE_FREE(top_page)({ ; vm_page_free(top_page); ; }); |
3826 | vm_object_paging_end(src_object)({ ({ if (!((src_object)->paging_in_progress != 0)) Assert ("(src_object)->paging_in_progress != 0", "../vm/vm_map.c" , 3826); }); if (--(src_object)->paging_in_progress == 0) { ({ if ((src_object)->all_wanted & (1 << (2))) thread_wakeup_prim (((event_t)(((vm_offset_t) src_object) + (2))), ((boolean_t) 0 ), 0); (src_object)->all_wanted &= ~(1 << (2)); } ); } }); |
3827 | vm_object_unlock(src_object); |
3828 | } |
3829 | |
3830 | /* |
3831 | * We do not need to write-protect |
3832 | * the page, since it cannot have |
3833 | * been in the pmap (and we did not |
3834 | * enter it above). The busy bit |
3835 | * will protect the page from being |
3836 | * entered as writable until it is |
3837 | * unlocked. |
3838 | */ |
3839 | |
3840 | } |
3841 | |
3842 | /* |
3843 | * The page is busy, its object is locked, and |
3844 | * we have a paging reference on it. Either |
3845 | * the map is locked, or need_map_lookup is |
3846 | * TRUE. |
3847 | * |
3848 | * Put the page in the page list. |
3849 | */ |
3850 | copy->cpy_page_listc_u.c_p.page_list[copy->cpy_npagesc_u.c_p.npages++] = m; |
3851 | vm_object_unlock(m->object); |
3852 | } |
3853 | |
3854 | /* |
 3855 |  *	Determine whether the entire region
3856 | * has been copied. |
3857 | */ |
3858 | if (src_start >= src_end && src_end != 0) { |
3859 | if (need_map_lookup) |
3860 | vm_map_lock(src_map)({ lock_write(&(src_map)->lock); (src_map)->timestamp ++; }); |
3861 | break; |
3862 | } |
3863 | |
3864 | /* |
3865 | * If need_map_lookup is TRUE, have to start over with |
3866 | * another map lookup. Note that we dropped the map |
3867 | * lock (to call vm_fault_page) above only in this case. |
3868 | */ |
3869 | if (need_map_lookup) |
3870 | goto do_map_lookup; |
3871 | |
3872 | /* |
3873 | * Verify that there are no gaps in the region |
3874 | */ |
3875 | |
3876 | src_start = src_entry->vme_endlinks.end; |
3877 | src_entry = src_entry->vme_nextlinks.next; |
3878 | if (src_entry->vme_startlinks.start != src_start) { |
3879 | result = KERN_INVALID_ADDRESS1; |
3880 | goto error; |
3881 | } |
3882 | } |
3883 | |
3884 | /* |
3885 | * If steal_pages is true, make sure all |
 3886 |  *	pages in the copy are not in any object.
3887 | * We try to remove them from the original |
3888 | * object, but we may have to copy them. |
3889 | * |
3890 | * At this point every page in the list is busy |
3891 | * and holds a paging reference to its object. |
3892 | * When we're done stealing, every page is busy, |
3893 | * and in no object (m->tabled == FALSE). |
3894 | */ |
3895 | src_start = trunc_page(src_addr)((vm_offset_t)(((vm_offset_t)(src_addr)) & ~((1 << 12 )-1))); |
3896 | if (steal_pages) { |
3897 | int i; |
3898 | vm_offset_t unwire_end; |
3899 | |
3900 | unwire_end = src_start; |
3901 | for (i = 0; i < copy->cpy_npagesc_u.c_p.npages; i++) { |
3902 | |
3903 | /* |
3904 | * Remove the page from its object if it |
3905 | * can be stolen. It can be stolen if: |
3906 | * |
3907 | * (1) The source is being destroyed, |
3908 | * the object is temporary, and |
3909 | * not shared. |
3910 | * (2) The page is not precious. |
3911 | * |
3912 | * The not shared check consists of two |
3913 | * parts: (a) there are no objects that |
3914 | * shadow this object. (b) it is not the |
3915 | * object in any shared map entries (i.e., |
3916 | * use_shared_copy is not set). |
3917 | * |
3918 | * The first check (a) means that we can't |
3919 | * steal pages from objects that are not |
3920 | * at the top of their shadow chains. This |
3921 | * should not be a frequent occurrence. |
3922 | * |
3923 | * Stealing wired pages requires telling the |
3924 | * pmap module to let go of them. |
3925 | * |
3926 | * NOTE: stealing clean pages from objects |
3927 | * whose mappings survive requires a call to |
3928 | * the pmap module. Maybe later. |
3929 | */ |
3930 | m = copy->cpy_page_listc_u.c_p.page_list[i]; |
3931 | src_object = m->object; |
3932 | vm_object_lock(src_object); |
3933 | |
3934 | if (src_destroy && |
3935 | src_object->temporary && |
3936 | (!src_object->shadowed) && |
3937 | (!src_object->use_shared_copy) && |
3938 | !m->precious) { |
3939 | vm_offset_t page_vaddr; |
3940 | |
3941 | page_vaddr = src_start + (i * PAGE_SIZE(1 << 12)); |
3942 | if (m->wire_count > 0) { |
3943 | |
3944 | assert(m->wire_count == 1)({ if (!(m->wire_count == 1)) Assert("m->wire_count == 1" , "../vm/vm_map.c", 3944); }); |
3945 | /* |
3946 | * In order to steal a wired |
3947 | * page, we have to unwire it |
3948 | * first. We do this inline |
3949 | * here because we have the page. |
3950 | * |
3951 | * Step 1: Unwire the map entry. |
3952 | * Also tell the pmap module |
3953 | * that this piece of the |
3954 | * pmap is pageable. |
3955 | */ |
3956 | vm_object_unlock(src_object); |
3957 | if (page_vaddr >= unwire_end) { |
3958 | if (!vm_map_lookup_entry(src_map, |
3959 | page_vaddr, &src_entry)) |
3960 | panic("vm_map_copyin_page_list: missing wired map entry"); |
3961 | |
3962 | vm_map_clip_start(src_map, src_entry,({ if ((page_vaddr) > (src_entry)->links.start) _vm_map_clip_start (&(src_map)->hdr,(src_entry),(page_vaddr)); }) |
3963 | page_vaddr)({ if ((page_vaddr) > (src_entry)->links.start) _vm_map_clip_start (&(src_map)->hdr,(src_entry),(page_vaddr)); }); |
3964 | vm_map_clip_end(src_map, src_entry,({ if ((src_start + src_size) < (src_entry)->links.end) _vm_map_clip_end(&(src_map)->hdr,(src_entry),(src_start + src_size)); }) |
3965 | src_start + src_size)({ if ((src_start + src_size) < (src_entry)->links.end) _vm_map_clip_end(&(src_map)->hdr,(src_entry),(src_start + src_size)); }); |
3966 | |
3967 | assert(src_entry->wired_count > 0)({ if (!(src_entry->wired_count > 0)) Assert("src_entry->wired_count > 0" , "../vm/vm_map.c", 3967); }); |
3968 | src_entry->wired_count = 0; |
3969 | src_entry->user_wired_count = 0; |
3970 | unwire_end = src_entry->vme_endlinks.end; |
3971 | pmap_pageable(vm_map_pmap(src_map)((src_map)->pmap), |
3972 | page_vaddr, unwire_end, TRUE((boolean_t) 1)); |
3973 | } |
3974 | |
3975 | /* |
3976 | * Step 2: Unwire the page. |
3977 | * pmap_remove handles this for us. |
3978 | */ |
3979 | vm_object_lock(src_object); |
3980 | } |
3981 | |
3982 | /* |
3983 | * Don't need to remove the mapping; |
3984 | * vm_map_delete will handle it. |
3985 | * |
3986 | * Steal the page. Setting the wire count |
3987 | * to zero is vm_page_unwire without |
3988 | * activating the page. |
3989 | */ |
3990 | vm_page_lock_queues(); |
3991 | vm_page_remove(m); |
3992 | if (m->wire_count > 0) { |
3993 | m->wire_count = 0; |
3994 | vm_page_wire_count--; |
3995 | } else { |
3996 | VM_PAGE_QUEUES_REMOVE(m)({ if (m->active) { { queue_entry_t next, prev; next = (m) ->pageq.next; prev = (m)->pageq.prev; if ((&vm_page_queue_active ) == next) (&vm_page_queue_active)->prev = prev; else ( (vm_page_t)next)->pageq.prev = prev; if ((&vm_page_queue_active ) == prev) (&vm_page_queue_active)->next = next; else ( (vm_page_t)prev)->pageq.next = next; }; m->active = ((boolean_t ) 0); vm_page_active_count--; } if (m->inactive) { { queue_entry_t next, prev; next = (m)->pageq.next; prev = (m)->pageq. prev; if ((&vm_page_queue_inactive) == next) (&vm_page_queue_inactive )->prev = prev; else ((vm_page_t)next)->pageq.prev = prev ; if ((&vm_page_queue_inactive) == prev) (&vm_page_queue_inactive )->next = next; else ((vm_page_t)prev)->pageq.next = next ; }; m->inactive = ((boolean_t) 0); vm_page_inactive_count --; } }); |
3997 | } |
3998 | vm_page_unlock_queues(); |
3999 | } |
4000 | else { |
4001 | /* |
 4002 |  *	Have to copy this page; the map must be
 4003 |  *	unlocked while the copy is made, so no
 4004 |  *	further page stealing is possible after
 4005 |  *	this point.  Just copy all of the pages
 4006 |  *	instead, retake the map lock, and leave
 4007 |  *	the stealing loop.
4008 | */ |
4009 | vm_object_unlock(src_object); |
4010 | vm_map_unlock(src_map)lock_done(&(src_map)->lock); |
4011 | |
4012 | vm_map_copy_steal_pages(copy); |
4013 | |
4014 | vm_map_lock(src_map)({ lock_write(&(src_map)->lock); (src_map)->timestamp ++; }); |
4015 | break; |
4016 | } |
4017 | |
4018 | vm_object_paging_end(src_object)({ ({ if (!((src_object)->paging_in_progress != 0)) Assert ("(src_object)->paging_in_progress != 0", "../vm/vm_map.c" , 4018); }); if (--(src_object)->paging_in_progress == 0) { ({ if ((src_object)->all_wanted & (1 << (2))) thread_wakeup_prim (((event_t)(((vm_offset_t) src_object) + (2))), ((boolean_t) 0 ), 0); (src_object)->all_wanted &= ~(1 << (2)); } ); } }); |
4019 | vm_object_unlock(src_object); |
4020 | } |
4021 | |
4022 | /* |
4023 | * If the source should be destroyed, do it now, since the |
4024 | * copy was successful. |
4025 | */ |
4026 | |
4027 | if (src_destroy) { |
4028 | (void) vm_map_delete(src_map, src_start, src_end); |
4029 | } |
4030 | } |
4031 | else { |
4032 | /* |
4033 | * !steal_pages leaves busy pages in the map. |
4034 | * This will cause src_destroy to hang. Use |
4035 | * a continuation to prevent this. |
4036 | */ |
4037 | if (src_destroy && !vm_map_copy_has_cont(copy)(((copy)->c_u.c_p.cont) != (kern_return_t (*)()) 0)) { |
4038 | cont_args = (vm_map_copyin_args_t) |
4039 | kalloc(sizeof(vm_map_copyin_args_data_t)); |
4040 | vm_map_reference(src_map); |
4041 | cont_args->map = src_map; |
4042 | cont_args->src_addr = (vm_offset_t) 0; |
4043 | cont_args->src_len = (vm_size_t) 0; |
4044 | cont_args->destroy_addr = src_start; |
4045 | cont_args->destroy_len = src_end - src_start; |
4046 | cont_args->steal_pages = FALSE((boolean_t) 0); |
4047 | |
4048 | copy->cpy_cont_argsc_u.c_p.cont_args = (char *) cont_args; |
4049 | copy->cpy_contc_u.c_p.cont = vm_map_copyin_page_list_cont; |
4050 | } |
4051 | |
4052 | } |
4053 | |
4054 | vm_map_unlock(src_map)lock_done(&(src_map)->lock); |
4055 | |
4056 | *copy_result = copy; |
4057 | return(result); |
4058 | |
4059 | error: |
4060 | vm_map_unlock(src_map)lock_done(&(src_map)->lock); |
4061 | vm_map_copy_discard(copy); |
4062 | return(result); |
4063 | } |
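(Editor's note: a hedged example of a first, non-continuation call; src_map, addr and len are placeholders, and the flag comments restate the rules spelled out in the header comment above.)

        vm_map_copy_t   copy;
        kern_return_t   kr;

        kr = vm_map_copyin_page_list(src_map, addr, len,
                                     FALSE,     /* src_destroy: leave the source mapped */
                                     TRUE,      /* steal_pages: pages end up busy and in no object */
                                     &copy,
                                     FALSE);    /* is_cont: this is the initial call */
        if (kr == KERN_SUCCESS && vm_map_copy_has_cont(copy)) {
                /* the region did not fit in one VM_MAP_COPY_PAGE_LIST_MAX batch;
                 * the remainder must be pulled in through the continuation */
        }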
4064 | |
4065 | /* |
4066 | * vm_map_fork: |
4067 | * |
4068 | * Create and return a new map based on the old |
4069 | * map, according to the inheritance values on the |
4070 | * regions in that map. |
4071 | * |
4072 | * The source map must not be locked. |
4073 | */ |
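(Editor's note: a rough, hypothetical illustration of how the inheritance values are consumed below. vm_map_inherit is assumed to set the per-entry inheritance as elsewhere in this module; share_start and share_end are placeholders.)

        vm_map_t        new_map;

        (void) vm_map_inherit(old_map, share_start, share_end, VM_INHERIT_SHARE);
        new_map = vm_map_fork(old_map);
        /* entries marked VM_INHERIT_SHARE reference the same object in both maps;
         * VM_INHERIT_NONE entries are omitted from the child entirely */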
4074 | vm_map_t vm_map_fork(old_map) |
4075 | vm_map_t old_map; |
4076 | { |
4077 | vm_map_t new_map; |
4078 | vm_map_entry_t old_entry; |
4079 | vm_map_entry_t new_entry; |
4080 | pmap_t new_pmap = pmap_create((vm_size_t) 0); |
4081 | vm_size_t new_size = 0; |
4082 | vm_size_t entry_size; |
4083 | vm_object_t object; |
4084 | |
4085 | vm_map_lock(old_map)({ lock_write(&(old_map)->lock); (old_map)->timestamp ++; }); |
4086 | |
4087 | new_map = vm_map_create(new_pmap, |
4088 | old_map->min_offsethdr.links.start, |
4089 | old_map->max_offsethdr.links.end, |
4090 | old_map->hdr.entries_pageable); |
4091 | |
4092 | for ( |
4093 | old_entry = vm_map_first_entry(old_map)((old_map)->hdr.links.next); |
4094 | old_entry != vm_map_to_entry(old_map)((struct vm_map_entry *) &(old_map)->hdr.links); |
4095 | ) { |
4096 | if (old_entry->is_sub_map) |
4097 | panic("vm_map_fork: encountered a submap"); |
4098 | |
4099 | entry_size = (old_entry->vme_endlinks.end - old_entry->vme_startlinks.start); |
4100 | |
4101 | switch (old_entry->inheritance) { |
4102 | case VM_INHERIT_NONE((vm_inherit_t) 2): |
4103 | break; |
4104 | |
4105 | case VM_INHERIT_SHARE((vm_inherit_t) 0): |
4106 | /* |
4107 | * New sharing code. New map entry |
4108 | * references original object. Temporary |
4109 | * objects use asynchronous copy algorithm for |
4110 | * future copies. First make sure we have |
4111 | * the right object. If we need a shadow, |
4112 | * or someone else already has one, then |
4113 | * make a new shadow and share it. |
4114 | */ |
4115 | |
4116 | object = old_entry->object.vm_object; |
4117 | if (object == VM_OBJECT_NULL) {
4118 | object = vm_object_allocate(
4119 | (vm_size_t)(old_entry->vme_end -
4120 | old_entry->vme_start));
4121 | old_entry->offset = 0;
4122 | old_entry->object.vm_object = object;
4123 | assert(!old_entry->needs_copy);
4124 | } |
4125 | else if (old_entry->needs_copy || object->shadowed || |
4126 | (object->temporary && !old_entry->is_shared && |
4127 | object->size > (vm_size_t)(old_entry->vme_end -
4128 | old_entry->vme_start))) {
4129 | |
4130 | assert(object->temporary);
4131 | assert(!(object->shadowed && old_entry->is_shared));
4132 | vm_object_shadow( |
4133 | &old_entry->object.vm_object, |
4134 | &old_entry->offset, |
4135 | (vm_size_t) (old_entry->vme_end -
4136 | old_entry->vme_start));
4137 | |
4138 | /* |
4139 | * If we're making a shadow for other than |
4140 | * copy on write reasons, then we have |
4141 | * to remove write permission. |
4142 | */ |
4143 | |
4144 | if (!old_entry->needs_copy && |
4145 | (old_entry->protection & VM_PROT_WRITE)) {
4146 | pmap_protect(vm_map_pmap(old_map),
4147 | old_entry->vme_start,
4148 | old_entry->vme_end,
4149 | old_entry->protection &
4150 | ~VM_PROT_WRITE);
4151 | }
4152 | old_entry->needs_copy = FALSE;
4153 | object = old_entry->object.vm_object; |
4154 | } |
4155 | |
4156 | /* |
4157 | * Set use_shared_copy to indicate that |
4158 | * object must use shared (delayed) copy-on |
4159 | * write. This is ignored for permanent objects. |
4160 | * Bump the reference count for the new entry |
4161 | */ |
4162 | |
4163 | vm_object_lock(object); |
4164 | object->use_shared_copy = TRUE;
4165 | object->ref_count++; |
4166 | vm_object_unlock(object); |
4167 | |
4168 | new_entry = vm_map_entry_create(new_map);
4169 | |
4170 | if (old_entry->projected_on != 0) { |
4171 | /* |
4172 | * If entry is projected buffer, clone the |
4173 | * entry exactly. |
4174 | */ |
4175 | |
4176 | vm_map_entry_copy_full(new_entry, old_entry);
4177 | |
4178 | } else { |
4179 | /* |
4180 | * Clone the entry, using object ref from above. |
4181 | * Mark both entries as shared. |
4182 | */ |
4183 | |
4184 | vm_map_entry_copy(new_entry, old_entry);
4185 | old_entry->is_shared = TRUE;
4186 | new_entry->is_shared = TRUE;
4187 | } |
4188 | |
4189 | /* |
4190 | * Insert the entry into the new map -- we |
4191 | * know we're inserting at the end of the new |
4192 | * map. |
4193 | */ |
4194 | |
4195 | vm_map_entry_link(
4196 | new_map,
4197 | vm_map_last_entry(new_map),
4198 | new_entry);
4199 | |
4200 | /* |
4201 | * Update the physical map |
4202 | */ |
4203 | |
4204 | pmap_copy(new_map->pmap, old_map->pmap, |
4205 | new_entry->vme_start, |
4206 | entry_size, |
4207 | old_entry->vme_start); |
4208 | |
4209 | new_size += entry_size; |
4210 | break; |
4211 | |
4212 | case VM_INHERIT_COPY:
4213 | if (old_entry->wired_count == 0) { |
4214 | boolean_t src_needs_copy; |
4215 | boolean_t new_entry_needs_copy; |
4216 | |
4217 | new_entry = vm_map_entry_create(new_map);
4218 | vm_map_entry_copy(new_entry, old_entry);
4219 | |
4220 | if (vm_object_copy_temporary( |
4221 | &new_entry->object.vm_object, |
4222 | &new_entry->offset, |
4223 | &src_needs_copy, |
4224 | &new_entry_needs_copy)) { |
4225 | |
4226 | /* |
4227 | * Handle copy-on-write obligations |
4228 | */ |
4229 | |
4230 | if (src_needs_copy && !old_entry->needs_copy) { |
4231 | vm_object_pmap_protect( |
4232 | old_entry->object.vm_object, |
4233 | old_entry->offset, |
4234 | entry_size, |
4235 | (old_entry->is_shared ? |
4236 | PMAP_NULL :
4237 | old_map->pmap),
4238 | old_entry->vme_start,
4239 | old_entry->protection &
4240 | ~VM_PROT_WRITE);
4241 | 
4242 | old_entry->needs_copy = TRUE;
4243 | } |
4244 | |
4245 | new_entry->needs_copy = new_entry_needs_copy; |
4246 | |
4247 | /* |
4248 | * Insert the entry at the end |
4249 | * of the map. |
4250 | */ |
4251 | |
4252 | vm_map_entry_link(new_map,
4253 | vm_map_last_entry(new_map),
4254 | new_entry);
4255 | |
4256 | |
4257 | new_size += entry_size; |
4258 | break; |
4259 | } |
4260 | |
4261 | vm_map_entry_dispose(new_map, new_entry);
4262 | } |
4263 | |
4264 | /* INNER BLOCK (copy cannot be optimized) */ { |
4265 | |
4266 | vm_offset_t start = old_entry->vme_start;
4267 | vm_map_copy_t copy;
4268 | vm_map_entry_t last = vm_map_last_entry(new_map);
4269 | 
4270 | vm_map_unlock(old_map);
4271 | if (vm_map_copyin(old_map, |
4272 | start, |
4273 | entry_size, |
4274 | FALSE,
4275 | &copy)
4276 | != KERN_SUCCESS) {
4277 | vm_map_lock(old_map);
4278 | if (!vm_map_lookup_entry(old_map, start, &last))
4279 | last = last->vme_next;
4280 | old_entry = last; |
4281 | /* |
4282 | * For some error returns, want to |
4283 | * skip to the next element. |
4284 | */ |
4285 | |
4286 | continue; |
4287 | } |
4288 | |
4289 | /* |
4290 | * Insert the copy into the new map |
4291 | */ |
4292 | |
4293 | vm_map_copy_insert(new_map, last, copy);
4294 | new_size += entry_size; |
4295 | |
4296 | /* |
4297 | * Pick up the traversal at the end of |
4298 | * the copied region. |
4299 | */ |
4300 | |
4301 | vm_map_lock(old_map);
4302 | start += entry_size; |
4303 | if (!vm_map_lookup_entry(old_map, start, &last)) |
4304 | last = last->vme_next;
4305 | else |
4306 | vm_map_clip_start(old_map, last, start);
4307 | old_entry = last; |
4308 | |
4309 | continue; |
4310 | /* INNER BLOCK (copy cannot be optimized) */ } |
4311 | } |
4312 | old_entry = old_entry->vme_next;
4313 | } |
4314 | |
4315 | new_map->size = new_size; |
4316 | vm_map_unlock(old_map);
4317 | |
4318 | return(new_map); |
4319 | } |
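
A minimal usage sketch, not part of vm_map.c: the function name example_task_fork_map is hypothetical, but the vm_map_fork() call matches the signature above.

/* Hypothetical helper, not part of this file. */
vm_map_t
example_task_fork_map(vm_map_t parent_map)
{
	/*
	 * The parent map must be unlocked; vm_map_fork() locks it
	 * itself and builds a new map whose regions are shared,
	 * copied, or omitted according to each entry's
	 * VM_INHERIT_SHARE / VM_INHERIT_COPY / VM_INHERIT_NONE value.
	 */
	return vm_map_fork(parent_map);
}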
4320 | |
4321 | /* |
4322 | * vm_map_lookup: |
4323 | * |
4324 | * Finds the VM object, offset, and |
4325 | * protection for a given virtual address in the |
4326 | * specified map, assuming a page fault of the |
4327 | * type specified. |
4328 | * |
4329 | * Returns the (object, offset, protection) for |
4330 | * this address, whether it is wired down, and whether |
4331 | * this map has the only reference to the data in question. |
4332 | * In order to later verify this lookup, a "version" |
4333 | * is returned. |
4334 | * |
4335 | * The map should not be locked; it will not be |
4336 | * locked on exit. In order to guarantee the |
4337 | * existence of the returned object, it is returned |
4338 | * locked. |
4339 | * |
4340 | * If a lookup is requested with "write protection" |
4341 | * specified, the map may be changed to perform virtual |
4342 | * copying operations, although the data referenced will |
4343 | * remain the same. |
4344 | */ |
4345 | kern_return_t vm_map_lookup(var_map, vaddr, fault_type, out_version, |
4346 | object, offset, out_prot, wired) |
4347 | vm_map_t *var_map; /* IN/OUT */ |
4348 | vm_offset_t vaddr; |
4349 | vm_prot_t fault_type; |
4350 | |
4351 | vm_map_version_t *out_version; /* OUT */ |
4352 | vm_object_t *object; /* OUT */ |
4353 | vm_offset_t *offset; /* OUT */ |
4354 | vm_prot_t *out_prot; /* OUT */ |
4355 | boolean_t *wired; /* OUT */ |
4356 | { |
4357 | vm_map_entry_t entry; |
4358 | vm_map_t map = *var_map; |
4359 | vm_prot_t prot; |
4360 | |
4361 | RetryLookup: ; |
4362 | |
4363 | /* |
4364 | * Lookup the faulting address. |
4365 | */ |
4366 | |
4367 | vm_map_lock_read(map);
4368 | |
4369 | #define RETURN(why) \ |
4370 | { \ |
4371 | vm_map_unlock_read(map); \
4372 | return(why); \ |
4373 | } |
4374 | |
4375 | /* |
4376 | * If the map has an interesting hint, try it before calling |
4377 | * full blown lookup routine. |
4378 | */ |
4379 | |
4380 | simple_lock(&map->hint_lock); |
4381 | entry = map->hint; |
4382 | simple_unlock(&map->hint_lock); |
4383 | |
4384 | if ((entry == vm_map_to_entry(map)) ||
4385 | (vaddr < entry->vme_start) || (vaddr >= entry->vme_end)) {
4386 | vm_map_entry_t tmp_entry; |
4387 | |
4388 | /* |
4389 | * Entry was either not a valid hint, or the vaddr |
4390 | * was not contained in the entry, so do a full lookup. |
4391 | */ |
4392 | if (!vm_map_lookup_entry(map, vaddr, &tmp_entry)) |
4393 | RETURN(KERN_INVALID_ADDRESS);
4394 | |
4395 | entry = tmp_entry; |
4396 | } |
4397 | |
4398 | /* |
4399 | * Handle submaps. |
4400 | */ |
4401 | |
4402 | if (entry->is_sub_map) { |
4403 | vm_map_t old_map = map; |
4404 | |
4405 | *var_map = map = entry->object.sub_map; |
4406 | vm_map_unlock_read(old_map);
4407 | goto RetryLookup; |
4408 | } |
4409 | |
4410 | /* |
4411 | * Check whether this task is allowed to have |
4412 | * this page. |
4413 | */ |
4414 | |
4415 | prot = entry->protection; |
4416 | |
4417 | if ((fault_type & (prot)) != fault_type) { |
4418 | if ((prot & VM_PROT_NOTIFY) && (fault_type & VM_PROT_WRITE)) {
4419 | RETURN(KERN_WRITE_PROTECTION_FAILURE);
4420 | } else {
4421 | RETURN(KERN_PROTECTION_FAILURE);
4422 | } |
4423 | } |
4424 | |
4425 | /* |
4426 | * If this page is not pageable, we have to get |
4427 | * it for all possible accesses. |
4428 | */ |
4429 | |
4430 | if ((*wired = (entry->wired_count != 0))) |
4431 | prot = fault_type = entry->protection; |
4432 | |
4433 | /* |
4434 | * If the entry was copy-on-write, we either ... |
4435 | */ |
4436 | |
4437 | if (entry->needs_copy) { |
4438 | /* |
4439 | * If we want to write the page, we may as well |
4440 | * handle that now since we've got the map locked. |
4441 | * |
4442 | * If we don't need to write the page, we just |
4443 | * demote the permissions allowed. |
4444 | */ |
4445 | |
4446 | if (fault_type & VM_PROT_WRITE) {
4447 | /* |
4448 | * Make a new object, and place it in the |
4449 | * object chain. Note that no new references |
4450 | * have appeared -- one just moved from the |
4451 | * map to the new object. |
4452 | */ |
4453 | |
4454 | if (vm_map_lock_read_to_write(map)) {
4455 | goto RetryLookup; |
4456 | } |
4457 | map->timestamp++; |
4458 | |
4459 | vm_object_shadow( |
4460 | &entry->object.vm_object, |
4461 | &entry->offset, |
4462 | (vm_size_t) (entry->vme_end - entry->vme_start));
4463 | 
4464 | entry->needs_copy = FALSE;
4465 | 
4466 | vm_map_lock_write_to_read(map);
4467 | } |
4468 | else { |
4469 | /* |
4470 | * We're attempting to read a copy-on-write |
4471 | * page -- don't allow writes. |
4472 | */ |
4473 | |
4474 | prot &= (~VM_PROT_WRITE);
4475 | } |
4476 | } |
4477 | |
4478 | /* |
4479 | * Create an object if necessary. |
4480 | */ |
4481 | if (entry->object.vm_object == VM_OBJECT_NULL) {
4482 | 
4483 | if (vm_map_lock_read_to_write(map)) {
4484 | goto RetryLookup; |
4485 | } |
4486 | |
4487 | entry->object.vm_object = vm_object_allocate( |
4488 | (vm_size_t)(entry->vme_end - entry->vme_start));
4489 | entry->offset = 0;
4490 | vm_map_lock_write_to_read(map);
4491 | } |
4492 | |
4493 | /* |
4494 | * Return the object/offset from this entry. If the entry |
4495 | * was copy-on-write or empty, it has been fixed up. Also |
4496 | * return the protection. |
4497 | */ |
4498 | |
4499 | *offset = (vaddr - entry->vme_start) + entry->offset;
4500 | *object = entry->object.vm_object; |
4501 | *out_prot = prot; |
4502 | |
4503 | /* |
4504 | * Lock the object to prevent it from disappearing |
4505 | */ |
4506 | |
4507 | vm_object_lock(*object); |
4508 | |
4509 | /* |
4510 | * Save the version number and unlock the map. |
4511 | */ |
4512 | |
4513 | out_version->main_timestamp = map->timestamp; |
4514 | |
4515 | RETURN(KERN_SUCCESS);
4516 | |
4517 | #undef RETURN |
4518 | } |
4519 | |
4520 | /* |
4521 | * vm_map_verify: |
4522 | * |
4523 | * Verifies that the map in question has not changed |
4524 | * since the given version. If successful, the map |
4525 | * will not change until vm_map_verify_done() is called. |
4526 | */ |
4527 | boolean_t vm_map_verify(map, version) |
4528 | vm_map_t map; |
4529 | vm_map_version_t *version; /* REF */ |
4530 | { |
4531 | boolean_t result; |
4532 | |
4533 | vm_map_lock_read(map);
4534 | result = (map->timestamp == version->main_timestamp); |
4535 | |
4536 | if (!result) |
4537 | vm_map_unlock_read(map);
4538 | |
4539 | return(result); |
4540 | } |
4541 | |
4542 | /* |
4543 | * vm_map_verify_done: |
4544 | * |
4545 | * Releases locks acquired by a vm_map_verify. |
4546 | * |
4547 | * This is now a macro in vm/vm_map.h. It does a |
4548 | * vm_map_unlock_read on the map. |
4549 | */ |
4550 | |
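The lookup/verify pair above brackets work done while the map is unlocked. The sketch below shows that pattern under stated assumptions: example_use_lookup is a hypothetical caller, and it relies on vm_map_verify_done() only dropping the read lock, as the comment above states.

/* Hypothetical caller, not part of this file. */
kern_return_t
example_use_lookup(vm_map_t map, vm_offset_t vaddr, vm_prot_t fault_type)
{
	vm_map_version_t version;
	vm_object_t object;
	vm_offset_t offset;
	vm_prot_t prot;
	boolean_t wired;
	kern_return_t kr;

	kr = vm_map_lookup(&map, vaddr, fault_type, &version,
			   &object, &offset, &prot, &wired);
	if (kr != KERN_SUCCESS)
		return kr;

	/* The object comes back locked; work on it, then unlock it. */
	vm_object_unlock(object);

	/* On failure, vm_map_verify drops the read lock itself. */
	if (!vm_map_verify(map, &version))
		return KERN_FAILURE;	/* map changed; repeat the lookup */

	vm_map_verify_done(map, &version);	/* releases the read lock */
	return KERN_SUCCESS;
}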
4551 | /* |
4552 | * vm_region: |
4553 | * |
4554 | * User call to obtain information about a region in |
4555 | * a task's address map. |
4556 | */ |
4557 | |
4558 | kern_return_t vm_region(map, address, size, |
4559 | protection, max_protection, |
4560 | inheritance, is_shared, |
4561 | object_name, offset_in_object) |
4562 | vm_map_t map; |
4563 | vm_offset_t *address; /* IN/OUT */ |
4564 | vm_size_t *size; /* OUT */ |
4565 | vm_prot_t *protection; /* OUT */ |
4566 | vm_prot_t *max_protection; /* OUT */ |
4567 | vm_inherit_t *inheritance; /* OUT */ |
4568 | boolean_t *is_shared; /* OUT */ |
4569 | ipc_port_t *object_name; /* OUT */ |
4570 | vm_offset_t *offset_in_object; /* OUT */ |
4571 | { |
4572 | vm_map_entry_t tmp_entry; |
4573 | vm_map_entry_t entry; |
4574 | vm_offset_t tmp_offset; |
4575 | vm_offset_t start; |
4576 | |
4577 | if (map == VM_MAP_NULL)
4578 | return(KERN_INVALID_ARGUMENT);
4579 | |
4580 | start = *address; |
4581 | |
4582 | vm_map_lock_read(map);
4583 | if (!vm_map_lookup_entry(map, start, &tmp_entry)) {
4584 | if ((entry = tmp_entry->vme_next) == vm_map_to_entry(map)) {
4585 | vm_map_unlock_read(map);
4586 | return(KERN_NO_SPACE);
4587 | } |
4588 | } else { |
4589 | entry = tmp_entry; |
4590 | } |
4591 | |
4592 | start = entry->vme_start;
4593 | *protection = entry->protection; |
4594 | *max_protection = entry->max_protection; |
4595 | *inheritance = entry->inheritance; |
4596 | *address = start; |
4597 | *size = (entry->vme_end - start);
4598 | |
4599 | tmp_offset = entry->offset; |
4600 | |
4601 | |
4602 | if (entry->is_sub_map) { |
4603 | *is_shared = FALSE;
4604 | *object_name = IP_NULL;
4605 | *offset_in_object = tmp_offset; |
4606 | } else { |
4607 | *is_shared = entry->is_shared; |
4608 | *object_name = vm_object_name(entry->object.vm_object); |
4609 | *offset_in_object = tmp_offset; |
4610 | } |
4611 | |
4612 | vm_map_unlock_read(map);
4613 | 
4614 | return(KERN_SUCCESS);
4615 | } |
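As a usage illustration only, here is a hedged sketch of walking an address space with vm_region(), advancing past each region that is returned. The helper name example_print_regions and the use of printf() for output are assumptions, not part of this file.

/* Hypothetical walker, not part of this file. */
void
example_print_regions(vm_map_t map)
{
	vm_offset_t address = 0;
	vm_size_t size;
	vm_prot_t protection, max_protection;
	vm_inherit_t inheritance;
	boolean_t is_shared;
	ipc_port_t object_name;
	vm_offset_t offset;

	while (vm_region(map, &address, &size,
			 &protection, &max_protection,
			 &inheritance, &is_shared,
			 &object_name, &offset) == KERN_SUCCESS) {
		printf("region 0x%X..0x%X prot %X shared %d\n",
		       address, address + size, protection, is_shared);
		address += size;	/* continue past this region */
	}
}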
4616 | |
4617 | /* |
4618 | * Routine: vm_map_simplify |
4619 | * |
4620 | * Description: |
4621 | * Attempt to simplify the map representation in |
4622 | * the vicinity of the given starting address. |
4623 | * Note: |
4624 | * This routine is intended primarily to keep the |
4625 | * kernel maps more compact -- they generally don't |
4626 | * benefit from the "expand a map entry" technology |
4627 | * at allocation time because the adjacent entry |
4628 | * is often wired down. |
4629 | */ |
4630 | void vm_map_simplify(map, start) |
4631 | vm_map_t map; |
4632 | vm_offset_t start; |
4633 | { |
4634 | vm_map_entry_t this_entry; |
4635 | vm_map_entry_t prev_entry; |
4636 | |
4637 | vm_map_lock(map);
4638 | if ( |
4639 | (vm_map_lookup_entry(map, start, &this_entry)) && |
4640 | ((prev_entry = this_entry->vme_prev) != vm_map_to_entry(map)) &&
4641 | 
4642 | (prev_entry->vme_end == start) &&
4643 | 
4644 | (prev_entry->is_shared == FALSE) &&
4645 | (prev_entry->is_sub_map == FALSE) &&
4646 | 
4647 | (this_entry->is_shared == FALSE) &&
4648 | (this_entry->is_sub_map == FALSE) &&
4649 | |
4650 | (prev_entry->inheritance == this_entry->inheritance) && |
4651 | (prev_entry->protection == this_entry->protection) && |
4652 | (prev_entry->max_protection == this_entry->max_protection) && |
4653 | (prev_entry->wired_count == this_entry->wired_count) && |
4654 | (prev_entry->user_wired_count == this_entry->user_wired_count) && |
4655 | |
4656 | (prev_entry->needs_copy == this_entry->needs_copy) && |
4657 | |
4658 | (prev_entry->object.vm_object == this_entry->object.vm_object) && |
4659 | ((prev_entry->offset + (prev_entry->vme_end - prev_entry->vme_start))
4660 | == this_entry->offset) && |
4661 | (prev_entry->projected_on == 0) && |
4662 | (this_entry->projected_on == 0) |
4663 | ) { |
4664 | if (map->first_free == this_entry) |
4665 | map->first_free = prev_entry; |
4666 | |
4667 | SAVE_HINT(map, prev_entry);
4668 | vm_map_entry_unlink(map, this_entry);
4669 | prev_entry->vme_end = this_entry->vme_end;
4670 | vm_object_deallocate(this_entry->object.vm_object);
4671 | vm_map_entry_dispose(map, this_entry);
4672 | }
4673 | vm_map_unlock(map);
4674 | } |
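The coalescing test above is long; the hedged fragment below restates only its core contiguity condition in isolation (the helper name is hypothetical): two entries can merge only when the second starts exactly where the first ends, both in the address space and in the backing object.

/* Hypothetical restatement of the core test, not part of this file. */
static boolean_t
example_entries_abut(vm_map_entry_t prev, vm_map_entry_t next)
{
	return (prev->vme_end == next->vme_start) &&
	       (prev->object.vm_object == next->object.vm_object) &&
	       (prev->offset + (prev->vme_end - prev->vme_start)
			== next->offset);
}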
4675 | |
4676 | |
4677 | /* |
4678 | * Routine: vm_map_machine_attribute |
4679 | * Purpose: |
4680 | * Provide machine-specific attributes to mappings, |
4681 | * such as cachability etc. for machines that provide |
4682 | * them. NUMA architectures and machines with big/strange |
4683 | * caches will use this. |
4684 | * Note: |
4685 | * Responsibilities for locking and checking are handled here, |
4686 | * everything else in the pmap module. If any non-volatile |
4687 | * information must be kept, the pmap module should handle |
4688 | * it itself. [This assumes that attributes do not |
4689 | * need to be inherited, which seems ok to me] |
4690 | */ |
4691 | kern_return_t vm_map_machine_attribute(map, address, size, attribute, value) |
4692 | vm_map_t map; |
4693 | vm_offset_t address; |
4694 | vm_size_t size; |
4695 | vm_machine_attribute_t attribute; |
4696 | vm_machine_attribute_val_t* value; /* IN/OUT */ |
4697 | { |
4698 | kern_return_t ret; |
4699 | |
4700 | if (address < vm_map_min(map) ||
4701 | (address + size) > vm_map_max(map))
4702 | return KERN_INVALID_ARGUMENT;
4703 | 
4704 | vm_map_lock(map);
4705 | 
4706 | ret = pmap_attribute(map->pmap, address, size, attribute, value);
4707 | 
4708 | vm_map_unlock(map);
4709 | |
4710 | return ret; |
4711 | } |
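A hedged example of invoking vm_map_machine_attribute(); it assumes the MATTR_CACHE attribute and MATTR_VAL_CACHE_FLUSH value from <mach/vm_attributes.h> (included above), and the wrapper name is hypothetical.

/* Hypothetical wrapper, not part of this file. */
kern_return_t
example_flush_cache_range(vm_map_t map, vm_offset_t address, vm_size_t size)
{
	vm_machine_attribute_val_t value = MATTR_VAL_CACHE_FLUSH;

	return vm_map_machine_attribute(map, address, size,
					MATTR_CACHE, &value);
}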
4712 | |
4713 | |
4714 | #if MACH_KDB
4715 | |
4716 | #define printf kdbprintf |
4717 | |
4718 | /* |
4719 | * vm_map_print: [ debug ] |
4720 | */ |
4721 | void vm_map_print(map) |
4722 | vm_map_t map; |
4723 | { |
4724 | vm_map_entry_t entry; |
4725 | |
4726 | iprintf("Task map 0x%X: pmap=0x%X,", |
4727 | (vm_offset_t) map, (vm_offset_t) (map->pmap)); |
4728 | printf("ref=%d,nentries=%d,", map->ref_count, map->hdr.nentries); |
4729 | printf("version=%d\n", map->timestamp); |
4730 | indent += 2; |
4731 | for (entry = vm_map_first_entry(map);
4732 | entry != vm_map_to_entry(map);
4733 | entry = entry->vme_next) {
4734 | static char *inheritance_name[3] = { "share", "copy", "none"}; |
4735 | |
4736 | iprintf("map entry 0x%X: ", (vm_offset_t) entry); |
4737 | printf("start=0x%X, end=0x%X, ", |
4738 | (vm_offset_t) entry->vme_start, (vm_offset_t) entry->vme_end);
4739 | printf("prot=%X/%X/%s, ", |
4740 | entry->protection, |
4741 | entry->max_protection, |
4742 | inheritance_name[entry->inheritance]); |
4743 | if (entry->wired_count != 0) { |
4744 | printf("wired("); |
4745 | if (entry->user_wired_count != 0) |
4746 | printf("u"); |
4747 | if (entry->wired_count > |
4748 | ((entry->user_wired_count == 0) ? 0 : 1)) |
4749 | printf("k"); |
4750 | printf(") "); |
4751 | } |
4752 | if (entry->in_transition) { |
4753 | printf("in transition"); |
4754 | if (entry->needs_wakeup) |
4755 | printf("(wake request)"); |
4756 | printf(", "); |
4757 | } |
4758 | if (entry->is_sub_map) { |
4759 | printf("submap=0x%X, offset=0x%X\n", |
4760 | (vm_offset_t) entry->object.sub_map, |
4761 | (vm_offset_t) entry->offset); |
4762 | } else { |
4763 | printf("object=0x%X, offset=0x%X", |
4764 | (vm_offset_t) entry->object.vm_object, |
4765 | (vm_offset_t) entry->offset); |
4766 | if (entry->is_shared) |
4767 | printf(", shared"); |
4768 | if (entry->needs_copy) |
4769 | printf(", copy needed"); |
4770 | printf("\n"); |
4771 | |
4772 | if ((entry->vme_prev == vm_map_to_entry(map)) ||
4773 | (entry->vme_prev->object.vm_object != entry->object.vm_object)) {
4774 | indent += 2; |
4775 | vm_object_print(entry->object.vm_object); |
4776 | indent -= 2; |
4777 | } |
4778 | } |
4779 | } |
4780 | indent -= 2; |
4781 | } |
4782 | |
4783 | /* |
4784 | * Routine: vm_map_copy_print |
4785 | * Purpose: |
4786 | * Pretty-print a copy object for ddb. |
4787 | */ |
4788 | |
4789 | void vm_map_copy_print(copy) |
4790 | vm_map_copy_t copy; |
4791 | { |
4792 | int i, npages; |
4793 | |
4794 | printf("copy object 0x%x\n", copy); |
4795 | |
4796 | indent += 2; |
4797 | |
4798 | iprintf("type=%d", copy->type); |
4799 | switch (copy->type) { |
4800 | case VM_MAP_COPY_ENTRY_LIST:
4801 | printf("[entry_list]"); |
4802 | break; |
4803 | |
4804 | case VM_MAP_COPY_OBJECT:
4805 | printf("[object]"); |
4806 | break; |
4807 | |
4808 | case VM_MAP_COPY_PAGE_LIST:
4809 | printf("[page_list]"); |
4810 | break; |
4811 | |
4812 | default: |
4813 | printf("[bad type]"); |
4814 | break; |
4815 | } |
4816 | printf(", offset=0x%x", copy->offset); |
4817 | printf(", size=0x%x\n", copy->size); |
4818 | |
4819 | switch (copy->type) { |
4820 | case VM_MAP_COPY_ENTRY_LIST:
4821 | /* XXX add stuff here */ |
4822 | break; |
4823 | |
4824 | case VM_MAP_COPY_OBJECT:
4825 | iprintf("object=0x%x\n", copy->cpy_object);
4826 | break; |
4827 | |
4828 | case VM_MAP_COPY_PAGE_LIST:
4829 | iprintf("npages=%d", copy->cpy_npages);
4830 | printf(", cont=%x", copy->cpy_cont);
4831 | printf(", cont_args=%x\n", copy->cpy_cont_args);
4832 | if (copy->cpy_npages < 0) {
4833 | npages = 0;
4834 | } else if (copy->cpy_npages > VM_MAP_COPY_PAGE_LIST_MAX) {
4835 | npages = VM_MAP_COPY_PAGE_LIST_MAX;
4836 | } else {
4837 | npages = copy->cpy_npages;
4838 | } |
4839 | iprintf("copy->cpy_page_list[0..%d] = {", npages); |
4840 | for (i = 0; i < npages - 1; i++) { |
4841 | printf("0x%x, ", copy->cpy_page_listc_u.c_p.page_list[i]); |
4842 | } |
4843 | if (npages > 0) { |
4844 | printf("0x%x", copy->cpy_page_listc_u.c_p.page_list[npages - 1]); |
4845 | } |
4846 | printf("}\n"); |
4847 | break; |
4848 | } |
4849 | |
4850 | indent -= 2; |
4851 | } |
4852 | #endif /* MACH_KDB */ |