File: | obj-scan-build/../vm/vm_map.c |
Location: | line 3363, column 4 |
Description: | Value stored to 'src_size' is never read |
1 | /* |
2 | * Mach Operating System |
3 | * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University. |
4 | * Copyright (c) 1993,1994 The University of Utah and |
5 | * the Computer Systems Laboratory (CSL). |
6 | * All rights reserved. |
7 | * |
8 | * Permission to use, copy, modify and distribute this software and its |
9 | * documentation is hereby granted, provided that both the copyright |
10 | * notice and this permission notice appear in all copies of the |
11 | * software, derivative works or modified versions, and any portions |
12 | * thereof, and that both notices appear in supporting documentation. |
13 | * |
14 | * CARNEGIE MELLON, THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF |
15 | * THIS SOFTWARE IN ITS "AS IS" CONDITION, AND DISCLAIM ANY LIABILITY |
16 | * OF ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF |
17 | * THIS SOFTWARE. |
18 | * |
19 | * Carnegie Mellon requests users of this software to return to |
20 | * |
21 | * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU |
22 | * School of Computer Science |
23 | * Carnegie Mellon University |
24 | * Pittsburgh PA 15213-3890 |
25 | * |
26 | * any improvements or extensions that they make and grant Carnegie Mellon |
27 | * the rights to redistribute these changes. |
28 | */ |
29 | /* |
30 | * File: vm/vm_map.c |
31 | * Author: Avadis Tevanian, Jr., Michael Wayne Young |
32 | * Date: 1985 |
33 | * |
34 | * Virtual memory mapping module. |
35 | */ |
36 | |
37 | #include <kern/printf.h> |
38 | #include <mach/kern_return.h> |
39 | #include <mach/port.h> |
40 | #include <mach/vm_attributes.h> |
41 | #include <mach/vm_param.h> |
42 | #include <kern/assert.h> |
43 | #include <kern/debug.h> |
44 | #include <kern/kalloc.h> |
45 | #include <kern/rbtree.h> |
46 | #include <kern/slab.h> |
47 | #include <vm/pmap.h> |
48 | #include <vm/vm_fault.h> |
49 | #include <vm/vm_map.h> |
50 | #include <vm/vm_object.h> |
51 | #include <vm/vm_page.h> |
52 | #include <vm/vm_resident.h> |
53 | #include <vm/vm_kern.h> |
54 | #include <ipc/ipc_port.h> |
55 | |
56 | #if MACH_KDB |
57 | #include <ddb/db_output.h> |
58 | #include <vm/vm_print.h> |
59 | #endif /* MACH_KDB */ |
60 | |
61 | /* |
62 | * Macros to copy a vm_map_entry. We must be careful to correctly |
63 | * manage the wired page count. vm_map_entry_copy() creates a new |
64 | * map entry to the same memory - the wired count in the new entry |
65 | * must be set to zero. vm_map_entry_copy_full() creates a new |
66 | * entry that is identical to the old entry. This preserves the |
67 | * wire count; it's used for map splitting and cache changing in |
68 | * vm_map_copyout. |
69 | */ |
70 | #define vm_map_entry_copy(NEW,OLD) \
71 | MACRO_BEGIN \
72 | *(NEW) = *(OLD); \
73 | (NEW)->is_shared = FALSE; \
74 | (NEW)->needs_wakeup = FALSE; \
75 | (NEW)->in_transition = FALSE; \
76 | (NEW)->wired_count = 0; \
77 | (NEW)->user_wired_count = 0; \
78 | MACRO_END |
79 | |
80 | #define vm_map_entry_copy_full(NEW,OLD) (*(NEW) = *(OLD)) |
81 | |
82 | /* |
83 | * Virtual memory maps provide for the mapping, protection, |
84 | * and sharing of virtual memory objects. In addition, |
85 | * this module provides for an efficient virtual copy of |
86 | * memory from one map to another. |
87 | * |
88 | * Synchronization is required prior to most operations. |
89 | * |
90 | * Maps consist of an ordered doubly-linked list of simple |
91 | * entries; a hint and a red-black tree are used to speed up lookups. |
92 | * |
93 | * Sharing maps have been deleted from this version of Mach. |
94 | * All shared objects are now mapped directly into the respective |
95 | * maps. This requires a change in the copy on write strategy; |
96 | * the asymmetric (delayed) strategy is used for shared temporary |
97 | * objects instead of the symmetric (shadow) strategy. This is |
98 | * selected by the (new) use_shared_copy bit in the object. See |
99 | * vm_object_copy_temporary in vm_object.c for details. All maps |
100 | * are now "top level" maps (either task map, kernel map or submap |
101 | * of the kernel map). |
102 | * |
103 | * Since portions of maps are specified by start/end addresses, |
104 | * which may not align with existing map entries, all |
105 | * routines merely "clip" entries to these start/end values. |
106 | * [That is, an entry is split into two, bordering at a |
107 | * start or end value.] Note that these clippings may not |
108 | * always be necessary (as the two resulting entries are then |
109 | * not changed); however, the clipping is done for convenience. |
110 | * No attempt is currently made to "glue back together" two |
111 | * abutting entries. |
112 | * |
113 | * The symmetric (shadow) copy strategy implements virtual copy |
114 | * by copying VM object references from one map to |
115 | * another, and then marking both regions as copy-on-write. |
116 | * It is important to note that only one writeable reference |
117 | * to a VM object region exists in any map when this strategy |
118 | * is used -- this means that shadow object creation can be |
119 | * delayed until a write operation occurs. The asymmetric (delayed) |
120 | * strategy allows multiple maps to have writeable references to |
121 | * the same region of a vm object, and hence cannot delay creating |
122 | * its copy objects. See vm_object_copy_temporary() in vm_object.c. |
123 | * Copying of permanent objects is completely different; see |
124 | * vm_object_copy_strategically() in vm_object.c. |
125 | */ |
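/*
 * [Editor's illustrative sketch -- not part of vm_map.c.]  The clipping
 * described above splits one [start, end) entry into two abutting
 * entries at a given address.  With a simplified, hypothetical entry
 * type the split arithmetic looks roughly like this:
 */
struct toy_entry { unsigned long start, end, offset; };

/* Clip *e at addr; return the front piece [e->start, addr). */
static struct toy_entry toy_clip_start(struct toy_entry *e, unsigned long addr)
{
	struct toy_entry front = *e;	/* copy of the original entry         */
	front.end = addr;		/* front piece ends at the clip point */
	e->offset += addr - e->start;	/* back piece maps later data ...     */
	e->start = addr;		/* ... and starts at the clip point   */
	return front;			/* caller links the front piece in    */
}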
126 | |
127 | struct kmem_cache vm_map_cache; /* cache for vm_map structures */ |
128 | struct kmem_cache vm_map_entry_cache; /* cache for vm_map_entry structures */ |
129 | struct kmem_cache vm_map_kentry_cache; /* cache for kernel entry structures */ |
130 | struct kmem_cache vm_map_copy_cache; /* cache for vm_map_copy structures */ |
131 | |
132 | /* |
133 | * Placeholder object for submap operations. This object is dropped |
134 | * into the range by a call to vm_map_find, and removed when |
135 | * vm_map_submap creates the submap. |
136 | */ |
137 | |
138 | static struct vm_object vm_submap_object_store; |
139 | vm_object_t vm_submap_object = &vm_submap_object_store; |
140 | |
141 | /* |
142 | * vm_map_init: |
143 | * |
144 | * Initialize the vm_map module. Must be called before |
145 | * any other vm_map routines. |
146 | * |
147 | * Map and entry structures are allocated from caches -- we must |
148 | * initialize those caches. |
149 | * |
150 | * There are three caches of interest: |
151 | * |
152 | * vm_map_cache: used to allocate maps. |
153 | * vm_map_entry_cache: used to allocate map entries. |
154 | * vm_map_kentry_cache: used to allocate map entries for the kernel. |
155 | * |
156 | * Kernel map entries are allocated from a special cache, using a custom |
157 | * page allocation function to avoid recursion. It would be difficult |
158 | * (perhaps impossible) for the kernel to allocate more memory to an entry |
159 | * cache when it became empty since the very act of allocating memory |
160 | * implies the creation of a new entry. |
161 | */ |
162 | |
163 | vm_offset_t kentry_data; |
164 | vm_size_t kentry_data_size = KENTRY_DATA_SIZE; |
165 | |
166 | static vm_offset_t kentry_pagealloc(vm_size_t size) |
167 | { |
168 | vm_offset_t result; |
169 | |
170 | if (size > kentry_data_size) |
171 | panic("vm_map: kentry memory exhausted"); |
172 | |
173 | result = kentry_data; |
174 | kentry_data += size; |
175 | kentry_data_size -= size; |
176 | return result; |
177 | } |
178 | |
179 | void vm_map_init(void) |
180 | { |
181 | kmem_cache_init(&vm_map_cache, "vm_map", sizeof(struct vm_map), 0, |
182 | NULL, NULL, NULL, 0); |
183 | kmem_cache_init(&vm_map_entry_cache, "vm_map_entry", |
184 | sizeof(struct vm_map_entry), 0, NULL, NULL, NULL, 0); |
185 | kmem_cache_init(&vm_map_kentry_cache, "vm_map_kentry", |
186 | sizeof(struct vm_map_entry), 0, NULL, kentry_pagealloc, |
187 | NULL, KMEM_CACHE_NOCPUPOOL | KMEM_CACHE_NOOFFSLAB |
188 | | KMEM_CACHE_NORECLAIM); |
189 | kmem_cache_init(&vm_map_copy_cache, "vm_map_copy", |
190 | sizeof(struct vm_map_copy), 0, NULL, NULL, NULL, 0); |
191 | |
192 | /* |
193 | * Submap object is initialized by vm_object_init. |
194 | */ |
195 | } |
196 | |
197 | void vm_map_setup( |
198 | vm_map_t map, |
199 | pmap_t pmap, |
200 | vm_offset_t min, |
201 | vm_offset_t max, |
202 | boolean_t pageable) |
203 | { |
204 | vm_map_first_entry(map) = vm_map_to_entry(map); |
205 | vm_map_last_entry(map) = vm_map_to_entry(map); |
206 | map->hdr.nentries = 0; |
207 | map->hdr.entries_pageable = pageable; |
208 | rbtree_init(&map->hdr.tree); |
209 | |
210 | map->size = 0; |
211 | map->ref_count = 1; |
212 | map->pmap = pmap; |
213 | map->min_offset = min; |
214 | map->max_offset = max; |
215 | map->wiring_required = FALSE; |
216 | map->wait_for_space = FALSE; |
217 | map->first_free = vm_map_to_entry(map); |
218 | map->hint = vm_map_to_entry(map); |
219 | vm_map_lock_init(map); |
220 | simple_lock_init(&map->ref_lock); |
221 | simple_lock_init(&map->hint_lock); |
222 | } |
223 | |
224 | /* |
225 | * vm_map_create: |
226 | * |
227 | * Creates and returns a new empty VM map with |
228 | * the given physical map structure, and having |
229 | * the given lower and upper address bounds. |
230 | */ |
231 | vm_map_t vm_map_create( |
232 | pmap_t pmap, |
233 | vm_offset_t min, |
234 | vm_offset_t max, |
235 | boolean_t pageable) |
236 | { |
237 | vm_map_t result; |
238 | |
239 | result = (vm_map_t) kmem_cache_alloc(&vm_map_cache); |
240 | if (result == VM_MAP_NULL) |
241 | panic("vm_map_create"); |
242 | |
243 | vm_map_setup(result, pmap, min, max, pageable); |
244 | |
245 | return(result); |
246 | } |
247 | |
248 | /* |
249 | * vm_map_entry_create: [ internal use only ] |
250 | * |
251 | * Allocates a VM map entry for insertion in the |
252 | * given map (or map copy). No fields are filled. |
253 | */ |
254 | #define vm_map_entry_create(map) \
255 | _vm_map_entry_create(&(map)->hdr) |
256 | |
257 | #define vm_map_copy_entry_create(copy) \
258 | _vm_map_entry_create(&(copy)->cpy_hdr) |
259 | |
260 | vm_map_entry_t _vm_map_entry_create(map_header) |
261 | const struct vm_map_header *map_header; |
262 | { |
263 | kmem_cache_t cache; |
264 | vm_map_entry_t entry; |
265 | |
266 | if (map_header->entries_pageable) |
267 | cache = &vm_map_entry_cache; |
268 | else |
269 | cache = &vm_map_kentry_cache; |
270 | |
271 | entry = (vm_map_entry_t) kmem_cache_alloc(cache); |
272 | if (entry == VM_MAP_ENTRY_NULL) |
273 | panic("vm_map_entry_create"); |
274 | |
275 | return(entry); |
276 | } |
277 | |
278 | /* |
279 | * vm_map_entry_dispose: [ internal use only ] |
280 | * |
281 | * Inverse of vm_map_entry_create. |
282 | */ |
283 | #define vm_map_entry_dispose(map, entry) \
284 | _vm_map_entry_dispose(&(map)->hdr, (entry)) |
285 | |
286 | #define vm_map_copy_entry_dispose(map, entry) \
287 | _vm_map_entry_dispose(&(copy)->cpy_hdr, (entry)) |
288 | |
289 | void _vm_map_entry_dispose(map_header, entry) |
290 | const struct vm_map_header *map_header; |
291 | vm_map_entry_t entry; |
292 | { |
293 | kmem_cache_t cache; |
294 | |
295 | if (map_header->entries_pageable) |
296 | cache = &vm_map_entry_cache; |
297 | else |
298 | cache = &vm_map_kentry_cache; |
299 | |
300 | kmem_cache_free(cache, (vm_offset_t) entry); |
301 | } |
302 | |
303 | /* |
304 | * Red-black tree lookup/insert comparison functions |
305 | */ |
306 | static inline int vm_map_entry_cmp_lookup(vm_offset_t addr, |
307 | const struct rbtree_node *node) |
308 | { |
309 | struct vm_map_entry *entry; |
310 | |
311 | entry = rbtree_entry(node, struct vm_map_entry, tree_node); |
312 | |
313 | if (addr < entry->vme_start) |
314 | return -1; |
315 | else if (addr < entry->vme_end) |
316 | return 0; |
317 | else |
318 | return 1; |
319 | } |
320 | |
321 | static inline int vm_map_entry_cmp_insert(const struct rbtree_node *a, |
322 | const struct rbtree_node *b) |
323 | { |
324 | struct vm_map_entry *entry; |
325 | |
326 | entry = rbtree_entry(a, struct vm_map_entry, tree_node); |
327 | return vm_map_entry_cmp_lookup(entry->vme_start, b); |
328 | } |
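/*
 * [Editor's sketch -- not part of vm_map.c.]  The lookup comparator
 * above treats an entry as the half-open interval [vme_start, vme_end)
 * and returns <0, 0 or >0, so an ordinary binary search can find the
 * entry containing an address.  A self-contained equivalent over a
 * sorted array, using the standard bsearch() convention:
 */
#include <stdlib.h>

struct toy_range { unsigned long start, end; };

static int toy_range_cmp(const void *key, const void *elem)
{
	unsigned long addr = *(const unsigned long *)key;
	const struct toy_range *r = elem;

	if (addr < r->start)
		return -1;		/* search the left half  */
	if (addr < r->end)
		return 0;		/* addr is in this range */
	return 1;			/* search the right half */
}

/* Usage: bsearch(&addr, ranges, nranges, sizeof *ranges, toy_range_cmp); */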
329 | |
330 | /* |
331 | * vm_map_entry_{un,}link: |
332 | * |
333 | * Insert/remove entries from maps (or map copies). |
334 | * |
335 | * The start and end addresses of the entries must be properly set |
336 | * before using these macros. |
337 | */ |
338 | #define vm_map_entry_link(map, after_where, entry) \
339 | _vm_map_entry_link(&(map)->hdr, after_where, entry) |
340 | |
341 | #define vm_map_copy_entry_link(copy, after_where, entry) \
342 | _vm_map_entry_link(&(copy)->cpy_hdr, after_where, entry) |
343 | |
344 | #define _vm_map_entry_link(hdr, after_where, entry) \
345 | MACRO_BEGIN \
346 | (hdr)->nentries++; \
347 | (entry)->vme_prev = (after_where); \
348 | (entry)->vme_next = (after_where)->vme_next; \
349 | (entry)->vme_prev->vme_next = \
350 | (entry)->vme_next->vme_prev = (entry); \
351 | rbtree_insert(&(hdr)->tree, &(entry)->tree_node, \
352 | vm_map_entry_cmp_insert); \
353 | MACRO_END |
354 | |
355 | #define vm_map_entry_unlink(map, entry) \
356 | _vm_map_entry_unlink(&(map)->hdr, entry) |
357 | |
358 | #define vm_map_copy_entry_unlink(copy, entry) \
359 | _vm_map_entry_unlink(&(copy)->cpy_hdr, entry) |
360 | |
361 | #define _vm_map_entry_unlink(hdr, entry) \
362 | MACRO_BEGIN \
363 | (hdr)->nentries--; \
364 | (entry)->vme_next->vme_prev = (entry)->vme_prev; \
365 | (entry)->vme_prev->vme_next = (entry)->vme_next; \
366 | rbtree_remove(&(hdr)->tree, &(entry)->tree_node); \
367 | MACRO_END |
368 | |
369 | /* |
370 | * vm_map_reference: |
371 | * |
372 | * Creates another valid reference to the given map. |
373 | * |
374 | */ |
375 | void vm_map_reference(vm_map_t map) |
376 | { |
377 | if (map == VM_MAP_NULL) |
378 | return; |
379 | |
380 | simple_lock(&map->ref_lock); |
381 | map->ref_count++; |
382 | simple_unlock(&map->ref_lock); |
383 | } |
384 | |
385 | /* |
386 | * vm_map_deallocate: |
387 | * |
388 | * Removes a reference from the specified map, |
389 | * destroying it if no references remain. |
390 | * The map should not be locked. |
391 | */ |
392 | void vm_map_deallocate(vm_map_t map) |
393 | { |
394 | int c; |
395 | |
396 | if (map == VM_MAP_NULL) |
397 | return; |
398 | |
399 | simple_lock(&map->ref_lock); |
400 | c = --map->ref_count; |
401 | simple_unlock(&map->ref_lock); |
402 | |
403 | if (c > 0) { |
404 | return; |
405 | } |
406 | |
407 | projected_buffer_collect(map); |
408 | (void) vm_map_delete(map, map->min_offset, map->max_offset); |
409 | |
410 | pmap_destroy(map->pmap); |
411 | |
412 | kmem_cache_free(&vm_map_cache, (vm_offset_t) map); |
413 | } |
414 | |
415 | /* |
416 | * SAVE_HINT: |
417 | * |
418 | * Saves the specified entry as the hint for |
419 | * future lookups. Performs necessary interlocks. |
420 | */ |
421 | #define SAVE_HINT(map,value) \
422 | simple_lock(&(map)->hint_lock); \
423 | (map)->hint = (value); \
424 | simple_unlock(&(map)->hint_lock); |
425 | |
426 | /* |
427 | * vm_map_lookup_entry: [ internal use only ] |
428 | * |
429 | * Finds the map entry containing (or |
430 | * immediately preceding) the specified address |
431 | * in the given map; the entry is returned |
432 | * in the "entry" parameter. The boolean |
433 | * result indicates whether the address is |
434 | * actually contained in the map. |
435 | */ |
436 | boolean_t vm_map_lookup_entry( |
437 | vm_map_t map, |
438 | vm_offset_t address, |
439 | vm_map_entry_t *entry) /* OUT */ |
440 | { |
441 | struct rbtree_node *node; |
442 | vm_map_entry_t hint; |
443 | |
444 | /* |
445 | * First, make a quick check to see if we are already |
446 | * looking at the entry we want (which is often the case). |
447 | */ |
448 | |
449 | simple_lock(&map->hint_lock); |
450 | hint = map->hint; |
451 | simple_unlock(&map->hint_lock); |
452 | |
453 | if ((hint != vm_map_to_entry(map)) && (address >= hint->vme_start)) { |
454 | if (address < hint->vme_end) { |
455 | *entry = hint; |
456 | return(TRUE); |
457 | } else { |
458 | vm_map_entry_t next = hint->vme_next; |
459 | |
460 | if ((next == vm_map_to_entry(map)) |
461 | || (address < next->vme_start)) { |
462 | *entry = hint; |
463 | return(FALSE); |
464 | } |
465 | } |
466 | } |
467 | |
468 | /* |
469 | * If the hint didn't help, use the red-black tree. |
470 | */ |
471 | |
472 | node = rbtree_lookup_nearest(&map->hdr.tree, address, |
473 | vm_map_entry_cmp_lookup, RBTREE_LEFT); |
474 | |
475 | if (node == NULL) { |
476 | *entry = vm_map_to_entry(map); |
477 | SAVE_HINT(map, *entry); |
478 | return(FALSE); |
479 | } else { |
480 | *entry = rbtree_entry(node, struct vm_map_entry, tree_node); |
481 | SAVE_HINT(map, *entry); |
482 | return((address < (*entry)->vme_end) ? TRUE : FALSE); |
483 | } |
484 | } |
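/*
 * [Editor's sketch -- not part of vm_map.c.]  The routine above first
 * checks a cached "hint" entry and only falls back to the red-black
 * tree when the hint misses.  The same hint-then-search pattern over a
 * sorted array of hypothetical spans, purely for illustration:
 */
struct toy_span { unsigned long start, end; };

static int toy_lookup(const struct toy_span *v, int n, int *hint,
		      unsigned long addr)
{
	int i = *hint;

	/* Fast path: the last hit is often still the right span. */
	if (i >= 0 && i < n && addr >= v[i].start && addr < v[i].end)
		return i;

	/* Slow path: a linear scan stands in for the tree lookup. */
	for (i = 0; i < n; i++)
		if (addr >= v[i].start && addr < v[i].end)
			return *hint = i;	/* remember the new hint */

	return -1;				/* address not mapped    */
}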
485 | |
486 | /* |
487 | * Routine: invalid_user_access |
488 | * |
489 | * Verifies whether user access is valid. |
490 | */ |
491 | |
492 | boolean_t |
493 | invalid_user_access( |
494 | vm_map_t map, |
495 | vm_offset_t start, |
496 | vm_offset_t end, |
497 | vm_prot_t prot) |
498 | { |
499 | vm_map_entry_t entry; |
500 | |
501 | return (map == VM_MAP_NULL || map == kernel_map || |
502 | !vm_map_lookup_entry(map, start, &entry) || |
503 | entry->vme_end < end || |
504 | (prot & ~(entry->protection))); |
505 | } |
506 | |
507 | |
508 | /* |
509 | * Routine: vm_map_find_entry |
510 | * Purpose: |
511 | * Allocate a range in the specified virtual address map, |
512 | * returning the entry allocated for that range. |
513 | * Used by kmem_alloc, etc. Returns wired entries. |
514 | * |
515 | * The map must be locked. |
516 | * |
517 | * If an entry is allocated, the object/offset fields |
518 | * are initialized to zero. If an object is supplied, |
519 | * then an existing entry may be extended. |
520 | */ |
521 | kern_return_t vm_map_find_entry( |
522 | vm_map_t map, |
523 | vm_offset_t *address, /* OUT */ |
524 | vm_size_t size, |
525 | vm_offset_t mask, |
526 | vm_object_t object, |
527 | vm_map_entry_t *o_entry) /* OUT */ |
528 | { |
529 | vm_map_entry_t entry, new_entry; |
530 | vm_offset_t start; |
531 | vm_offset_t end; |
532 | |
533 | /* |
534 | * Look for the first possible address; |
535 | * if there's already something at this |
536 | * address, we have to start after it. |
537 | */ |
538 | |
539 | if ((entry = map->first_free) == vm_map_to_entry(map)) |
540 | start = map->min_offset; |
541 | else |
542 | start = entry->vme_end; |
543 | |
544 | /* |
545 | * In any case, the "entry" always precedes |
546 | * the proposed new region throughout the loop: |
547 | */ |
548 | |
549 | while (TRUE) { |
550 | vm_map_entry_t next; |
551 | |
552 | /* |
553 | * Find the end of the proposed new region. |
554 | * Be sure we didn't go beyond the end, or |
555 | * wrap around the address. |
556 | */ |
557 | |
558 | if (((start + mask) & ~mask) < start) { |
559 | printf_once("no more room for vm_map_find_entry in %p\n", map); |
560 | return(KERN_NO_SPACE); |
561 | } |
562 | start = ((start + mask) & ~mask); |
563 | end = start + size; |
564 | |
565 | if ((end > map->max_offset) || (end < start)) { |
566 | printf_once("no more room for vm_map_find_entry in %p\n", map); |
567 | return(KERN_NO_SPACE); |
568 | } |
569 | |
570 | /* |
571 | * If there are no more entries, we must win. |
572 | */ |
573 | |
574 | next = entry->vme_next; |
575 | if (next == vm_map_to_entry(map)) |
576 | break; |
577 | |
578 | /* |
579 | * If there is another entry, it must be |
580 | * after the end of the potential new region. |
581 | */ |
582 | |
583 | if (next->vme_start >= end) |
584 | break; |
585 | |
586 | /* |
587 | * Didn't fit -- move to the next entry. |
588 | */ |
589 | |
590 | entry = next; |
591 | start = entry->vme_end; |
592 | } |
593 | |
594 | /* |
595 | * At this point, |
596 | * "start" and "end" should define the endpoints of the |
597 | * available new range, and |
598 | * "entry" should refer to the region before the new |
599 | * range, and |
600 | * |
601 | * the map should be locked. |
602 | */ |
603 | |
604 | *address = start; |
605 | |
606 | /* |
607 | * See whether we can avoid creating a new entry by |
608 | * extending one of our neighbors. [So far, we only attempt to |
609 | * extend from below.] |
610 | */ |
611 | |
612 | if ((object != VM_OBJECT_NULL) && |
613 | (entry != vm_map_to_entry(map)) && |
614 | (entry->vme_end == start) && |
615 | (!entry->is_shared) && |
616 | (!entry->is_sub_map) && |
617 | (entry->object.vm_object == object) && |
618 | (entry->needs_copy == FALSE) && |
619 | (entry->inheritance == VM_INHERIT_DEFAULT) && |
620 | (entry->protection == VM_PROT_DEFAULT) && |
621 | (entry->max_protection == VM_PROT_ALL) && |
622 | (entry->wired_count == 1) && |
623 | (entry->user_wired_count == 0) && |
624 | (entry->projected_on == 0)) { |
625 | /* |
626 | * Because this is a special case, |
627 | * we don't need to use vm_object_coalesce. |
628 | */ |
629 | |
630 | entry->vme_end = end; |
631 | new_entry = entry; |
632 | } else { |
633 | new_entry = vm_map_entry_create(map); |
634 | |
635 | new_entry->vme_start = start; |
636 | new_entry->vme_end = end; |
637 | |
638 | new_entry->is_shared = FALSE; |
639 | new_entry->is_sub_map = FALSE; |
640 | new_entry->object.vm_object = VM_OBJECT_NULL; |
641 | new_entry->offset = (vm_offset_t) 0; |
642 | |
643 | new_entry->needs_copy = FALSE; |
644 | |
645 | new_entry->inheritance = VM_INHERIT_DEFAULT; |
646 | new_entry->protection = VM_PROT_DEFAULT; |
647 | new_entry->max_protection = VM_PROT_ALL; |
648 | new_entry->wired_count = 1; |
649 | new_entry->user_wired_count = 0; |
650 | |
651 | new_entry->in_transition = FALSE; |
652 | new_entry->needs_wakeup = FALSE; |
653 | new_entry->projected_on = 0; |
654 | |
655 | /* |
656 | * Insert the new entry into the list |
657 | */ |
658 | |
659 | vm_map_entry_link(map, entry, new_entry); |
660 | } |
661 | |
662 | map->size += size; |
663 | |
664 | /* |
665 | * Update the free space hint and the lookup hint |
666 | */ |
667 | |
668 | map->first_free = new_entry; |
669 | SAVE_HINT(map, new_entry); |
670 | |
671 | *o_entry = new_entry; |
672 | return(KERN_SUCCESS); |
673 | } |
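/*
 * [Editor's sketch -- not part of vm_map.c.]  Both allocation loops in
 * this file round a candidate address up to an alignment boundary with
 * (start + mask) & ~mask, where mask is alignment-1, and treat a result
 * that moved backwards as address-space wrap-around:
 */
#include <stdio.h>

int main(void)
{
	unsigned long start = 0x12345;
	unsigned long mask  = 0x0fff;		/* 4 KiB alignment - 1 */
	unsigned long rounded = (start + mask) & ~mask;

	if (rounded < start)
		printf("address space wrapped around\n");
	else
		printf("0x%lx rounds up to 0x%lx\n", start, rounded);
	return 0;
}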
674 | |
675 | boolean_t vm_map_pmap_enter_print = FALSE; |
676 | boolean_t vm_map_pmap_enter_enable = FALSE; |
677 | |
678 | /* |
679 | * Routine: vm_map_pmap_enter |
680 | * |
681 | * Description: |
682 | * Force pages from the specified object to be entered into |
683 | * the pmap at the specified address if they are present. |
684 | * As soon as a page not found in the object the scan ends. |
685 | * |
686 | * Returns: |
687 | * Nothing. |
688 | * |
689 | * In/out conditions: |
690 | * The source map should not be locked on entry. |
691 | */ |
692 | void |
693 | vm_map_pmap_enter( |
694 | vm_map_t map, |
695 | vm_offset_t addr, |
696 | vm_offset_t end_addr, |
697 | vm_object_t object, |
698 | vm_offset_t offset, |
699 | vm_prot_t protection) |
700 | { |
701 | while (addr < end_addr) { |
702 | vm_page_t m; |
703 | |
704 | vm_object_lock(object); |
705 | vm_object_paging_begin(object); |
706 | |
707 | m = vm_page_lookup(object, offset); |
708 | if (m == VM_PAGE_NULL || m->absent) { |
709 | vm_object_paging_end(object); |
710 | vm_object_unlock(object); |
711 | return; |
712 | } |
713 | |
714 | if (vm_map_pmap_enter_print) { |
715 | printf("vm_map_pmap_enter:"); |
716 | printf("map: %p, addr: %lx, object: %p, offset: %lx\n", |
717 | map, addr, object, offset); |
718 | } |
719 | |
720 | m->busy = TRUE; |
721 | vm_object_unlock(object); |
722 | |
723 | PMAP_ENTER(map->pmap, addr, m, |
724 | protection, FALSE); |
725 | |
726 | vm_object_lock(object); |
727 | PAGE_WAKEUP_DONE(m); |
728 | vm_page_lock_queues(); |
729 | if (!m->active && !m->inactive) |
730 | vm_page_activate(m); |
731 | vm_page_unlock_queues(); |
732 | vm_object_paging_end(object); |
733 | vm_object_unlock(object); |
734 | |
735 | offset += PAGE_SIZE; |
736 | addr += PAGE_SIZE; |
737 | } |
738 | } |
739 | |
740 | /* |
741 | * Routine: vm_map_enter |
742 | * |
743 | * Description: |
744 | * Allocate a range in the specified virtual address map. |
745 | * The resulting range will refer to memory defined by |
746 | * the given memory object and offset into that object. |
747 | * |
748 | * Arguments are as defined in the vm_map call. |
749 | */ |
750 | kern_return_t vm_map_enter( |
751 | vm_map_t map, |
752 | vm_offset_t *address, /* IN/OUT */ |
753 | vm_size_t size, |
754 | vm_offset_t mask, |
755 | boolean_t anywhere, |
756 | vm_object_t object, |
757 | vm_offset_t offset, |
758 | boolean_t needs_copy, |
759 | vm_prot_t cur_protection, |
760 | vm_prot_t max_protection, |
761 | vm_inherit_t inheritance) |
762 | { |
763 | vm_map_entry_t entry; |
764 | vm_offset_t start; |
765 | vm_offset_t end; |
766 | kern_return_t result = KERN_SUCCESS; |
767 | |
768 | #define RETURN(value) { result = value; goto BailOut; } |
769 | |
770 | if (size == 0) |
771 | return KERN_INVALID_ARGUMENT; |
772 | |
773 | StartAgain: ; |
774 | |
775 | start = *address; |
776 | |
777 | if (anywhere) { |
778 | vm_map_lock(map); |
779 | |
780 | /* |
781 | * Calculate the first possible address. |
782 | */ |
783 | |
784 | if (start < map->min_offset) |
785 | start = map->min_offset; |
786 | if (start > map->max_offset) |
787 | RETURN(KERN_NO_SPACE); |
788 | |
789 | /* |
790 | * Look for the first possible address; |
791 | * if there's already something at this |
792 | * address, we have to start after it. |
793 | */ |
794 | |
795 | if (start == map->min_offset) { |
796 | if ((entry = map->first_free) != vm_map_to_entry(map)) |
797 | start = entry->vme_end; |
798 | } else { |
799 | vm_map_entry_t tmp_entry; |
800 | if (vm_map_lookup_entry(map, start, &tmp_entry)) |
801 | start = tmp_entry->vme_end; |
802 | entry = tmp_entry; |
803 | } |
804 | |
805 | /* |
806 | * In any case, the "entry" always precedes |
807 | * the proposed new region throughout the |
808 | * loop: |
809 | */ |
810 | |
811 | while (TRUE) { |
812 | vm_map_entry_t next; |
813 | |
814 | /* |
815 | * Find the end of the proposed new region. |
816 | * Be sure we didn't go beyond the end, or |
817 | * wrap around the address. |
818 | */ |
819 | |
820 | if (((start + mask) & ~mask) < start) { |
821 | printf_once("no more room for vm_map_enter in %p\n", map); |
822 | RETURN(KERN_NO_SPACE); |
823 | } |
824 | start = ((start + mask) & ~mask); |
825 | end = start + size; |
826 | |
827 | if ((end > map->max_offset) || (end < start)) { |
828 | if (map->wait_for_space) { |
829 | if (size <= (map->max_offset - |
830 | map->min_offset)) { |
831 | assert_wait((event_t) map, TRUE); |
832 | vm_map_unlock(map); |
833 | thread_block((void (*)()) 0); |
834 | goto StartAgain; |
835 | } |
836 | } |
837 | |
838 | printf_once("no more room for vm_map_enter in %p\n", map); |
839 | RETURN(KERN_NO_SPACE); |
840 | } |
841 | |
842 | /* |
843 | * If there are no more entries, we must win. |
844 | */ |
845 | |
846 | next = entry->vme_next; |
847 | if (next == vm_map_to_entry(map)) |
848 | break; |
849 | |
850 | /* |
851 | * If there is another entry, it must be |
852 | * after the end of the potential new region. |
853 | */ |
854 | |
855 | if (next->vme_start >= end) |
856 | break; |
857 | |
858 | /* |
859 | * Didn't fit -- move to the next entry. |
860 | */ |
861 | |
862 | entry = next; |
863 | start = entry->vme_end; |
864 | } |
865 | *address = start; |
866 | } else { |
867 | vm_map_entry_t temp_entry; |
868 | |
869 | /* |
870 | * Verify that: |
871 | * the address doesn't itself violate |
872 | * the mask requirement. |
873 | */ |
874 | |
875 | if ((start & mask) != 0) |
876 | return(KERN_NO_SPACE); |
877 | |
878 | vm_map_lock(map); |
879 | |
880 | /* |
881 | * ... the address is within bounds |
882 | */ |
883 | |
884 | end = start + size; |
885 | |
886 | if ((start < map->min_offset) || |
887 | (end > map->max_offset) || |
888 | (start >= end)) { |
889 | RETURN(KERN_INVALID_ADDRESS); |
890 | } |
891 | |
892 | /* |
893 | * ... the starting address isn't allocated |
894 | */ |
895 | |
896 | if (vm_map_lookup_entry(map, start, &temp_entry)) |
897 | RETURN(KERN_NO_SPACE); |
898 | |
899 | entry = temp_entry; |
900 | |
901 | /* |
902 | * ... the next region doesn't overlap the |
903 | * end point. |
904 | */ |
905 | |
906 | if ((entry->vme_next != vm_map_to_entry(map)) && |
907 | (entry->vme_next->vme_start < end)) |
908 | RETURN(KERN_NO_SPACE); |
909 | } |
910 | |
911 | /* |
912 | * At this point, |
913 | * "start" and "end" should define the endpoints of the |
914 | * available new range, and |
915 | * "entry" should refer to the region before the new |
916 | * range, and |
917 | * |
918 | * the map should be locked. |
919 | */ |
920 | |
921 | /* |
922 | * See whether we can avoid creating a new entry (and object) by |
923 | * extending one of our neighbors. [So far, we only attempt to |
924 | * extend from below.] |
925 | */ |
926 | |
927 | if ((object == VM_OBJECT_NULL) && |
928 | (entry != vm_map_to_entry(map)) && |
929 | (entry->vme_end == start) && |
930 | (!entry->is_shared) && |
931 | (!entry->is_sub_map) && |
932 | (entry->inheritance == inheritance) && |
933 | (entry->protection == cur_protection) && |
934 | (entry->max_protection == max_protection) && |
935 | (entry->wired_count == 0) && /* implies user_wired_count == 0 */ |
936 | (entry->projected_on == 0)) { |
937 | if (vm_object_coalesce(entry->object.vm_object, |
938 | VM_OBJECT_NULL, |
939 | entry->offset, |
940 | (vm_offset_t) 0, |
941 | (vm_size_t)(entry->vme_end - entry->vme_start), |
942 | (vm_size_t)(end - entry->vme_end))) { |
943 | |
944 | /* |
945 | * Coalesced the two objects - can extend |
946 | * the previous map entry to include the |
947 | * new range. |
948 | */ |
949 | map->size += (end - entry->vme_end); |
950 | entry->vme_end = end; |
951 | RETURN(KERN_SUCCESS); |
952 | } |
953 | } |
954 | |
955 | /* |
956 | * Create a new entry |
957 | */ |
958 | |
959 | /**/ { |
960 | vm_map_entry_t new_entry; |
961 | |
962 | new_entry = vm_map_entry_create(map); |
963 | |
964 | new_entry->vme_start = start; |
965 | new_entry->vme_end = end; |
966 | |
967 | new_entry->is_shared = FALSE; |
968 | new_entry->is_sub_map = FALSE; |
969 | new_entry->object.vm_object = object; |
970 | new_entry->offset = offset; |
971 | |
972 | new_entry->needs_copy = needs_copy; |
973 | |
974 | new_entry->inheritance = inheritance; |
975 | new_entry->protection = cur_protection; |
976 | new_entry->max_protection = max_protection; |
977 | new_entry->wired_count = 0; |
978 | new_entry->user_wired_count = 0; |
979 | |
980 | new_entry->in_transition = FALSE; |
981 | new_entry->needs_wakeup = FALSE; |
982 | new_entry->projected_on = 0; |
983 | |
984 | /* |
985 | * Insert the new entry into the list |
986 | */ |
987 | |
988 | vm_map_entry_link(map, entry, new_entry); |
989 | map->size += size; |
990 | |
991 | /* |
992 | * Update the free space hint and the lookup hint |
993 | */ |
994 | |
995 | if ((map->first_free == entry) && |
996 | ((entry == vm_map_to_entry(map) ? map->min_offset : entry->vme_end) |
997 | >= new_entry->vme_start)) |
998 | map->first_free = new_entry; |
999 | |
1000 | SAVE_HINT(map, new_entry); |
1001 | |
1002 | vm_map_unlock(map); |
1003 | |
1004 | if ((object != VM_OBJECT_NULL) && |
1005 | (vm_map_pmap_enter_enable) && |
1006 | (!anywhere) && |
1007 | (!needs_copy) && |
1008 | (size < (128*1024))) { |
1009 | vm_map_pmap_enter(map, start, end, |
1010 | object, offset, cur_protection); |
1011 | } |
1012 | |
1013 | return(result); |
1014 | /**/ } |
1015 | |
1016 | BailOut: ; |
1017 | |
1018 | vm_map_unlock(map); |
1019 | return(result); |
1020 | |
1021 | #undef RETURN |
1022 | } |
1023 | |
1024 | /* |
1025 | * vm_map_clip_start: [ internal use only ] |
1026 | * |
1027 | * Asserts that the given entry begins at or after |
1028 | * the specified address; if necessary, |
1029 | * it splits the entry into two. |
1030 | */ |
1031 | #define vm_map_clip_start(map, entry, startaddr) \
1032 | MACRO_BEGIN \
1033 | if ((startaddr) > (entry)->vme_start) \
1034 | _vm_map_clip_start(&(map)->hdr,(entry),(startaddr)); \
1035 | MACRO_END |
1036 | |
1037 | #define vm_map_copy_clip_start(copy, entry, startaddr) \
1038 | MACRO_BEGIN \
1039 | if ((startaddr) > (entry)->vme_start) \
1040 | _vm_map_clip_start(&(copy)->cpy_hdr,(entry),(startaddr)); \
1041 | MACRO_END |
1042 | |
1043 | /* |
1044 | * This routine is called only when it is known that |
1045 | * the entry must be split. |
1046 | */ |
1047 | void _vm_map_clip_start( |
1048 | struct vm_map_header *map_header, |
1049 | vm_map_entry_t entry, |
1050 | vm_offset_t start) |
1051 | { |
1052 | vm_map_entry_t new_entry; |
1053 | |
1054 | /* |
1055 | * Split off the front portion -- |
1056 | * note that we must insert the new |
1057 | * entry BEFORE this one, so that |
1058 | * this entry has the specified starting |
1059 | * address. |
1060 | */ |
1061 | |
1062 | new_entry = _vm_map_entry_create(map_header); |
1063 | vm_map_entry_copy_full(new_entry, entry); |
1064 | |
1065 | new_entry->vme_end = start; |
1066 | entry->offset += (start - entry->vme_start); |
1067 | entry->vme_start = start; |
1068 | |
1069 | _vm_map_entry_link(map_header, entry->vme_prev, new_entry); |
1070 | |
1071 | if (entry->is_sub_map) |
1072 | vm_map_reference(new_entry->object.sub_map); |
1073 | else |
1074 | vm_object_reference(new_entry->object.vm_object); |
1075 | } |
1076 | |
1077 | /* |
1078 | * vm_map_clip_end: [ internal use only ] |
1079 | * |
1080 | * Asserts that the given entry ends at or before |
1081 | * the specified address; if necessary, |
1082 | * it splits the entry into two. |
1083 | */ |
1084 | #define vm_map_clip_end(map, entry, endaddr) \
1085 | MACRO_BEGIN \
1086 | if ((endaddr) < (entry)->vme_end) \
1087 | _vm_map_clip_end(&(map)->hdr,(entry),(endaddr)); \
1088 | MACRO_END |
1089 | |
1090 | #define vm_map_copy_clip_end(copy, entry, endaddr) \
1091 | MACRO_BEGIN \
1092 | if ((endaddr) < (entry)->vme_end) \
1093 | _vm_map_clip_end(&(copy)->cpy_hdr,(entry),(endaddr)); \
1094 | MACRO_END |
1095 | |
1096 | /* |
1097 | * This routine is called only when it is known that |
1098 | * the entry must be split. |
1099 | */ |
1100 | void _vm_map_clip_end( |
1101 | struct vm_map_header *map_header, |
1102 | vm_map_entry_t entry, |
1103 | vm_offset_t end) |
1104 | { |
1105 | vm_map_entry_t new_entry; |
1106 | |
1107 | /* |
1108 | * Create a new entry and insert it |
1109 | * AFTER the specified entry |
1110 | */ |
1111 | |
1112 | new_entry = _vm_map_entry_create(map_header); |
1113 | vm_map_entry_copy_full(new_entry, entry); |
1114 | |
1115 | new_entry->vme_start = entry->vme_end = end; |
1116 | new_entry->offset += (end - entry->vme_start); |
1117 | |
1118 | _vm_map_entry_link(map_header, entry, new_entry); |
1119 | |
1120 | if (entry->is_sub_map) |
1121 | vm_map_reference(new_entry->object.sub_map); |
1122 | else |
1123 | vm_object_reference(new_entry->object.vm_object); |
1124 | } |
1125 | |
1126 | /* |
1127 | * VM_MAP_RANGE_CHECK: [ internal use only ] |
1128 | * |
1129 | * Asserts that the starting and ending region |
1130 | * addresses fall within the valid range of the map. |
1131 | */ |
1132 | #define VM_MAP_RANGE_CHECK(map, start, end) \
1133 | { \
1134 | if (start < vm_map_min(map)) \
1135 | start = vm_map_min(map); \
1136 | if (end > vm_map_max(map)) \
1137 | end = vm_map_max(map); \
1138 | if (start > end) \ |
1139 | start = end; \ |
1140 | } |
1141 | |
1142 | /* |
1143 | * vm_map_submap: [ kernel use only ] |
1144 | * |
1145 | * Mark the given range as handled by a subordinate map. |
1146 | * |
1147 | * This range must have been created with vm_map_find using |
1148 | * the vm_submap_object, and no other operations may have been |
1149 | * performed on this range prior to calling vm_map_submap. |
1150 | * |
1151 | * Only a limited number of operations can be performed |
1152 | * within this range after calling vm_map_submap: |
1153 | * vm_fault |
1154 | * [Don't try vm_map_copyin!] |
1155 | * |
1156 | * To remove a submapping, one must first remove the |
1157 | * range from the superior map, and then destroy the |
1158 | * submap (if desired). [Better yet, don't try it.] |
1159 | */ |
1160 | kern_return_t vm_map_submap( |
1161 | vm_map_t map, |
1162 | vm_offset_t start, |
1163 | vm_offset_t end, |
1164 | vm_map_t submap) |
1165 | { |
1166 | vm_map_entry_t entry; |
1167 | kern_return_t result = KERN_INVALID_ARGUMENT; |
1168 | vm_object_t object; |
1169 | |
1170 | vm_map_lock(map); |
1171 | |
1172 | VM_MAP_RANGE_CHECK(map, start, end); |
1173 | |
1174 | if (vm_map_lookup_entry(map, start, &entry)) { |
1175 | vm_map_clip_start(map, entry, start); |
1176 | } |
1177 | else |
1178 | entry = entry->vme_next; |
1179 | |
1180 | vm_map_clip_end(map, entry, end); |
1181 | |
1182 | if ((entry->vme_start == start) && (entry->vme_end == end) && |
1183 | (!entry->is_sub_map) && |
1184 | ((object = entry->object.vm_object) == vm_submap_object) && |
1185 | (object->resident_page_count == 0) && |
1186 | (object->copy == VM_OBJECT_NULL) && |
1187 | (object->shadow == VM_OBJECT_NULL) && |
1188 | (!object->pager_created)) { |
1189 | entry->object.vm_object = VM_OBJECT_NULL; |
1190 | vm_object_deallocate(object); |
1191 | entry->is_sub_map = TRUE; |
1192 | vm_map_reference(entry->object.sub_map = submap); |
1193 | result = KERN_SUCCESS; |
1194 | } |
1195 | vm_map_unlock(map); |
1196 | |
1197 | return(result); |
1198 | } |
1199 | |
1200 | /* |
1201 | * vm_map_protect: |
1202 | * |
1203 | * Sets the protection of the specified address |
1204 | * region in the target map. If "set_max" is |
1205 | * specified, the maximum protection is to be set; |
1206 | * otherwise, only the current protection is affected. |
1207 | */ |
1208 | kern_return_t vm_map_protect( |
1209 | vm_map_t map, |
1210 | vm_offset_t start, |
1211 | vm_offset_t end, |
1212 | vm_prot_t new_prot, |
1213 | boolean_t set_max) |
1214 | { |
1215 | vm_map_entry_t current; |
1216 | vm_map_entry_t entry; |
1217 | |
1218 | vm_map_lock(map); |
1219 | |
1220 | VM_MAP_RANGE_CHECK(map, start, end); |
1221 | |
1222 | if (vm_map_lookup_entry(map, start, &entry)) { |
1223 | vm_map_clip_start(map, entry, start); |
1224 | } |
1225 | else |
1226 | entry = entry->vme_next; |
1227 | |
1228 | /* |
1229 | * Make a first pass to check for protection |
1230 | * violations. |
1231 | */ |
1232 | |
1233 | current = entry; |
1234 | while ((current != vm_map_to_entry(map)) && |
1235 | (current->vme_start < end)) { |
1236 | |
1237 | if (current->is_sub_map) { |
1238 | vm_map_unlock(map); |
1239 | return(KERN_INVALID_ARGUMENT); |
1240 | } |
1241 | if ((new_prot & (VM_PROT_NOTIFY | current->max_protection)) |
1242 | != new_prot) { |
1243 | vm_map_unlock(map); |
1244 | return(KERN_PROTECTION_FAILURE); |
1245 | } |
1246 | |
1247 | current = current->vme_next; |
1248 | } |
1249 | |
1250 | /* |
1251 | * Go back and fix up protections. |
1252 | * [Note that clipping is not necessary the second time.] |
1253 | */ |
1254 | |
1255 | current = entry; |
1256 | |
1257 | while ((current != vm_map_to_entry(map)) && |
1258 | (current->vme_start < end)) { |
1259 | |
1260 | vm_prot_t old_prot; |
1261 | |
1262 | vm_map_clip_end(map, current, end); |
1263 | |
1264 | old_prot = current->protection; |
1265 | if (set_max) |
1266 | current->protection = |
1267 | (current->max_protection = new_prot) & |
1268 | old_prot; |
1269 | else |
1270 | current->protection = new_prot; |
1271 | |
1272 | /* |
1273 | * Update physical map if necessary. |
1274 | */ |
1275 | |
1276 | if (current->protection != old_prot) { |
1277 | pmap_protect(map->pmap, current->vme_start, |
1278 | current->vme_end, |
1279 | current->protection); |
1280 | } |
1281 | current = current->vme_next; |
1282 | } |
1283 | |
1284 | vm_map_unlock(map); |
1285 | return(KERN_SUCCESS); |
1286 | } |
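/*
 * [Editor's sketch -- not part of vm_map.c.]  The first pass of
 * vm_map_protect above rejects a request whose bits are not covered by
 * the entry's max_protection (plus the notify bit).  The core test is
 * plain bit masking, shown here with hypothetical flag values:
 */
#define TOY_PROT_READ	0x1
#define TOY_PROT_WRITE	0x2
#define TOY_PROT_EXEC	0x4

/* Returns 1 when new_prot asks only for rights that max_prot allows. */
static int toy_prot_allowed(unsigned new_prot, unsigned max_prot)
{
	return (new_prot & max_prot) == new_prot;
}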
1287 | |
1288 | /* |
1289 | * vm_map_inherit: |
1290 | * |
1291 | * Sets the inheritance of the specified address |
1292 | * range in the target map. Inheritance |
1293 | * affects how the map will be shared with |
1294 | * child maps at the time of vm_map_fork. |
1295 | */ |
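/*
 *	Illustrative sketch (editor's addition, not part of the original
 *	source): marking a range so that a child created by vm_map_fork
 *	shares it with the parent instead of receiving a copy.  The other
 *	inheritance values are VM_INHERIT_COPY and VM_INHERIT_NONE.
 *
 *		kern_return_t kr;
 *
 *		kr = vm_map_inherit(task_map, start, end, VM_INHERIT_SHARE);
 */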
1296 | kern_return_t vm_map_inherit( |
1297 | vm_map_t map, |
1298 | vm_offset_t start, |
1299 | vm_offset_t end, |
1300 | vm_inherit_t new_inheritance) |
1301 | { |
1302 | vm_map_entry_t entry; |
1303 | vm_map_entry_t temp_entry; |
1304 | |
1305 | vm_map_lock(map)({ lock_write(&(map)->lock); (map)->timestamp++; }); |
1306 | |
1307 | VM_MAP_RANGE_CHECK(map, start, end){ if (start < ((map)->hdr.links.start)) start = ((map)-> hdr.links.start); if (end > ((map)->hdr.links.end)) end = ((map)->hdr.links.end); if (start > end) start = end ; }; |
1308 | |
1309 | if (vm_map_lookup_entry(map, start, &temp_entry)) { |
1310 | entry = temp_entry; |
1311 | vm_map_clip_start(map, entry, start)({ if ((start) > (entry)->links.start) _vm_map_clip_start (&(map)->hdr,(entry),(start)); }); |
1312 | } |
1313 | else |
1314 | entry = temp_entry->vme_nextlinks.next; |
1315 | |
1316 | while ((entry != vm_map_to_entry(map)((struct vm_map_entry *) &(map)->hdr.links)) && (entry->vme_startlinks.start < end)) { |
1317 | vm_map_clip_end(map, entry, end)({ if ((end) < (entry)->links.end) _vm_map_clip_end(& (map)->hdr,(entry),(end)); }); |
1318 | |
1319 | entry->inheritance = new_inheritance; |
1320 | |
1321 | entry = entry->vme_nextlinks.next; |
1322 | } |
1323 | |
1324 | vm_map_unlock(map)lock_done(&(map)->lock); |
1325 | return(KERN_SUCCESS0); |
1326 | } |
1327 | |
1328 | /* |
1329 | * vm_map_pageable_common: |
1330 | * |
1331 | * Sets the pageability of the specified address |
1332 | * range in the target map. Regions specified |
1333 | * as not pageable require locked-down physical |
1334 | * memory and physical page maps. access_type indicates |
1335 | * types of accesses that must not generate page faults. |
1336 | * This is checked against the protection of the memory being locked down. |
1337 | * An access_type of VM_PROT_NONE makes the memory pageable. |
1338 | * |
1339 | * The map must not be locked, but a reference |
1340 | * must remain to the map throughout the call. |
1341 | * |
1342 | * Callers should use the macros in vm/vm_map.h (i.e. vm_map_pageable |
1343 | * or vm_map_pageable_user); don't call vm_map_pageable_common directly. |
1344 | */ |
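/*
 *	Illustrative sketch (editor's addition, not part of the original
 *	source): wiring a range through the vm_map_pageable wrapper named
 *	above, then unwiring it.  The access type lists the accesses that
 *	must never fault; VM_PROT_NONE reverses the wiring.
 *
 *		kern_return_t kr;
 *
 *		kr = vm_map_pageable(map, start, end,
 *				     VM_PROT_READ | VM_PROT_WRITE);
 *		...
 *		kr = vm_map_pageable(map, start, end, VM_PROT_NONE);
 */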
1345 | kern_return_t vm_map_pageable_common( |
1346 | vm_map_t map, |
1347 | vm_offset_t start, |
1348 | vm_offset_t end, |
1349 | vm_prot_t access_type, |
1350 | boolean_t user_wire) |
1351 | { |
1352 | vm_map_entry_t entry; |
1353 | vm_map_entry_t start_entry; |
1354 | |
1355 | vm_map_lock(map)({ lock_write(&(map)->lock); (map)->timestamp++; }); |
1356 | |
1357 | VM_MAP_RANGE_CHECK(map, start, end){ if (start < ((map)->hdr.links.start)) start = ((map)-> hdr.links.start); if (end > ((map)->hdr.links.end)) end = ((map)->hdr.links.end); if (start > end) start = end ; }; |
1358 | |
1359 | if (vm_map_lookup_entry(map, start, &start_entry)) { |
1360 | entry = start_entry; |
1361 | /* |
1362 | * vm_map_clip_start will be done later. |
1363 | */ |
1364 | } |
1365 | else { |
1366 | /* |
1367 | * Start address is not in map; this is fatal. |
1368 | */ |
1369 | vm_map_unlock(map)lock_done(&(map)->lock); |
1370 | return(KERN_FAILURE5); |
1371 | } |
1372 | |
1373 | /* |
1374 | * Actions are rather different for wiring and unwiring, |
1375 | * so we have two separate cases. |
1376 | */ |
1377 | |
1378 | if (access_type == VM_PROT_NONE((vm_prot_t) 0x00)) { |
1379 | |
1380 | vm_map_clip_start(map, entry, start)({ if ((start) > (entry)->links.start) _vm_map_clip_start (&(map)->hdr,(entry),(start)); }); |
1381 | |
1382 | /* |
1383 | * Unwiring. First ensure that the range to be |
1384 | * unwired is really wired down. |
1385 | */ |
1386 | while ((entry != vm_map_to_entry(map)((struct vm_map_entry *) &(map)->hdr.links)) && |
1387 | (entry->vme_startlinks.start < end)) { |
1388 | |
1389 | if ((entry->wired_count == 0) || |
1390 | ((entry->vme_endlinks.end < end) && |
1391 | ((entry->vme_nextlinks.next == vm_map_to_entry(map)((struct vm_map_entry *) &(map)->hdr.links)) || |
1392 | (entry->vme_nextlinks.next->vme_startlinks.start > entry->vme_endlinks.end))) || |
1393 | (user_wire && (entry->user_wired_count == 0))) { |
1394 | vm_map_unlock(map)lock_done(&(map)->lock); |
1395 | return(KERN_INVALID_ARGUMENT4); |
1396 | } |
1397 | entry = entry->vme_nextlinks.next; |
1398 | } |
1399 | |
1400 | /* |
1401 | * Now decrement the wiring count for each region. |
1402 | * If a region becomes completely unwired, |
1403 | * unwire its physical pages and mappings. |
1404 | */ |
1405 | entry = start_entry; |
1406 | while ((entry != vm_map_to_entry(map)((struct vm_map_entry *) &(map)->hdr.links)) && |
1407 | (entry->vme_startlinks.start < end)) { |
1408 | vm_map_clip_end(map, entry, end)({ if ((end) < (entry)->links.end) _vm_map_clip_end(& (map)->hdr,(entry),(end)); }); |
1409 | |
1410 | if (user_wire) { |
1411 | if (--(entry->user_wired_count) == 0) |
1412 | entry->wired_count--; |
1413 | } |
1414 | else { |
1415 | entry->wired_count--; |
1416 | } |
1417 | |
1418 | if (entry->wired_count == 0) |
1419 | vm_fault_unwire(map, entry); |
1420 | |
1421 | entry = entry->vme_nextlinks.next; |
1422 | } |
1423 | } |
1424 | |
1425 | else { |
1426 | /* |
1427 | * Wiring. We must do this in two passes: |
1428 | * |
1429 | * 1. Holding the write lock, we create any shadow |
1430 | * or zero-fill objects that need to be created. |
1431 | * Then we clip each map entry to the region to be |
1432 | * wired and increment its wiring count. We |
1433 | * create objects before clipping the map entries |
1434 | * to avoid object proliferation. |
1435 | * |
1436 | * 2. We downgrade to a read lock, and call |
1437 | * vm_fault_wire to fault in the pages for any |
1438 | * newly wired area (wired_count is 1). |
1439 | * |
1440 | * Downgrading to a read lock for vm_fault_wire avoids |
1441 | * a possible deadlock with another thread that may have |
1442 | * faulted on one of the pages to be wired (it would mark |
1443 | * the page busy, blocking us, then in turn block on the |
1444 | * map lock that we hold). Because of problems in the |
1445 | * recursive lock package, we cannot upgrade to a write |
1446 | * lock in vm_map_lookup. Thus, any actions that require |
1447 | * the write lock must be done beforehand. Because we |
1448 | * keep the read lock on the map, the copy-on-write |
1449 | * status of the entries we modify here cannot change. |
1450 | */ |
1451 | |
1452 | /* |
1453 | * Pass 1. |
1454 | */ |
1455 | while ((entry != vm_map_to_entry(map)((struct vm_map_entry *) &(map)->hdr.links)) && |
1456 | (entry->vme_startlinks.start < end)) { |
1457 | vm_map_clip_end(map, entry, end)({ if ((end) < (entry)->links.end) _vm_map_clip_end(& (map)->hdr,(entry),(end)); }); |
1458 | |
1459 | if (entry->wired_count == 0) { |
1460 | |
1461 | /* |
1462 | * Perform actions of vm_map_lookup that need |
1463 | * the write lock on the map: create a shadow |
1464 | * object for a copy-on-write region, or an |
1465 | * object for a zero-fill region. |
1466 | */ |
1467 | if (entry->needs_copy && |
1468 | ((entry->protection & VM_PROT_WRITE((vm_prot_t) 0x02)) != 0)) { |
1469 | |
1470 | vm_object_shadow(&entry->object.vm_object, |
1471 | &entry->offset, |
1472 | (vm_size_t)(entry->vme_endlinks.end |
1473 | - entry->vme_startlinks.start)); |
1474 | entry->needs_copy = FALSE((boolean_t) 0); |
1475 | } |
1476 | if (entry->object.vm_object == VM_OBJECT_NULL((vm_object_t) 0)) { |
1477 | entry->object.vm_object = |
1478 | vm_object_allocate( |
1479 | (vm_size_t)(entry->vme_endlinks.end |
1480 | - entry->vme_startlinks.start)); |
1481 | entry->offset = (vm_offset_t)0; |
1482 | } |
1483 | } |
1484 | vm_map_clip_start(map, entry, start)({ if ((start) > (entry)->links.start) _vm_map_clip_start (&(map)->hdr,(entry),(start)); }); |
1485 | vm_map_clip_end(map, entry, end)({ if ((end) < (entry)->links.end) _vm_map_clip_end(& (map)->hdr,(entry),(end)); }); |
1486 | |
1487 | if (user_wire) { |
1488 | if ((entry->user_wired_count)++ == 0) |
1489 | entry->wired_count++; |
1490 | } |
1491 | else { |
1492 | entry->wired_count++; |
1493 | } |
1494 | |
1495 | /* |
1496 | * Check for holes and protection mismatch. |
1497 | * Holes: Next entry should be contiguous unless |
1498 | * this is the end of the region. |
1499 | * Protection: Access requested must be allowed. |
1500 | */ |
1501 | if (((entry->vme_endlinks.end < end) && |
1502 | ((entry->vme_nextlinks.next == vm_map_to_entry(map)((struct vm_map_entry *) &(map)->hdr.links)) || |
1503 | (entry->vme_nextlinks.next->vme_startlinks.start > entry->vme_endlinks.end))) || |
1504 | ((entry->protection & access_type) != access_type)) { |
1505 | /* |
1506 | * Found a hole or protection problem. |
1507 | * Object creation actions |
1508 | * do not need to be undone, but the |
1509 | * wired counts need to be restored. |
1510 | */ |
1511 | while ((entry != vm_map_to_entry(map)((struct vm_map_entry *) &(map)->hdr.links)) && |
1512 | (entry->vme_endlinks.end > start)) { |
1513 | if (user_wire) { |
1514 | if (--(entry->user_wired_count) == 0) |
1515 | entry->wired_count--; |
1516 | } |
1517 | else { |
1518 | entry->wired_count--; |
1519 | } |
1520 | |
1521 | entry = entry->vme_prevlinks.prev; |
1522 | } |
1523 | |
1524 | vm_map_unlock(map)lock_done(&(map)->lock); |
1525 | return(KERN_FAILURE5); |
1526 | } |
1527 | entry = entry->vme_nextlinks.next; |
1528 | } |
1529 | |
1530 | /* |
1531 | * Pass 2. |
1532 | */ |
1533 | |
1534 | /* |
1535 | * HACK HACK HACK HACK |
1536 | * |
1537 | * If we are wiring in the kernel map or a submap of it, |
1538 | * unlock the map to avoid deadlocks. We trust that the |
1539 | * kernel threads are well-behaved, and therefore will |
1540 | * not do anything destructive to this region of the map |
1541 | * while we have it unlocked. We cannot trust user threads |
1542 | * to do the same. |
1543 | * |
1544 | * HACK HACK HACK HACK |
1545 | */ |
1546 | if (vm_map_pmap(map)((map)->pmap) == kernel_pmap) { |
1547 | vm_map_unlock(map)lock_done(&(map)->lock); /* trust me ... */ |
1548 | } |
1549 | else { |
1550 | vm_map_lock_set_recursive(map)lock_set_recursive(&(map)->lock); |
1551 | vm_map_lock_write_to_read(map)lock_write_to_read(&(map)->lock); |
1552 | } |
1553 | |
1554 | entry = start_entry; |
1555 | while (entry != vm_map_to_entry(map)((struct vm_map_entry *) &(map)->hdr.links) && |
1556 | entry->vme_startlinks.start < end) { |
1557 | /* |
1558 | * Wiring cases: |
1559 | * Kernel: wired == 1 && user_wired == 0 |
1560 | * User: wired == 1 && user_wired == 1 |
1561 | * |
1562 | * Don't need to wire if either is > 1. wired = 0 && |
1563 | * user_wired == 1 can't happen. |
1564 | */ |
1565 | |
1566 | /* |
1567 | * XXX This assumes that the faults always succeed. |
1568 | */ |
1569 | if ((entry->wired_count == 1) && |
1570 | (entry->user_wired_count <= 1)) { |
1571 | vm_fault_wire(map, entry); |
1572 | } |
1573 | entry = entry->vme_nextlinks.next; |
1574 | } |
1575 | |
1576 | if (vm_map_pmap(map)((map)->pmap) == kernel_pmap) { |
1577 | vm_map_lock(map)({ lock_write(&(map)->lock); (map)->timestamp++; }); |
1578 | } |
1579 | else { |
1580 | vm_map_lock_clear_recursive(map)lock_clear_recursive(&(map)->lock); |
1581 | } |
1582 | } |
1583 | |
1584 | vm_map_unlock(map)lock_done(&(map)->lock); |
1585 | |
1586 | return(KERN_SUCCESS0); |
1587 | } |
1588 | |
1589 | /* |
1590 | * vm_map_entry_delete: [ internal use only ] |
1591 | * |
1592 | * Deallocate the given entry from the target map. |
1593 | */ |
1594 | void vm_map_entry_delete( |
1595 | vm_map_t map, |
1596 | vm_map_entry_t entry) |
1597 | { |
1598 | vm_offset_t s, e; |
1599 | vm_object_t object; |
1600 | extern vm_object_t kernel_object; |
1601 | |
1602 | s = entry->vme_startlinks.start; |
1603 | e = entry->vme_endlinks.end; |
1604 | |
1605 | /* Check if projected buffer */ |
1606 | if (map != kernel_map && entry->projected_on != 0) { |
1607 | /* Check if the projected kernel entry is persistent; |
1608 | it may only be manipulated directly if it is. */ |
1609 | if (entry->projected_on->projected_on == 0) |
1610 | entry->wired_count = 0; /*Avoid unwire fault*/ |
1611 | else |
1612 | return; |
1613 | } |
1614 | |
1615 | /* |
1616 | * Get the object. Null objects cannot have pmap entries. |
1617 | */ |
1618 | |
1619 | if ((object = entry->object.vm_object) != VM_OBJECT_NULL((vm_object_t) 0)) { |
1620 | |
1621 | /* |
1622 | * Unwire before removing addresses from the pmap; |
1623 | * otherwise, unwiring will put the entries back in |
1624 | * the pmap. |
1625 | */ |
1626 | |
1627 | if (entry->wired_count != 0) { |
1628 | vm_fault_unwire(map, entry); |
1629 | entry->wired_count = 0; |
1630 | entry->user_wired_count = 0; |
1631 | } |
1632 | |
1633 | /* |
1634 | * If the object is shared, we must remove |
1635 | * *all* references to this data, since we can't |
1636 | * find all of the physical maps which are sharing |
1637 | * it. |
1638 | */ |
1639 | |
1640 | if (object == kernel_object) { |
1641 | vm_object_lock(object); |
1642 | vm_object_page_remove(object, entry->offset, |
1643 | entry->offset + (e - s)); |
1644 | vm_object_unlock(object)((void)(&(object)->Lock)); |
1645 | } else if (entry->is_shared) { |
1646 | vm_object_pmap_remove(object, |
1647 | entry->offset, |
1648 | entry->offset + (e - s)); |
1649 | } |
1650 | else { |
1651 | pmap_remove(map->pmap, s, e); |
1652 | } |
1653 | } |
1654 | |
1655 | /* |
1656 | * Deallocate the object only after removing all |
1657 | * pmap entries pointing to its pages. |
1658 | */ |
1659 | |
1660 | if (entry->is_sub_map) |
1661 | vm_map_deallocate(entry->object.sub_map); |
1662 | else |
1663 | vm_object_deallocate(entry->object.vm_object); |
1664 | |
1665 | vm_map_entry_unlink(map, entry)({ (&(map)->hdr)->nentries--; (entry)->links.next ->links.prev = (entry)->links.prev; (entry)->links.prev ->links.next = (entry)->links.next; rbtree_remove(& (&(map)->hdr)->tree, &(entry)->tree_node); } ); |
1666 | map->size -= e - s; |
1667 | |
1668 | vm_map_entry_dispose(map, entry)_vm_map_entry_dispose(&(map)->hdr, (entry)); |
1669 | } |
1670 | |
1671 | /* |
1672 | * vm_map_delete: [ internal use only ] |
1673 | * |
1674 | * Deallocates the given address range from the target |
1675 | * map. |
1676 | */ |
1677 | |
1678 | kern_return_t vm_map_delete( |
1679 | vm_map_t map, |
1680 | vm_offset_t start, |
1681 | vm_offset_t end) |
1682 | { |
1683 | vm_map_entry_t entry; |
1684 | vm_map_entry_t first_entry; |
1685 | |
1686 | /* |
1687 | * Find the start of the region, and clip it |
1688 | */ |
1689 | |
1690 | if (!vm_map_lookup_entry(map, start, &first_entry)) |
1691 | entry = first_entry->vme_nextlinks.next; |
1692 | else { |
1693 | entry = first_entry; |
1694 | vm_map_clip_start(map, entry, start)({ if ((start) > (entry)->links.start) _vm_map_clip_start (&(map)->hdr,(entry),(start)); }); |
1695 | |
1696 | /* |
1697 | * Fix the lookup hint now, rather than each |
1698 | * time through the loop. |
1699 | */ |
1700 | |
1701 | SAVE_HINT(map, entry->vme_prev); (map)->hint = (entry->links.prev); ((void)(&(map) ->hint_lock));; |
1702 | } |
1703 | |
1704 | /* |
1705 | * Save the free space hint |
1706 | */ |
1707 | |
1708 | if (map->first_free->vme_startlinks.start >= start) |
1709 | map->first_free = entry->vme_prevlinks.prev; |
1710 | |
1711 | /* |
1712 | * Step through all entries in this region |
1713 | */ |
1714 | |
1715 | while ((entry != vm_map_to_entry(map)((struct vm_map_entry *) &(map)->hdr.links)) && (entry->vme_startlinks.start < end)) { |
1716 | vm_map_entry_t next; |
1717 | |
1718 | vm_map_clip_end(map, entry, end)({ if ((end) < (entry)->links.end) _vm_map_clip_end(& (map)->hdr,(entry),(end)); }); |
1719 | |
1720 | /* |
1721 | * If the entry is in transition, we must wait |
1722 | * for it to exit that state. It could be clipped |
1723 | * while we leave the map unlocked. |
1724 | */ |
1725 | if(entry->in_transition) { |
1726 | /* |
1727 | * Say that we are waiting, and wait for entry. |
1728 | */ |
1729 | entry->needs_wakeup = TRUE((boolean_t) 1); |
1730 | vm_map_entry_wait(map, FALSE)({ assert_wait((event_t)&(map)->hdr, ((boolean_t) 0)); lock_done(&(map)->lock); thread_block((void (*)()) 0) ; }); |
1731 | vm_map_lock(map)({ lock_write(&(map)->lock); (map)->timestamp++; }); |
1732 | |
1733 | /* |
1734 | * The entry could have been clipped or it |
1735 | * may not exist anymore. Look it up again. |
1736 | */ |
1737 | if(!vm_map_lookup_entry(map, start, &entry)) { |
1738 | entry = entry->vme_nextlinks.next; |
1739 | } |
1740 | continue; |
1741 | } |
1742 | |
1743 | next = entry->vme_nextlinks.next; |
1744 | |
1745 | vm_map_entry_delete(map, entry); |
1746 | entry = next; |
1747 | } |
1748 | |
1749 | if (map->wait_for_space) |
1750 | thread_wakeup((event_t) map)thread_wakeup_prim(((event_t) map), ((boolean_t) 0), 0); |
1751 | |
1752 | return(KERN_SUCCESS0); |
1753 | } |
1754 | |
1755 | /* |
1756 | * vm_map_remove: |
1757 | * |
1758 | * Remove the given address range from the target map. |
1759 | * This is the exported form of vm_map_delete. |
1760 | */ |
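/*
 *	Illustrative sketch (editor's addition, not part of the original
 *	source): tearing down a mapping established earlier.  The range is
 *	clipped to the map's bounds by VM_MAP_RANGE_CHECK before deletion.
 *
 *		kern_return_t kr;
 *
 *		kr = vm_map_remove(task_map, addr, addr + size);
 */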
1761 | kern_return_t vm_map_remove( |
1762 | vm_map_t map, |
1763 | vm_offset_t start, |
1764 | vm_offset_t end) |
1765 | { |
1766 | kern_return_t result; |
1767 | |
1768 | vm_map_lock(map)({ lock_write(&(map)->lock); (map)->timestamp++; }); |
1769 | VM_MAP_RANGE_CHECK(map, start, end){ if (start < ((map)->hdr.links.start)) start = ((map)-> hdr.links.start); if (end > ((map)->hdr.links.end)) end = ((map)->hdr.links.end); if (start > end) start = end ; }; |
1770 | result = vm_map_delete(map, start, end); |
1771 | vm_map_unlock(map)lock_done(&(map)->lock); |
1772 | |
1773 | return(result); |
1774 | } |
1775 | |
1776 | |
1777 | /* |
1778 | * vm_map_copy_steal_pages: |
1779 | * |
1780 | * Steal all the pages from a vm_map_copy page_list by copying ones |
1781 | * that have not already been stolen. |
1782 | */ |
1783 | void |
1784 | vm_map_copy_steal_pages(vm_map_copy_t copy) |
1785 | { |
1786 | vm_page_t m, new_m; |
1787 | int i; |
1788 | vm_object_t object; |
1789 | |
1790 | for (i = 0; i < copy->cpy_npagesc_u.c_p.npages; i++) { |
1791 | |
1792 | /* |
1793 | * If the page is not tabled, then it's already stolen. |
1794 | */ |
1795 | m = copy->cpy_page_listc_u.c_p.page_list[i]; |
1796 | if (!m->tabled) |
1797 | continue; |
1798 | |
1799 | /* |
1800 | * Page was not stolen, get a new |
1801 | * one and do the copy now. |
1802 | */ |
1803 | while ((new_m = vm_page_grab(FALSE((boolean_t) 0))) == VM_PAGE_NULL((vm_page_t) 0)) { |
1804 | VM_PAGE_WAIT((void(*)()) 0)vm_page_wait((void(*)()) 0); |
1805 | } |
1806 | |
1807 | vm_page_copy(m, new_m); |
1808 | |
1809 | object = m->object; |
1810 | vm_object_lock(object); |
1811 | vm_page_lock_queues(); |
1812 | if (!m->active && !m->inactive) |
1813 | vm_page_activate(m); |
1814 | vm_page_unlock_queues()((void)(&vm_page_queue_lock)); |
1815 | PAGE_WAKEUP_DONE(m)({ (m)->busy = ((boolean_t) 0); if ((m)->wanted) { (m)-> wanted = ((boolean_t) 0); thread_wakeup_prim((((event_t) m)), ((boolean_t) 0), 0); } }); |
1816 | vm_object_paging_end(object)({ ({ if (!((object)->paging_in_progress != 0)) Assert("(object)->paging_in_progress != 0" , "../vm/vm_map.c", 1816); }); if (--(object)->paging_in_progress == 0) { ({ if ((object)->all_wanted & (1 << (2) )) thread_wakeup_prim(((event_t)(((vm_offset_t) object) + (2) )), ((boolean_t) 0), 0); (object)->all_wanted &= ~(1 << (2)); }); } }); |
1817 | vm_object_unlock(object)((void)(&(object)->Lock)); |
1818 | |
1819 | copy->cpy_page_listc_u.c_p.page_list[i] = new_m; |
1820 | } |
1821 | } |
1822 | |
1823 | /* |
1824 | * vm_map_copy_page_discard: |
1825 | * |
1826 | * Get rid of the pages in a page_list copy. If the pages are |
1827 | * stolen, they are freed. If the pages are not stolen, they |
1828 | * are unbusied, and associated state is cleaned up. |
1829 | */ |
1830 | void vm_map_copy_page_discard(vm_map_copy_t copy) |
1831 | { |
1832 | while (copy->cpy_npagesc_u.c_p.npages > 0) { |
1833 | vm_page_t m; |
1834 | |
1835 | if((m = copy->cpy_page_listc_u.c_p.page_list[--(copy->cpy_npagesc_u.c_p.npages)]) != |
1836 | VM_PAGE_NULL((vm_page_t) 0)) { |
1837 | |
1838 | /* |
1839 | * If it's not in the table, then it's |
1840 | * a stolen page that goes back |
1841 | * to the free list. Else it belongs |
1842 | * to some object, and we hold a |
1843 | * paging reference on that object. |
1844 | */ |
1845 | if (!m->tabled) { |
1846 | VM_PAGE_FREE(m)({ ; vm_page_free(m); ((void)(&vm_page_queue_lock)); }); |
1847 | } |
1848 | else { |
1849 | vm_object_t object; |
1850 | |
1851 | object = m->object; |
1852 | |
1853 | vm_object_lock(object); |
1854 | vm_page_lock_queues(); |
1855 | if (!m->active && !m->inactive) |
1856 | vm_page_activate(m); |
1857 | vm_page_unlock_queues()((void)(&vm_page_queue_lock)); |
1858 | |
1859 | PAGE_WAKEUP_DONE(m)({ (m)->busy = ((boolean_t) 0); if ((m)->wanted) { (m)-> wanted = ((boolean_t) 0); thread_wakeup_prim((((event_t) m)), ((boolean_t) 0), 0); } }); |
1860 | vm_object_paging_end(object)({ ({ if (!((object)->paging_in_progress != 0)) Assert("(object)->paging_in_progress != 0" , "../vm/vm_map.c", 1860); }); if (--(object)->paging_in_progress == 0) { ({ if ((object)->all_wanted & (1 << (2) )) thread_wakeup_prim(((event_t)(((vm_offset_t) object) + (2) )), ((boolean_t) 0), 0); (object)->all_wanted &= ~(1 << (2)); }); } }); |
1861 | vm_object_unlock(object)((void)(&(object)->Lock)); |
1862 | } |
1863 | } |
1864 | } |
1865 | } |
1866 | |
1867 | /* |
1868 | * Routine: vm_map_copy_discard |
1869 | * |
1870 | * Description: |
1871 | * Dispose of a map copy object (returned by |
1872 | * vm_map_copyin). |
1873 | */ |
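/*
 *	Illustrative sketch (editor's addition, not part of the original
 *	source): the usual ownership pattern.  A copy object returned by
 *	vm_map_copyin is consumed by a successful vm_map_copyout or
 *	vm_map_copy_overwrite; on every other path the holder must discard
 *	it, or the entries, object or pages it carries leak.
 *	"some_check_fails" is a hypothetical predicate.
 *
 *		kr = vm_map_copyin(src_map, src_addr, size, FALSE, &copy);
 *		if (kr != KERN_SUCCESS)
 *			return kr;
 *		if (some_check_fails) {
 *			vm_map_copy_discard(copy);
 *			return KERN_FAILURE;
 *		}
 */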
1874 | void |
1875 | vm_map_copy_discard(vm_map_copy_t copy) |
1876 | { |
1877 | free_next_copy: |
1878 | if (copy == VM_MAP_COPY_NULL((vm_map_copy_t) 0)) |
1879 | return; |
1880 | |
1881 | switch (copy->type) { |
1882 | case VM_MAP_COPY_ENTRY_LIST1: |
1883 | while (vm_map_copy_first_entry(copy)((copy)->c_u.hdr.links.next) != |
1884 | vm_map_copy_to_entry(copy)((struct vm_map_entry *) &(copy)->c_u.hdr.links)) { |
1885 | vm_map_entry_t entry = vm_map_copy_first_entry(copy)((copy)->c_u.hdr.links.next); |
1886 | |
1887 | vm_map_copy_entry_unlink(copy, entry)({ (&(copy)->c_u.hdr)->nentries--; (entry)->links .next->links.prev = (entry)->links.prev; (entry)->links .prev->links.next = (entry)->links.next; rbtree_remove( &(&(copy)->c_u.hdr)->tree, &(entry)->tree_node ); }); |
1888 | vm_object_deallocate(entry->object.vm_object); |
1889 | vm_map_copy_entry_dispose(copy, entry)_vm_map_entry_dispose(&(copy)->c_u.hdr, (entry)); |
1890 | } |
1891 | break; |
1892 | case VM_MAP_COPY_OBJECT2: |
1893 | vm_object_deallocate(copy->cpy_objectc_u.c_o.object); |
1894 | break; |
1895 | case VM_MAP_COPY_PAGE_LIST3: |
1896 | |
1897 | /* |
1898 | * To clean this up, we have to unbusy all the pages |
1899 | * and release the paging references in their objects. |
1900 | */ |
1901 | if (copy->cpy_npagesc_u.c_p.npages > 0) |
1902 | vm_map_copy_page_discard(copy); |
1903 | |
1904 | /* |
1905 | * If there's a continuation, abort it. The |
1906 | * abort routine releases any storage. |
1907 | */ |
1908 | if (vm_map_copy_has_cont(copy)(((copy)->c_u.c_p.cont) != (kern_return_t (*)()) 0)) { |
1909 | |
1910 | /* |
1911 | * Special case: recognize |
1912 | * vm_map_copy_discard_cont and optimize |
1913 | * here to avoid tail recursion. |
1914 | */ |
1915 | if (copy->cpy_contc_u.c_p.cont == vm_map_copy_discard_cont) { |
1916 | vm_map_copy_t new_copy; |
1917 | |
1918 | new_copy = (vm_map_copy_t) copy->cpy_cont_argsc_u.c_p.cont_args; |
1919 | kmem_cache_free(&vm_map_copy_cache, (vm_offset_t) copy); |
1920 | copy = new_copy; |
1921 | goto free_next_copy; |
1922 | } |
1923 | else { |
1924 | vm_map_copy_abort_cont(copy)({ vm_map_copy_page_discard(copy); (*((copy)->c_u.c_p.cont ))((copy)->c_u.c_p.cont_args, (vm_map_copy_t *) 0); (copy) ->c_u.c_p.cont = (kern_return_t (*)()) 0; (copy)->c_u.c_p .cont_args = (char *) 0; }); |
1925 | } |
1926 | } |
1927 | |
1928 | break; |
1929 | } |
1930 | kmem_cache_free(&vm_map_copy_cache, (vm_offset_t) copy); |
1931 | } |
1932 | |
1933 | /* |
1934 | * Routine: vm_map_copy_copy |
1935 | * |
1936 | * Description: |
1937 | * Move the information in a map copy object to |
1938 | * a new map copy object, leaving the old one |
1939 | * empty. |
1940 | * |
1941 | * This is used by kernel routines that need |
1942 | * to look at out-of-line data (in copyin form) |
1943 | * before deciding whether to return SUCCESS. |
1944 | * If the routine returns FAILURE, the original |
1945 | * copy object will be deallocated; therefore, |
1946 | * these routines must make a copy of the copy |
1947 | * object and leave the original empty so that |
1948 | * deallocation will not fail. |
1949 | */ |
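/*
 *	Illustrative sketch (editor's addition, not part of the original
 *	source): a kernel routine that inspects out-of-line data but may
 *	still fail.  It takes the data over with vm_map_copy_copy so that
 *	the caller's later discard of the (now empty) original frees
 *	nothing it still needs.  "use_the_data" is a hypothetical helper.
 *
 *		vm_map_copy_t mine = vm_map_copy_copy(copy);
 *
 *		kr = use_the_data(mine);
 *		if (kr != KERN_SUCCESS) {
 *			vm_map_copy_discard(mine);
 *			return kr;	(the caller discards "copy")
 *		}
 */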
1950 | vm_map_copy_t |
1951 | vm_map_copy_copy(vm_map_copy_t copy) |
1952 | { |
1953 | vm_map_copy_t new_copy; |
1954 | |
1955 | if (copy == VM_MAP_COPY_NULL((vm_map_copy_t) 0)) |
1956 | return VM_MAP_COPY_NULL((vm_map_copy_t) 0); |
1957 | |
1958 | /* |
1959 | * Allocate a new copy object, and copy the information |
1960 | * from the old one into it. |
1961 | */ |
1962 | |
1963 | new_copy = (vm_map_copy_t) kmem_cache_alloc(&vm_map_copy_cache); |
1964 | *new_copy = *copy; |
1965 | |
1966 | if (copy->type == VM_MAP_COPY_ENTRY_LIST1) { |
1967 | /* |
1968 | * The links in the entry chain must be |
1969 | * changed to point to the new copy object. |
1970 | */ |
1971 | vm_map_copy_first_entry(copy)((copy)->c_u.hdr.links.next)->vme_prevlinks.prev |
1972 | = vm_map_copy_to_entry(new_copy)((struct vm_map_entry *) &(new_copy)->c_u.hdr.links); |
1973 | vm_map_copy_last_entry(copy)((copy)->c_u.hdr.links.prev)->vme_nextlinks.next |
1974 | = vm_map_copy_to_entry(new_copy)((struct vm_map_entry *) &(new_copy)->c_u.hdr.links); |
1975 | } |
1976 | |
1977 | /* |
1978 | * Change the old copy object into one that contains |
1979 | * nothing to be deallocated. |
1980 | */ |
1981 | copy->type = VM_MAP_COPY_OBJECT2; |
1982 | copy->cpy_objectc_u.c_o.object = VM_OBJECT_NULL((vm_object_t) 0); |
1983 | |
1984 | /* |
1985 | * Return the new object. |
1986 | */ |
1987 | return new_copy; |
1988 | } |
1989 | |
1990 | /* |
1991 | * Routine: vm_map_copy_discard_cont |
1992 | * |
1993 | * Description: |
1994 | * A version of vm_map_copy_discard that can be called |
1995 | * as a continuation from a vm_map_copy page list. |
1996 | */ |
1997 | kern_return_t vm_map_copy_discard_cont( |
1998 | vm_map_copyin_args_t cont_args, |
1999 | vm_map_copy_t *copy_result) /* OUT */ |
2000 | { |
2001 | vm_map_copy_discard((vm_map_copy_t) cont_args); |
2002 | if (copy_result != (vm_map_copy_t *)0) |
2003 | *copy_result = VM_MAP_COPY_NULL((vm_map_copy_t) 0); |
2004 | return(KERN_SUCCESS0); |
2005 | } |
2006 | |
2007 | /* |
2008 | * Routine: vm_map_copy_overwrite |
2009 | * |
2010 | * Description: |
2011 | * Copy the memory described by the map copy |
2012 | * object (copy; returned by vm_map_copyin) onto |
2013 | * the specified destination region (dst_map, dst_addr). |
2014 | * The destination must be writeable. |
2015 | * |
2016 | * Unlike vm_map_copyout, this routine actually |
2017 | * writes over previously-mapped memory. If the |
2018 | * previous mapping was to a permanent (user-supplied) |
2019 | * memory object, it is preserved. |
2020 | * |
2021 | * The attributes (protection and inheritance) of the |
2022 | * destination region are preserved. |
2023 | * |
2024 | * If successful, consumes the copy object. |
2025 | * Otherwise, the caller is responsible for it. |
2026 | * |
2027 | * Implementation notes: |
2028 | * To overwrite temporary virtual memory, it is |
2029 | * sufficient to remove the previous mapping and insert |
2030 | * the new copy. This replacement is done either on |
2031 | * the whole region (if no permanent virtual memory |
2032 | * objects are embedded in the destination region) or |
2033 | * in individual map entries. |
2034 | * |
2035 | * To overwrite permanent virtual memory, it is |
2036 | * necessary to copy each page, as the external |
2037 | * memory management interface currently does not |
2038 | * provide any optimizations. |
2039 | * |
2040 | * Once a page of permanent memory has been overwritten, |
2041 | * it is impossible to interrupt this function; otherwise, |
2042 | * the call would be neither atomic nor location-independent. |
2043 | * The kernel-state portion of a user thread must be |
2044 | * interruptible. |
2045 | * |
2046 | * It may be expensive to forward all requests that might |
2047 | * overwrite permanent memory (vm_write, vm_copy) to |
2048 | * uninterruptible kernel threads. This routine may be |
2049 | * called by interruptible threads; however, success is |
2050 | * not guaranteed -- if the request cannot be performed |
2051 | * atomically and interruptibly, an error indication is |
2052 | * returned. |
2053 | */ |
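/*
 *	Illustrative sketch (editor's addition, not part of the original
 *	source): the vm_write-style sequence this routine serves.  The
 *	copy object is consumed only when the overwrite succeeds.
 *
 *		kr = vm_map_copyin(src_map, src_addr, size, FALSE, &copy);
 *		if (kr != KERN_SUCCESS)
 *			return kr;
 *		kr = vm_map_copy_overwrite(dst_map, dst_addr, copy, FALSE);
 *		if (kr != KERN_SUCCESS)
 *			vm_map_copy_discard(copy);
 *		return kr;
 */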
2054 | kern_return_t vm_map_copy_overwrite( |
2055 | vm_map_t dst_map, |
2056 | vm_offset_t dst_addr, |
2057 | vm_map_copy_t copy, |
2058 | boolean_t interruptible) |
2059 | { |
2060 | vm_size_t size; |
2061 | vm_offset_t start; |
2062 | vm_map_entry_t tmp_entry; |
2063 | vm_map_entry_t entry; |
2064 | |
2065 | boolean_t contains_permanent_objects = FALSE((boolean_t) 0); |
2066 | |
2067 | interruptible = FALSE((boolean_t) 0); /* XXX */ |
2068 | |
2069 | /* |
2070 | * Check for null copy object. |
2071 | */ |
2072 | |
2073 | if (copy == VM_MAP_COPY_NULL((vm_map_copy_t) 0)) |
2074 | return(KERN_SUCCESS0); |
2075 | |
2076 | /* |
2077 | * Only works for entry lists at the moment. Will |
2078 | * support page lists LATER. |
2079 | */ |
2080 | |
2081 | assert(copy->type == VM_MAP_COPY_ENTRY_LIST)({ if (!(copy->type == 1)) Assert("copy->type == VM_MAP_COPY_ENTRY_LIST" , "../vm/vm_map.c", 2081); }); |
2082 | |
2083 | /* |
2084 | * Currently this routine only handles page-aligned |
2085 | * regions. Eventually, it should handle misalignments |
2086 | * by actually copying pages. |
2087 | */ |
2088 | |
2089 | if (!page_aligned(copy->offset)((((vm_offset_t) (copy->offset)) & ((1 << 12)-1) ) == 0) || |
2090 | !page_aligned(copy->size)((((vm_offset_t) (copy->size)) & ((1 << 12)-1)) == 0) || |
2091 | !page_aligned(dst_addr)((((vm_offset_t) (dst_addr)) & ((1 << 12)-1)) == 0)) |
2092 | return(KERN_INVALID_ARGUMENT4); |
2093 | |
2094 | size = copy->size; |
2095 | |
2096 | if (size == 0) { |
2097 | vm_map_copy_discard(copy); |
2098 | return(KERN_SUCCESS0); |
2099 | } |
2100 | |
2101 | /* |
2102 | * Verify that the destination is all writeable |
2103 | * initially. |
2104 | */ |
2105 | start_pass_1: |
2106 | vm_map_lock(dst_map)({ lock_write(&(dst_map)->lock); (dst_map)->timestamp ++; }); |
2107 | if (!vm_map_lookup_entry(dst_map, dst_addr, &tmp_entry)) { |
2108 | vm_map_unlock(dst_map)lock_done(&(dst_map)->lock); |
2109 | return(KERN_INVALID_ADDRESS1); |
2110 | } |
2111 | vm_map_clip_start(dst_map, tmp_entry, dst_addr)({ if ((dst_addr) > (tmp_entry)->links.start) _vm_map_clip_start (&(dst_map)->hdr,(tmp_entry),(dst_addr)); }); |
2112 | for (entry = tmp_entry;;) { |
2113 | vm_size_t sub_size = (entry->vme_endlinks.end - entry->vme_startlinks.start); |
2114 | vm_map_entry_t next = entry->vme_nextlinks.next; |
2115 | |
2116 | if ( ! (entry->protection & VM_PROT_WRITE((vm_prot_t) 0x02))) { |
2117 | vm_map_unlock(dst_map)lock_done(&(dst_map)->lock); |
2118 | return(KERN_PROTECTION_FAILURE2); |
2119 | } |
2120 | |
2121 | /* |
2122 | * If the entry is in transition, we must wait |
2123 | * for it to exit that state. Anything could happen |
2124 | * when we unlock the map, so start over. |
2125 | */ |
2126 | if (entry->in_transition) { |
2127 | |
2128 | /* |
2129 | * Say that we are waiting, and wait for entry. |
2130 | */ |
2131 | entry->needs_wakeup = TRUE((boolean_t) 1); |
2132 | vm_map_entry_wait(dst_map, FALSE)({ assert_wait((event_t)&(dst_map)->hdr, ((boolean_t) 0 )); lock_done(&(dst_map)->lock); thread_block((void (* )()) 0); }); |
2133 | |
2134 | goto start_pass_1; |
2135 | } |
2136 | |
2137 | if (size <= sub_size) |
2138 | break; |
2139 | |
2140 | if ((next == vm_map_to_entry(dst_map)((struct vm_map_entry *) &(dst_map)->hdr.links)) || |
2141 | (next->vme_startlinks.start != entry->vme_endlinks.end)) { |
2142 | vm_map_unlock(dst_map)lock_done(&(dst_map)->lock); |
2143 | return(KERN_INVALID_ADDRESS1); |
2144 | } |
2145 | |
2146 | |
2147 | /* |
2148 | * Check for permanent objects in the destination. |
2149 | */ |
2150 | |
2151 | if ((entry->object.vm_object != VM_OBJECT_NULL((vm_object_t) 0)) && |
2152 | !entry->object.vm_object->temporary) |
2153 | contains_permanent_objects = TRUE((boolean_t) 1); |
2154 | |
2155 | size -= sub_size; |
2156 | entry = next; |
2157 | } |
2158 | |
2159 | /* |
2160 | * If there are permanent objects in the destination, then |
2161 | * the copy cannot be interrupted. |
2162 | */ |
2163 | |
2164 | if (interruptible && contains_permanent_objects) { |
2165 | vm_map_unlock(dst_map)lock_done(&(dst_map)->lock); |
2166 | return(KERN_FAILURE5); /* XXX */ |
2167 | } |
2168 | |
2169 | /* |
2170 | * XXXO If there are no permanent objects in the destination, |
2171 | * XXXO and the source and destination map entry caches match, |
2172 | * XXXO and the destination map entry is not shared, |
2173 | * XXXO then the map entries can be deleted and replaced |
2174 | * XXXO with those from the copy. The following code is the |
2175 | * XXXO basic idea of what to do, but there are lots of annoying |
2176 | * XXXO little details about getting protection and inheritance |
2177 | * XXXO right. Should add protection, inheritance, and sharing checks |
2178 | * XXXO to the above pass and make sure that no wiring is involved. |
2179 | */ |
2180 | /* |
2181 | * if (!contains_permanent_objects && |
2182 | * copy->cpy_hdr.entries_pageable == dst_map->hdr.entries_pageable) { |
2183 | * |
2184 | * * |
2185 | * * Run over copy and adjust entries. Steal code |
2186 | * * from vm_map_copyout() to do this. |
2187 | * * |
2188 | * |
2189 | * tmp_entry = tmp_entry->vme_prev; |
2190 | * vm_map_delete(dst_map, dst_addr, dst_addr + copy->size); |
2191 | * vm_map_copy_insert(dst_map, tmp_entry, copy); |
2192 | * |
2193 | * vm_map_unlock(dst_map); |
2194 | * vm_map_copy_discard(copy); |
2195 | * } |
2196 | */ |
2197 | /* |
2198 | * |
2199 | * Make a second pass, overwriting the data. |
2200 | * At the beginning of each loop iteration, |
2201 | * the next entry to be overwritten is "tmp_entry" |
2202 | * (initially, the value returned from the lookup above), |
2203 | * and the starting address expected in that entry |
2204 | * is "start". |
2205 | */ |
2206 | |
2207 | start = dst_addr; |
2208 | |
2209 | while (vm_map_copy_first_entry(copy)((copy)->c_u.hdr.links.next) != vm_map_copy_to_entry(copy)((struct vm_map_entry *) &(copy)->c_u.hdr.links)) { |
2210 | vm_map_entry_t copy_entry = vm_map_copy_first_entry(copy)((copy)->c_u.hdr.links.next); |
2211 | vm_size_t copy_size = (copy_entry->vme_endlinks.end - copy_entry->vme_startlinks.start); |
2212 | vm_object_t object; |
2213 | |
2214 | entry = tmp_entry; |
2215 | size = (entry->vme_endlinks.end - entry->vme_startlinks.start); |
2216 | /* |
2217 | * Make sure that no holes popped up in the |
2218 | * address map, and that the protection is |
2219 | * still valid, in case the map was unlocked |
2220 | * earlier. |
2221 | */ |
2222 | |
2223 | if (entry->vme_startlinks.start != start) { |
2224 | vm_map_unlock(dst_map)lock_done(&(dst_map)->lock); |
2225 | return(KERN_INVALID_ADDRESS1); |
2226 | } |
2227 | assert(entry != vm_map_to_entry(dst_map))({ if (!(entry != ((struct vm_map_entry *) &(dst_map)-> hdr.links))) Assert("entry != vm_map_to_entry(dst_map)", "../vm/vm_map.c" , 2227); }); |
2228 | |
2229 | /* |
2230 | * Check protection again |
2231 | */ |
2232 | |
2233 | if ( ! (entry->protection & VM_PROT_WRITE((vm_prot_t) 0x02))) { |
2234 | vm_map_unlock(dst_map)lock_done(&(dst_map)->lock); |
2235 | return(KERN_PROTECTION_FAILURE2); |
2236 | } |
2237 | |
2238 | /* |
2239 | * Adjust to source size first |
2240 | */ |
2241 | |
2242 | if (copy_size < size) { |
2243 | vm_map_clip_end(dst_map, entry, entry->vme_start + copy_size)({ if ((entry->links.start + copy_size) < (entry)->links .end) _vm_map_clip_end(&(dst_map)->hdr,(entry),(entry-> links.start + copy_size)); }); |
2244 | size = copy_size; |
2245 | } |
2246 | |
2247 | /* |
2248 | * Adjust to destination size |
2249 | */ |
2250 | |
2251 | if (size < copy_size) { |
2252 | vm_map_copy_clip_end(copy, copy_entry,({ if ((copy_entry->links.start + size) < (copy_entry)-> links.end) _vm_map_clip_end(&(copy)->c_u.hdr,(copy_entry ),(copy_entry->links.start + size)); }) |
2253 | copy_entry->vme_start + size)({ if ((copy_entry->links.start + size) < (copy_entry)-> links.end) _vm_map_clip_end(&(copy)->c_u.hdr,(copy_entry ),(copy_entry->links.start + size)); }); |
2254 | copy_size = size; |
2255 | } |
2256 | |
2257 | assert((entry->vme_end - entry->vme_start) == size)({ if (!((entry->links.end - entry->links.start) == size )) Assert("(entry->vme_end - entry->vme_start) == size" , "../vm/vm_map.c", 2257); }); |
2258 | assert((tmp_entry->vme_end - tmp_entry->vme_start) == size)({ if (!((tmp_entry->links.end - tmp_entry->links.start ) == size)) Assert("(tmp_entry->vme_end - tmp_entry->vme_start) == size" , "../vm/vm_map.c", 2258); }); |
2259 | assert((copy_entry->vme_end - copy_entry->vme_start) == size)({ if (!((copy_entry->links.end - copy_entry->links.start ) == size)) Assert("(copy_entry->vme_end - copy_entry->vme_start) == size" , "../vm/vm_map.c", 2259); }); |
2260 | |
2261 | /* |
2262 | * If the destination contains temporary unshared memory, |
2263 | * we can perform the copy by throwing it away and |
2264 | * installing the source data. |
2265 | */ |
2266 | |
2267 | object = entry->object.vm_object; |
2268 | if (!entry->is_shared && |
2269 | ((object == VM_OBJECT_NULL((vm_object_t) 0)) || object->temporary)) { |
2270 | vm_object_t old_object = entry->object.vm_object; |
2271 | vm_offset_t old_offset = entry->offset; |
2272 | |
2273 | entry->object = copy_entry->object; |
2274 | entry->offset = copy_entry->offset; |
2275 | entry->needs_copy = copy_entry->needs_copy; |
2276 | entry->wired_count = 0; |
2277 | entry->user_wired_count = 0; |
2278 | |
2279 | vm_map_copy_entry_unlink(copy, copy_entry)({ (&(copy)->c_u.hdr)->nentries--; (copy_entry)-> links.next->links.prev = (copy_entry)->links.prev; (copy_entry )->links.prev->links.next = (copy_entry)->links.next ; rbtree_remove(&(&(copy)->c_u.hdr)->tree, & (copy_entry)->tree_node); }); |
2280 | vm_map_copy_entry_dispose(copy, copy_entry)_vm_map_entry_dispose(&(copy)->c_u.hdr, (copy_entry)); |
2281 | |
2282 | vm_object_pmap_protect( |
2283 | old_object, |
2284 | old_offset, |
2285 | size, |
2286 | dst_map->pmap, |
2287 | tmp_entry->vme_startlinks.start, |
2288 | VM_PROT_NONE((vm_prot_t) 0x00)); |
2289 | |
2290 | vm_object_deallocate(old_object); |
2291 | |
2292 | /* |
2293 | * Set up for the next iteration. The map |
2294 | * has not been unlocked, so the next |
2295 | * address should be at the end of this |
2296 | * entry, and the next map entry should be |
2297 | * the one following it. |
2298 | */ |
2299 | |
2300 | start = tmp_entry->vme_endlinks.end; |
2301 | tmp_entry = tmp_entry->vme_nextlinks.next; |
2302 | } else { |
2303 | vm_map_version_t version; |
2304 | vm_object_t dst_object = entry->object.vm_object; |
2305 | vm_offset_t dst_offset = entry->offset; |
2306 | kern_return_t r; |
2307 | |
2308 | /* |
2309 | * Take an object reference, and record |
2310 | * the map version information so that the |
2311 | * map can be safely unlocked. |
2312 | */ |
2313 | |
2314 | vm_object_reference(dst_object); |
2315 | |
2316 | version.main_timestamp = dst_map->timestamp; |
2317 | |
2318 | vm_map_unlock(dst_map)lock_done(&(dst_map)->lock); |
2319 | |
2320 | /* |
2321 | * Copy as much as possible in one pass |
2322 | */ |
2323 | |
2324 | copy_size = size; |
2325 | r = vm_fault_copy( |
2326 | copy_entry->object.vm_object, |
2327 | copy_entry->offset, |
2328 | ©_size, |
2329 | dst_object, |
2330 | dst_offset, |
2331 | dst_map, |
2332 | &version, |
2333 | FALSE((boolean_t) 0) /* XXX interruptible */ ); |
2334 | |
2335 | /* |
2336 | * Release the object reference |
2337 | */ |
2338 | |
2339 | vm_object_deallocate(dst_object); |
2340 | |
2341 | /* |
2342 | * If a hard error occurred, return it now |
2343 | */ |
2344 | |
2345 | if (r != KERN_SUCCESS0) |
2346 | return(r); |
2347 | |
2348 | if (copy_size != 0) { |
2349 | /* |
2350 | * Dispose of the copied region |
2351 | */ |
2352 | |
2353 | vm_map_copy_clip_end(copy, copy_entry,({ if ((copy_entry->links.start + copy_size) < (copy_entry )->links.end) _vm_map_clip_end(&(copy)->c_u.hdr,(copy_entry ),(copy_entry->links.start + copy_size)); }) |
2354 | copy_entry->vme_start + copy_size)({ if ((copy_entry->links.start + copy_size) < (copy_entry )->links.end) _vm_map_clip_end(&(copy)->c_u.hdr,(copy_entry ),(copy_entry->links.start + copy_size)); }); |
2355 | vm_map_copy_entry_unlink(copy, copy_entry)({ (&(copy)->c_u.hdr)->nentries--; (copy_entry)-> links.next->links.prev = (copy_entry)->links.prev; (copy_entry )->links.prev->links.next = (copy_entry)->links.next ; rbtree_remove(&(&(copy)->c_u.hdr)->tree, & (copy_entry)->tree_node); }); |
2356 | vm_object_deallocate(copy_entry->object.vm_object); |
2357 | vm_map_copy_entry_dispose(copy, copy_entry)_vm_map_entry_dispose(&(copy)->c_u.hdr, (copy_entry)); |
2358 | } |
2359 | |
2360 | /* |
2361 | * Pick up in the destination map where we left off. |
2362 | * |
2363 | * Use the version information to avoid a lookup |
2364 | * in the normal case. |
2365 | */ |
2366 | |
2367 | start += copy_size; |
2368 | vm_map_lock(dst_map)({ lock_write(&(dst_map)->lock); (dst_map)->timestamp ++; }); |
2369 | if ((version.main_timestamp + 1) == dst_map->timestamp) { |
2370 | /* We can safely use saved tmp_entry value */ |
2371 | |
2372 | vm_map_clip_end(dst_map, tmp_entry, start)({ if ((start) < (tmp_entry)->links.end) _vm_map_clip_end (&(dst_map)->hdr,(tmp_entry),(start)); }); |
2373 | tmp_entry = tmp_entry->vme_nextlinks.next; |
2374 | } else { |
2375 | /* Must do lookup of tmp_entry */ |
2376 | |
2377 | if (!vm_map_lookup_entry(dst_map, start, &tmp_entry)) { |
2378 | vm_map_unlock(dst_map)lock_done(&(dst_map)->lock); |
2379 | return(KERN_INVALID_ADDRESS1); |
2380 | } |
2381 | vm_map_clip_start(dst_map, tmp_entry, start)({ if ((start) > (tmp_entry)->links.start) _vm_map_clip_start (&(dst_map)->hdr,(tmp_entry),(start)); }); |
2382 | } |
2383 | } |
2384 | |
2385 | } |
2386 | vm_map_unlock(dst_map)lock_done(&(dst_map)->lock); |
2387 | |
2388 | /* |
2389 | * Throw away the vm_map_copy object |
2390 | */ |
2391 | vm_map_copy_discard(copy); |
2392 | |
2393 | return(KERN_SUCCESS0); |
2394 | } |
2395 | |
2396 | /* |
2397 | * Macro: vm_map_copy_insert |
2398 | * |
2399 | * Description: |
2400 | * Link a copy chain ("copy") into a map at the |
2401 | * specified location (after "where"). |
2402 | * Side effects: |
2403 | * The copy chain is destroyed. |
2404 | * Warning: |
2405 | * The arguments are evaluated multiple times. |
2406 | */ |
2407 | #define vm_map_copy_insert(map, where, copy)({ struct rbtree_node *node, *tmp; for (node = rbtree_postwalk_deepest (&(copy)->c_u.hdr.tree), tmp = rbtree_postwalk_unlink( node); node != ((void *) 0); node = tmp, tmp = rbtree_postwalk_unlink (node)) ({ struct rbtree_node *___cur, *___prev; int ___diff, ___index; ___prev = ((void *) 0); ___index = -1; ___cur = (& (map)->hdr.tree)->root; while (___cur != ((void *) 0)) { ___diff = vm_map_entry_cmp_insert(node, ___cur); ({ if (!(___diff != 0)) Assert("___diff != 0", "../vm/vm_map.c", 2407); }); ___prev = ___cur; ___index = rbtree_d2i(___diff); ___cur = ___cur-> children[___index]; } rbtree_insert_rebalance(&(map)-> hdr.tree, ___prev, ___index, node); }); (((where)->links.next )->links.prev = ((copy)->c_u.hdr.links.prev)) ->links .next = ((where)->links.next); ((where)->links.next = ( (copy)->c_u.hdr.links.next)) ->links.prev = (where); (map )->hdr.nentries += (copy)->c_u.hdr.nentries; kmem_cache_free (&vm_map_copy_cache, (vm_offset_t) copy); }) \ |
2408 | MACRO_BEGIN({ \ |
2409 | struct rbtree_node *node, *tmp; \ |
2410 | rbtree_for_each_remove(&(copy)->cpy_hdr.tree, node, tmp)for (node = rbtree_postwalk_deepest(&(copy)->c_u.hdr.tree ), tmp = rbtree_postwalk_unlink(node); node != ((void *) 0); node = tmp, tmp = rbtree_postwalk_unlink(node)) \ |
2411 | rbtree_insert(&(map)->hdr.tree, node, \({ struct rbtree_node *___cur, *___prev; int ___diff, ___index ; ___prev = ((void *) 0); ___index = -1; ___cur = (&(map) ->hdr.tree)->root; while (___cur != ((void *) 0)) { ___diff = vm_map_entry_cmp_insert(node, ___cur); ({ if (!(___diff != 0)) Assert("___diff != 0", "../vm/vm_map.c", 2412); }); ___prev = ___cur; ___index = rbtree_d2i(___diff); ___cur = ___cur-> children[___index]; } rbtree_insert_rebalance(&(map)-> hdr.tree, ___prev, ___index, node); }) |
2412 | vm_map_entry_cmp_insert)({ struct rbtree_node *___cur, *___prev; int ___diff, ___index ; ___prev = ((void *) 0); ___index = -1; ___cur = (&(map) ->hdr.tree)->root; while (___cur != ((void *) 0)) { ___diff = vm_map_entry_cmp_insert(node, ___cur); ({ if (!(___diff != 0)) Assert("___diff != 0", "../vm/vm_map.c", 2412); }); ___prev = ___cur; ___index = rbtree_d2i(___diff); ___cur = ___cur-> children[___index]; } rbtree_insert_rebalance(&(map)-> hdr.tree, ___prev, ___index, node); }); \ |
2413 | (((where)->vme_nextlinks.next)->vme_prevlinks.prev = vm_map_copy_last_entry(copy)((copy)->c_u.hdr.links.prev)) \ |
2414 | ->vme_nextlinks.next = ((where)->vme_nextlinks.next); \ |
2415 | ((where)->vme_nextlinks.next = vm_map_copy_first_entry(copy)((copy)->c_u.hdr.links.next)) \ |
2416 | ->vme_prevlinks.prev = (where); \ |
2417 | (map)->hdr.nentries += (copy)->cpy_hdrc_u.hdr.nentries; \ |
2418 | kmem_cache_free(&vm_map_copy_cache, (vm_offset_t) copy); \ |
2419 | MACRO_END}) |
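/*
 *	Illustrative note (editor's addition, not part of the original
 *	source): vm_map_copyout below invokes this macro once the
 *	insertion point "last" is known and the addresses are adjusted:
 *
 *		vm_map_copy_insert(dst_map, last, copy);
 *
 *	Since the arguments are evaluated several times, they should be
 *	simple expressions without side effects.
 */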
2420 | |
2421 | /* |
2422 | * Routine: vm_map_copyout |
2423 | * |
2424 | * Description: |
2425 | * Copy out a copy chain ("copy") into newly-allocated |
2426 | * space in the destination map. |
2427 | * |
2428 | * If successful, consumes the copy object. |
2429 | * Otherwise, the caller is responsible for it. |
2430 | */ |
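/*
 *	Illustrative sketch (editor's addition, not part of the original
 *	source): installing received out-of-line data in a task's map.
 *	The kernel chooses the address; the data's offset within its
 *	first page is preserved in *dst_addr.  On failure the copy object
 *	remains the caller's to discard.
 *
 *		vm_offset_t addr;
 *
 *		kr = vm_map_copyout(task_map, &addr, copy);
 *		if (kr != KERN_SUCCESS) {
 *			vm_map_copy_discard(copy);
 *			return kr;
 *		}
 */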
2431 | kern_return_t vm_map_copyout( |
2432 | vm_map_t dst_map, |
2433 | vm_offset_t *dst_addr, /* OUT */ |
2434 | vm_map_copy_t copy) |
2435 | { |
2436 | vm_size_t size; |
2437 | vm_size_t adjustment; |
2438 | vm_offset_t start; |
2439 | vm_offset_t vm_copy_start; |
2440 | vm_map_entry_t last; |
2441 | vm_map_entry_t entry; |
2442 | |
2443 | /* |
2444 | * Check for null copy object. |
2445 | */ |
2446 | |
2447 | if (copy == VM_MAP_COPY_NULL((vm_map_copy_t) 0)) { |
2448 | *dst_addr = 0; |
2449 | return(KERN_SUCCESS0); |
2450 | } |
2451 | |
2452 | /* |
2453 | * Check for special copy object, created |
2454 | * by vm_map_copyin_object. |
2455 | */ |
2456 | |
2457 | if (copy->type == VM_MAP_COPY_OBJECT2) { |
2458 | vm_object_t object = copy->cpy_objectc_u.c_o.object; |
2459 | vm_size_t offset = copy->offset; |
2460 | vm_size_t tmp_size = copy->size; |
2461 | kern_return_t kr; |
2462 | |
2463 | *dst_addr = 0; |
2464 | kr = vm_map_enter(dst_map, dst_addr, tmp_size, |
2465 | (vm_offset_t) 0, TRUE((boolean_t) 1), |
2466 | object, offset, FALSE((boolean_t) 0), |
2467 | VM_PROT_DEFAULT(((vm_prot_t) 0x01)|((vm_prot_t) 0x02)), VM_PROT_ALL(((vm_prot_t) 0x01)|((vm_prot_t) 0x02)|((vm_prot_t) 0x04)), |
2468 | VM_INHERIT_DEFAULT((vm_inherit_t) 1)); |
2469 | if (kr != KERN_SUCCESS0) |
2470 | return(kr); |
2471 | kmem_cache_free(&vm_map_copy_cache, (vm_offset_t) copy); |
2472 | return(KERN_SUCCESS0); |
2473 | } |
2474 | |
2475 | if (copy->type == VM_MAP_COPY_PAGE_LIST3) |
2476 | return(vm_map_copyout_page_list(dst_map, dst_addr, copy)); |
2477 | |
2478 | /* |
2479 | * Find space for the data |
2480 | */ |
2481 | |
2482 | vm_copy_start = trunc_page(copy->offset)((vm_offset_t)(((vm_offset_t)(copy->offset)) & ~((1 << 12)-1))); |
2483 | size = round_page(copy->offset + copy->size)((vm_offset_t)((((vm_offset_t)(copy->offset + copy->size )) + ((1 << 12)-1)) & ~((1 << 12)-1))) - vm_copy_start; |
2484 | |
2485 | StartAgain: ; |
2486 | |
2487 | vm_map_lock(dst_map)({ lock_write(&(dst_map)->lock); (dst_map)->timestamp ++; }); |
2488 | start = ((last = dst_map->first_free) == vm_map_to_entry(dst_map)((struct vm_map_entry *) &(dst_map)->hdr.links)) ? |
2489 | vm_map_min(dst_map)((dst_map)->hdr.links.start) : last->vme_endlinks.end; |
2490 | |
2491 | while (TRUE((boolean_t) 1)) { |
2492 | vm_map_entry_t next = last->vme_nextlinks.next; |
2493 | vm_offset_t end = start + size; |
2494 | |
2495 | if ((end > dst_map->max_offsethdr.links.end) || (end < start)) { |
2496 | if (dst_map->wait_for_space) { |
2497 | if (size <= (dst_map->max_offsethdr.links.end - dst_map->min_offsethdr.links.start)) { |
2498 | assert_wait((event_t) dst_map, TRUE((boolean_t) 1)); |
2499 | vm_map_unlock(dst_map)lock_done(&(dst_map)->lock); |
2500 | thread_block((void (*)()) 0); |
2501 | goto StartAgain; |
2502 | } |
2503 | } |
2504 | vm_map_unlock(dst_map)lock_done(&(dst_map)->lock); |
2505 | printf_once("no more room for vm_map_copyout in %p\n", dst_map)({ static int __once = 0; if (!__once) { db_printf("no more room for vm_map_copyout in %p\n" , dst_map); __once = 1; } }); |
2506 | return(KERN_NO_SPACE3); |
2507 | } |
2508 | |
2509 | if ((next == vm_map_to_entry(dst_map)((struct vm_map_entry *) &(dst_map)->hdr.links)) || |
2510 | (next->vme_startlinks.start >= end)) |
2511 | break; |
2512 | |
2513 | last = next; |
2514 | start = last->vme_endlinks.end; |
2515 | } |
2516 | |
2517 | /* |
2518 | * Since we're going to just drop the map |
2519 | * entries from the copy into the destination |
2520 | * map, they must come from the same pool. |
2521 | */ |
2522 | |
2523 | if (copy->cpy_hdrc_u.hdr.entries_pageable != dst_map->hdr.entries_pageable) { |
2524 | /* |
2525 | * Mismatches occur when dealing with the default |
2526 | * pager. |
2527 | */ |
2528 | kmem_cache_t old_cache; |
2529 | vm_map_entry_t next, new; |
2530 | |
2531 | /* |
2532 | * Find the cache that the copies were allocated from |
2533 | */ |
2534 | old_cache = (copy->cpy_hdrc_u.hdr.entries_pageable) |
2535 | ? &vm_map_entry_cache |
2536 | : &vm_map_kentry_cache; |
2537 | entry = vm_map_copy_first_entry(copy)((copy)->c_u.hdr.links.next); |
2538 | |
2539 | /* |
2540 | * Reinitialize the copy so that vm_map_copy_entry_link |
2541 | * will work. |
2542 | */ |
2543 | copy->cpy_hdrc_u.hdr.nentries = 0; |
2544 | copy->cpy_hdrc_u.hdr.entries_pageable = dst_map->hdr.entries_pageable; |
2545 | vm_map_copy_first_entry(copy)((copy)->c_u.hdr.links.next) = |
2546 | vm_map_copy_last_entry(copy)((copy)->c_u.hdr.links.prev) = |
2547 | vm_map_copy_to_entry(copy)((struct vm_map_entry *) &(copy)->c_u.hdr.links); |
2548 | |
2549 | /* |
2550 | * Copy each entry. |
2551 | */ |
2552 | while (entry != vm_map_copy_to_entry(copy)((struct vm_map_entry *) &(copy)->c_u.hdr.links)) { |
2553 | new = vm_map_copy_entry_create(copy)_vm_map_entry_create(&(copy)->c_u.hdr); |
2554 | vm_map_entry_copy_full(new, entry)(*(new) = *(entry)); |
2555 | vm_map_copy_entry_link(copy,({ (&(copy)->c_u.hdr)->nentries++; (new)->links. prev = (((copy)->c_u.hdr.links.prev)); (new)->links.next = (((copy)->c_u.hdr.links.prev))->links.next; (new)-> links.prev->links.next = (new)->links.next->links.prev = (new); ({ struct rbtree_node *___cur, *___prev; int ___diff , ___index; ___prev = ((void *) 0); ___index = -1; ___cur = ( &(&(copy)->c_u.hdr)->tree)->root; while (___cur != ((void *) 0)) { ___diff = vm_map_entry_cmp_insert(&(new )->tree_node, ___cur); ({ if (!(___diff != 0)) Assert("___diff != 0" , "../vm/vm_map.c", 2557); }); ___prev = ___cur; ___index = rbtree_d2i (___diff); ___cur = ___cur->children[___index]; } rbtree_insert_rebalance (&(&(copy)->c_u.hdr)->tree, ___prev, ___index, & (new)->tree_node); }); }) |
2556 | vm_map_copy_last_entry(copy),({ (&(copy)->c_u.hdr)->nentries++; (new)->links. prev = (((copy)->c_u.hdr.links.prev)); (new)->links.next = (((copy)->c_u.hdr.links.prev))->links.next; (new)-> links.prev->links.next = (new)->links.next->links.prev = (new); ({ struct rbtree_node *___cur, *___prev; int ___diff , ___index; ___prev = ((void *) 0); ___index = -1; ___cur = ( &(&(copy)->c_u.hdr)->tree)->root; while (___cur != ((void *) 0)) { ___diff = vm_map_entry_cmp_insert(&(new )->tree_node, ___cur); ({ if (!(___diff != 0)) Assert("___diff != 0" , "../vm/vm_map.c", 2557); }); ___prev = ___cur; ___index = rbtree_d2i (___diff); ___cur = ___cur->children[___index]; } rbtree_insert_rebalance (&(&(copy)->c_u.hdr)->tree, ___prev, ___index, & (new)->tree_node); }); }) |
2557 | new)({ (&(copy)->c_u.hdr)->nentries++; (new)->links. prev = (((copy)->c_u.hdr.links.prev)); (new)->links.next = (((copy)->c_u.hdr.links.prev))->links.next; (new)-> links.prev->links.next = (new)->links.next->links.prev = (new); ({ struct rbtree_node *___cur, *___prev; int ___diff , ___index; ___prev = ((void *) 0); ___index = -1; ___cur = ( &(&(copy)->c_u.hdr)->tree)->root; while (___cur != ((void *) 0)) { ___diff = vm_map_entry_cmp_insert(&(new )->tree_node, ___cur); ({ if (!(___diff != 0)) Assert("___diff != 0" , "../vm/vm_map.c", 2557); }); ___prev = ___cur; ___index = rbtree_d2i (___diff); ___cur = ___cur->children[___index]; } rbtree_insert_rebalance (&(&(copy)->c_u.hdr)->tree, ___prev, ___index, & (new)->tree_node); }); }); |
2558 | next = entry->vme_nextlinks.next; |
2559 | kmem_cache_free(old_cache, (vm_offset_t) entry); |
2560 | entry = next; |
2561 | } |
2562 | } |
2563 | |
2564 | /* |
2565 | * Adjust the addresses in the copy chain, and |
2566 | * reset the region attributes. |
2567 | */ |
2568 | |
2569 | adjustment = start - vm_copy_start; |
2570 | for (entry = vm_map_copy_first_entry(copy)((copy)->c_u.hdr.links.next); |
2571 | entry != vm_map_copy_to_entry(copy)((struct vm_map_entry *) &(copy)->c_u.hdr.links); |
2572 | entry = entry->vme_nextlinks.next) { |
2573 | entry->vme_startlinks.start += adjustment; |
2574 | entry->vme_endlinks.end += adjustment; |
2575 | |
2576 | entry->inheritance = VM_INHERIT_DEFAULT((vm_inherit_t) 1); |
2577 | entry->protection = VM_PROT_DEFAULT(((vm_prot_t) 0x01)|((vm_prot_t) 0x02)); |
2578 | entry->max_protection = VM_PROT_ALL(((vm_prot_t) 0x01)|((vm_prot_t) 0x02)|((vm_prot_t) 0x04)); |
2579 | entry->projected_on = 0; |
2580 | |
2581 | /* |
2582 | * If the entry is now wired, |
2583 | * map the pages into the destination map. |
2584 | */ |
2585 | if (entry->wired_count != 0) { |
2586 | vm_offset_t va; |
2587 | vm_offset_t offset; |
2588 | vm_object_t object; |
2589 | |
2590 | object = entry->object.vm_object; |
2591 | offset = entry->offset; |
2592 | va = entry->vme_startlinks.start; |
2593 | |
2594 | pmap_pageable(dst_map->pmap, |
2595 | entry->vme_startlinks.start, |
2596 | entry->vme_endlinks.end, |
2597 | TRUE((boolean_t) 1)); |
2598 | |
2599 | while (va < entry->vme_endlinks.end) { |
2600 | vm_page_t m; |
2601 | |
2602 | /* |
2603 | * Look up the page in the object. |
2604 | * Assert that the page will be found in the |
2605 | * top object: |
2606 | * either |
2607 | * the object was newly created by |
2608 | * vm_object_copy_slowly, and has |
2609 | * copies of all of the pages from |
2610 | * the source object |
2611 | * or |
2612 | * the object was moved from the old |
2613 | * map entry; because the old map |
2614 | * entry was wired, all of the pages |
2615 | * were in the top-level object. |
2616 | * (XXX not true if we wire pages for |
2617 | * reading) |
2618 | */ |
2619 | vm_object_lock(object); |
2620 | vm_object_paging_begin(object)((object)->paging_in_progress++); |
2621 | |
2622 | m = vm_page_lookup(object, offset); |
2623 | if (m == VM_PAGE_NULL((vm_page_t) 0) || m->wire_count == 0 || |
2624 | m->absent) |
2625 | panic("vm_map_copyout: wiring 0x%x", m); |
2626 | |
2627 | m->busy = TRUE((boolean_t) 1); |
2628 | vm_object_unlock(object)((void)(&(object)->Lock)); |
2629 | |
2630 | PMAP_ENTER(dst_map->pmap, va, m,({ pmap_enter( (dst_map->pmap), (va), (m)->phys_addr, ( entry->protection) & ~(m)->page_lock, (((boolean_t) 1)) ); }) |
2631 | entry->protection, TRUE)({ pmap_enter( (dst_map->pmap), (va), (m)->phys_addr, ( entry->protection) & ~(m)->page_lock, (((boolean_t) 1)) ); }); |
2632 | |
2633 | vm_object_lock(object); |
2634 | PAGE_WAKEUP_DONE(m)({ (m)->busy = ((boolean_t) 0); if ((m)->wanted) { (m)-> wanted = ((boolean_t) 0); thread_wakeup_prim((((event_t) m)), ((boolean_t) 0), 0); } }); |
2635 | /* the page is wired, so we don't have to activate */ |
2636 | vm_object_paging_end(object)({ ({ if (!((object)->paging_in_progress != 0)) Assert("(object)->paging_in_progress != 0" , "../vm/vm_map.c", 2636); }); if (--(object)->paging_in_progress == 0) { ({ if ((object)->all_wanted & (1 << (2) )) thread_wakeup_prim(((event_t)(((vm_offset_t) object) + (2) )), ((boolean_t) 0), 0); (object)->all_wanted &= ~(1 << (2)); }); } }); |
2637 | vm_object_unlock(object)((void)(&(object)->Lock)); |
2638 | |
2639 | offset += PAGE_SIZE(1 << 12); |
2640 | va += PAGE_SIZE(1 << 12); |
2641 | } |
2642 | } |
2643 | |
2644 | |
2645 | } |
2646 | |
2647 | /* |
2648 | * Correct the page alignment for the result |
2649 | */ |
2650 | |
2651 | *dst_addr = start + (copy->offset - vm_copy_start); |
2652 | |
2653 | /* |
2654 | * Update the hints and the map size |
2655 | */ |
2656 | |
2657 | if (dst_map->first_free == last) |
2658 | dst_map->first_free = vm_map_copy_last_entry(copy)((copy)->c_u.hdr.links.prev); |
2659 | SAVE_HINT(dst_map, vm_map_copy_last_entry(copy)); (dst_map)->hint = (((copy)->c_u.hdr.links.prev)); ((void )(&(dst_map)->hint_lock));; |
2660 | |
2661 | dst_map->size += size; |
2662 | |
2663 | /* |
2664 | * Link in the copy |
2665 | */ |
2666 | |
2667 | vm_map_copy_insert(dst_map, last, copy)({ struct rbtree_node *node, *tmp; for (node = rbtree_postwalk_deepest (&(copy)->c_u.hdr.tree), tmp = rbtree_postwalk_unlink( node); node != ((void *) 0); node = tmp, tmp = rbtree_postwalk_unlink (node)) ({ struct rbtree_node *___cur, *___prev; int ___diff, ___index; ___prev = ((void *) 0); ___index = -1; ___cur = (& (dst_map)->hdr.tree)->root; while (___cur != ((void *) 0 )) { ___diff = vm_map_entry_cmp_insert(node, ___cur); ({ if ( !(___diff != 0)) Assert("___diff != 0", "../vm/vm_map.c", 2667 ); }); ___prev = ___cur; ___index = rbtree_d2i(___diff); ___cur = ___cur->children[___index]; } rbtree_insert_rebalance(& (dst_map)->hdr.tree, ___prev, ___index, node); }); (((last )->links.next)->links.prev = ((copy)->c_u.hdr.links. prev)) ->links.next = ((last)->links.next); ((last)-> links.next = ((copy)->c_u.hdr.links.next)) ->links.prev = (last); (dst_map)->hdr.nentries += (copy)->c_u.hdr.nentries ; kmem_cache_free(&vm_map_copy_cache, (vm_offset_t) copy) ; }); |
2668 | |
2669 | vm_map_unlock(dst_map)lock_done(&(dst_map)->lock); |
2670 | |
2671 | /* |
2672 | * XXX If wiring_required, call vm_map_pageable |
2673 | */ |
2674 | |
2675 | return(KERN_SUCCESS0); |
2676 | } |
2677 | |
2678 | /* |
2679 | * |
2680 | * vm_map_copyout_page_list: |
2681 | * |
2682 | * Version of vm_map_copyout() for page list vm map copies. |
2683 | * |
2684 | */ |
2685 | kern_return_t vm_map_copyout_page_list( |
2686 | vm_map_t dst_map, |
2687 | vm_offset_t *dst_addr, /* OUT */ |
2688 | vm_map_copy_t copy) |
2689 | { |
2690 | vm_size_t size; |
2691 | vm_offset_t start; |
2692 | vm_offset_t end; |
2693 | vm_offset_t offset; |
2694 | vm_map_entry_t last; |
2695 | vm_object_t object; |
2696 | vm_page_t *page_list, m; |
2697 | vm_map_entry_t entry; |
2698 | vm_offset_t old_last_offset; |
2699 | boolean_t cont_invoked, needs_wakeup = FALSE((boolean_t) 0); |
2700 | kern_return_t result = KERN_SUCCESS0; |
2701 | vm_map_copy_t orig_copy; |
2702 | vm_offset_t dst_offset; |
2703 | boolean_t must_wire; |
2704 | |
2705 | /* |
2706 | * Make sure the pages are stolen, because we are |
2707 | * going to put them in a new object. Assume that |
2708 | * all pages are identical to the first in this regard. |
2709 | */ |
2710 | |
2711 | page_list = ©->cpy_page_listc_u.c_p.page_list[0]; |
2712 | if ((*page_list)->tabled) |
2713 | vm_map_copy_steal_pages(copy); |
2714 | |
2715 | /* |
2716 | * Find space for the data |
2717 | */ |
2718 | |
2719 | size = round_page(copy->offset + copy->size)((vm_offset_t)((((vm_offset_t)(copy->offset + copy->size )) + ((1 << 12)-1)) & ~((1 << 12)-1))) - |
2720 | trunc_page(copy->offset)((vm_offset_t)(((vm_offset_t)(copy->offset)) & ~((1 << 12)-1))); |
2721 | StartAgain: |
2722 | vm_map_lock(dst_map)({ lock_write(&(dst_map)->lock); (dst_map)->timestamp ++; }); |
2723 | must_wire = dst_map->wiring_required; |
2724 | |
2725 | last = dst_map->first_free; |
2726 | if (last == vm_map_to_entry(dst_map)((struct vm_map_entry *) &(dst_map)->hdr.links)) { |
2727 | start = vm_map_min(dst_map)((dst_map)->hdr.links.start); |
2728 | } else { |
2729 | start = last->vme_endlinks.end; |
2730 | } |
2731 | |
2732 | while (TRUE((boolean_t) 1)) { |
2733 | vm_map_entry_t next = last->vme_nextlinks.next; |
2734 | end = start + size; |
2735 | |
2736 | if ((end > dst_map->max_offsethdr.links.end) || (end < start)) { |
2737 | if (dst_map->wait_for_space) { |
2738 | if (size <= (dst_map->max_offsethdr.links.end - |
2739 | dst_map->min_offsethdr.links.start)) { |
2740 | assert_wait((event_t) dst_map, TRUE((boolean_t) 1)); |
2741 | vm_map_unlock(dst_map)lock_done(&(dst_map)->lock); |
2742 | thread_block((void (*)()) 0); |
2743 | goto StartAgain; |
2744 | } |
2745 | } |
2746 | vm_map_unlock(dst_map)lock_done(&(dst_map)->lock); |
2747 | printf_once("no more room for vm_map_copyout_page_list in %p\n", dst_map)({ static int __once = 0; if (!__once) { db_printf("no more room for vm_map_copyout_page_list in %p\n" , dst_map); __once = 1; } }); |
2748 | return(KERN_NO_SPACE3); |
2749 | } |
2750 | |
2751 | if ((next == vm_map_to_entry(dst_map)((struct vm_map_entry *) &(dst_map)->hdr.links)) || |
2752 | (next->vme_startlinks.start >= end)) { |
2753 | break; |
2754 | } |
2755 | |
2756 | last = next; |
2757 | start = last->vme_endlinks.end; |
2758 | } |
2759 | |
2760 | /* |
2761 | * See whether we can avoid creating a new entry (and object) by |
2762 | * extending one of our neighbors. [So far, we only attempt to |
2763 | * extend from below.] |
2764 | * |
2765 | * The code path below here is a bit twisted. If any of the |
2766 | * extension checks fails, we branch to create_object. If |
2767 | * it all works, we fall out the bottom and goto insert_pages. |
2768 | */ |
2769 | if (last == vm_map_to_entry(dst_map)((struct vm_map_entry *) &(dst_map)->hdr.links) || |
2770 | last->vme_endlinks.end != start || |
2771 | last->is_shared != FALSE((boolean_t) 0) || |
2772 | last->is_sub_map != FALSE((boolean_t) 0) || |
2773 | last->inheritance != VM_INHERIT_DEFAULT((vm_inherit_t) 1) || |
2774 | last->protection != VM_PROT_DEFAULT(((vm_prot_t) 0x01)|((vm_prot_t) 0x02)) || |
2775 | last->max_protection != VM_PROT_ALL(((vm_prot_t) 0x01)|((vm_prot_t) 0x02)|((vm_prot_t) 0x04)) || |
2776 | (must_wire ? (last->wired_count != 1 || |
2777 | last->user_wired_count != 1) : |
2778 | (last->wired_count != 0))) { |
2779 | goto create_object; |
2780 | } |
2781 | |
2782 | /* |
2783 | * If this entry needs an object, make one. |
2784 | */ |
2785 | if (last->object.vm_object == VM_OBJECT_NULL((vm_object_t) 0)) { |
2786 | object = vm_object_allocate( |
2787 | (vm_size_t)(last->vme_endlinks.end - last->vme_startlinks.start + size)); |
2788 | last->object.vm_object = object; |
2789 | last->offset = 0; |
2790 | vm_object_lock(object); |
2791 | } |
2792 | else { |
2793 | vm_offset_t prev_offset = last->offset; |
2794 | vm_size_t prev_size = start - last->vme_startlinks.start; |
2795 | vm_size_t new_size; |
2796 | |
2797 | /* |
2798 | * This is basically vm_object_coalesce. |
2799 | */ |
2800 | |
2801 | object = last->object.vm_object; |
2802 | vm_object_lock(object); |
2803 | |
2804 | /* |
2805 | * Try to collapse the object first |
2806 | */ |
2807 | vm_object_collapse(object); |
2808 | |
2809 | /* |
2810 | * Can't coalesce if pages not yet mapped |
2811 | * into last may still be in use: |
2812 | * . more than one reference |
2813 | * . paged out |
2814 | * . shadows another object |
2815 | * . has a copy elsewhere |
2816 | * . paging references (pages might be in page-list) |
2817 | */ |
2818 | |
2819 | if ((object->ref_count > 1) || |
2820 | object->pager_created || |
2821 | (object->shadow != VM_OBJECT_NULL((vm_object_t) 0)) || |
2822 | (object->copy != VM_OBJECT_NULL((vm_object_t) 0)) || |
2823 | (object->paging_in_progress != 0)) { |
2824 | vm_object_unlock(object)((void)(&(object)->Lock)); |
2825 | goto create_object; |
2826 | } |
2827 | |
2828 | /* |
2829 | * Extend the object if necessary. Don't have to call |
2830 | * vm_object_page_remove because the pages aren't mapped, |
2831 | * and vm_page_replace will free up any old ones it encounters. |
2832 | */ |
2833 | new_size = prev_offset + prev_size + size; |
2834 | if (new_size > object->size) |
2835 | object->size = new_size; |
2836 | } |
2837 | |
2838 | /* |
2839 | * Coalesced the two objects - can extend |
2840 | * the previous map entry to include the |
2841 | * new range. |
2842 | */ |
2843 | dst_map->size += size; |
2844 | last->vme_endlinks.end = end; |
2845 | |
2846 | SAVE_HINT(dst_map, last); (dst_map)->hint = (last); ((void)(&(dst_map)->hint_lock ));; |
2847 | |
2848 | goto insert_pages; |
2849 | |
2850 | create_object: |
2851 | |
2852 | /* |
2853 | * Create object |
2854 | */ |
2855 | object = vm_object_allocate(size); |
2856 | |
2857 | /* |
2858 | * Create entry |
2859 | */ |
2860 | |
2861 | entry = vm_map_entry_create(dst_map)_vm_map_entry_create(&(dst_map)->hdr); |
2862 | |
2863 | entry->object.vm_object = object; |
2864 | entry->offset = 0; |
2865 | |
2866 | entry->is_shared = FALSE((boolean_t) 0); |
2867 | entry->is_sub_map = FALSE((boolean_t) 0); |
2868 | entry->needs_copy = FALSE((boolean_t) 0); |
2869 | |
2870 | if (must_wire) { |
2871 | entry->wired_count = 1; |
2872 | entry->user_wired_count = 1; |
2873 | } else { |
2874 | entry->wired_count = 0; |
2875 | entry->user_wired_count = 0; |
2876 | } |
2877 | |
2878 | entry->in_transition = TRUE((boolean_t) 1); |
2879 | entry->needs_wakeup = FALSE((boolean_t) 0); |
2880 | |
2881 | entry->vme_startlinks.start = start; |
2882 | entry->vme_endlinks.end = start + size; |
2883 | |
2884 | entry->inheritance = VM_INHERIT_DEFAULT((vm_inherit_t) 1); |
2885 | entry->protection = VM_PROT_DEFAULT(((vm_prot_t) 0x01)|((vm_prot_t) 0x02)); |
2886 | entry->max_protection = VM_PROT_ALL(((vm_prot_t) 0x01)|((vm_prot_t) 0x02)|((vm_prot_t) 0x04)); |
2887 | entry->projected_on = 0; |
2888 | |
2889 | vm_object_lock(object); |
2890 | |
2891 | /* |
2892 | * Update the hints and the map size |
2893 | */ |
2894 | if (dst_map->first_free == last) { |
2895 | dst_map->first_free = entry; |
2896 | } |
2897 | SAVE_HINT(dst_map, entry); (dst_map)->hint = (entry); ((void)(&(dst_map)->hint_lock ));; |
2898 | dst_map->size += size; |
2899 | |
2900 | /* |
2901 | * Link in the entry |
2902 | */ |
2903 | vm_map_entry_link(dst_map, last, entry)({ (&(dst_map)->hdr)->nentries++; (entry)->links .prev = (last); (entry)->links.next = (last)->links.next ; (entry)->links.prev->links.next = (entry)->links.next ->links.prev = (entry); ({ struct rbtree_node *___cur, *___prev ; int ___diff, ___index; ___prev = ((void *) 0); ___index = - 1; ___cur = (&(&(dst_map)->hdr)->tree)->root ; while (___cur != ((void *) 0)) { ___diff = vm_map_entry_cmp_insert (&(entry)->tree_node, ___cur); ({ if (!(___diff != 0)) Assert("___diff != 0", "../vm/vm_map.c", 2903); }); ___prev = ___cur; ___index = rbtree_d2i(___diff); ___cur = ___cur-> children[___index]; } rbtree_insert_rebalance(&(&(dst_map )->hdr)->tree, ___prev, ___index, &(entry)->tree_node ); }); }); |
2904 | last = entry; |
2905 | |
2906 | /* |
2907 | * Transfer pages into new object. |
2908 | * Scan page list in vm_map_copy. |
2909 | */ |
2910 | insert_pages: |
2911 | dst_offset = copy->offset & PAGE_MASK((1 << 12)-1); |
2912 | cont_invoked = FALSE((boolean_t) 0); |
2913 | orig_copy = copy; |
2914 | last->in_transition = TRUE((boolean_t) 1); |
2915 | old_last_offset = last->offset |
2916 | + (start - last->vme_startlinks.start); |
2917 | |
2918 | vm_page_lock_queues(); |
2919 | |
2920 | for (offset = 0; offset < size; offset += PAGE_SIZE(1 << 12)) { |
2921 | m = *page_list; |
2922 | assert(m && !m->tabled)({ if (!(m && !m->tabled)) Assert("m && !m->tabled" , "../vm/vm_map.c", 2922); }); |
2923 | |
2924 | /* |
2925 | * Must clear busy bit in page before inserting it. |
2926 | * Ok to skip wakeup logic because nobody else |
2927 | * can possibly know about this page. |
2928 | * The page is dirty in its new object. |
2929 | */ |
2930 | |
2931 | assert(!m->wanted)({ if (!(!m->wanted)) Assert("!m->wanted", "../vm/vm_map.c" , 2931); }); |
2932 | |
2933 | m->busy = FALSE((boolean_t) 0); |
2934 | m->dirty = TRUE((boolean_t) 1); |
2935 | vm_page_replace(m, object, old_last_offset + offset); |
2936 | if (must_wire) { |
2937 | vm_page_wire(m); |
2938 | PMAP_ENTER(dst_map->pmap,({ pmap_enter( (dst_map->pmap), (last->links.start + m-> offset - last->offset), (m)->phys_addr, (last->protection ) & ~(m)->page_lock, (((boolean_t) 1)) ); }) |
2939 | last->vme_start + m->offset - last->offset,({ pmap_enter( (dst_map->pmap), (last->links.start + m-> offset - last->offset), (m)->phys_addr, (last->protection ) & ~(m)->page_lock, (((boolean_t) 1)) ); }) |
2940 | m, last->protection, TRUE)({ pmap_enter( (dst_map->pmap), (last->links.start + m-> offset - last->offset), (m)->phys_addr, (last->protection ) & ~(m)->page_lock, (((boolean_t) 1)) ); }); |
2941 | } else { |
2942 | vm_page_activate(m); |
2943 | } |
2944 | |
2945 | *page_list++ = VM_PAGE_NULL((vm_page_t) 0); |
2946 | if (--(copy->cpy_npagesc_u.c_p.npages) == 0 && |
2947 | vm_map_copy_has_cont(copy)(((copy)->c_u.c_p.cont) != (kern_return_t (*)()) 0)) { |
2948 | vm_map_copy_t new_copy; |
2949 | |
2950 | /* |
2951 | * Ok to unlock map because entry is |
2952 | * marked in_transition. |
2953 | */ |
2954 | cont_invoked = TRUE((boolean_t) 1); |
2955 | vm_page_unlock_queues()((void)(&vm_page_queue_lock)); |
2956 | vm_object_unlock(object)((void)(&(object)->Lock)); |
2957 | vm_map_unlock(dst_map)lock_done(&(dst_map)->lock); |
2958 | vm_map_copy_invoke_cont(copy, &new_copy, &result)({ vm_map_copy_page_discard(copy); *&result = (*((copy)-> c_u.c_p.cont))((copy)->c_u.c_p.cont_args, &new_copy); ( copy)->c_u.c_p.cont = (kern_return_t (*)()) 0; }); |
2959 | |
2960 | if (result == KERN_SUCCESS0) { |
2961 | |
2962 | /* |
2963 | * If we got back a copy with real pages, |
2964 | * steal them now. Either all of the |
2965 | * pages in the list are tabled or none |
2966 | * of them are; mixtures are not possible. |
2967 | * |
2968 | * Save original copy for consume on |
2969 | * success logic at end of routine. |
2970 | */ |
2971 | if (copy != orig_copy) |
2972 | vm_map_copy_discard(copy); |
2973 | |
2974 | if ((copy = new_copy) != VM_MAP_COPY_NULL((vm_map_copy_t) 0)) { |
2975 | page_list = ©->cpy_page_listc_u.c_p.page_list[0]; |
2976 | if ((*page_list)->tabled) |
2977 | vm_map_copy_steal_pages(copy); |
2978 | } |
2979 | } |
2980 | else { |
2981 | /* |
2982 | * Continuation failed. |
2983 | */ |
2984 | vm_map_lock(dst_map)({ lock_write(&(dst_map)->lock); (dst_map)->timestamp ++; }); |
2985 | goto error; |
2986 | } |
2987 | |
2988 | vm_map_lock(dst_map)({ lock_write(&(dst_map)->lock); (dst_map)->timestamp ++; }); |
2989 | vm_object_lock(object); |
2990 | vm_page_lock_queues(); |
2991 | } |
2992 | } |
2993 | |
2994 | vm_page_unlock_queues()((void)(&vm_page_queue_lock)); |
2995 | vm_object_unlock(object)((void)(&(object)->Lock)); |
2996 | |
2997 | *dst_addr = start + dst_offset; |
2998 | |
2999 | /* |
3000 | * Clear the in transition bits. This is easy if we |
3001 | * didn't have a continuation. |
3002 | */ |
3003 | error: |
3004 | if (!cont_invoked) { |
3005 | /* |
3006 | * We didn't unlock the map, so nobody could |
3007 | * be waiting. |
3008 | */ |
3009 | last->in_transition = FALSE((boolean_t) 0); |
3010 | assert(!last->needs_wakeup)({ if (!(!last->needs_wakeup)) Assert("!last->needs_wakeup" , "../vm/vm_map.c", 3010); }); |
3011 | needs_wakeup = FALSE((boolean_t) 0); |
3012 | } |
3013 | else { |
3014 | if (!vm_map_lookup_entry(dst_map, start, &entry)) |
3015 | panic("vm_map_copyout_page_list: missing entry"); |
3016 | |
3017 | /* |
3018 | * Clear transition bit for all constituent entries that |
3019 | * were in the original entry. Also check for waiters. |
3020 | */ |
3021 | while((entry != vm_map_to_entry(dst_map)((struct vm_map_entry *) &(dst_map)->hdr.links)) && |
3022 | (entry->vme_startlinks.start < end)) { |
3023 | assert(entry->in_transition)({ if (!(entry->in_transition)) Assert("entry->in_transition" , "../vm/vm_map.c", 3023); }); |
3024 | entry->in_transition = FALSE((boolean_t) 0); |
3025 | if(entry->needs_wakeup) { |
3026 | entry->needs_wakeup = FALSE((boolean_t) 0); |
3027 | needs_wakeup = TRUE((boolean_t) 1); |
3028 | } |
3029 | entry = entry->vme_nextlinks.next; |
3030 | } |
3031 | } |
3032 | |
3033 | if (result != KERN_SUCCESS0) |
3034 | vm_map_delete(dst_map, start, end); |
3035 | |
3036 | vm_map_unlock(dst_map)lock_done(&(dst_map)->lock); |
3037 | |
3038 | if (needs_wakeup) |
3039 | vm_map_entry_wakeup(dst_map)thread_wakeup_prim(((event_t)&(dst_map)->hdr), ((boolean_t ) 0), 0); |
3040 | |
3041 | /* |
3042 | * Consume on success logic. |
3043 | */ |
3044 | if (copy != orig_copy) { |
3045 | kmem_cache_free(&vm_map_copy_cache, (vm_offset_t) copy); |
3046 | } |
3047 | if (result == KERN_SUCCESS0) { |
3048 | kmem_cache_free(&vm_map_copy_cache, (vm_offset_t) orig_copy); |
3049 | } |
3050 | |
3051 | return(result); |
3052 | } |
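Editor's note: the in_transition handling used in the routine above (mark the entry before dropping the map lock, clear it and wake waiters once the lock is retaken) recurs throughout this file, including in vm_map_copyin_page_list further below. The following is a condensed editorial sketch of that pattern only, assuming the map is locked on entry and the entry belongs to it; the entry-splitting and error paths of the real code are deliberately omitted.

	static void
	in_transition_pattern_example(vm_map_t map, vm_map_entry_t entry)
	{
		entry->in_transition = TRUE;	/* claim the range */
		vm_map_unlock(map);		/* others block on the entry, not the lock */

		/* ... lengthy work that must not hold the map lock ... */

		vm_map_lock(map);
		entry->in_transition = FALSE;
		if (entry->needs_wakeup) {
			entry->needs_wakeup = FALSE;
			vm_map_entry_wakeup(map);	/* wake threads that waited on us */
		}
		vm_map_unlock(map);
	}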
3053 | |
3054 | /* |
3055 | * Routine: vm_map_copyin |
3056 | * |
3057 | * Description: |
3058 | * Copy the specified region (src_addr, len) from the |
3059 | * source address space (src_map), possibly removing |
3060 | * the region from the source address space (src_destroy). |
3061 | * |
3062 | * Returns: |
3063 | * A vm_map_copy_t object (copy_result), suitable for |
3064 | * insertion into another address space (using vm_map_copyout), |
3065 | * copying over another address space region (using |
3066 | * vm_map_copy_overwrite). If the copy is unused, it |
3067 | * should be destroyed (using vm_map_copy_discard). |
3068 | * |
3069 | * In/out conditions: |
3070 | * The source map should not be locked on entry. |
3071 | */ |
3072 | kern_return_t vm_map_copyin( |
3073 | vm_map_t src_map, |
3074 | vm_offset_t src_addr, |
3075 | vm_size_t len, |
3076 | boolean_t src_destroy, |
3077 | vm_map_copy_t *copy_result) /* OUT */ |
3078 | { |
3079 | vm_map_entry_t tmp_entry; /* Result of last map lookup -- |
3080 | * in multi-level lookup, this |
3081 | * entry contains the actual |
3082 | * vm_object/offset. |
3083 | */ |
3084 | |
3085 | vm_offset_t src_start; /* Start of current entry -- |
3086 | * where copy is taking place now |
3087 | */ |
3088 | vm_offset_t src_end; /* End of entire region to be |
3089 | * copied */ |
3090 | |
3091 | vm_map_copy_t copy; /* Resulting copy */ |
3092 | |
3093 | /* |
3094 | * Check for copies of zero bytes. |
3095 | */ |
3096 | |
3097 | if (len == 0) { |
3098 | *copy_result = VM_MAP_COPY_NULL((vm_map_copy_t) 0); |
3099 | return(KERN_SUCCESS0); |
3100 | } |
3101 | |
3102 | /* |
3103 | * Compute start and end of region |
3104 | */ |
3105 | |
3106 | src_start = trunc_page(src_addr)((vm_offset_t)(((vm_offset_t)(src_addr)) & ~((1 << 12 )-1))); |
3107 | src_end = round_page(src_addr + len)((vm_offset_t)((((vm_offset_t)(src_addr + len)) + ((1 << 12)-1)) & ~((1 << 12)-1))); |
3108 | |
3109 | /* |
3110 | * Check that the end address doesn't overflow |
3111 | */ |
3112 | |
3113 | if (src_end <= src_start) |
3114 | if ((src_end < src_start) || (src_start != 0)) |
3115 | return(KERN_INVALID_ADDRESS1); |
3116 | |
3117 | /* |
3118 | * Allocate a header element for the list. |
3119 | * |
3120 | * Use the start and end in the header to |
3121 | * remember the endpoints prior to rounding. |
3122 | */ |
3123 | |
3124 | copy = (vm_map_copy_t) kmem_cache_alloc(&vm_map_copy_cache); |
3125 | vm_map_copy_first_entry(copy)((copy)->c_u.hdr.links.next) = |
3126 | vm_map_copy_last_entry(copy)((copy)->c_u.hdr.links.prev) = vm_map_copy_to_entry(copy)((struct vm_map_entry *) &(copy)->c_u.hdr.links); |
3127 | copy->type = VM_MAP_COPY_ENTRY_LIST1; |
3128 | copy->cpy_hdrc_u.hdr.nentries = 0; |
3129 | copy->cpy_hdrc_u.hdr.entries_pageable = TRUE((boolean_t) 1); |
3130 | rbtree_init(©->cpy_hdrc_u.hdr.tree); |
3131 | |
3132 | copy->offset = src_addr; |
3133 | copy->size = len; |
3134 | |
3135 | #define RETURN(x) \ |
3136 | MACRO_BEGIN({ \ |
3137 | vm_map_unlock(src_map)lock_done(&(src_map)->lock); \ |
3138 | vm_map_copy_discard(copy); \ |
3139 | MACRO_RETURNif (((boolean_t) 1)) return(x); \ |
3140 | MACRO_END}) |
3141 | |
3142 | /* |
3143 | * Find the beginning of the region. |
3144 | */ |
3145 | |
3146 | vm_map_lock(src_map)({ lock_write(&(src_map)->lock); (src_map)->timestamp ++; }); |
3147 | |
3148 | if (!vm_map_lookup_entry(src_map, src_start, &tmp_entry)) |
3149 | RETURN(KERN_INVALID_ADDRESS1); |
3150 | vm_map_clip_start(src_map, tmp_entry, src_start)({ if ((src_start) > (tmp_entry)->links.start) _vm_map_clip_start (&(src_map)->hdr,(tmp_entry),(src_start)); }); |
3151 | |
3152 | /* |
3153 | * Go through entries until we get to the end. |
3154 | */ |
3155 | |
3156 | while (TRUE((boolean_t) 1)) { |
3157 | vm_map_entry_t src_entry = tmp_entry; /* Top-level entry */ |
3158 | vm_size_t src_size; /* Size of source |
3159 | * map entry (in both |
3160 | * maps) |
3161 | */ |
3162 | |
3163 | vm_object_t src_object; /* Object to copy */ |
3164 | vm_offset_t src_offset; |
3165 | |
3166 | boolean_t src_needs_copy; /* Should source map |
3167 | * be made read-only |
3168 | * for copy-on-write? |
3169 | */ |
3170 | |
3171 | vm_map_entry_t new_entry; /* Map entry for copy */ |
3172 | boolean_t new_entry_needs_copy; /* Will new entry be COW? */ |
3173 | |
3174 | boolean_t was_wired; /* Was source wired? */ |
3175 | vm_map_version_t version; /* Version before locks |
3176 | * dropped to make copy |
3177 | */ |
3178 | |
3179 | /* |
3180 | * Verify that the region can be read. |
3181 | */ |
3182 | |
3183 | if (! (src_entry->protection & VM_PROT_READ((vm_prot_t) 0x01))) |
3184 | RETURN(KERN_PROTECTION_FAILURE2); |
3185 | |
3186 | /* |
3187 | * Clip against the endpoints of the entire region. |
3188 | */ |
3189 | |
3190 | vm_map_clip_end(src_map, src_entry, src_end)({ if ((src_end) < (src_entry)->links.end) _vm_map_clip_end (&(src_map)->hdr,(src_entry),(src_end)); }); |
3191 | |
3192 | src_size = src_entry->vme_endlinks.end - src_start; |
3193 | src_object = src_entry->object.vm_object; |
3194 | src_offset = src_entry->offset; |
3195 | was_wired = (src_entry->wired_count != 0); |
3196 | |
3197 | /* |
3198 | * Create a new address map entry to |
3199 | * hold the result. Fill in the fields from |
3200 | * the appropriate source entries. |
3201 | */ |
3202 | |
3203 | new_entry = vm_map_copy_entry_create(copy)_vm_map_entry_create(&(copy)->c_u.hdr); |
3204 | vm_map_entry_copy(new_entry, src_entry)({ *(new_entry) = *(src_entry); (new_entry)->is_shared = ( (boolean_t) 0); (new_entry)->needs_wakeup = ((boolean_t) 0 ); (new_entry)->in_transition = ((boolean_t) 0); (new_entry )->wired_count = 0; (new_entry)->user_wired_count = 0; } ); |
3205 | |
3206 | /* |
3207 | * Attempt non-blocking copy-on-write optimizations. |
3208 | */ |
3209 | |
3210 | if (src_destroy && |
3211 | (src_object == VM_OBJECT_NULL((vm_object_t) 0) || |
3212 | (src_object->temporary && !src_object->use_shared_copy))) |
3213 | { |
3214 | /* |
3215 | * If we are destroying the source, and the object |
3216 | * is temporary, and not shared writable, |
3217 | * we can move the object reference |
3218 | * from the source to the copy. The copy is |
3219 | * copy-on-write only if the source is. |
3220 | * We make another reference to the object, because |
3221 | * destroying the source entry will deallocate it. |
3222 | */ |
3223 | vm_object_reference(src_object); |
3224 | |
3225 | /* |
3226 | * Copy is always unwired. vm_map_entry_copy |
3227 | * set its wired count to zero. |
3228 | */ |
3229 | |
3230 | goto CopySuccessful; |
3231 | } |
3232 | |
3233 | if (!was_wired && |
3234 | vm_object_copy_temporary( |
3235 | &new_entry->object.vm_object, |
3236 | &new_entry->offset, |
3237 | &src_needs_copy, |
3238 | &new_entry_needs_copy)) { |
3239 | |
3240 | new_entry->needs_copy = new_entry_needs_copy; |
3241 | |
3242 | /* |
3243 | * Handle copy-on-write obligations |
3244 | */ |
3245 | |
3246 | if (src_needs_copy && !tmp_entry->needs_copy) { |
3247 | vm_object_pmap_protect( |
3248 | src_object, |
3249 | src_offset, |
3250 | src_size, |
3251 | (src_entry->is_shared ? PMAP_NULL((pmap_t) 0) |
3252 | : src_map->pmap), |
3253 | src_entry->vme_startlinks.start, |
3254 | src_entry->protection & |
3255 | ~VM_PROT_WRITE((vm_prot_t) 0x02)); |
3256 | |
3257 | tmp_entry->needs_copy = TRUE((boolean_t) 1); |
3258 | } |
3259 | |
3260 | /* |
3261 | * The map has never been unlocked, so it's safe to |
3262 | * move to the next entry rather than doing another |
3263 | * lookup. |
3264 | */ |
3265 | |
3266 | goto CopySuccessful; |
3267 | } |
3268 | |
3269 | new_entry->needs_copy = FALSE((boolean_t) 0); |
3270 | |
3271 | /* |
3272 | * Take an object reference, so that we may |
3273 | * release the map lock(s). |
3274 | */ |
3275 | |
3276 | assert(src_object != VM_OBJECT_NULL)({ if (!(src_object != ((vm_object_t) 0))) Assert("src_object != VM_OBJECT_NULL" , "../vm/vm_map.c", 3276); }); |
3277 | vm_object_reference(src_object); |
3278 | |
3279 | /* |
3280 | * Record the timestamp for later verification. |
3281 | * Unlock the map. |
3282 | */ |
3283 | |
3284 | version.main_timestamp = src_map->timestamp; |
3285 | vm_map_unlock(src_map)lock_done(&(src_map)->lock); |
3286 | |
3287 | /* |
3288 | * Perform the copy |
3289 | */ |
3290 | |
3291 | if (was_wired) { |
3292 | vm_object_lock(src_object); |
3293 | (void) vm_object_copy_slowly( |
3294 | src_object, |
3295 | src_offset, |
3296 | src_size, |
3297 | FALSE((boolean_t) 0), |
3298 | &new_entry->object.vm_object); |
3299 | new_entry->offset = 0; |
3300 | new_entry->needs_copy = FALSE((boolean_t) 0); |
3301 | } else { |
3302 | kern_return_t result; |
3303 | |
3304 | result = vm_object_copy_strategically(src_object, |
3305 | src_offset, |
3306 | src_size, |
3307 | &new_entry->object.vm_object, |
3308 | &new_entry->offset, |
3309 | &new_entry_needs_copy); |
3310 | |
3311 | new_entry->needs_copy = new_entry_needs_copy; |
3312 | |
3313 | |
3314 | if (result != KERN_SUCCESS0) { |
3315 | vm_map_copy_entry_dispose(copy, new_entry)_vm_map_entry_dispose(&(copy)->c_u.hdr, (new_entry)); |
3316 | |
3317 | vm_map_lock(src_map)({ lock_write(&(src_map)->lock); (src_map)->timestamp ++; }); |
3318 | RETURN(result); |
3319 | } |
3320 | |
3321 | } |
3322 | |
3323 | /* |
3324 | * Throw away the extra reference |
3325 | */ |
3326 | |
3327 | vm_object_deallocate(src_object); |
3328 | |
3329 | /* |
3330 | * Verify that the map has not substantially |
3331 | * changed while the copy was being made. |
3332 | */ |
3333 | |
3334 | vm_map_lock(src_map)({ lock_write(&(src_map)->lock); (src_map)->timestamp ++; }); /* Increments timestamp once! */ |
3335 | |
3336 | if ((version.main_timestamp + 1) == src_map->timestamp) |
3337 | goto CopySuccessful; |
3338 | |
3339 | /* |
3340 | * Simple version comparison failed. |
3341 | * |
3342 | * Retry the lookup and verify that the |
3343 | * same object/offset are still present. |
3344 | * |
3345 | * [Note: a memory manager that colludes with |
3346 | * the calling task can detect that we have |
3347 | * cheated. While the map was unlocked, the |
3348 | * mapping could have been changed and restored.] |
3349 | */ |
3350 | |
3351 | if (!vm_map_lookup_entry(src_map, src_start, &tmp_entry)) { |
3352 | vm_map_copy_entry_dispose(copy, new_entry)_vm_map_entry_dispose(&(copy)->c_u.hdr, (new_entry)); |
3353 | RETURN(KERN_INVALID_ADDRESS1); |
3354 | } |
3355 | |
3356 | src_entry = tmp_entry; |
3357 | vm_map_clip_start(src_map, src_entry, src_start)({ if ((src_start) > (src_entry)->links.start) _vm_map_clip_start (&(src_map)->hdr,(src_entry),(src_start)); }); |
3358 | |
3359 | if ((src_entry->protection & VM_PROT_READ((vm_prot_t) 0x01)) == VM_PROT_NONE((vm_prot_t) 0x00)) |
3360 | goto VerificationFailed; |
3361 | |
3362 | if (src_entry->vme_endlinks.end < new_entry->vme_endlinks.end) |
3363 | src_size = (new_entry->vme_endlinks.end = src_entry->vme_endlinks.end) - src_start; |
Value stored to 'src_size' is never read | |
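Editor's note: this is the dead store named in the report header (source line 3363). src_size is consumed earlier in the loop body, by vm_object_pmap_protect and by the vm_object_copy_slowly / vm_object_copy_strategically calls, and nothing after this re-verification step reads it; the next iteration recomputes it from the freshly clipped src_entry. Only the clamp of new_entry's end is still needed. A minimal cleanup sketch follows (an editorial suggestion, not the project's adopted fix; vme_end is the source's shorthand for links.end):

	/* Keep the clamp of the copy entry; drop the dead assignment to src_size. */
	if (src_entry->vme_end < new_entry->vme_end)
		new_entry->vme_end = src_entry->vme_end;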
3364 | |
3365 | if ((src_entry->object.vm_object != src_object) || |
3366 | (src_entry->offset != src_offset) ) { |
3367 | |
3368 | /* |
3369 | * Verification failed. |
3370 | * |
3371 | * Start over with this top-level entry. |
3372 | */ |
3373 | |
3374 | VerificationFailed: ; |
3375 | |
3376 | vm_object_deallocate(new_entry->object.vm_object); |
3377 | vm_map_copy_entry_dispose(copy, new_entry)_vm_map_entry_dispose(&(copy)->c_u.hdr, (new_entry)); |
3378 | tmp_entry = src_entry; |
3379 | continue; |
3380 | } |
3381 | |
3382 | /* |
3383 | * Verification succeeded. |
3384 | */ |
3385 | |
3386 | CopySuccessful: ; |
3387 | |
3388 | /* |
3389 | * Link in the new copy entry. |
3390 | */ |
3391 | |
3392 | vm_map_copy_entry_link(copy, vm_map_copy_last_entry(copy),({ (&(copy)->c_u.hdr)->nentries++; (new_entry)-> links.prev = (((copy)->c_u.hdr.links.prev)); (new_entry)-> links.next = (((copy)->c_u.hdr.links.prev))->links.next ; (new_entry)->links.prev->links.next = (new_entry)-> links.next->links.prev = (new_entry); ({ struct rbtree_node *___cur, *___prev; int ___diff, ___index; ___prev = ((void * ) 0); ___index = -1; ___cur = (&(&(copy)->c_u.hdr) ->tree)->root; while (___cur != ((void *) 0)) { ___diff = vm_map_entry_cmp_insert(&(new_entry)->tree_node, ___cur ); ({ if (!(___diff != 0)) Assert("___diff != 0", "../vm/vm_map.c" , 3393); }); ___prev = ___cur; ___index = rbtree_d2i(___diff) ; ___cur = ___cur->children[___index]; } rbtree_insert_rebalance (&(&(copy)->c_u.hdr)->tree, ___prev, ___index, & (new_entry)->tree_node); }); }) |
3393 | new_entry)({ (&(copy)->c_u.hdr)->nentries++; (new_entry)-> links.prev = (((copy)->c_u.hdr.links.prev)); (new_entry)-> links.next = (((copy)->c_u.hdr.links.prev))->links.next ; (new_entry)->links.prev->links.next = (new_entry)-> links.next->links.prev = (new_entry); ({ struct rbtree_node *___cur, *___prev; int ___diff, ___index; ___prev = ((void * ) 0); ___index = -1; ___cur = (&(&(copy)->c_u.hdr) ->tree)->root; while (___cur != ((void *) 0)) { ___diff = vm_map_entry_cmp_insert(&(new_entry)->tree_node, ___cur ); ({ if (!(___diff != 0)) Assert("___diff != 0", "../vm/vm_map.c" , 3393); }); ___prev = ___cur; ___index = rbtree_d2i(___diff) ; ___cur = ___cur->children[___index]; } rbtree_insert_rebalance (&(&(copy)->c_u.hdr)->tree, ___prev, ___index, & (new_entry)->tree_node); }); }); |
3394 | |
3395 | /* |
3396 | * Determine whether the entire region |
3397 | * has been copied. |
3398 | */ |
3399 | src_start = new_entry->vme_endlinks.end; |
3400 | if ((src_start >= src_end) && (src_end != 0)) |
3401 | break; |
3402 | |
3403 | /* |
3404 | * Verify that there are no gaps in the region |
3405 | */ |
3406 | |
3407 | tmp_entry = src_entry->vme_nextlinks.next; |
3408 | if (tmp_entry->vme_startlinks.start != src_start) |
3409 | RETURN(KERN_INVALID_ADDRESS1); |
3410 | } |
3411 | |
3412 | /* |
3413 | * If the source should be destroyed, do it now, since the |
3414 | * copy was successful. |
3415 | */ |
3416 | if (src_destroy) |
3417 | (void) vm_map_delete(src_map, trunc_page(src_addr)((vm_offset_t)(((vm_offset_t)(src_addr)) & ~((1 << 12 )-1))), src_end); |
3418 | |
3419 | vm_map_unlock(src_map)lock_done(&(src_map)->lock); |
3420 | |
3421 | *copy_result = copy; |
3422 | return(KERN_SUCCESS0); |
3423 | |
3424 | #undef RETURN |
3425 | } |
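Editor's note: as a reader aid for the Returns / In-out conditions described in the header comment of vm_map_copyin above, here is a minimal hypothetical caller sketch. It relies only on the companion routines named in that comment (vm_map_copyout, vm_map_copy_discard) with their conventional signatures; the helper name move_region_example is invented for illustration and is not part of this file.

	kern_return_t
	move_region_example(vm_map_t src_map, vm_map_t dst_map,
			    vm_offset_t src_addr, vm_size_t len,
			    vm_offset_t *dst_addr)
	{
		vm_map_copy_t copy;
		kern_return_t kr;

		/* Capture the region, removing it from the source map. */
		kr = vm_map_copyin(src_map, src_addr, len, TRUE, &copy);
		if (kr != KERN_SUCCESS)
			return kr;

		/* Insert the copy into the destination map. */
		kr = vm_map_copyout(dst_map, dst_addr, copy);
		if (kr != KERN_SUCCESS)
			vm_map_copy_discard(copy);	/* unused copies must be destroyed */

		return kr;
	}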
3426 | |
3427 | /* |
3428 | * vm_map_copyin_object: |
3429 | * |
3430 | * Create a copy object from an object. |
3431 | * Our caller donates an object reference. |
3432 | */ |
3433 | |
3434 | kern_return_t vm_map_copyin_object( |
3435 | vm_object_t object, |
3436 | vm_offset_t offset, /* offset of region in object */ |
3437 | vm_size_t size, /* size of region in object */ |
3438 | vm_map_copy_t *copy_result) /* OUT */ |
3439 | { |
3440 | vm_map_copy_t copy; /* Resulting copy */ |
3441 | |
3442 | /* |
3443 | * We drop the object into a special copy object |
3444 | * that contains the object directly. These copy objects |
3445 | * are distinguished by entries_pageable == FALSE |
3446 | * and null links. |
3447 | */ |
3448 | |
3449 | copy = (vm_map_copy_t) kmem_cache_alloc(&vm_map_copy_cache); |
3450 | vm_map_copy_first_entry(copy)((copy)->c_u.hdr.links.next) = |
3451 | vm_map_copy_last_entry(copy)((copy)->c_u.hdr.links.prev) = VM_MAP_ENTRY_NULL((vm_map_entry_t) 0); |
3452 | copy->type = VM_MAP_COPY_OBJECT2; |
3453 | copy->cpy_objectc_u.c_o.object = object; |
3454 | copy->offset = offset; |
3455 | copy->size = size; |
3456 | |
3457 | *copy_result = copy; |
3458 | return(KERN_SUCCESS0); |
3459 | } |
3460 | |
3461 | /* |
3462 | * vm_map_copyin_page_list_cont: |
3463 | * |
3464 | * Continuation routine for vm_map_copyin_page_list. |
3465 | * |
3466 | * If vm_map_copyin_page_list can't fit the entire vm range |
3467 | * into a single page list object, it creates a continuation. |
3468 | * When the target of the operation has used the pages in the |
3469 | * initial page list, it invokes the continuation, which calls |
3470 | * this routine. If an error happens, the continuation is aborted |
3471 | * (abort arg to this routine is TRUE). To avoid deadlocks, the |
3472 | * pages are discarded from the initial page list before invoking |
3473 | * the continuation. |
3474 | * |
3475 | * NOTE: This is not the same sort of continuation used by |
3476 | * the scheduler. |
3477 | */ |
3478 | |
3479 | kern_return_t vm_map_copyin_page_list_cont( |
3480 | vm_map_copyin_args_t cont_args, |
3481 | vm_map_copy_t *copy_result) /* OUT */ |
3482 | { |
3483 | kern_return_t result = 0; /* '=0' to quiet gcc warnings */ |
3484 | boolean_t do_abort, src_destroy, src_destroy_only; |
3485 | |
3486 | /* |
3487 | * Check for cases that only require memory destruction. |
3488 | */ |
3489 | do_abort = (copy_result == (vm_map_copy_t *) 0); |
3490 | src_destroy = (cont_args->destroy_len != (vm_size_t) 0); |
3491 | src_destroy_only = (cont_args->src_len == (vm_size_t) 0); |
3492 | |
3493 | if (do_abort || src_destroy_only) { |
3494 | if (src_destroy) |
3495 | result = vm_map_remove(cont_args->map, |
3496 | cont_args->destroy_addr, |
3497 | cont_args->destroy_addr + cont_args->destroy_len); |
3498 | if (!do_abort) |
3499 | *copy_result = VM_MAP_COPY_NULL((vm_map_copy_t) 0); |
3500 | } |
3501 | else { |
3502 | result = vm_map_copyin_page_list(cont_args->map, |
3503 | cont_args->src_addr, cont_args->src_len, src_destroy, |
3504 | cont_args->steal_pages, copy_result, TRUE((boolean_t) 1)); |
3505 | |
3506 | if (src_destroy && !cont_args->steal_pages && |
3507 | vm_map_copy_has_cont(*copy_result)(((*copy_result)->c_u.c_p.cont) != (kern_return_t (*)()) 0 )) { |
3508 | vm_map_copyin_args_t new_args; |
3509 | /* |
3510 | * Transfer old destroy info. |
3511 | */ |
3512 | new_args = (vm_map_copyin_args_t) |
3513 | (*copy_result)->cpy_cont_argsc_u.c_p.cont_args; |
3514 | new_args->destroy_addr = cont_args->destroy_addr; |
3515 | new_args->destroy_len = cont_args->destroy_len; |
3516 | } |
3517 | } |
3518 | |
3519 | vm_map_deallocate(cont_args->map); |
3520 | kfree((vm_offset_t)cont_args, sizeof(vm_map_copyin_args_data_t)); |
3521 | |
3522 | return(result); |
3523 | } |
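Editor's note: to illustrate the continuation protocol described in the comment above vm_map_copyin_page_list_cont, here is a hedged consumer sketch. It uses only names visible elsewhere in this file (vm_map_copy_has_cont, vm_map_copy_invoke_cont, cpy_npages, cpy_page_list); cleanup of spent copy headers is omitted, for which see the consume-on-success logic in vm_map_copyout_page_list. The function name drain_page_list_example is invented for illustration.

	kern_return_t
	drain_page_list_example(vm_map_copy_t copy)
	{
		kern_return_t kr = KERN_SUCCESS;
		vm_map_copy_t next;
		int i;

		while (copy != VM_MAP_COPY_NULL) {
			/* Use the pages of this chunk (placeholder for real work). */
			for (i = 0; i < copy->cpy_npages; i++)
				(void) copy->cpy_page_list[i];

			if (!vm_map_copy_has_cont(copy))
				break;

			/* Discards this chunk's pages, then fetches the next chunk. */
			vm_map_copy_invoke_cont(copy, &next, &kr);
			if (kr != KERN_SUCCESS)
				break;
			copy = next;
		}
		return kr;
	}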
3524 | |
3525 | /* |
3526 | * vm_map_copyin_page_list: |
3527 | * |
3528 | * This is a variant of vm_map_copyin that copies in a list of pages. |
3529 | * If steal_pages is TRUE, the pages are only in the returned list. |
3530 | * If steal_pages is FALSE, the pages are busy and still in their |
3531 | * objects. A continuation may be returned if not all the pages fit: |
3532 | * the recipient of this copy_result must be prepared to deal with it. |
3533 | */ |
3534 | |
3535 | kern_return_t vm_map_copyin_page_list( |
3536 | vm_map_t src_map, |
3537 | vm_offset_t src_addr, |
3538 | vm_size_t len, |
3539 | boolean_t src_destroy, |
3540 | boolean_t steal_pages, |
3541 | vm_map_copy_t *copy_result, /* OUT */ |
3542 | boolean_t is_cont) |
3543 | { |
3544 | vm_map_entry_t src_entry; |
3545 | vm_page_t m; |
3546 | vm_offset_t src_start; |
3547 | vm_offset_t src_end; |
3548 | vm_size_t src_size; |
3549 | vm_object_t src_object; |
3550 | vm_offset_t src_offset; |
3551 | vm_offset_t src_last_offset; |
3552 | vm_map_copy_t copy; /* Resulting copy */ |
3553 | kern_return_t result = KERN_SUCCESS0; |
3554 | boolean_t need_map_lookup; |
3555 | vm_map_copyin_args_t cont_args; |
3556 | |
3557 | /* |
3558 | * If steal_pages is FALSE, this leaves busy pages in |
3559 | * the object. A continuation must be used if src_destroy |
3560 | * is true in this case (!steal_pages && src_destroy). |
3561 | * |
3562 | * XXX Still have a more general problem of what happens |
3563 | * XXX if the same page occurs twice in a list. Deadlock |
3564 | * XXX can happen if vm_fault_page was called. A |
3565 | * XXX possible solution is to use a continuation if vm_fault_page |
3566 | * XXX is called and we cross a map entry boundary. |
3567 | */ |
3568 | |
3569 | /* |
3570 | * Check for copies of zero bytes. |
3571 | */ |
3572 | |
3573 | if (len == 0) { |
3574 | *copy_result = VM_MAP_COPY_NULL((vm_map_copy_t) 0); |
3575 | return(KERN_SUCCESS0); |
3576 | } |
3577 | |
3578 | /* |
3579 | * Compute start and end of region |
3580 | */ |
3581 | |
3582 | src_start = trunc_page(src_addr)((vm_offset_t)(((vm_offset_t)(src_addr)) & ~((1 << 12 )-1))); |
3583 | src_end = round_page(src_addr + len)((vm_offset_t)((((vm_offset_t)(src_addr + len)) + ((1 << 12)-1)) & ~((1 << 12)-1))); |
3584 | |
3585 | /* |
3586 | * Check that the end address doesn't overflow |
3587 | */ |
3588 | |
3589 | if (src_end <= src_start && (src_end < src_start || src_start != 0)) { |
3590 | return KERN_INVALID_ADDRESS1; |
3591 | } |
3592 | |
3593 | /* |
3594 | * Allocate a header element for the page list. |
3595 | * |
3596 | * Record original offset and size, as caller may not |
3597 | * be page-aligned. |
3598 | */ |
3599 | |
3600 | copy = (vm_map_copy_t) kmem_cache_alloc(&vm_map_copy_cache); |
3601 | copy->type = VM_MAP_COPY_PAGE_LIST3; |
3602 | copy->cpy_npagesc_u.c_p.npages = 0; |
3603 | copy->offset = src_addr; |
3604 | copy->size = len; |
3605 | copy->cpy_contc_u.c_p.cont = ((kern_return_t (*)()) 0); |
3606 | copy->cpy_cont_argsc_u.c_p.cont_args = (char *) VM_MAP_COPYIN_ARGS_NULL((vm_map_copyin_args_t) 0); |
3607 | |
3608 | /* |
3609 | * Find the beginning of the region. |
3610 | */ |
3611 | |
3612 | do_map_lookup: |
3613 | |
3614 | vm_map_lock(src_map)({ lock_write(&(src_map)->lock); (src_map)->timestamp ++; }); |
3615 | |
3616 | if (!vm_map_lookup_entry(src_map, src_start, &src_entry)) { |
3617 | result = KERN_INVALID_ADDRESS1; |
3618 | goto error; |
3619 | } |
3620 | need_map_lookup = FALSE((boolean_t) 0); |
3621 | |
3622 | /* |
3623 | * Go through entries until we get to the end. |
3624 | */ |
3625 | |
3626 | while (TRUE((boolean_t) 1)) { |
3627 | |
3628 | if (! (src_entry->protection & VM_PROT_READ((vm_prot_t) 0x01))) { |
3629 | result = KERN_PROTECTION_FAILURE2; |
3630 | goto error; |
3631 | } |
3632 | |
3633 | if (src_end > src_entry->vme_endlinks.end) |
3634 | src_size = src_entry->vme_endlinks.end - src_start; |
3635 | else |
3636 | src_size = src_end - src_start; |
3637 | |
3638 | src_object = src_entry->object.vm_object; |
3639 | src_offset = src_entry->offset + |
3640 | (src_start - src_entry->vme_startlinks.start); |
3641 | |
3642 | /* |
3643 | * If src_object is NULL, allocate it now; |
3644 | * we're going to fault on it shortly. |
3645 | */ |
3646 | if (src_object == VM_OBJECT_NULL((vm_object_t) 0)) { |
3647 | src_object = vm_object_allocate((vm_size_t) |
3648 | src_entry->vme_endlinks.end - |
3649 | src_entry->vme_startlinks.start); |
3650 | src_entry->object.vm_object = src_object; |
3651 | } |
3652 | |
3653 | /* |
3654 | * Iterate over pages. Fault in ones that aren't present. |
3655 | */ |
3656 | src_last_offset = src_offset + src_size; |
3657 | for (; (src_offset < src_last_offset && !need_map_lookup); |
3658 | src_offset += PAGE_SIZE(1 << 12), src_start += PAGE_SIZE(1 << 12)) { |
3659 | |
3660 | if (copy->cpy_npagesc_u.c_p.npages == VM_MAP_COPY_PAGE_LIST_MAX64) { |
3661 | make_continuation: |
3662 | /* |
3663 | * At this point we have the max number of |
3664 | * pages busy for this thread that we're |
3665 | * willing to allow. Stop here and record |
3666 | * arguments for the remainder. Note: |
3667 | * this means that this routine isn't atomic, |
3668 | * but that's the breaks. Note that only |
3669 | * the first vm_map_copy_t that comes back |
3670 | * from this routine has the right offset |
3671 | * and size; those from continuations are |
3672 | * page rounded, and short by the amount |
3673 | * already done. |
3674 | * |
3675 | * Reset src_end so the src_destroy |
3676 | * code at the bottom doesn't do |
3677 | * something stupid. |
3678 | */ |
3679 | |
3680 | cont_args = (vm_map_copyin_args_t) |
3681 | kalloc(sizeof(vm_map_copyin_args_data_t)); |
3682 | cont_args->map = src_map; |
3683 | vm_map_reference(src_map); |
3684 | cont_args->src_addr = src_start; |
3685 | cont_args->src_len = len - (src_start - src_addr); |
3686 | if (src_destroy) { |
3687 | cont_args->destroy_addr = cont_args->src_addr; |
3688 | cont_args->destroy_len = cont_args->src_len; |
3689 | } |
3690 | else { |
3691 | cont_args->destroy_addr = (vm_offset_t) 0; |
3692 | cont_args->destroy_len = (vm_offset_t) 0; |
3693 | } |
3694 | cont_args->steal_pages = steal_pages; |
3695 | |
3696 | copy->cpy_cont_argsc_u.c_p.cont_args = (char *) cont_args; |
3697 | copy->cpy_contc_u.c_p.cont = vm_map_copyin_page_list_cont; |
3698 | |
3699 | src_end = src_start; |
3700 | vm_map_clip_end(src_map, src_entry, src_end)({ if ((src_end) < (src_entry)->links.end) _vm_map_clip_end (&(src_map)->hdr,(src_entry),(src_end)); }); |
3701 | break; |
3702 | } |
3703 | |
3704 | /* |
3705 | * Try to find the page of data. |
3706 | */ |
3707 | vm_object_lock(src_object); |
3708 | vm_object_paging_begin(src_object)((src_object)->paging_in_progress++); |
3709 | if (((m = vm_page_lookup(src_object, src_offset)) != |
3710 | VM_PAGE_NULL((vm_page_t) 0)) && !m->busy && !m->fictitious && |
3711 | !m->absent && !m->error) { |
3712 | |
3713 | /* |
3714 | * This is the page. Mark it busy |
3715 | * and keep the paging reference on |
3716 | * the object whilst we do our thing. |
3717 | */ |
3718 | m->busy = TRUE((boolean_t) 1); |
3719 | |
3720 | /* |
3721 | * Also write-protect the page, so |
3722 | * that the map`s owner cannot change |
3723 | * the data. The busy bit will prevent |
3724 | * faults on the page from succeeding |
3725 | * until the copy is released; after |
3726 | * that, the page can be re-entered |
3727 | * as writable, since we didn`t alter |
3728 | * the map entry. This scheme is a |
3729 | * cheap copy-on-write. |
3730 | * |
3731 | * Don`t forget the protection and |
3732 | * the page_lock value! |
3733 | * |
3734 | * If the source is being destroyed |
3735 | * AND not shared writable, we don`t |
3736 | * have to protect the page, since |
3737 | * we will destroy the (only) |
3738 | * writable mapping later. |
3739 | */ |
3740 | if (!src_destroy || |
3741 | src_object->use_shared_copy) |
3742 | { |
3743 | pmap_page_protect(m->phys_addr, |
3744 | src_entry->protection |
3745 | & ~m->page_lock |
3746 | & ~VM_PROT_WRITE((vm_prot_t) 0x02)); |
3747 | } |
3748 | |
3749 | } |
3750 | else { |
3751 | vm_prot_t result_prot; |
3752 | vm_page_t top_page; |
3753 | kern_return_t kr; |
3754 | |
3755 | /* |
3756 | * Have to fault the page in; must |
3757 | * unlock the map to do so. While |
3758 | * the map is unlocked, anything |
3759 | * can happen, we must lookup the |
3760 | * map entry before continuing. |
3761 | */ |
3762 | vm_map_unlock(src_map)lock_done(&(src_map)->lock); |
3763 | need_map_lookup = TRUE((boolean_t) 1); |
3764 | retry: |
3765 | result_prot = VM_PROT_READ((vm_prot_t) 0x01); |
3766 | |
3767 | kr = vm_fault_page(src_object, src_offset, |
3768 | VM_PROT_READ((vm_prot_t) 0x01), FALSE((boolean_t) 0), FALSE((boolean_t) 0), |
3769 | &result_prot, &m, &top_page, |
3770 | FALSE((boolean_t) 0), (void (*)()) 0); |
3771 | /* |
3772 | * Cope with what happened. |
3773 | */ |
3774 | switch (kr) { |
3775 | case VM_FAULT_SUCCESS0: |
3776 | break; |
3777 | case VM_FAULT_INTERRUPTED2: /* ??? */ |
3778 | case VM_FAULT_RETRY1: |
3779 | vm_object_lock(src_object); |
3780 | vm_object_paging_begin(src_object)((src_object)->paging_in_progress++); |
3781 | goto retry; |
3782 | case VM_FAULT_MEMORY_SHORTAGE3: |
3783 | VM_PAGE_WAIT((void (*)()) 0)vm_page_wait((void (*)()) 0); |
3784 | vm_object_lock(src_object); |
3785 | vm_object_paging_begin(src_object)((src_object)->paging_in_progress++); |
3786 | goto retry; |
3787 | case VM_FAULT_FICTITIOUS_SHORTAGE4: |
3788 | vm_page_more_fictitious(); |
3789 | vm_object_lock(src_object); |
3790 | vm_object_paging_begin(src_object)((src_object)->paging_in_progress++); |
3791 | goto retry; |
3792 | case VM_FAULT_MEMORY_ERROR5: |
3793 | /* |
3794 | * Something broke. If this |
3795 | * is a continuation, return |
3796 | * a partial result if possible, |
3797 | * else fail the whole thing. |
3798 | * In the continuation case, the |
3799 | * next continuation call will |
3800 | * get this error if it persists. |
3801 | */ |
3802 | vm_map_lock(src_map)({ lock_write(&(src_map)->lock); (src_map)->timestamp ++; }); |
3803 | if (is_cont && |
3804 | copy->cpy_npagesc_u.c_p.npages != 0) |
3805 | goto make_continuation; |
3806 | |
3807 | result = KERN_MEMORY_ERROR10; |
3808 | goto error; |
3809 | } |
3810 | |
3811 | if (top_page != VM_PAGE_NULL((vm_page_t) 0)) { |
3812 | vm_object_lock(src_object); |
3813 | VM_PAGE_FREE(top_page)({ ; vm_page_free(top_page); ((void)(&vm_page_queue_lock) ); }); |
3814 | vm_object_paging_end(src_object)({ ({ if (!((src_object)->paging_in_progress != 0)) Assert ("(src_object)->paging_in_progress != 0", "../vm/vm_map.c" , 3814); }); if (--(src_object)->paging_in_progress == 0) { ({ if ((src_object)->all_wanted & (1 << (2))) thread_wakeup_prim (((event_t)(((vm_offset_t) src_object) + (2))), ((boolean_t) 0 ), 0); (src_object)->all_wanted &= ~(1 << (2)); } ); } }); |
3815 | vm_object_unlock(src_object)((void)(&(src_object)->Lock)); |
3816 | } |
3817 | |
3818 | /* |
3819 | * We do not need to write-protect |
3820 | * the page, since it cannot have |
3821 | * been in the pmap (and we did not |
3822 | * enter it above). The busy bit |
3823 | * will protect the page from being |
3824 | * entered as writable until it is |
3825 | * unlocked. |
3826 | */ |
3827 | |
3828 | } |
3829 | |
3830 | /* |
3831 | * The page is busy, its object is locked, and |
3832 | * we have a paging reference on it. Either |
3833 | * the map is locked, or need_map_lookup is |
3834 | * TRUE. |
3835 | * |
3836 | * Put the page in the page list. |
3837 | */ |
3838 | copy->cpy_page_listc_u.c_p.page_list[copy->cpy_npagesc_u.c_p.npages++] = m; |
3839 | vm_object_unlock(m->object)((void)(&(m->object)->Lock)); |
3840 | } |
3841 | |
3842 | /* |
3843 | * Determine whether the entire region |
3844 | * has been copied. |
3845 | */ |
3846 | if (src_start >= src_end && src_end != 0) { |
3847 | if (need_map_lookup) |
3848 | vm_map_lock(src_map)({ lock_write(&(src_map)->lock); (src_map)->timestamp ++; }); |
3849 | break; |
3850 | } |
3851 | |
3852 | /* |
3853 | * If need_map_lookup is TRUE, have to start over with |
3854 | * another map lookup. Note that we dropped the map |
3855 | * lock (to call vm_fault_page) above only in this case. |
3856 | */ |
3857 | if (need_map_lookup) |
3858 | goto do_map_lookup; |
3859 | |
3860 | /* |
3861 | * Verify that there are no gaps in the region |
3862 | */ |
3863 | |
3864 | src_start = src_entry->vme_endlinks.end; |
3865 | src_entry = src_entry->vme_nextlinks.next; |
3866 | if (src_entry->vme_startlinks.start != src_start) { |
3867 | result = KERN_INVALID_ADDRESS1; |
3868 | goto error; |
3869 | } |
3870 | } |
3871 | |
3872 | /* |
3873 | * If steal_pages is true, make sure all |
3874 | * pages in the copy are not in any object |
3875 | * We try to remove them from the original |
3876 | * object, but we may have to copy them. |
3877 | * |
3878 | * At this point every page in the list is busy |
3879 | * and holds a paging reference to its object. |
3880 | * When we're done stealing, every page is busy, |
3881 | * and in no object (m->tabled == FALSE). |
3882 | */ |
3883 | src_start = trunc_page(src_addr)((vm_offset_t)(((vm_offset_t)(src_addr)) & ~((1 << 12 )-1))); |
3884 | if (steal_pages) { |
3885 | int i; |
3886 | vm_offset_t unwire_end; |
3887 | |
3888 | unwire_end = src_start; |
3889 | for (i = 0; i < copy->cpy_npagesc_u.c_p.npages; i++) { |
3890 | |
3891 | /* |
3892 | * Remove the page from its object if it |
3893 | * can be stolen. It can be stolen if: |
3894 | * |
3895 | * (1) The source is being destroyed, |
3896 | * the object is temporary, and |
3897 | * not shared. |
3898 | * (2) The page is not precious. |
3899 | * |
3900 | * The not shared check consists of two |
3901 | * parts: (a) there are no objects that |
3902 | * shadow this object. (b) it is not the |
3903 | * object in any shared map entries (i.e., |
3904 | * use_shared_copy is not set). |
3905 | * |
3906 | * The first check (a) means that we can't |
3907 | * steal pages from objects that are not |
3908 | * at the top of their shadow chains. This |
3909 | * should not be a frequent occurrence. |
3910 | * |
3911 | * Stealing wired pages requires telling the |
3912 | * pmap module to let go of them. |
3913 | * |
3914 | * NOTE: stealing clean pages from objects |
3915 | * whose mappings survive requires a call to |
3916 | * the pmap module. Maybe later. |
3917 | */ |
3918 | m = copy->cpy_page_listc_u.c_p.page_list[i]; |
3919 | src_object = m->object; |
3920 | vm_object_lock(src_object); |
3921 | |
3922 | if (src_destroy && |
3923 | src_object->temporary && |
3924 | (!src_object->shadowed) && |
3925 | (!src_object->use_shared_copy) && |
3926 | !m->precious) { |
3927 | vm_offset_t page_vaddr; |
3928 | |
3929 | page_vaddr = src_start + (i * PAGE_SIZE(1 << 12)); |
3930 | if (m->wire_count > 0) { |
3931 | |
3932 | assert(m->wire_count == 1)({ if (!(m->wire_count == 1)) Assert("m->wire_count == 1" , "../vm/vm_map.c", 3932); }); |
3933 | /* |
3934 | * In order to steal a wired |
3935 | * page, we have to unwire it |
3936 | * first. We do this inline |
3937 | * here because we have the page. |
3938 | * |
3939 | * Step 1: Unwire the map entry. |
3940 | * Also tell the pmap module |
3941 | * that this piece of the |
3942 | * pmap is pageable. |
3943 | */ |
3944 | vm_object_unlock(src_object)((void)(&(src_object)->Lock)); |
3945 | if (page_vaddr >= unwire_end) { |
3946 | if (!vm_map_lookup_entry(src_map, |
3947 | page_vaddr, &src_entry)) |
3948 | panic("vm_map_copyin_page_list: missing wired map entry"); |
3949 | |
3950 | vm_map_clip_start(src_map, src_entry,({ if ((page_vaddr) > (src_entry)->links.start) _vm_map_clip_start (&(src_map)->hdr,(src_entry),(page_vaddr)); }) |
3951 | page_vaddr)({ if ((page_vaddr) > (src_entry)->links.start) _vm_map_clip_start (&(src_map)->hdr,(src_entry),(page_vaddr)); }); |
3952 | vm_map_clip_end(src_map, src_entry,({ if ((src_start + src_size) < (src_entry)->links.end) _vm_map_clip_end(&(src_map)->hdr,(src_entry),(src_start + src_size)); }) |
3953 | src_start + src_size)({ if ((src_start + src_size) < (src_entry)->links.end) _vm_map_clip_end(&(src_map)->hdr,(src_entry),(src_start + src_size)); }); |
3954 | |
3955 | assert(src_entry->wired_count > 0)({ if (!(src_entry->wired_count > 0)) Assert("src_entry->wired_count > 0" , "../vm/vm_map.c", 3955); }); |
3956 | src_entry->wired_count = 0; |
3957 | src_entry->user_wired_count = 0; |
3958 | unwire_end = src_entry->vme_endlinks.end; |
3959 | pmap_pageable(vm_map_pmap(src_map)((src_map)->pmap), |
3960 | page_vaddr, unwire_end, TRUE((boolean_t) 1)); |
3961 | } |
3962 | |
3963 | /* |
3964 | * Step 2: Unwire the page. |
3965 | * pmap_remove handles this for us. |
3966 | */ |
3967 | vm_object_lock(src_object); |
3968 | } |
3969 | |
3970 | /* |
3971 | * Don't need to remove the mapping; |
3972 | * vm_map_delete will handle it. |
3973 | * |
3974 | * Steal the page. Setting the wire count |
3975 | * to zero is vm_page_unwire without |
3976 | * activating the page. |
3977 | */ |
3978 | vm_page_lock_queues(); |
3979 | vm_page_remove(m); |
3980 | if (m->wire_count > 0) { |
3981 | m->wire_count = 0; |
3982 | vm_page_wire_count--; |
3983 | } else { |
3984 | VM_PAGE_QUEUES_REMOVE(m)({ if (m->active) { { queue_entry_t next, prev; next = (m) ->pageq.next; prev = (m)->pageq.prev; if ((&vm_page_queue_active ) == next) (&vm_page_queue_active)->prev = prev; else ( (vm_page_t)next)->pageq.prev = prev; if ((&vm_page_queue_active ) == prev) (&vm_page_queue_active)->next = next; else ( (vm_page_t)prev)->pageq.next = next; }; m->active = ((boolean_t ) 0); vm_page_active_count--; } if (m->inactive) { { queue_entry_t next, prev; next = (m)->pageq.next; prev = (m)->pageq. prev; if ((&vm_page_queue_inactive) == next) (&vm_page_queue_inactive )->prev = prev; else ((vm_page_t)next)->pageq.prev = prev ; if ((&vm_page_queue_inactive) == prev) (&vm_page_queue_inactive )->next = next; else ((vm_page_t)prev)->pageq.next = next ; }; m->inactive = ((boolean_t) 0); vm_page_inactive_count --; } }); |
3985 | } |
3986 | vm_page_unlock_queues()((void)(&vm_page_queue_lock)); |
3987 | } |
3988 | else { |
3989 | /* |
3990 | * Have to copy this page. We must |
3991 | * unlock the map to do the copy, |
3992 | * so no further page stealing is |
3993 | * possible; just copy all of the |
3994 | * remaining pages instead of |
3995 | * stealing them. |
3996 | */ |
3997 | vm_object_unlock(src_object)((void)(&(src_object)->Lock)); |
3998 | vm_map_unlock(src_map)lock_done(&(src_map)->lock); |
3999 | |
4000 | vm_map_copy_steal_pages(copy); |
4001 | |
4002 | vm_map_lock(src_map)({ lock_write(&(src_map)->lock); (src_map)->timestamp ++; }); |
4003 | break; |
4004 | } |
4005 | |
4006 | vm_object_paging_end(src_object)({ ({ if (!((src_object)->paging_in_progress != 0)) Assert ("(src_object)->paging_in_progress != 0", "../vm/vm_map.c" , 4006); }); if (--(src_object)->paging_in_progress == 0) { ({ if ((src_object)->all_wanted & (1 << (2))) thread_wakeup_prim (((event_t)(((vm_offset_t) src_object) + (2))), ((boolean_t) 0 ), 0); (src_object)->all_wanted &= ~(1 << (2)); } ); } }); |
4007 | vm_object_unlock(src_object)((void)(&(src_object)->Lock)); |
4008 | } |
4009 | |
4010 | /* |
4011 | * If the source should be destroyed, do it now, since the |
4012 | * copy was successful. |
4013 | */ |
4014 | |
4015 | if (src_destroy) { |
4016 | (void) vm_map_delete(src_map, src_start, src_end); |
4017 | } |
4018 | } |
4019 | else { |
4020 | /* |
4021 | * !steal_pages leaves busy pages in the map. |
4022 | * This will cause src_destroy to hang. Use |
4023 | * a continuation to prevent this. |
4024 | */ |
4025 | if (src_destroy && !vm_map_copy_has_cont(copy)(((copy)->c_u.c_p.cont) != (kern_return_t (*)()) 0)) { |
4026 | cont_args = (vm_map_copyin_args_t) |
4027 | kalloc(sizeof(vm_map_copyin_args_data_t)); |
4028 | vm_map_reference(src_map); |
4029 | cont_args->map = src_map; |
4030 | cont_args->src_addr = (vm_offset_t) 0; |
4031 | cont_args->src_len = (vm_size_t) 0; |
4032 | cont_args->destroy_addr = src_start; |
4033 | cont_args->destroy_len = src_end - src_start; |
4034 | cont_args->steal_pages = FALSE((boolean_t) 0); |
4035 | |
4036 | copy->cpy_cont_argsc_u.c_p.cont_args = (char *) cont_args; |
4037 | copy->cpy_contc_u.c_p.cont = vm_map_copyin_page_list_cont; |
4038 | } |
4039 | |
4040 | } |
4041 | |
4042 | vm_map_unlock(src_map)lock_done(&(src_map)->lock); |
4043 | |
4044 | *copy_result = copy; |
4045 | return(result); |
4046 | |
4047 | error: |
4048 | vm_map_unlock(src_map)lock_done(&(src_map)->lock); |
4049 | vm_map_copy_discard(copy); |
4050 | return(result); |
4051 | } |
4052 | |
4053 | /* |
4054 | * vm_map_fork: |
4055 | * |
4056 | * Create and return a new map based on the old |
4057 | * map, according to the inheritance values on the |
4058 | * regions in that map. |
4059 | * |
4060 | * The source map must not be locked. |
4061 | */ |
4062 | vm_map_t vm_map_fork(vm_map_t old_map)
4063 | {
4064 | 	vm_map_t	new_map;
4065 | 	vm_map_entry_t	old_entry;
4066 | 	vm_map_entry_t	new_entry;
4067 | 	pmap_t		new_pmap = pmap_create((vm_size_t) 0);
4068 | 	vm_size_t	new_size = 0;
4069 | 	vm_size_t	entry_size;
4070 | 	vm_object_t	object;
4071 | 
4072 | 	vm_map_lock(old_map);
4073 | 
4074 | 	new_map = vm_map_create(new_pmap,
4075 | 			old_map->min_offset,
4076 | 			old_map->max_offset,
4077 | 			old_map->hdr.entries_pageable);
4078 | 
4079 | 	for (
4080 | 	    old_entry = vm_map_first_entry(old_map);
4081 | 	    old_entry != vm_map_to_entry(old_map);
4082 | 	    ) {
4083 | 		if (old_entry->is_sub_map)
4084 | 			panic("vm_map_fork: encountered a submap");
4085 | 
4086 | 		entry_size = (old_entry->vme_end - old_entry->vme_start);
4087 | 
4088 | 		switch (old_entry->inheritance) {
4089 | 		case VM_INHERIT_NONE:
4090 | 			break;
4091 | 
4092 | 		case VM_INHERIT_SHARE:
4093 | 			/*
4094 | 			 * New sharing code. New map entry
4095 | 			 * references original object. Temporary
4096 | 			 * objects use asynchronous copy algorithm for
4097 | 			 * future copies. First make sure we have
4098 | 			 * the right object. If we need a shadow,
4099 | 			 * or someone else already has one, then
4100 | 			 * make a new shadow and share it.
4101 | 			 */
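			/*
			 * Concretely, the test below takes a fresh shadow when
			 * the entry is still marked needs_copy, when the object
			 * is already shadowed by another mapping, or when a
			 * temporary, unshared object extends beyond the range
			 * covered by this entry.
			 */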
4102 | |
4103 | 			object = old_entry->object.vm_object;
4104 | 			if (object == VM_OBJECT_NULL) {
4105 | 				object = vm_object_allocate(
4106 | 					(vm_size_t)(old_entry->vme_end -
4107 | 						    old_entry->vme_start));
4108 | 				old_entry->offset = 0;
4109 | 				old_entry->object.vm_object = object;
4110 | 				assert(!old_entry->needs_copy);
4111 | 			}
4112 | 			else if (old_entry->needs_copy || object->shadowed ||
4113 | 			    (object->temporary && !old_entry->is_shared &&
4114 | 			     object->size > (vm_size_t)(old_entry->vme_end -
4115 | 						old_entry->vme_start))) {
4116 | 
4117 | 				assert(object->temporary);
4118 | 				assert(!(object->shadowed && old_entry->is_shared));
4119 | 				vm_object_shadow(
4120 | 					&old_entry->object.vm_object,
4121 | 					&old_entry->offset,
4122 | 					(vm_size_t) (old_entry->vme_end -
4123 | 						     old_entry->vme_start));
4124 | 
4125 | 				/*
4126 | 				 * If we're making a shadow for other than
4127 | 				 * copy on write reasons, then we have
4128 | 				 * to remove write permission.
4129 | 				 */
4130 | 
4131 | 				if (!old_entry->needs_copy &&
4132 | 				    (old_entry->protection & VM_PROT_WRITE)) {
4133 | 					pmap_protect(vm_map_pmap(old_map),
4134 | 						     old_entry->vme_start,
4135 | 						     old_entry->vme_end,
4136 | 						     old_entry->protection &
4137 | 						     ~VM_PROT_WRITE);
4138 | 				}
4139 | 				old_entry->needs_copy = FALSE;
4140 | 				object = old_entry->object.vm_object;
4141 | 			}
4142 | 
4143 | 			/*
4144 | 			 * Set use_shared_copy to indicate that
4145 | 			 * object must use shared (delayed) copy-on
4146 | 			 * write. This is ignored for permanent objects.
4147 | 			 * Bump the reference count for the new entry
4148 | 			 */
4149 | 
4150 | 			vm_object_lock(object);
4151 | 			object->use_shared_copy = TRUE;
4152 | 			object->ref_count++;
4153 | 			vm_object_unlock(object);
4154 | 
4155 | 			new_entry = vm_map_entry_create(new_map);
4156 | 
4157 | 			if (old_entry->projected_on != 0) {
4158 | 				/*
4159 | 				 * If entry is projected buffer, clone the
4160 | 				 * entry exactly.
4161 | 				 */
4162 | 
4163 | 				vm_map_entry_copy_full(new_entry, old_entry);
4164 | 
4165 | 			} else {
4166 | 				/*
4167 | 				 * Clone the entry, using object ref from above.
4168 | 				 * Mark both entries as shared.
4169 | 				 */
4170 | 
4171 | 				vm_map_entry_copy(new_entry, old_entry);
4172 | 				old_entry->is_shared = TRUE;
4173 | 				new_entry->is_shared = TRUE;
4174 | 			}
4175 | 
4176 | 			/*
4177 | 			 * Insert the entry into the new map -- we
4178 | 			 * know we're inserting at the end of the new
4179 | 			 * map.
4180 | 			 */
4181 | 
4182 | 			vm_map_entry_link(
4183 | 				new_map,
4184 | 				vm_map_last_entry(new_map),
4185 | 				new_entry);
4186 | 
4187 | 			/*
4188 | 			 * Update the physical map
4189 | 			 */
4190 | 
4191 | 			pmap_copy(new_map->pmap, old_map->pmap,
4192 | 				  new_entry->vme_start,
4193 | 				  entry_size,
4194 | 				  old_entry->vme_start);
4195 | 
4196 | 			new_size += entry_size;
4197 | 			break;
4198 | 
4199 | 		case VM_INHERIT_COPY:
4200 | 			if (old_entry->wired_count == 0) {
4201 | 				boolean_t	src_needs_copy;
4202 | 				boolean_t	new_entry_needs_copy;
4203 | 
4204 | 				new_entry = vm_map_entry_create(new_map);
4205 | 				vm_map_entry_copy(new_entry, old_entry);
4206 | 
4207 | 				if (vm_object_copy_temporary(
4208 | 					&new_entry->object.vm_object,
4209 | 					&new_entry->offset,
4210 | 					&src_needs_copy,
4211 | 					&new_entry_needs_copy)) {
4212 | 
4213 | 					/*
4214 | 					 * Handle copy-on-write obligations
4215 | 					 */
4216 | 
4217 | 					if (src_needs_copy && !old_entry->needs_copy) {
4218 | 						vm_object_pmap_protect(
4219 | 							old_entry->object.vm_object,
4220 | 							old_entry->offset,
4221 | 							entry_size,
4222 | 							(old_entry->is_shared ?
4223 | 								PMAP_NULL :
4224 | 								old_map->pmap),
4225 | 							old_entry->vme_start,
4226 | 							old_entry->protection &
4227 | 								~VM_PROT_WRITE);
4228 | 
4229 | 						old_entry->needs_copy = TRUE;
4230 | 					}
4231 | 
4232 | 					new_entry->needs_copy = new_entry_needs_copy;
4233 | 
4234 | 					/*
4235 | 					 * Insert the entry at the end
4236 | 					 * of the map.
4237 | 					 */
4238 | 
4239 | 					vm_map_entry_link(new_map,
4240 | 						vm_map_last_entry(new_map),
4241 | 						new_entry);
4242 | 
4243 | 
4244 | 					new_size += entry_size;
4245 | 					break;
4246 | 				}
4247 | 
4248 | 				vm_map_entry_dispose(new_map, new_entry);
4249 | 			}
4250 | 
4251 | 			/* INNER BLOCK (copy cannot be optimized) */ {
4252 | 
4253 | 			vm_offset_t	start = old_entry->vme_start;
4254 | 			vm_map_copy_t	copy;
4255 | 			vm_map_entry_t	last = vm_map_last_entry(new_map);
4256 | 
4257 | 			vm_map_unlock(old_map);
4258 | 			if (vm_map_copyin(old_map,
4259 | 					  start,
4260 | 					  entry_size,
4261 | 					  FALSE,
4262 | 					  &copy)
4263 | 			    != KERN_SUCCESS) {
4264 | 				vm_map_lock(old_map);
4265 | 				if (!vm_map_lookup_entry(old_map, start, &last))
4266 | 					last = last->vme_next;
4267 | 				old_entry = last;
4268 | 				/*
4269 | 				 * For some error returns, want to
4270 | 				 * skip to the next element.
4271 | 				 */
4272 | 
4273 | 				continue;
4274 | 			}
4275 | 
4276 | 			/*
4277 | 			 * Insert the copy into the new map
4278 | 			 */
4279 | 
4280 | 			vm_map_copy_insert(new_map, last, copy);
4281 | 			new_size += entry_size;
4282 | 
4283 | 			/*
4284 | 			 * Pick up the traversal at the end of
4285 | 			 * the copied region.
4286 | 			 */
4287 | 
4288 | 			vm_map_lock(old_map);
4289 | 			start += entry_size;
4290 | 			if (!vm_map_lookup_entry(old_map, start, &last))
4291 | 				last = last->vme_next;
4292 | 			else
4293 | 				vm_map_clip_start(old_map, last, start);
4294 | 			old_entry = last;
4295 | 
4296 | 			continue;
4297 | 			/* INNER BLOCK (copy cannot be optimized) */ }
4298 | 		}
4299 | 		old_entry = old_entry->vme_next;
4300 | 	}
4301 | 
4302 | 	new_map->size = new_size;
4303 | 	vm_map_unlock(old_map);
4304 | 
4305 | 	return(new_map);
4306 | }
4307 | |
4308 | /* |
4309 | * vm_map_lookup: |
4310 | * |
4311 | * Finds the VM object, offset, and |
4312 | * protection for a given virtual address in the |
4313 | * specified map, assuming a page fault of the |
4314 | * type specified. |
4315 | * |
4316 | * Returns the (object, offset, protection) for |
4317 | * this address, whether it is wired down, and whether |
4318 | * this map has the only reference to the data in question. |
4319 | * In order to later verify this lookup, a "version" |
4320 | * is returned. |
4321 | * |
4322 | * The map should not be locked; it will not be |
4323 | * locked on exit. In order to guarantee the |
4324 | * existence of the returned object, it is returned |
4325 | * locked. |
4326 | * |
4327 | * If a lookup is requested with "write protection" |
4328 | * specified, the map may be changed to perform virtual |
4329 | * copying operations, although the data referenced will |
4330 | * remain the same. |
4331 | */ |
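/*
 * Illustrative caller pattern (a sketch, not taken from this file): the
 * page-fault path keeps the returned version so it can detect concurrent
 * map changes after resolving the fault without the map lock held:
 *
 *	kr = vm_map_lookup(&map, vaddr, VM_PROT_READ, &version,
 *			   &object, &offset, &prot, &wired);
 *	if (kr != KERN_SUCCESS)
 *		return kr;
 *	... resolve the fault against (object, offset); this may block ...
 *	if (!vm_map_verify(map, &version))
 *		goto retry;			(the map changed; look it up again)
 *	... enter the page ...
 *	vm_map_verify_done(map, &version);	(drops the read lock)
 */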
4332 | kern_return_t vm_map_lookup(
4333 | 	vm_map_t		*var_map,	/* IN/OUT */
4334 | 	vm_offset_t		vaddr,
4335 | 	vm_prot_t		fault_type,
4336 | 
4337 | 	vm_map_version_t	*out_version,	/* OUT */
4338 | 	vm_object_t		*object,	/* OUT */
4339 | 	vm_offset_t		*offset,	/* OUT */
4340 | 	vm_prot_t		*out_prot,	/* OUT */
4341 | 	boolean_t		*wired)		/* OUT */
4342 | {
4343 | 	vm_map_entry_t	entry;
4344 | 	vm_map_t	map = *var_map;
4345 | 	vm_prot_t	prot;
4346 | 
4347 | 	RetryLookup: ;
4348 | 
4349 | 	/*
4350 | 	 * Lookup the faulting address.
4351 | 	 */
4352 | 
4353 | 	vm_map_lock_read(map);
4354 | 
4355 | #define	RETURN(why) \
4356 | 	{ \
4357 | 		vm_map_unlock_read(map); \
4358 | 		return(why); \
4359 | 	}
4360 | 
4361 | 	/*
4362 | 	 * If the map has an interesting hint, try it before calling
4363 | 	 * full blown lookup routine.
4364 | 	 */
4365 | 
4366 | 	simple_lock(&map->hint_lock);
4367 | 	entry = map->hint;
4368 | 	simple_unlock(&map->hint_lock);
4369 | 
4370 | 	if ((entry == vm_map_to_entry(map)) ||
4371 | 	    (vaddr < entry->vme_start) || (vaddr >= entry->vme_end)) {
4372 | 		vm_map_entry_t	tmp_entry;
4373 | 
4374 | 		/*
4375 | 		 * Entry was either not a valid hint, or the vaddr
4376 | 		 * was not contained in the entry, so do a full lookup.
4377 | 		 */
4378 | 		if (!vm_map_lookup_entry(map, vaddr, &tmp_entry))
4379 | 			RETURN(KERN_INVALID_ADDRESS);
4380 | 
4381 | 		entry = tmp_entry;
4382 | 	}
4383 | 
4384 | 	/*
4385 | 	 * Handle submaps.
4386 | 	 */
4387 | 
4388 | 	if (entry->is_sub_map) {
4389 | 		vm_map_t	old_map = map;
4390 | 
4391 | 		*var_map = map = entry->object.sub_map;
4392 | 		vm_map_unlock_read(old_map);
4393 | 		goto RetryLookup;
4394 | 	}
4395 | 
4396 | 	/*
4397 | 	 * Check whether this task is allowed to have
4398 | 	 * this page.
4399 | 	 */
4400 | 
4401 | 	prot = entry->protection;
4402 | 
4403 | 	if ((fault_type & (prot)) != fault_type) {
4404 | 		if ((prot & VM_PROT_NOTIFY) && (fault_type & VM_PROT_WRITE)) {
4405 | 			RETURN(KERN_WRITE_PROTECTION_FAILURE);
4406 | 		} else {
4407 | 			RETURN(KERN_PROTECTION_FAILURE);
4408 | 		}
4409 | 	}
4410 | 
4411 | 	/*
4412 | 	 * If this page is not pageable, we have to get
4413 | 	 * it for all possible accesses.
4414 | 	 */
4415 | 
4416 | 	if ((*wired = (entry->wired_count != 0)))
4417 | 		prot = fault_type = entry->protection;
4418 | 
4419 | 	/*
4420 | 	 * If the entry was copy-on-write, we either shadow the object now or demote the allowed access.
4421 | 	 */
4422 | |
4423 | 	if (entry->needs_copy) {
4424 | 		/*
4425 | 		 * If we want to write the page, we may as well
4426 | 		 * handle that now since we've got the map locked.
4427 | 		 *
4428 | 		 * If we don't need to write the page, we just
4429 | 		 * demote the permissions allowed.
4430 | 		 */
4431 | 
4432 | 		if (fault_type & VM_PROT_WRITE) {
4433 | 			/*
4434 | 			 * Make a new object, and place it in the
4435 | 			 * object chain. Note that no new references
4436 | 			 * have appeared -- one just moved from the
4437 | 			 * map to the new object.
4438 | 			 */
4439 | 
4440 | 			if (vm_map_lock_read_to_write(map)) {
4441 | 				goto RetryLookup;
4442 | 			}
4443 | 			map->timestamp++;
4444 | 
4445 | 			vm_object_shadow(
4446 | 				&entry->object.vm_object,
4447 | 				&entry->offset,
4448 | 				(vm_size_t) (entry->vme_end - entry->vme_start));
4449 | 
4450 | 			entry->needs_copy = FALSE;
4451 | 
4452 | 			vm_map_lock_write_to_read(map);
4453 | 		}
4454 | 		else {
4455 | 			/*
4456 | 			 * We're attempting to read a copy-on-write
4457 | 			 * page -- don't allow writes.
4458 | 			 */
4459 | 
4460 | 			prot &= (~VM_PROT_WRITE);
4461 | 		}
4462 | 	}
4463 | 
4464 | 	/*
4465 | 	 * Create an object if necessary.
4466 | 	 */
4467 | 	if (entry->object.vm_object == VM_OBJECT_NULL) {
4468 | 
4469 | 		if (vm_map_lock_read_to_write(map)) {
4470 | 			goto RetryLookup;
4471 | 		}
4472 | 
4473 | 		entry->object.vm_object = vm_object_allocate(
4474 | 			(vm_size_t)(entry->vme_end - entry->vme_start));
4475 | 		entry->offset = 0;
4476 | 		vm_map_lock_write_to_read(map);
4477 | 	}
4478 | 
4479 | 	/*
4480 | 	 * Return the object/offset from this entry. If the entry
4481 | 	 * was copy-on-write or empty, it has been fixed up. Also
4482 | 	 * return the protection.
4483 | 	 */
4484 | 
4485 | 	*offset = (vaddr - entry->vme_start) + entry->offset;
4486 | 	*object = entry->object.vm_object;
4487 | 	*out_prot = prot;
4488 | 
4489 | 	/*
4490 | 	 * Lock the object to prevent it from disappearing
4491 | 	 */
4492 | 
4493 | 	vm_object_lock(*object);
4494 | 
4495 | 	/*
4496 | 	 * Save the version number and unlock the map.
4497 | 	 */
4498 | 
4499 | 	out_version->main_timestamp = map->timestamp;
4500 | 
4501 | 	RETURN(KERN_SUCCESS);
4502 | 
4503 | #undef	RETURN
4504 | }
4505 | |
4506 | /* |
4507 | * vm_map_verify: |
4508 | * |
4509 | * Verifies that the map in question has not changed |
4510 | * since the given version. If successful, the map |
4511 | * will not change until vm_map_verify_done() is called. |
4512 | */ |
4513 | boolean_t vm_map_verify(
4514 | 	vm_map_t		map,
4515 | 	vm_map_version_t	*version)	/* REF */
4516 | {
4517 | 	boolean_t	result;
4518 | 
4519 | 	vm_map_lock_read(map);
4520 | 	result = (map->timestamp == version->main_timestamp);
4521 | 
4522 | 	if (!result)
4523 | 		vm_map_unlock_read(map);
4524 | 
4525 | 	return(result);
4526 | }
4527 | |
4528 | /* |
4529 | * vm_map_verify_done: |
4530 | * |
4531 | * Releases locks acquired by a vm_map_verify. |
4532 | * |
4533 | * This is now a macro in vm/vm_map.h. It does a |
4534 | * vm_map_unlock_read on the map. |
4535 | */ |
4536 | |
4537 | /* |
4538 | * vm_region: |
4539 | * |
4540 | * User call to obtain information about a region in |
4541 | * a task's address map. |
4542 | */ |
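/*
 * Illustrative sketch (not from the original source): since vm_region
 * reports the entry containing, or following, the address handed in and
 * rewrites *address and *size to describe it, a whole map can be walked
 * by advancing the address past each region:
 *
 *	addr = 0;
 *	while (vm_region(map, &addr, &size, &prot, &max_prot, &inh,
 *			 &shared, &name, &off) == KERN_SUCCESS) {
 *		... one region: [addr, addr + size) ...
 *		addr += size;
 *	}
 */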
4543 | |
4544 | kern_return_t vm_region(
4545 | 	vm_map_t	map,
4546 | 	vm_offset_t	*address,		/* IN/OUT */
4547 | 	vm_size_t	*size,			/* OUT */
4548 | 	vm_prot_t	*protection,		/* OUT */
4549 | 	vm_prot_t	*max_protection,	/* OUT */
4550 | 	vm_inherit_t	*inheritance,		/* OUT */
4551 | 	boolean_t	*is_shared,		/* OUT */
4552 | 	ipc_port_t	*object_name,		/* OUT */
4553 | 	vm_offset_t	*offset_in_object)	/* OUT */
4554 | {
4555 | 	vm_map_entry_t	tmp_entry;
4556 | 	vm_map_entry_t	entry;
4557 | 	vm_offset_t	tmp_offset;
4558 | 	vm_offset_t	start;
4559 | 
4560 | 	if (map == VM_MAP_NULL)
4561 | 		return(KERN_INVALID_ARGUMENT);
4562 | 
4563 | 	start = *address;
4564 | 
4565 | 	vm_map_lock_read(map);
4566 | 	if (!vm_map_lookup_entry(map, start, &tmp_entry)) {
4567 | 		if ((entry = tmp_entry->vme_next) == vm_map_to_entry(map)) {
4568 | 			vm_map_unlock_read(map);
4569 | 			return(KERN_NO_SPACE);
4570 | 		}
4571 | 	} else {
4572 | 		entry = tmp_entry;
4573 | 	}
4574 | 
4575 | 	start = entry->vme_start;
4576 | 	*protection = entry->protection;
4577 | 	*max_protection = entry->max_protection;
4578 | 	*inheritance = entry->inheritance;
4579 | 	*address = start;
4580 | 	*size = (entry->vme_end - start);
4581 | 
4582 | 	tmp_offset = entry->offset;
4583 | 
4584 | 
4585 | 	if (entry->is_sub_map) {
4586 | 		*is_shared = FALSE;
4587 | 		*object_name = IP_NULL;
4588 | 		*offset_in_object = tmp_offset;
4589 | 	} else {
4590 | 		*is_shared = entry->is_shared;
4591 | 		*object_name = vm_object_name(entry->object.vm_object);
4592 | 		*offset_in_object = tmp_offset;
4593 | 	}
4594 | 
4595 | 	vm_map_unlock_read(map);
4596 | 
4597 | 	return(KERN_SUCCESS);
4598 | }
4599 | |
4600 | /* |
4601 | * Routine: vm_map_simplify |
4602 | * |
4603 | * Description: |
4604 | * Attempt to simplify the map representation in |
4605 | * the vicinity of the given starting address. |
4606 | * Note: |
4607 | * This routine is intended primarily to keep the |
4608 | * kernel maps more compact -- they generally don't |
4609 | * benefit from the "expand a map entry" technology |
4610 | * at allocation time because the adjacent entry |
4611 | * is often wired down. |
4612 | */ |
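/*
 * Sketch of the effect (illustrative values, not from the original source):
 * two neighbouring entries that map consecutive offsets of the same object
 * with identical attributes collapse into one,
 *
 *	before:	[0x1000-0x2000, obj X, off 0x0]	[0x2000-0x3000, obj X, off 0x1000]
 *	after:	[0x1000-0x3000, obj X, off 0x0]
 *
 * and the reference the second entry held on X is released.
 */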
4613 | void vm_map_simplify(
4614 | 	vm_map_t	map,
4615 | 	vm_offset_t	start)
4616 | {
4617 | 	vm_map_entry_t	this_entry;
4618 | 	vm_map_entry_t	prev_entry;
4619 | 
4620 | 	vm_map_lock(map);
4621 | 	if (
4622 | 		(vm_map_lookup_entry(map, start, &this_entry)) &&
4623 | 		((prev_entry = this_entry->vme_prev) != vm_map_to_entry(map)) &&
4624 | 
4625 | 		(prev_entry->vme_end == start) &&
4626 | 
4627 | 		(prev_entry->is_shared == FALSE) &&
4628 | 		(prev_entry->is_sub_map == FALSE) &&
4629 | 
4630 | 		(this_entry->is_shared == FALSE) &&
4631 | 		(this_entry->is_sub_map == FALSE) &&
4632 | 
4633 | 		(prev_entry->inheritance == this_entry->inheritance) &&
4634 | 		(prev_entry->protection == this_entry->protection) &&
4635 | 		(prev_entry->max_protection == this_entry->max_protection) &&
4636 | 		(prev_entry->wired_count == this_entry->wired_count) &&
4637 | 		(prev_entry->user_wired_count == this_entry->user_wired_count) &&
4638 | 
4639 | 		(prev_entry->needs_copy == this_entry->needs_copy) &&
4640 | 
4641 | 		(prev_entry->object.vm_object == this_entry->object.vm_object) &&
4642 | 		((prev_entry->offset + (prev_entry->vme_end - prev_entry->vme_start))
4643 | 		 == this_entry->offset) &&
4644 | 		(prev_entry->projected_on == 0) &&
4645 | 		(this_entry->projected_on == 0)
4646 | 	) {
4647 | 		if (map->first_free == this_entry)
4648 | 			map->first_free = prev_entry;
4649 | 
4650 | 		SAVE_HINT(map, prev_entry);
4651 | 		vm_map_entry_unlink(map, this_entry);
4652 | 		prev_entry->vme_end = this_entry->vme_end;
4653 | 		vm_object_deallocate(this_entry->object.vm_object);
4654 | 		vm_map_entry_dispose(map, this_entry);
4655 | 	}
4656 | 	vm_map_unlock(map);
4657 | }
4658 | |
4659 | |
4660 | /* |
4661 | * Routine: vm_map_machine_attribute |
4662 | * Purpose: |
4663 | * Provide machine-specific attributes to mappings, |
4664 | * such as cachability etc. for machines that provide |
4665 | * them. NUMA architectures and machines with big/strange |
4666 | * caches will use this. |
4667 | * Note: |
4668 | * Responsibilities for locking and checking are handled here, |
4669 | * everything else in the pmap module. If any non-volatile |
4670 | * information must be kept, the pmap module should handle |
4671 | * it itself. [This assumes that attributes do not |
4672 | * need to be inherited, which seems ok to me] |
4673 | */ |
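/*
 * Illustrative call (a sketch; the attribute and value names are the
 * generic ones from <mach/vm_attributes.h>, not anything added here):
 *
 *	vm_machine_attribute_val_t val = MATTR_VAL_CACHE_FLUSH;
 *	kr = vm_map_machine_attribute(map, addr, PAGE_SIZE, MATTR_CACHE, &val);
 *
 * Only the bounds check against the map is done here; the machine-dependent
 * work is left to pmap_attribute().
 */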
4674 | kern_return_t vm_map_machine_attribute(
4675 | 	vm_map_t			map,
4676 | 	vm_offset_t			address,
4677 | 	vm_size_t			size,
4678 | 	vm_machine_attribute_t		attribute,
4679 | 	vm_machine_attribute_val_t*	value)		/* IN/OUT */
4680 | {
4681 | 	kern_return_t	ret;
4682 | 
4683 | 	if (address < vm_map_min(map) ||
4684 | 	    (address + size) > vm_map_max(map))
4685 | 		return KERN_INVALID_ARGUMENT;
4686 | 
4687 | 	vm_map_lock(map);
4688 | 
4689 | 	ret = pmap_attribute(map->pmap, address, size, attribute, value);
4690 | 
4691 | 	vm_map_unlock(map);
4692 | 
4693 | 	return ret;
4694 | }
4695 | |
4696 | |
4697 | #if	MACH_KDB
4698 | 
4699 | #define	printf	kdbprintf
4700 | |
4701 | /* |
4702 | * vm_map_print: [ debug ] |
4703 | */ |
4704 | void vm_map_print(vm_map_t map)
4705 | {
4706 | 	vm_map_entry_t	entry;
4707 | 
4708 | 	iprintf("Task map 0x%X: pmap=0x%X,",
4709 | 		(vm_offset_t) map, (vm_offset_t) (map->pmap));
4710 | 	printf("ref=%d,nentries=%d,", map->ref_count, map->hdr.nentries);
4711 | 	printf("version=%d\n", map->timestamp);
4712 | 	indent += 2;
4713 | 	for (entry = vm_map_first_entry(map);
4714 | 	     entry != vm_map_to_entry(map);
4715 | 	     entry = entry->vme_next) {
4716 | 		static char *inheritance_name[3] = { "share", "copy", "none"};
4717 | 
4718 | 		iprintf("map entry 0x%X: ", (vm_offset_t) entry);
4719 | 		printf("start=0x%X, end=0x%X, ",
4720 | 			(vm_offset_t) entry->vme_start, (vm_offset_t) entry->vme_end);
4721 | 		printf("prot=%X/%X/%s, ",
4722 | 			entry->protection,
4723 | 			entry->max_protection,
4724 | 			inheritance_name[entry->inheritance]);
4725 | 		if (entry->wired_count != 0) {
4726 | 			printf("wired(");
4727 | 			if (entry->user_wired_count != 0)
4728 | 				printf("u");
4729 | 			if (entry->wired_count >
4730 | 			    ((entry->user_wired_count == 0) ? 0 : 1))
4731 | 				printf("k");
4732 | 			printf(") ");
4733 | 		}
4734 | 		if (entry->in_transition) {
4735 | 			printf("in transition");
4736 | 			if (entry->needs_wakeup)
4737 | 				printf("(wake request)");
4738 | 			printf(", ");
4739 | 		}
4740 | 		if (entry->is_sub_map) {
4741 | 			printf("submap=0x%X, offset=0x%X\n",
4742 | 				(vm_offset_t) entry->object.sub_map,
4743 | 				(vm_offset_t) entry->offset);
4744 | 		} else {
4745 | 			printf("object=0x%X, offset=0x%X",
4746 | 				(vm_offset_t) entry->object.vm_object,
4747 | 				(vm_offset_t) entry->offset);
4748 | 			if (entry->is_shared)
4749 | 				printf(", shared");
4750 | 			if (entry->needs_copy)
4751 | 				printf(", copy needed");
4752 | 			printf("\n");
4753 | 
4754 | 			if ((entry->vme_prev == vm_map_to_entry(map)) ||
4755 | 			    (entry->vme_prev->object.vm_object != entry->object.vm_object)) {
4756 | 				indent += 2;
4757 | 				vm_object_print(entry->object.vm_object);
4758 | 				indent -= 2;
4759 | 			}
4760 | 		}
4761 | 	}
4762 | 	indent -= 2;
4763 | }
4764 | |
4765 | /* |
4766 | * Routine: vm_map_copy_print |
4767 | * Purpose: |
4768 | * Pretty-print a copy object for ddb. |
4769 | */ |
4770 | |
4771 | void vm_map_copy_print(copy)
4772 | 	const vm_map_copy_t	copy;
4773 | {
4774 | 	int	i, npages;
4775 | 
4776 | 	printf("copy object 0x%x\n", copy);
4777 | 
4778 | 	indent += 2;
4779 | 
4780 | 	iprintf("type=%d", copy->type);
4781 | 	switch (copy->type) {
4782 | 	case VM_MAP_COPY_ENTRY_LIST:
4783 | 		printf("[entry_list]");
4784 | 		break;
4785 | 
4786 | 	case VM_MAP_COPY_OBJECT:
4787 | 		printf("[object]");
4788 | 		break;
4789 | 
4790 | 	case VM_MAP_COPY_PAGE_LIST:
4791 | 		printf("[page_list]");
4792 | 		break;
4793 | 
4794 | 	default:
4795 | 		printf("[bad type]");
4796 | 		break;
4797 | 	}
4798 | 	printf(", offset=0x%x", copy->offset);
4799 | 	printf(", size=0x%x\n", copy->size);
4800 | 
4801 | 	switch (copy->type) {
4802 | 	case VM_MAP_COPY_ENTRY_LIST:
4803 | 		/* XXX add stuff here */
4804 | 		break;
4805 | 
4806 | 	case VM_MAP_COPY_OBJECT:
4807 | 		iprintf("object=0x%x\n", copy->cpy_object);
4808 | 		break;
4809 | 
4810 | 	case VM_MAP_COPY_PAGE_LIST:
4811 | 		iprintf("npages=%d", copy->cpy_npages);
4812 | 		printf(", cont=%x", copy->cpy_cont);
4813 | 		printf(", cont_args=%x\n", copy->cpy_cont_args);
4814 | 		if (copy->cpy_npages < 0) {
4815 | 			npages = 0;
4816 | 		} else if (copy->cpy_npages > VM_MAP_COPY_PAGE_LIST_MAX) {
4817 | 			npages = VM_MAP_COPY_PAGE_LIST_MAX;
4818 | 		} else {
4819 | 			npages = copy->cpy_npages;
4820 | 		}
4821 | 		iprintf("copy->cpy_page_list[0..%d] = {", npages);
4822 | 		for (i = 0; i < npages - 1; i++) {
4823 | 			printf("0x%x, ", copy->cpy_page_list[i]);
4824 | 		}
4825 | 		if (npages > 0) {
4826 | 			printf("0x%x", copy->cpy_page_list[npages - 1]);
4827 | 		}
4828 | 		printf("}\n");
4829 | 		break;
4830 | 	}
4831 | 
4832 | 	indent -= 2;
4833 | }
4834 | #endif /* MACH_KDB */ |