File: obj-scan-build/../vm/vm_map.c
Location: line 2983, column 7
Description: Dereference of null pointer
1 | /* | |||
2 | * Mach Operating System | |||
3 | * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University. | |||
4 | * Copyright (c) 1993,1994 The University of Utah and | |||
5 | * the Computer Systems Laboratory (CSL). | |||
6 | * All rights reserved. | |||
7 | * | |||
8 | * Permission to use, copy, modify and distribute this software and its | |||
9 | * documentation is hereby granted, provided that both the copyright | |||
10 | * notice and this permission notice appear in all copies of the | |||
11 | * software, derivative works or modified versions, and any portions | |||
12 | * thereof, and that both notices appear in supporting documentation. | |||
13 | * | |||
14 | * CARNEGIE MELLON, THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF | |||
15 | * THIS SOFTWARE IN ITS "AS IS" CONDITION, AND DISCLAIM ANY LIABILITY | |||
16 | * OF ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF | |||
17 | * THIS SOFTWARE. | |||
18 | * | |||
19 | * Carnegie Mellon requests users of this software to return to | |||
20 | * | |||
21 | * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU | |||
22 | * School of Computer Science | |||
23 | * Carnegie Mellon University | |||
24 | * Pittsburgh PA 15213-3890 | |||
25 | * | |||
26 | * any improvements or extensions that they make and grant Carnegie Mellon | |||
27 | * the rights to redistribute these changes. | |||
28 | */ | |||
29 | /* | |||
30 | * File: vm/vm_map.c | |||
31 | * Author: Avadis Tevanian, Jr., Michael Wayne Young | |||
32 | * Date: 1985 | |||
33 | * | |||
34 | * Virtual memory mapping module. | |||
35 | */ | |||
36 | ||||
37 | #include <kern/printf.h> | |||
38 | #include <mach/kern_return.h> | |||
39 | #include <mach/port.h> | |||
40 | #include <mach/vm_attributes.h> | |||
41 | #include <mach/vm_param.h> | |||
42 | #include <kern/assert.h> | |||
43 | #include <kern/debug.h> | |||
44 | #include <kern/kalloc.h> | |||
45 | #include <kern/rbtree.h> | |||
46 | #include <kern/slab.h> | |||
47 | #include <vm/pmap.h> | |||
48 | #include <vm/vm_fault.h> | |||
49 | #include <vm/vm_map.h> | |||
50 | #include <vm/vm_object.h> | |||
51 | #include <vm/vm_page.h> | |||
52 | #include <vm/vm_resident.h> | |||
53 | #include <vm/vm_kern.h> | |||
54 | #include <ipc/ipc_port.h> | |||
55 | ||||
56 | #if MACH_KDB | |||
57 | #include <ddb/db_output.h> | |||
58 | #include <vm/vm_print.h> | |||
59 | #endif /* MACH_KDB */ | |||
60 | ||||
61 | ||||
62 | /* Forward declarations */ | |||
63 | kern_return_t vm_map_delete( | |||
64 | vm_map_t map, | |||
65 | vm_offset_t start, | |||
66 | vm_offset_t end); | |||
67 | ||||
68 | kern_return_t vm_map_copyout_page_list( | |||
69 | vm_map_t dst_map, | |||
70 | vm_offset_t *dst_addr, /* OUT */ | |||
71 | vm_map_copy_t copy); | |||
72 | ||||
73 | void vm_map_copy_page_discard (vm_map_copy_t copy); | |||
74 | ||||
75 | /* | |||
76 | * Macros to copy a vm_map_entry. We must be careful to correctly | |||
77 | * manage the wired page count. vm_map_entry_copy() creates a new | |||
78 | * map entry to the same memory - the wired count in the new entry | |||
79 | * must be set to zero. vm_map_entry_copy_full() creates a new | |||
80 | * entry that is identical to the old entry. This preserves the | |||
81 | * wire count; it's used for map splitting and cache changing in | |||
82 | * vm_map_copyout. | |||
83 | */ | |||
84 | #define vm_map_entry_copy(NEW,OLD)({ *(NEW) = *(OLD); (NEW)->is_shared = ((boolean_t) 0); (NEW )->needs_wakeup = ((boolean_t) 0); (NEW)->in_transition = ((boolean_t) 0); (NEW)->wired_count = 0; (NEW)->user_wired_count = 0; }) \({ | |||
85 | MACRO_BEGIN({ \ | |||
86 | *(NEW) = *(OLD); \ | |||
87 | (NEW)->is_shared = FALSE((boolean_t) 0); \ | |||
88 | (NEW)->needs_wakeup = FALSE((boolean_t) 0); \ | |||
89 | (NEW)->in_transition = FALSE((boolean_t) 0); \ | |||
90 | (NEW)->wired_count = 0; \ | |||
91 | (NEW)->user_wired_count = 0; \}) | |||
92 | MACRO_END}) | |||
93 | ||||
94 | #define vm_map_entry_copy_full(NEW,OLD)(*(NEW) = *(OLD)) (*(NEW) = *(OLD)) | |||
95 | ||||
96 | /* | |||
97 | * Virtual memory maps provide for the mapping, protection, | |||
98 | * and sharing of virtual memory objects. In addition, | |||
99 | * this module provides for an efficient virtual copy of | |||
100 | * memory from one map to another. | |||
101 | * | |||
102 | * Synchronization is required prior to most operations. | |||
103 | * | |||
104 | * Maps consist of an ordered doubly-linked list of simple | |||
105 | * entries; a hint and a red-black tree are used to speed up lookups. | |||
106 | * | |||
107 | * Sharing maps have been deleted from this version of Mach. | |||
108 | * All shared objects are now mapped directly into the respective | |||
109 | * maps. This requires a change in the copy on write strategy; | |||
110 | * the asymmetric (delayed) strategy is used for shared temporary | |||
111 | * objects instead of the symmetric (shadow) strategy. This is | |||
112 | * selected by the (new) use_shared_copy bit in the object. See | |||
113 | * vm_object_copy_temporary in vm_object.c for details. All maps | |||
114 | * are now "top level" maps (either task map, kernel map or submap | |||
115 | * of the kernel map). | |||
116 | * | |||
117 | * Since portions of maps are specified by start/end addresses, | |||
118 | * which may not align with existing map entries, all | |||
119 | * routines merely "clip" entries to these start/end values. | |||
120 | * [That is, an entry is split into two, bordering at a | |||
121 | * start or end value.] Note that these clippings may not | |||
122 | * always be necessary (as the two resulting entries are then | |||
123 | * not changed); however, the clipping is done for convenience. | |||
124 | * No attempt is currently made to "glue back together" two | |||
125 | * abutting entries. | |||
126 | * | |||
127 | * The symmetric (shadow) copy strategy implements virtual copy | |||
128 | * by copying VM object references from one map to | |||
129 | * another, and then marking both regions as copy-on-write. | |||
130 | * It is important to note that only one writeable reference | |||
131 | * to a VM object region exists in any map when this strategy | |||
132 | * is used -- this means that shadow object creation can be | |||
133 | * delayed until a write operation occurs. The asymmetric (delayed) | |||
134 | * strategy allows multiple maps to have writeable references to | |||
135 | * the same region of a vm object, and hence cannot delay creating | |||
136 | * its copy objects. See vm_object_copy_temporary() in vm_object.c. | |||
137 | * Copying of permanent objects is completely different; see | |||
138 | * vm_object_copy_strategically() in vm_object.c. | |||
139 | */ | |||
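To make the clipping discipline described above concrete, here is a minimal hypothetical sketch (not code from vm_map.c itself) of how a typical range routine in this module is structured; the operate() callback stands in for whatever the routine actually does to each entry (changing protection, wiring, deletion, and so on):

	static void example_range_operation(vm_map_t map, vm_offset_t start,
					    vm_offset_t end,
					    void (*operate)(vm_map_entry_t))
	{
		vm_map_entry_t entry;

		vm_map_lock(map);
		VM_MAP_RANGE_CHECK(map, start, end);

		if (vm_map_lookup_entry(map, start, &entry))
			vm_map_clip_start(map, entry, start);	/* entry now begins at start */
		else
			entry = entry->vme_next;		/* start lies in a hole */

		while ((entry != vm_map_to_entry(map)) && (entry->vme_start < end)) {
			vm_map_clip_end(map, entry, end);	/* last entry now ends at end */
			operate(entry);		/* entry lies wholly inside [start, end) */
			entry = entry->vme_next;
		}

		vm_map_unlock(map);
	}

Because both clip macros are no-ops when the boundary already coincides with an entry boundary, the loop body only ever sees entries that lie entirely within the requested range.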
140 | ||||
141 | struct kmem_cache vm_map_cache; /* cache for vm_map structures */ | |||
142 | struct kmem_cache vm_map_entry_cache; /* cache for vm_map_entry structures */ | |||
143 | struct kmem_cache vm_map_kentry_cache; /* cache for kernel entry structures */ | |||
144 | struct kmem_cache vm_map_copy_cache; /* cache for vm_map_copy structures */ | |||
145 | ||||
146 | boolean_t vm_map_lookup_entry(); /* forward declaration */ | |||
147 | ||||
148 | /* | |||
149 | * Placeholder object for submap operations. This object is dropped | |||
150 | * into the range by a call to vm_map_find, and removed when | |||
151 | * vm_map_submap creates the submap. | |||
152 | */ | |||
153 | ||||
154 | static struct vm_object vm_submap_object_store; | |||
155 | vm_object_t vm_submap_object = &vm_submap_object_store; | |||
156 | ||||
157 | /* | |||
158 | * vm_map_init: | |||
159 | * | |||
160 | * Initialize the vm_map module. Must be called before | |||
161 | * any other vm_map routines. | |||
162 | * | |||
163 | * Map and entry structures are allocated from caches -- we must | |||
164 | * initialize those caches. | |||
165 | * | |||
166 | * There are three caches of interest: | |||
167 | * | |||
168 | * vm_map_cache: used to allocate maps. | |||
169 | * vm_map_entry_cache: used to allocate map entries. | |||
170 | * vm_map_kentry_cache: used to allocate map entries for the kernel. | |||
171 | * | |||
172 | * Kernel map entries are allocated from a special cache, using a custom | |||
173 | * page allocation function to avoid recursion. It would be difficult | |||
174 | * (perhaps impossible) for the kernel to allocate more memory to an entry | |||
175 | * cache when it became empty since the very act of allocating memory | |||
176 | * implies the creation of a new entry. | |||
177 | */ | |||
178 | ||||
179 | vm_offset_t kentry_data; | |||
180 | vm_size_t kentry_data_size = KENTRY_DATA_SIZE(256*(1 << 12)); | |||
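(As the expanded default above shows, KENTRY_DATA_SIZE works out to 256 * (1 << 12) = 1,048,576 bytes, i.e. 1 MiB set aside for kernel map entries.)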
181 | ||||
182 | static vm_offset_t kentry_pagealloc(vm_size_t size) | |||
183 | { | |||
184 | vm_offset_t result; | |||
185 | ||||
186 | if (size > kentry_data_size) | |||
187 | panic("vm_map: kentry memory exhausted"); | |||
188 | ||||
189 | result = kentry_data; | |||
190 | kentry_data += size; | |||
191 | kentry_data_size -= size; | |||
192 | return result; | |||
193 | } | |||
194 | ||||
195 | void vm_map_init(void) | |||
196 | { | |||
197 | kmem_cache_init(&vm_map_cache, "vm_map", sizeof(struct vm_map), 0, | |||
198 | NULL((void *) 0), NULL((void *) 0), NULL((void *) 0), 0); | |||
199 | kmem_cache_init(&vm_map_entry_cache, "vm_map_entry", | |||
200 | sizeof(struct vm_map_entry), 0, NULL((void *) 0), NULL((void *) 0), NULL((void *) 0), 0); | |||
201 | kmem_cache_init(&vm_map_kentry_cache, "vm_map_kentry", | |||
202 | sizeof(struct vm_map_entry), 0, NULL((void *) 0), kentry_pagealloc, | |||
203 | NULL((void *) 0), KMEM_CACHE_NOCPUPOOL0x1 | KMEM_CACHE_NOOFFSLAB0x2 | |||
204 | | KMEM_CACHE_NORECLAIM0x4); | |||
205 | kmem_cache_init(&vm_map_copy_cache, "vm_map_copy", | |||
206 | sizeof(struct vm_map_copy), 0, NULL((void *) 0), NULL((void *) 0), NULL((void *) 0), 0); | |||
207 | ||||
208 | /* | |||
209 | * Submap object is initialized by vm_object_init. | |||
210 | */ | |||
211 | } | |||
212 | ||||
213 | void vm_map_setup(map, pmap, min, max, pageable) | |||
214 | vm_map_t map; | |||
215 | pmap_t pmap; | |||
216 | vm_offset_t min, max; | |||
217 | boolean_t pageable; | |||
218 | { | |||
219 | vm_map_first_entry(map)((map)->hdr.links.next) = vm_map_to_entry(map)((struct vm_map_entry *) &(map)->hdr.links); | |||
220 | vm_map_last_entry(map)((map)->hdr.links.prev) = vm_map_to_entry(map)((struct vm_map_entry *) &(map)->hdr.links); | |||
221 | map->hdr.nentries = 0; | |||
222 | map->hdr.entries_pageable = pageable; | |||
223 | rbtree_init(&map->hdr.tree); | |||
224 | ||||
225 | map->size = 0; | |||
226 | map->ref_count = 1; | |||
227 | map->pmap = pmap; | |||
228 | map->min_offsethdr.links.start = min; | |||
229 | map->max_offsethdr.links.end = max; | |||
230 | map->wiring_required = FALSE((boolean_t) 0); | |||
231 | map->wait_for_space = FALSE((boolean_t) 0); | |||
232 | map->first_free = vm_map_to_entry(map)((struct vm_map_entry *) &(map)->hdr.links); | |||
233 | map->hint = vm_map_to_entry(map)((struct vm_map_entry *) &(map)->hdr.links); | |||
234 | vm_map_lock_init(map)({ lock_init(&(map)->lock, ((boolean_t) 1)); (map)-> timestamp = 0; }); | |||
235 | simple_lock_init(&map->ref_lock); | |||
236 | simple_lock_init(&map->hint_lock); | |||
237 | } | |||
238 | ||||
239 | /* | |||
240 | * vm_map_create: | |||
241 | * | |||
242 | * Creates and returns a new empty VM map with | |||
243 | * the given physical map structure, and having | |||
244 | * the given lower and upper address bounds. | |||
245 | */ | |||
246 | vm_map_t vm_map_create(pmap, min, max, pageable) | |||
247 | pmap_t pmap; | |||
248 | vm_offset_t min, max; | |||
249 | boolean_t pageable; | |||
250 | { | |||
251 | register vm_map_t result; | |||
252 | ||||
253 | result = (vm_map_t) kmem_cache_alloc(&vm_map_cache); | |||
254 | if (result == VM_MAP_NULL((vm_map_t) 0)) | |||
255 | panic("vm_map_create"); | |||
256 | ||||
257 | vm_map_setup(result, pmap, min, max, pageable); | |||
258 | ||||
259 | return(result); | |||
260 | } | |||
261 | ||||
262 | /* | |||
263 | * vm_map_entry_create: [ internal use only ] | |||
264 | * | |||
265 | * Allocates a VM map entry for insertion in the | |||
266 | * given map (or map copy). No fields are filled. | |||
267 | */ | |||
268 | #define vm_map_entry_create(map)_vm_map_entry_create(&(map)->hdr) \ | |||
269 | _vm_map_entry_create(&(map)->hdr) | |||
270 | ||||
271 | #define vm_map_copy_entry_create(copy)_vm_map_entry_create(&(copy)->c_u.hdr) \ | |||
272 | _vm_map_entry_create(&(copy)->cpy_hdrc_u.hdr) | |||
273 | ||||
274 | vm_map_entry_t _vm_map_entry_create(map_header) | |||
275 | register struct vm_map_header *map_header; | |||
276 | { | |||
277 | register kmem_cache_t cache; | |||
278 | register vm_map_entry_t entry; | |||
279 | ||||
280 | if (map_header->entries_pageable) | |||
281 | cache = &vm_map_entry_cache; | |||
282 | else | |||
283 | cache = &vm_map_kentry_cache; | |||
284 | ||||
285 | entry = (vm_map_entry_t) kmem_cache_alloc(cache); | |||
286 | if (entry == VM_MAP_ENTRY_NULL((vm_map_entry_t) 0)) | |||
287 | panic("vm_map_entry_create"); | |||
288 | ||||
289 | return(entry); | |||
290 | } | |||
291 | ||||
292 | /* | |||
293 | * vm_map_entry_dispose: [ internal use only ] | |||
294 | * | |||
295 | * Inverse of vm_map_entry_create. | |||
296 | */ | |||
297 | #define vm_map_entry_dispose(map, entry)_vm_map_entry_dispose(&(map)->hdr, (entry)) \ | |||
298 | _vm_map_entry_dispose(&(map)->hdr, (entry)) | |||
299 | ||||
300 | #define vm_map_copy_entry_dispose(map, entry)_vm_map_entry_dispose(&(copy)->c_u.hdr, (entry)) \ | |||
301 | _vm_map_entry_dispose(&(copy)->cpy_hdrc_u.hdr, (entry)) | |||
302 | ||||
303 | void _vm_map_entry_dispose(map_header, entry) | |||
304 | register struct vm_map_header *map_header; | |||
305 | register vm_map_entry_t entry; | |||
306 | { | |||
307 | register kmem_cache_t cache; | |||
308 | ||||
309 | if (map_header->entries_pageable) | |||
310 | cache = &vm_map_entry_cache; | |||
311 | else | |||
312 | cache = &vm_map_kentry_cache; | |||
313 | ||||
314 | kmem_cache_free(cache, (vm_offset_t) entry); | |||
315 | } | |||
316 | ||||
317 | /* | |||
318 | * Red-black tree lookup/insert comparison functions | |||
319 | */ | |||
320 | static inline int vm_map_entry_cmp_lookup(vm_offset_t addr, | |||
321 | const struct rbtree_node *node) | |||
322 | { | |||
323 | struct vm_map_entry *entry; | |||
324 | ||||
325 | entry = rbtree_entry(node, struct vm_map_entry, tree_node)((struct vm_map_entry *)((char *)node - __builtin_offsetof (struct vm_map_entry, tree_node))); | |||
326 | ||||
327 | if (addr < entry->vme_startlinks.start) | |||
328 | return -1; | |||
329 | else if (addr < entry->vme_endlinks.end) | |||
330 | return 0; | |||
331 | else | |||
332 | return 1; | |||
333 | } | |||
334 | ||||
335 | static inline int vm_map_entry_cmp_insert(const struct rbtree_node *a, | |||
336 | const struct rbtree_node *b) | |||
337 | { | |||
338 | struct vm_map_entry *entry; | |||
339 | ||||
340 | entry = rbtree_entry(a, struct vm_map_entry, tree_node)((struct vm_map_entry *)((char *)a - __builtin_offsetof (struct vm_map_entry, tree_node))); | |||
341 | return vm_map_entry_cmp_lookup(entry->vme_startlinks.start, b); | |||
342 | } | |||
343 | ||||
344 | /* | |||
345 | * vm_map_entry_{un,}link: | |||
346 | * | |||
347 | * Insert/remove entries from maps (or map copies). | |||
348 | * | |||
349 | * The start and end addresses of the entries must be properly set | |||
350 | * before using these macros. | |||
351 | */ | |||
352 | #define vm_map_entry_link(map, after_where, entry)({ (&(map)->hdr)->nentries++; (entry)->links.prev = (after_where); (entry)->links.next = (after_where)-> links.next; (entry)->links.prev->links.next = (entry)-> links.next->links.prev = (entry); ({ struct rbtree_node *___cur , *___prev; int ___diff, ___index; ___prev = ((void *) 0); ___index = -1; ___cur = (&(&(map)->hdr)->tree)->root ; while (___cur != ((void *) 0)) { ___diff = vm_map_entry_cmp_insert (&(entry)->tree_node, ___cur); ({ if (!(___diff != 0)) Assert("___diff != 0", "../vm/vm_map.c", 352); }); ___prev = ___cur; ___index = rbtree_d2i(___diff); ___cur = ___cur-> children[___index]; } rbtree_insert_rebalance(&(&(map )->hdr)->tree, ___prev, ___index, &(entry)->tree_node ); }); }) \ | |||
353 | _vm_map_entry_link(&(map)->hdr, after_where, entry)({ (&(map)->hdr)->nentries++; (entry)->links.prev = (after_where); (entry)->links.next = (after_where)-> links.next; (entry)->links.prev->links.next = (entry)-> links.next->links.prev = (entry); ({ struct rbtree_node *___cur , *___prev; int ___diff, ___index; ___prev = ((void *) 0); ___index = -1; ___cur = (&(&(map)->hdr)->tree)->root ; while (___cur != ((void *) 0)) { ___diff = vm_map_entry_cmp_insert (&(entry)->tree_node, ___cur); ({ if (!(___diff != 0)) Assert("___diff != 0", "../vm/vm_map.c", 353); }); ___prev = ___cur; ___index = rbtree_d2i(___diff); ___cur = ___cur-> children[___index]; } rbtree_insert_rebalance(&(&(map )->hdr)->tree, ___prev, ___index, &(entry)->tree_node ); }); }) | |||
354 | ||||
355 | #define vm_map_copy_entry_link(copy, after_where, entry)({ (&(copy)->c_u.hdr)->nentries++; (entry)->links .prev = (after_where); (entry)->links.next = (after_where) ->links.next; (entry)->links.prev->links.next = (entry )->links.next->links.prev = (entry); ({ struct rbtree_node *___cur, *___prev; int ___diff, ___index; ___prev = ((void * ) 0); ___index = -1; ___cur = (&(&(copy)->c_u.hdr) ->tree)->root; while (___cur != ((void *) 0)) { ___diff = vm_map_entry_cmp_insert(&(entry)->tree_node, ___cur ); ({ if (!(___diff != 0)) Assert("___diff != 0", "../vm/vm_map.c" , 355); }); ___prev = ___cur; ___index = rbtree_d2i(___diff); ___cur = ___cur->children[___index]; } rbtree_insert_rebalance (&(&(copy)->c_u.hdr)->tree, ___prev, ___index, & (entry)->tree_node); }); }) \ | |||
356 | _vm_map_entry_link(&(copy)->cpy_hdr, after_where, entry)({ (&(copy)->c_u.hdr)->nentries++; (entry)->links .prev = (after_where); (entry)->links.next = (after_where) ->links.next; (entry)->links.prev->links.next = (entry )->links.next->links.prev = (entry); ({ struct rbtree_node *___cur, *___prev; int ___diff, ___index; ___prev = ((void * ) 0); ___index = -1; ___cur = (&(&(copy)->c_u.hdr) ->tree)->root; while (___cur != ((void *) 0)) { ___diff = vm_map_entry_cmp_insert(&(entry)->tree_node, ___cur ); ({ if (!(___diff != 0)) Assert("___diff != 0", "../vm/vm_map.c" , 356); }); ___prev = ___cur; ___index = rbtree_d2i(___diff); ___cur = ___cur->children[___index]; } rbtree_insert_rebalance (&(&(copy)->c_u.hdr)->tree, ___prev, ___index, & (entry)->tree_node); }); }) | |||
357 | ||||
358 | #define _vm_map_entry_link(hdr, after_where, entry)({ (hdr)->nentries++; (entry)->links.prev = (after_where ); (entry)->links.next = (after_where)->links.next; (entry )->links.prev->links.next = (entry)->links.next-> links.prev = (entry); ({ struct rbtree_node *___cur, *___prev ; int ___diff, ___index; ___prev = ((void *) 0); ___index = - 1; ___cur = (&(hdr)->tree)->root; while (___cur != ( (void *) 0)) { ___diff = vm_map_entry_cmp_insert(&(entry) ->tree_node, ___cur); ({ if (!(___diff != 0)) Assert("___diff != 0" , "../vm/vm_map.c", 358); }); ___prev = ___cur; ___index = rbtree_d2i (___diff); ___cur = ___cur->children[___index]; } rbtree_insert_rebalance (&(hdr)->tree, ___prev, ___index, &(entry)->tree_node ); }); }) \ | |||
359 | MACRO_BEGIN({ \ | |||
360 | (hdr)->nentries++; \ | |||
361 | (entry)->vme_prevlinks.prev = (after_where); \ | |||
362 | (entry)->vme_nextlinks.next = (after_where)->vme_nextlinks.next; \ | |||
363 | (entry)->vme_prevlinks.prev->vme_nextlinks.next = \ | |||
364 | (entry)->vme_nextlinks.next->vme_prevlinks.prev = (entry); \ | |||
365 | rbtree_insert(&(hdr)->tree, &(entry)->tree_node, \({ struct rbtree_node *___cur, *___prev; int ___diff, ___index ; ___prev = ((void *) 0); ___index = -1; ___cur = (&(hdr) ->tree)->root; while (___cur != ((void *) 0)) { ___diff = vm_map_entry_cmp_insert(&(entry)->tree_node, ___cur ); ({ if (!(___diff != 0)) Assert("___diff != 0", "../vm/vm_map.c" , 366); }); ___prev = ___cur; ___index = rbtree_d2i(___diff); ___cur = ___cur->children[___index]; } rbtree_insert_rebalance (&(hdr)->tree, ___prev, ___index, &(entry)->tree_node ); }) | |||
366 | vm_map_entry_cmp_insert)({ struct rbtree_node *___cur, *___prev; int ___diff, ___index ; ___prev = ((void *) 0); ___index = -1; ___cur = (&(hdr) ->tree)->root; while (___cur != ((void *) 0)) { ___diff = vm_map_entry_cmp_insert(&(entry)->tree_node, ___cur ); ({ if (!(___diff != 0)) Assert("___diff != 0", "../vm/vm_map.c" , 366); }); ___prev = ___cur; ___index = rbtree_d2i(___diff); ___cur = ___cur->children[___index]; } rbtree_insert_rebalance (&(hdr)->tree, ___prev, ___index, &(entry)->tree_node ); }); \ | |||
367 | MACRO_END}) | |||
368 | ||||
369 | #define vm_map_entry_unlink(map, entry)({ (&(map)->hdr)->nentries--; (entry)->links.next ->links.prev = (entry)->links.prev; (entry)->links.prev ->links.next = (entry)->links.next; rbtree_remove(& (&(map)->hdr)->tree, &(entry)->tree_node); } ) \ | |||
370 | _vm_map_entry_unlink(&(map)->hdr, entry)({ (&(map)->hdr)->nentries--; (entry)->links.next ->links.prev = (entry)->links.prev; (entry)->links.prev ->links.next = (entry)->links.next; rbtree_remove(& (&(map)->hdr)->tree, &(entry)->tree_node); } ) | |||
371 | ||||
372 | #define vm_map_copy_entry_unlink(copy, entry)({ (&(copy)->c_u.hdr)->nentries--; (entry)->links .next->links.prev = (entry)->links.prev; (entry)->links .prev->links.next = (entry)->links.next; rbtree_remove( &(&(copy)->c_u.hdr)->tree, &(entry)->tree_node ); }) \ | |||
373 | _vm_map_entry_unlink(&(copy)->cpy_hdr, entry)({ (&(copy)->c_u.hdr)->nentries--; (entry)->links .next->links.prev = (entry)->links.prev; (entry)->links .prev->links.next = (entry)->links.next; rbtree_remove( &(&(copy)->c_u.hdr)->tree, &(entry)->tree_node ); }) | |||
374 | ||||
375 | #define _vm_map_entry_unlink(hdr, entry)({ (hdr)->nentries--; (entry)->links.next->links.prev = (entry)->links.prev; (entry)->links.prev->links.next = (entry)->links.next; rbtree_remove(&(hdr)->tree, &(entry)->tree_node); }) \ | |||
376 | MACRO_BEGIN({ \ | |||
377 | (hdr)->nentries--; \ | |||
378 | (entry)->vme_nextlinks.next->vme_prevlinks.prev = (entry)->vme_prevlinks.prev; \ | |||
379 | (entry)->vme_prevlinks.prev->vme_nextlinks.next = (entry)->vme_nextlinks.next; \ | |||
380 | rbtree_remove(&(hdr)->tree, &(entry)->tree_node); \ | |||
381 | MACRO_END}) | |||
382 | ||||
383 | /* | |||
384 | * vm_map_reference: | |||
385 | * | |||
386 | * Creates another valid reference to the given map. | |||
387 | * | |||
388 | */ | |||
389 | void vm_map_reference(map) | |||
390 | register vm_map_t map; | |||
391 | { | |||
392 | if (map == VM_MAP_NULL((vm_map_t) 0)) | |||
393 | return; | |||
394 | ||||
395 | simple_lock(&map->ref_lock); | |||
396 | map->ref_count++; | |||
397 | simple_unlock(&map->ref_lock); | |||
398 | } | |||
399 | ||||
400 | /* | |||
401 | * vm_map_deallocate: | |||
402 | * | |||
403 | * Removes a reference from the specified map, | |||
404 | * destroying it if no references remain. | |||
405 | * The map should not be locked. | |||
406 | */ | |||
407 | void vm_map_deallocate(map) | |||
408 | register vm_map_t map; | |||
409 | { | |||
410 | register int c; | |||
411 | ||||
412 | if (map == VM_MAP_NULL((vm_map_t) 0)) | |||
413 | return; | |||
414 | ||||
415 | simple_lock(&map->ref_lock); | |||
416 | c = --map->ref_count; | |||
417 | simple_unlock(&map->ref_lock); | |||
418 | ||||
419 | if (c > 0) { | |||
420 | return; | |||
421 | } | |||
422 | ||||
423 | projected_buffer_collect(map); | |||
424 | (void) vm_map_delete(map, map->min_offsethdr.links.start, map->max_offsethdr.links.end); | |||
425 | ||||
426 | pmap_destroy(map->pmap); | |||
427 | ||||
428 | kmem_cache_free(&vm_map_cache, (vm_offset_t) map); | |||
429 | } | |||
430 | ||||
431 | /* | |||
432 | * SAVE_HINT: | |||
433 | * | |||
434 | * Saves the specified entry as the hint for | |||
435 | * future lookups. Performs necessary interlocks. | |||
436 | */ | |||
437 | #define SAVE_HINT(map,value); (map)->hint = (value); ; \ | |||
438 | simple_lock(&(map)->hint_lock); \ | |||
439 | (map)->hint = (value); \ | |||
440 | simple_unlock(&(map)->hint_lock); | |||
441 | ||||
442 | /* | |||
443 | * vm_map_lookup_entry: [ internal use only ] | |||
444 | * | |||
445 | * Finds the map entry containing (or | |||
446 | * immediately preceding) the specified address | |||
447 | * in the given map; the entry is returned | |||
448 | * in the "entry" parameter. The boolean | |||
449 | * result indicates whether the address is | |||
450 | * actually contained in the map. | |||
451 | */ | |||
452 | boolean_t vm_map_lookup_entry(map, address, entry) | |||
453 | register vm_map_t map; | |||
454 | register vm_offset_t address; | |||
455 | vm_map_entry_t *entry; /* OUT */ | |||
456 | { | |||
457 | register struct rbtree_node *node; | |||
458 | register vm_map_entry_t hint; | |||
459 | ||||
460 | /* | |||
461 | * First, make a quick check to see if we are already | |||
462 | * looking at the entry we want (which is often the case). | |||
463 | */ | |||
464 | ||||
465 | simple_lock(&map->hint_lock); | |||
466 | hint = map->hint; | |||
467 | simple_unlock(&map->hint_lock); | |||
468 | ||||
469 | if ((hint != vm_map_to_entry(map)((struct vm_map_entry *) &(map)->hdr.links)) && (address >= hint->vme_startlinks.start)) { | |||
470 | if (address < hint->vme_endlinks.end) { | |||
471 | *entry = hint; | |||
472 | return(TRUE((boolean_t) 1)); | |||
473 | } else { | |||
474 | vm_map_entry_t next = hint->vme_nextlinks.next; | |||
475 | ||||
476 | if ((next == vm_map_to_entry(map)((struct vm_map_entry *) &(map)->hdr.links)) | |||
477 | || (address < next->vme_startlinks.start)) { | |||
478 | *entry = hint; | |||
479 | return(FALSE((boolean_t) 0)); | |||
480 | } | |||
481 | } | |||
482 | } | |||
483 | ||||
484 | /* | |||
485 | * If the hint didn't help, use the red-black tree. | |||
486 | */ | |||
487 | ||||
488 | node = rbtree_lookup_nearest(&map->hdr.tree, address,({ struct rbtree_node *___cur, *___prev; int ___diff, ___index ; ___prev = ((void *) 0); ___index = -1; ___cur = (&map-> hdr.tree)->root; while (___cur != ((void *) 0)) { ___diff = vm_map_entry_cmp_lookup(address, ___cur); if (___diff == 0) break ; ___prev = ___cur; ___index = rbtree_d2i(___diff); ___cur = ___cur ->children[___index]; } if (___cur == ((void *) 0)) ___cur = rbtree_nearest(___prev, ___index, 0); ___cur; }) | |||
489 | vm_map_entry_cmp_lookup, RBTREE_LEFT)({ struct rbtree_node *___cur, *___prev; int ___diff, ___index ; ___prev = ((void *) 0); ___index = -1; ___cur = (&map-> hdr.tree)->root; while (___cur != ((void *) 0)) { ___diff = vm_map_entry_cmp_lookup(address, ___cur); if (___diff == 0) break ; ___prev = ___cur; ___index = rbtree_d2i(___diff); ___cur = ___cur ->children[___index]; } if (___cur == ((void *) 0)) ___cur = rbtree_nearest(___prev, ___index, 0); ___cur; }); | |||
490 | ||||
491 | if (node == NULL((void *) 0)) { | |||
492 | *entry = vm_map_to_entry(map)((struct vm_map_entry *) &(map)->hdr.links); | |||
493 | SAVE_HINT(map, *entry); (map)->hint = (*entry); ;; | |||
494 | return(FALSE((boolean_t) 0)); | |||
495 | } else { | |||
496 | *entry = rbtree_entry(node, struct vm_map_entry, tree_node)((struct vm_map_entry *)((char *)node - __builtin_offsetof (struct vm_map_entry, tree_node))); | |||
497 | SAVE_HINT(map, *entry); (map)->hint = (*entry); ;; | |||
498 | return((address < (*entry)->vme_endlinks.end) ? TRUE((boolean_t) 1) : FALSE((boolean_t) 0)); | |||
499 | } | |||
500 | } | |||
501 | ||||
502 | /* | |||
503 | * Routine: invalid_user_access | |||
504 | * | |||
505 | * Verifies whether user access is valid. | |||
506 | */ | |||
507 | ||||
508 | boolean_t | |||
509 | invalid_user_access(map, start, end, prot) | |||
510 | vm_map_t map; | |||
511 | vm_offset_t start, end; | |||
512 | vm_prot_t prot; | |||
513 | { | |||
514 | vm_map_entry_t entry; | |||
515 | ||||
516 | return (map == VM_MAP_NULL((vm_map_t) 0) || map == kernel_map || | |||
517 | !vm_map_lookup_entry(map, start, &entry) || | |||
518 | entry->vme_endlinks.end < end || | |||
519 | (prot & ~(entry->protection))); | |||
520 | } | |||
521 | ||||
522 | ||||
523 | /* | |||
524 | * Routine: vm_map_find_entry | |||
525 | * Purpose: | |||
526 | * Allocate a range in the specified virtual address map, | |||
527 | * returning the entry allocated for that range. | |||
528 | * Used by kmem_alloc, etc. Returns wired entries. | |||
529 | * | |||
530 | * The map must be locked. | |||
531 | * | |||
532 | * If an entry is allocated, the object/offset fields | |||
533 | * are initialized to zero. If an object is supplied, | |||
534 | * then an existing entry may be extended. | |||
535 | */ | |||
536 | kern_return_t vm_map_find_entry(map, address, size, mask, object, o_entry) | |||
537 | register vm_map_t map; | |||
538 | vm_offset_t *address; /* OUT */ | |||
539 | vm_size_t size; | |||
540 | vm_offset_t mask; | |||
541 | vm_object_t object; | |||
542 | vm_map_entry_t *o_entry; /* OUT */ | |||
543 | { | |||
544 | register vm_map_entry_t entry, new_entry; | |||
545 | register vm_offset_t start; | |||
546 | register vm_offset_t end; | |||
547 | ||||
548 | /* | |||
549 | * Look for the first possible address; | |||
550 | * if there's already something at this | |||
551 | * address, we have to start after it. | |||
552 | */ | |||
553 | ||||
554 | if ((entry = map->first_free) == vm_map_to_entry(map)((struct vm_map_entry *) &(map)->hdr.links)) | |||
555 | start = map->min_offsethdr.links.start; | |||
556 | else | |||
557 | start = entry->vme_endlinks.end; | |||
558 | ||||
559 | /* | |||
560 | * In any case, the "entry" always precedes | |||
561 | * the proposed new region throughout the loop: | |||
562 | */ | |||
563 | ||||
564 | while (TRUE((boolean_t) 1)) { | |||
565 | register vm_map_entry_t next; | |||
566 | ||||
567 | /* | |||
568 | * Find the end of the proposed new region. | |||
569 | * Be sure we didn't go beyond the end, or | |||
570 | * wrap around the address. | |||
571 | */ | |||
572 | ||||
573 | if (((start + mask) & ~mask) < start) { | |||
574 | printf_once("no more room for vm_map_find_entry in %p\n", map)({ static int __once = 0; if (!__once) { printf("no more room for vm_map_find_entry in %p\n" , map); __once = 1; } }); | |||
575 | return(KERN_NO_SPACE3); | |||
576 | } | |||
577 | start = ((start + mask) & ~mask); | |||
578 | end = start + size; | |||
579 | ||||
580 | if ((end > map->max_offsethdr.links.end) || (end < start)) { | |||
581 | printf_once("no more room for vm_map_find_entry in %p\n", map)({ static int __once = 0; if (!__once) { printf("no more room for vm_map_find_entry in %p\n" , map); __once = 1; } }); | |||
582 | return(KERN_NO_SPACE3); | |||
583 | } | |||
584 | ||||
585 | /* | |||
586 | * If there are no more entries, we must win. | |||
587 | */ | |||
588 | ||||
589 | next = entry->vme_nextlinks.next; | |||
590 | if (next == vm_map_to_entry(map)((struct vm_map_entry *) &(map)->hdr.links)) | |||
591 | break; | |||
592 | ||||
593 | /* | |||
594 | * If there is another entry, it must be | |||
595 | * after the end of the potential new region. | |||
596 | */ | |||
597 | ||||
598 | if (next->vme_startlinks.start >= end) | |||
599 | break; | |||
600 | ||||
601 | /* | |||
602 | * Didn't fit -- move to the next entry. | |||
603 | */ | |||
604 | ||||
605 | entry = next; | |||
606 | start = entry->vme_endlinks.end; | |||
607 | } | |||
608 | ||||
609 | /* | |||
610 | * At this point, | |||
611 | * "start" and "end" should define the endpoints of the | |||
612 | * available new range, and | |||
613 | * "entry" should refer to the region before the new | |||
614 | * range, and | |||
615 | * | |||
616 | * the map should be locked. | |||
617 | */ | |||
618 | ||||
619 | *address = start; | |||
620 | ||||
621 | /* | |||
622 | * See whether we can avoid creating a new entry by | |||
623 | * extending one of our neighbors. [So far, we only attempt to | |||
624 | * extend from below.] | |||
625 | */ | |||
626 | ||||
627 | if ((object != VM_OBJECT_NULL((vm_object_t) 0)) && | |||
628 | (entry != vm_map_to_entry(map)((struct vm_map_entry *) &(map)->hdr.links)) && | |||
629 | (entry->vme_endlinks.end == start) && | |||
630 | (!entry->is_shared) && | |||
631 | (!entry->is_sub_map) && | |||
632 | (entry->object.vm_object == object) && | |||
633 | (entry->needs_copy == FALSE((boolean_t) 0)) && | |||
634 | (entry->inheritance == VM_INHERIT_DEFAULT((vm_inherit_t) 1)) && | |||
635 | (entry->protection == VM_PROT_DEFAULT(((vm_prot_t) 0x01)|((vm_prot_t) 0x02))) && | |||
636 | (entry->max_protection == VM_PROT_ALL(((vm_prot_t) 0x01)|((vm_prot_t) 0x02)|((vm_prot_t) 0x04))) && | |||
637 | (entry->wired_count == 1) && | |||
638 | (entry->user_wired_count == 0) && | |||
639 | (entry->projected_on == 0)) { | |||
640 | /* | |||
641 | * Because this is a special case, | |||
642 | * we don't need to use vm_object_coalesce. | |||
643 | */ | |||
644 | ||||
645 | entry->vme_endlinks.end = end; | |||
646 | new_entry = entry; | |||
647 | } else { | |||
648 | new_entry = vm_map_entry_create(map)_vm_map_entry_create(&(map)->hdr); | |||
649 | ||||
650 | new_entry->vme_startlinks.start = start; | |||
651 | new_entry->vme_endlinks.end = end; | |||
652 | ||||
653 | new_entry->is_shared = FALSE((boolean_t) 0); | |||
654 | new_entry->is_sub_map = FALSE((boolean_t) 0); | |||
655 | new_entry->object.vm_object = VM_OBJECT_NULL((vm_object_t) 0); | |||
656 | new_entry->offset = (vm_offset_t) 0; | |||
657 | ||||
658 | new_entry->needs_copy = FALSE((boolean_t) 0); | |||
659 | ||||
660 | new_entry->inheritance = VM_INHERIT_DEFAULT((vm_inherit_t) 1); | |||
661 | new_entry->protection = VM_PROT_DEFAULT(((vm_prot_t) 0x01)|((vm_prot_t) 0x02)); | |||
662 | new_entry->max_protection = VM_PROT_ALL(((vm_prot_t) 0x01)|((vm_prot_t) 0x02)|((vm_prot_t) 0x04)); | |||
663 | new_entry->wired_count = 1; | |||
664 | new_entry->user_wired_count = 0; | |||
665 | ||||
666 | new_entry->in_transition = FALSE((boolean_t) 0); | |||
667 | new_entry->needs_wakeup = FALSE((boolean_t) 0); | |||
668 | new_entry->projected_on = 0; | |||
669 | ||||
670 | /* | |||
671 | * Insert the new entry into the list | |||
672 | */ | |||
673 | ||||
674 | vm_map_entry_link(map, entry, new_entry)({ (&(map)->hdr)->nentries++; (new_entry)->links .prev = (entry); (new_entry)->links.next = (entry)->links .next; (new_entry)->links.prev->links.next = (new_entry )->links.next->links.prev = (new_entry); ({ struct rbtree_node *___cur, *___prev; int ___diff, ___index; ___prev = ((void * ) 0); ___index = -1; ___cur = (&(&(map)->hdr)-> tree)->root; while (___cur != ((void *) 0)) { ___diff = vm_map_entry_cmp_insert (&(new_entry)->tree_node, ___cur); ({ if (!(___diff != 0)) Assert("___diff != 0", "../vm/vm_map.c", 674); }); ___prev = ___cur; ___index = rbtree_d2i(___diff); ___cur = ___cur-> children[___index]; } rbtree_insert_rebalance(&(&(map )->hdr)->tree, ___prev, ___index, &(new_entry)-> tree_node); }); }); | |||
675 | } | |||
676 | ||||
677 | map->size += size; | |||
678 | ||||
679 | /* | |||
680 | * Update the free space hint and the lookup hint | |||
681 | */ | |||
682 | ||||
683 | map->first_free = new_entry; | |||
684 | SAVE_HINT(map, new_entry); (map)->hint = (new_entry); ;; | |||
685 | ||||
686 | *o_entry = new_entry; | |||
687 | return(KERN_SUCCESS0); | |||
688 | } | |||
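Since vm_map_find_entry() returns a wired entry whose object/offset fields are zeroed, a caller in the style of kmem_alloc locks the map, allocates the range, and then supplies the backing object itself. The following hedged sketch shows that calling convention; the helper name and its parameters are invented for illustration and are not part of this file:

	static kern_return_t example_kernel_alloc(vm_map_t map, vm_object_t object,
						  vm_size_t size, vm_offset_t *addrp)
	{
		vm_map_entry_t entry;
		vm_offset_t addr;
		kern_return_t kr;

		vm_map_lock(map);		/* vm_map_find_entry needs the map locked */
		kr = vm_map_find_entry(map, &addr, size, (vm_offset_t) 0,
				       VM_OBJECT_NULL, &entry);
		if (kr != KERN_SUCCESS) {
			vm_map_unlock(map);
			return kr;
		}

		/* The new entry's object/offset were initialized to zero; supply them. */
		entry->object.vm_object = object;
		entry->offset = (vm_offset_t) 0;
		vm_map_unlock(map);

		*addrp = addr;
		return KERN_SUCCESS;
	}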
689 | ||||
690 | int vm_map_pmap_enter_print = FALSE((boolean_t) 0); | |||
691 | int vm_map_pmap_enter_enable = FALSE((boolean_t) 0); | |||
692 | ||||
693 | /* | |||
694 | * Routine: vm_map_pmap_enter | |||
695 | * | |||
696 | * Description: | |||
697 | * Force pages from the specified object to be entered into | |||
698 | * the pmap at the specified address if they are present. | |||
699 | * As soon as a page is not found in the object, the scan ends. | |||
700 | * | |||
701 | * Returns: | |||
702 | * Nothing. | |||
703 | * | |||
704 | * In/out conditions: | |||
705 | * The source map should not be locked on entry. | |||
706 | */ | |||
707 | void | |||
708 | vm_map_pmap_enter(map, addr, end_addr, object, offset, protection) | |||
709 | vm_map_t map; | |||
710 | register | |||
711 | vm_offset_t addr; | |||
712 | register | |||
713 | vm_offset_t end_addr; | |||
714 | register | |||
715 | vm_object_t object; | |||
716 | vm_offset_t offset; | |||
717 | vm_prot_t protection; | |||
718 | { | |||
719 | while (addr < end_addr) { | |||
720 | register vm_page_t m; | |||
721 | ||||
722 | vm_object_lock(object); | |||
723 | vm_object_paging_begin(object)((object)->paging_in_progress++); | |||
724 | ||||
725 | m = vm_page_lookup(object, offset); | |||
726 | if (m == VM_PAGE_NULL((vm_page_t) 0) || m->absent) { | |||
727 | vm_object_paging_end(object)({ ({ if (!((object)->paging_in_progress != 0)) Assert("(object)->paging_in_progress != 0" , "../vm/vm_map.c", 727); }); if (--(object)->paging_in_progress == 0) { ({ if ((object)->all_wanted & (1 << (2) )) thread_wakeup_prim(((event_t)(((vm_offset_t) object) + (2) )), ((boolean_t) 0), 0); (object)->all_wanted &= ~(1 << (2)); }); } }); | |||
728 | vm_object_unlock(object); | |||
729 | return; | |||
730 | } | |||
731 | ||||
732 | if (vm_map_pmap_enter_print) { | |||
733 | printf("vm_map_pmap_enter:"); | |||
734 | printf("map: %p, addr: %lx, object: %p, offset: %lx\n", | |||
735 | map, addr, object, offset); | |||
736 | } | |||
737 | ||||
738 | m->busy = TRUE((boolean_t) 1); | |||
739 | vm_object_unlock(object); | |||
740 | ||||
741 | PMAP_ENTER(map->pmap, addr, m,({ pmap_enter( (map->pmap), (addr), (m)->phys_addr, (protection ) & ~(m)->page_lock, (((boolean_t) 0)) ); }) | |||
742 | protection, FALSE)({ pmap_enter( (map->pmap), (addr), (m)->phys_addr, (protection ) & ~(m)->page_lock, (((boolean_t) 0)) ); }); | |||
743 | ||||
744 | vm_object_lock(object); | |||
745 | PAGE_WAKEUP_DONE(m)({ (m)->busy = ((boolean_t) 0); if ((m)->wanted) { (m)-> wanted = ((boolean_t) 0); thread_wakeup_prim((((event_t) m)), ((boolean_t) 0), 0); } }); | |||
746 | vm_page_lock_queues(); | |||
747 | if (!m->active && !m->inactive) | |||
748 | vm_page_activate(m); | |||
749 | vm_page_unlock_queues(); | |||
750 | vm_object_paging_end(object)({ ({ if (!((object)->paging_in_progress != 0)) Assert("(object)->paging_in_progress != 0" , "../vm/vm_map.c", 750); }); if (--(object)->paging_in_progress == 0) { ({ if ((object)->all_wanted & (1 << (2) )) thread_wakeup_prim(((event_t)(((vm_offset_t) object) + (2) )), ((boolean_t) 0), 0); (object)->all_wanted &= ~(1 << (2)); }); } }); | |||
751 | vm_object_unlock(object); | |||
752 | ||||
753 | offset += PAGE_SIZE(1 << 12); | |||
754 | addr += PAGE_SIZE(1 << 12); | |||
755 | } | |||
756 | } | |||
757 | ||||
758 | /* | |||
759 | * Routine: vm_map_enter | |||
760 | * | |||
761 | * Description: | |||
762 | * Allocate a range in the specified virtual address map. | |||
763 | * The resulting range will refer to memory defined by | |||
764 | * the given memory object and offset into that object. | |||
765 | * | |||
766 | * Arguments are as defined in the vm_map call. | |||
767 | */ | |||
768 | kern_return_t vm_map_enter( | |||
769 | map, | |||
770 | address, size, mask, anywhere, | |||
771 | object, offset, needs_copy, | |||
772 | cur_protection, max_protection, inheritance) | |||
773 | register | |||
774 | vm_map_t map; | |||
775 | vm_offset_t *address; /* IN/OUT */ | |||
776 | vm_size_t size; | |||
777 | vm_offset_t mask; | |||
778 | boolean_t anywhere; | |||
779 | vm_object_t object; | |||
780 | vm_offset_t offset; | |||
781 | boolean_t needs_copy; | |||
782 | vm_prot_t cur_protection; | |||
783 | vm_prot_t max_protection; | |||
784 | vm_inherit_t inheritance; | |||
785 | { | |||
786 | register vm_map_entry_t entry; | |||
787 | register vm_offset_t start; | |||
788 | register vm_offset_t end; | |||
789 | kern_return_t result = KERN_SUCCESS0; | |||
790 | ||||
791 | #define RETURN(value) { result = value; goto BailOut; } | |||
792 | ||||
793 | if (size == 0) | |||
794 | return KERN_INVALID_ARGUMENT4; | |||
795 | ||||
796 | StartAgain: ; | |||
797 | ||||
798 | start = *address; | |||
799 | ||||
800 | if (anywhere) { | |||
801 | vm_map_lock(map)({ lock_write(&(map)->lock); (map)->timestamp++; }); | |||
802 | ||||
803 | /* | |||
804 | * Calculate the first possible address. | |||
805 | */ | |||
806 | ||||
807 | if (start < map->min_offsethdr.links.start) | |||
808 | start = map->min_offsethdr.links.start; | |||
809 | if (start > map->max_offsethdr.links.end) | |||
810 | RETURN(KERN_NO_SPACE3); | |||
811 | ||||
812 | /* | |||
813 | * Look for the first possible address; | |||
814 | * if there's already something at this | |||
815 | * address, we have to start after it. | |||
816 | */ | |||
817 | ||||
818 | if (start == map->min_offsethdr.links.start) { | |||
819 | if ((entry = map->first_free) != vm_map_to_entry(map)((struct vm_map_entry *) &(map)->hdr.links)) | |||
820 | start = entry->vme_endlinks.end; | |||
821 | } else { | |||
822 | vm_map_entry_t tmp_entry; | |||
823 | if (vm_map_lookup_entry(map, start, &tmp_entry)) | |||
824 | start = tmp_entry->vme_endlinks.end; | |||
825 | entry = tmp_entry; | |||
826 | } | |||
827 | ||||
828 | /* | |||
829 | * In any case, the "entry" always precedes | |||
830 | * the proposed new region throughout the | |||
831 | * loop: | |||
832 | */ | |||
833 | ||||
834 | while (TRUE((boolean_t) 1)) { | |||
835 | register vm_map_entry_t next; | |||
836 | ||||
837 | /* | |||
838 | * Find the end of the proposed new region. | |||
839 | * Be sure we didn't go beyond the end, or | |||
840 | * wrap around the address. | |||
841 | */ | |||
842 | ||||
843 | if (((start + mask) & ~mask) < start) { | |||
844 | printf_once("no more room for vm_map_enter in %p\n", map)({ static int __once = 0; if (!__once) { printf("no more room for vm_map_enter in %p\n" , map); __once = 1; } }); | |||
845 | RETURN(KERN_NO_SPACE3); | |||
846 | } | |||
847 | start = ((start + mask) & ~mask); | |||
848 | end = start + size; | |||
849 | ||||
850 | if ((end > map->max_offsethdr.links.end) || (end < start)) { | |||
851 | if (map->wait_for_space) { | |||
852 | if (size <= (map->max_offsethdr.links.end - | |||
853 | map->min_offsethdr.links.start)) { | |||
854 | assert_wait((event_t) map, TRUE((boolean_t) 1)); | |||
855 | vm_map_unlock(map)lock_done(&(map)->lock); | |||
856 | thread_block((void (*)()) 0); | |||
857 | goto StartAgain; | |||
858 | } | |||
859 | } | |||
860 | ||||
861 | printf_once("no more room for vm_map_enter in %p\n", map)({ static int __once = 0; if (!__once) { printf("no more room for vm_map_enter in %p\n" , map); __once = 1; } }); | |||
862 | RETURN(KERN_NO_SPACE3); | |||
863 | } | |||
864 | ||||
865 | /* | |||
866 | * If there are no more entries, we must win. | |||
867 | */ | |||
868 | ||||
869 | next = entry->vme_nextlinks.next; | |||
870 | if (next == vm_map_to_entry(map)((struct vm_map_entry *) &(map)->hdr.links)) | |||
871 | break; | |||
872 | ||||
873 | /* | |||
874 | * If there is another entry, it must be | |||
875 | * after the end of the potential new region. | |||
876 | */ | |||
877 | ||||
878 | if (next->vme_startlinks.start >= end) | |||
879 | break; | |||
880 | ||||
881 | /* | |||
882 | * Didn't fit -- move to the next entry. | |||
883 | */ | |||
884 | ||||
885 | entry = next; | |||
886 | start = entry->vme_endlinks.end; | |||
887 | } | |||
888 | *address = start; | |||
889 | } else { | |||
890 | vm_map_entry_t temp_entry; | |||
891 | ||||
892 | /* | |||
893 | * Verify that: | |||
894 | * the address doesn't itself violate | |||
895 | * the mask requirement. | |||
896 | */ | |||
897 | ||||
898 | if ((start & mask) != 0) | |||
899 | return(KERN_NO_SPACE3); | |||
900 | ||||
901 | vm_map_lock(map)({ lock_write(&(map)->lock); (map)->timestamp++; }); | |||
902 | ||||
903 | /* | |||
904 | * ... the address is within bounds | |||
905 | */ | |||
906 | ||||
907 | end = start + size; | |||
908 | ||||
909 | if ((start < map->min_offsethdr.links.start) || | |||
910 | (end > map->max_offsethdr.links.end) || | |||
911 | (start >= end)) { | |||
912 | RETURN(KERN_INVALID_ADDRESS1); | |||
913 | } | |||
914 | ||||
915 | /* | |||
916 | * ... the starting address isn't allocated | |||
917 | */ | |||
918 | ||||
919 | if (vm_map_lookup_entry(map, start, &temp_entry)) | |||
920 | RETURN(KERN_NO_SPACE3); | |||
921 | ||||
922 | entry = temp_entry; | |||
923 | ||||
924 | /* | |||
925 | * ... the next region doesn't overlap the | |||
926 | * end point. | |||
927 | */ | |||
928 | ||||
929 | if ((entry->vme_nextlinks.next != vm_map_to_entry(map)((struct vm_map_entry *) &(map)->hdr.links)) && | |||
930 | (entry->vme_nextlinks.next->vme_startlinks.start < end)) | |||
931 | RETURN(KERN_NO_SPACE3); | |||
932 | } | |||
933 | ||||
934 | /* | |||
935 | * At this point, | |||
936 | * "start" and "end" should define the endpoints of the | |||
937 | * available new range, and | |||
938 | * "entry" should refer to the region before the new | |||
939 | * range, and | |||
940 | * | |||
941 | * the map should be locked. | |||
942 | */ | |||
943 | ||||
944 | /* | |||
945 | * See whether we can avoid creating a new entry (and object) by | |||
946 | * extending one of our neighbors. [So far, we only attempt to | |||
947 | * extend from below.] | |||
948 | */ | |||
949 | ||||
950 | if ((object == VM_OBJECT_NULL((vm_object_t) 0)) && | |||
951 | (entry != vm_map_to_entry(map)((struct vm_map_entry *) &(map)->hdr.links)) && | |||
952 | (entry->vme_endlinks.end == start) && | |||
953 | (!entry->is_shared) && | |||
954 | (!entry->is_sub_map) && | |||
955 | (entry->inheritance == inheritance) && | |||
956 | (entry->protection == cur_protection) && | |||
957 | (entry->max_protection == max_protection) && | |||
958 | (entry->wired_count == 0) && /* implies user_wired_count == 0 */ | |||
959 | (entry->projected_on == 0)) { | |||
960 | if (vm_object_coalesce(entry->object.vm_object, | |||
961 | VM_OBJECT_NULL((vm_object_t) 0), | |||
962 | entry->offset, | |||
963 | (vm_offset_t) 0, | |||
964 | (vm_size_t)(entry->vme_endlinks.end - entry->vme_startlinks.start), | |||
965 | (vm_size_t)(end - entry->vme_endlinks.end))) { | |||
966 | ||||
967 | /* | |||
968 | * Coalesced the two objects - can extend | |||
969 | * the previous map entry to include the | |||
970 | * new range. | |||
971 | */ | |||
972 | map->size += (end - entry->vme_endlinks.end); | |||
973 | entry->vme_endlinks.end = end; | |||
974 | RETURN(KERN_SUCCESS0); | |||
975 | } | |||
976 | } | |||
977 | ||||
978 | /* | |||
979 | * Create a new entry | |||
980 | */ | |||
981 | ||||
982 | /**/ { | |||
983 | register vm_map_entry_t new_entry; | |||
984 | ||||
985 | new_entry = vm_map_entry_create(map)_vm_map_entry_create(&(map)->hdr); | |||
986 | ||||
987 | new_entry->vme_startlinks.start = start; | |||
988 | new_entry->vme_endlinks.end = end; | |||
989 | ||||
990 | new_entry->is_shared = FALSE((boolean_t) 0); | |||
991 | new_entry->is_sub_map = FALSE((boolean_t) 0); | |||
992 | new_entry->object.vm_object = object; | |||
993 | new_entry->offset = offset; | |||
994 | ||||
995 | new_entry->needs_copy = needs_copy; | |||
996 | ||||
997 | new_entry->inheritance = inheritance; | |||
998 | new_entry->protection = cur_protection; | |||
999 | new_entry->max_protection = max_protection; | |||
1000 | new_entry->wired_count = 0; | |||
1001 | new_entry->user_wired_count = 0; | |||
1002 | ||||
1003 | new_entry->in_transition = FALSE((boolean_t) 0); | |||
1004 | new_entry->needs_wakeup = FALSE((boolean_t) 0); | |||
1005 | new_entry->projected_on = 0; | |||
1006 | ||||
1007 | /* | |||
1008 | * Insert the new entry into the list | |||
1009 | */ | |||
1010 | ||||
1011 | vm_map_entry_link(map, entry, new_entry)({ (&(map)->hdr)->nentries++; (new_entry)->links .prev = (entry); (new_entry)->links.next = (entry)->links .next; (new_entry)->links.prev->links.next = (new_entry )->links.next->links.prev = (new_entry); ({ struct rbtree_node *___cur, *___prev; int ___diff, ___index; ___prev = ((void * ) 0); ___index = -1; ___cur = (&(&(map)->hdr)-> tree)->root; while (___cur != ((void *) 0)) { ___diff = vm_map_entry_cmp_insert (&(new_entry)->tree_node, ___cur); ({ if (!(___diff != 0)) Assert("___diff != 0", "../vm/vm_map.c", 1011); }); ___prev = ___cur; ___index = rbtree_d2i(___diff); ___cur = ___cur-> children[___index]; } rbtree_insert_rebalance(&(&(map )->hdr)->tree, ___prev, ___index, &(new_entry)-> tree_node); }); }); | |||
1012 | map->size += size; | |||
1013 | ||||
1014 | /* | |||
1015 | * Update the free space hint and the lookup hint | |||
1016 | */ | |||
1017 | ||||
1018 | if ((map->first_free == entry) && | |||
1019 | ((entry == vm_map_to_entry(map)((struct vm_map_entry *) &(map)->hdr.links) ? map->min_offsethdr.links.start : entry->vme_endlinks.end) | |||
1020 | >= new_entry->vme_startlinks.start)) | |||
1021 | map->first_free = new_entry; | |||
1022 | ||||
1023 | SAVE_HINT(map, new_entry); (map)->hint = (new_entry); ;; | |||
1024 | ||||
1025 | vm_map_unlock(map)lock_done(&(map)->lock); | |||
1026 | ||||
1027 | if ((object != VM_OBJECT_NULL((vm_object_t) 0)) && | |||
1028 | (vm_map_pmap_enter_enable) && | |||
1029 | (!anywhere) && | |||
1030 | (!needs_copy) && | |||
1031 | (size < (128*1024))) { | |||
1032 | vm_map_pmap_enter(map, start, end, | |||
1033 | object, offset, cur_protection); | |||
1034 | } | |||
1035 | ||||
1036 | return(result); | |||
1037 | /**/ } | |||
1038 | ||||
1039 | BailOut: ; | |||
1040 | ||||
1041 | vm_map_unlock(map)lock_done(&(map)->lock); | |||
1042 | return(result); | |||
1043 | ||||
1044 | #undef RETURN | |||
1045 | } | |||
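For comparison with the vm_map_find_entry path earlier, a hypothetical caller of vm_map_enter() that maps "size" bytes of a memory object at an address of the map's choosing, with the default protections, might look as follows (a sketch only; the helper name and its variables are assumptions, not code from this file):

	static kern_return_t example_map_object(vm_map_t map, vm_object_t object,
						vm_size_t size, vm_offset_t *addrp)
	{
		vm_offset_t addr = 0;		/* with anywhere == TRUE this is only a hint */
		kern_return_t kr;

		kr = vm_map_enter(map, &addr, size,
				  (vm_offset_t) 0,	/* mask: no alignment constraint */
				  TRUE,			/* anywhere */
				  object,		/* backing memory object */
				  (vm_offset_t) 0,	/* offset into the object */
				  FALSE,		/* needs_copy */
				  VM_PROT_DEFAULT, VM_PROT_ALL,
				  VM_INHERIT_DEFAULT);
		if (kr == KERN_SUCCESS)
			*addrp = addr;
		return kr;
	}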
1046 | ||||
1047 | /* | |||
1048 | * vm_map_clip_start: [ internal use only ] | |||
1049 | * | |||
1050 | * Asserts that the given entry begins at or after | |||
1051 | * the specified address; if necessary, | |||
1052 | * it splits the entry into two. | |||
1053 | */ | |||
1054 | void _vm_map_clip_start(); | |||
1055 | #define vm_map_clip_start(map, entry, startaddr)({ if ((startaddr) > (entry)->links.start) _vm_map_clip_start (&(map)->hdr,(entry),(startaddr)); }) \ | |||
1056 | MACRO_BEGIN({ \ | |||
1057 | if ((startaddr) > (entry)->vme_startlinks.start) \ | |||
1058 | _vm_map_clip_start(&(map)->hdr,(entry),(startaddr)); \ | |||
1059 | MACRO_END}) | |||
1060 | ||||
1061 | void _vm_map_copy_clip_start(); | |||
1062 | #define vm_map_copy_clip_start(copy, entry, startaddr)({ if ((startaddr) > (entry)->links.start) _vm_map_clip_start (&(copy)->c_u.hdr,(entry),(startaddr)); }) \ | |||
1063 | MACRO_BEGIN({ \ | |||
1064 | if ((startaddr) > (entry)->vme_startlinks.start) \ | |||
1065 | _vm_map_clip_start(&(copy)->cpy_hdrc_u.hdr,(entry),(startaddr)); \ | |||
1066 | MACRO_END}) | |||
1067 | ||||
1068 | /* | |||
1069 | * This routine is called only when it is known that | |||
1070 | * the entry must be split. | |||
1071 | */ | |||
1072 | void _vm_map_clip_start(map_header, entry, start) | |||
1073 | register struct vm_map_header *map_header; | |||
1074 | register vm_map_entry_t entry; | |||
1075 | register vm_offset_t start; | |||
1076 | { | |||
1077 | register vm_map_entry_t new_entry; | |||
1078 | ||||
1079 | /* | |||
1080 | * Split off the front portion -- | |||
1081 | * note that we must insert the new | |||
1082 | * entry BEFORE this one, so that | |||
1083 | * this entry has the specified starting | |||
1084 | * address. | |||
1085 | */ | |||
1086 | ||||
1087 | new_entry = _vm_map_entry_create(map_header); | |||
1088 | vm_map_entry_copy_full(new_entry, entry)(*(new_entry) = *(entry)); | |||
1089 | ||||
1090 | new_entry->vme_endlinks.end = start; | |||
1091 | entry->offset += (start - entry->vme_startlinks.start); | |||
1092 | entry->vme_startlinks.start = start; | |||
1093 | ||||
1094 | _vm_map_entry_link(map_header, entry->vme_prev, new_entry)({ (map_header)->nentries++; (new_entry)->links.prev = ( entry->links.prev); (new_entry)->links.next = (entry-> links.prev)->links.next; (new_entry)->links.prev->links .next = (new_entry)->links.next->links.prev = (new_entry ); ({ struct rbtree_node *___cur, *___prev; int ___diff, ___index ; ___prev = ((void *) 0); ___index = -1; ___cur = (&(map_header )->tree)->root; while (___cur != ((void *) 0)) { ___diff = vm_map_entry_cmp_insert(&(new_entry)->tree_node, ___cur ); ({ if (!(___diff != 0)) Assert("___diff != 0", "../vm/vm_map.c" , 1094); }); ___prev = ___cur; ___index = rbtree_d2i(___diff) ; ___cur = ___cur->children[___index]; } rbtree_insert_rebalance (&(map_header)->tree, ___prev, ___index, &(new_entry )->tree_node); }); }); | |||
1095 | ||||
1096 | if (entry->is_sub_map) | |||
1097 | vm_map_reference(new_entry->object.sub_map); | |||
1098 | else | |||
1099 | vm_object_reference(new_entry->object.vm_object); | |||
1100 | } | |||
1101 | ||||
1102 | /* | |||
1103 | * vm_map_clip_end: [ internal use only ] | |||
1104 | * | |||
1105 | * Asserts that the given entry ends at or before | |||
1106 | * the specified address; if necessary, | |||
1107 | * it splits the entry into two. | |||
1108 | */ | |||
1109 | void _vm_map_clip_end(); | |||
1110 | #define vm_map_clip_end(map, entry, endaddr)({ if ((endaddr) < (entry)->links.end) _vm_map_clip_end (&(map)->hdr,(entry),(endaddr)); }) \ | |||
1111 | MACRO_BEGIN({ \ | |||
1112 | if ((endaddr) < (entry)->vme_endlinks.end) \ | |||
1113 | _vm_map_clip_end(&(map)->hdr,(entry),(endaddr)); \ | |||
1114 | MACRO_END}) | |||
1115 | ||||
1116 | void _vm_map_copy_clip_end(); | |||
1117 | #define vm_map_copy_clip_end(copy, entry, endaddr)({ if ((endaddr) < (entry)->links.end) _vm_map_clip_end (&(copy)->c_u.hdr,(entry),(endaddr)); }) \ | |||
1118 | MACRO_BEGIN({ \ | |||
1119 | if ((endaddr) < (entry)->vme_endlinks.end) \ | |||
1120 | _vm_map_clip_end(&(copy)->cpy_hdrc_u.hdr,(entry),(endaddr)); \ | |||
1121 | MACRO_END}) | |||
1122 | ||||
1123 | /* | |||
1124 | * This routine is called only when it is known that | |||
1125 | * the entry must be split. | |||
1126 | */ | |||
1127 | void _vm_map_clip_end(map_header, entry, end) | |||
1128 | register struct vm_map_header *map_header; | |||
1129 | register vm_map_entry_t entry; | |||
1130 | register vm_offset_t end; | |||
1131 | { | |||
1132 | register vm_map_entry_t new_entry; | |||
1133 | ||||
1134 | /* | |||
1135 | * Create a new entry and insert it | |||
1136 | * AFTER the specified entry | |||
1137 | */ | |||
1138 | ||||
1139 | new_entry = _vm_map_entry_create(map_header); | |||
1140 | vm_map_entry_copy_full(new_entry, entry)(*(new_entry) = *(entry)); | |||
1141 | ||||
1142 | new_entry->vme_startlinks.start = entry->vme_endlinks.end = end; | |||
1143 | new_entry->offset += (end - entry->vme_startlinks.start); | |||
1144 | ||||
1145 | _vm_map_entry_link(map_header, entry, new_entry)({ (map_header)->nentries++; (new_entry)->links.prev = ( entry); (new_entry)->links.next = (entry)->links.next; ( new_entry)->links.prev->links.next = (new_entry)->links .next->links.prev = (new_entry); ({ struct rbtree_node *___cur , *___prev; int ___diff, ___index; ___prev = ((void *) 0); ___index = -1; ___cur = (&(map_header)->tree)->root; while ( ___cur != ((void *) 0)) { ___diff = vm_map_entry_cmp_insert(& (new_entry)->tree_node, ___cur); ({ if (!(___diff != 0)) Assert ("___diff != 0", "../vm/vm_map.c", 1145); }); ___prev = ___cur ; ___index = rbtree_d2i(___diff); ___cur = ___cur->children [___index]; } rbtree_insert_rebalance(&(map_header)->tree , ___prev, ___index, &(new_entry)->tree_node); }); }); | |||
1146 | ||||
1147 | if (entry->is_sub_map) | |||
1148 | vm_map_reference(new_entry->object.sub_map); | |||
1149 | else | |||
1150 | vm_object_reference(new_entry->object.vm_object); | |||
1151 | } | |||
1152 | ||||
1153 | /* | |||
1154 | * VM_MAP_RANGE_CHECK: [ internal use only ] | |||
1155 | * | |||
1156 | * Asserts that the starting and ending region | |||
1157 | * addresses fall within the valid range of the map. | |||
1158 | */ | |||
1159 | #define VM_MAP_RANGE_CHECK(map, start, end){ if (start < ((map)->hdr.links.start)) start = ((map)-> hdr.links.start); if (end > ((map)->hdr.links.end)) end = ((map)->hdr.links.end); if (start > end) start = end ; } \ | |||
1160 | { \ | |||
1161 | if (start < vm_map_min(map)((map)->hdr.links.start)) \ | |||
1162 | start = vm_map_min(map)((map)->hdr.links.start); \ | |||
1163 | if (end > vm_map_max(map)((map)->hdr.links.end)) \ | |||
1164 | end = vm_map_max(map)((map)->hdr.links.end); \ | |||
1165 | if (start > end) \ | |||
1166 | start = end; \ | |||
1167 | } | |||
1168 | ||||
1169 | /* | |||
1170 | * vm_map_submap: [ kernel use only ] | |||
1171 | * | |||
1172 | * Mark the given range as handled by a subordinate map. | |||
1173 | * | |||
1174 | * This range must have been created with vm_map_find using | |||
1175 | * the vm_submap_object, and no other operations may have been | |||
1176 | * performed on this range prior to calling vm_map_submap. | |||
1177 | * | |||
1178 | * Only a limited number of operations can be performed | |||
1179 | * within this range after calling vm_map_submap: | |||
1180 | * vm_fault | |||
1181 | * [Don't try vm_map_copyin!] | |||
1182 | * | |||
1183 | * To remove a submapping, one must first remove the | |||
1184 | * range from the superior map, and then destroy the | |||
1185 | * submap (if desired). [Better yet, don't try it.] | |||
1186 | */ | |||
1187 | kern_return_t vm_map_submap(map, start, end, submap) | |||
1188 | register vm_map_t map; | |||
1189 | register vm_offset_t start; | |||
1190 | register vm_offset_t end; | |||
1191 | vm_map_t submap; | |||
1192 | { | |||
1193 | vm_map_entry_t entry; | |||
1194 | register kern_return_t result = KERN_INVALID_ARGUMENT4; | |||
1195 | register vm_object_t object; | |||
1196 | ||||
1197 | vm_map_lock(map)({ lock_write(&(map)->lock); (map)->timestamp++; }); | |||
1198 | ||||
1199 | VM_MAP_RANGE_CHECK(map, start, end){ if (start < ((map)->hdr.links.start)) start = ((map)-> hdr.links.start); if (end > ((map)->hdr.links.end)) end = ((map)->hdr.links.end); if (start > end) start = end ; }; | |||
1200 | ||||
1201 | if (vm_map_lookup_entry(map, start, &entry)) { | |||
1202 | vm_map_clip_start(map, entry, start)({ if ((start) > (entry)->links.start) _vm_map_clip_start (&(map)->hdr,(entry),(start)); }); | |||
1203 | } | |||
1204 | else | |||
1205 | entry = entry->vme_nextlinks.next; | |||
1206 | ||||
1207 | vm_map_clip_end(map, entry, end)({ if ((end) < (entry)->links.end) _vm_map_clip_end(& (map)->hdr,(entry),(end)); }); | |||
1208 | ||||
1209 | if ((entry->vme_startlinks.start == start) && (entry->vme_endlinks.end == end) && | |||
1210 | (!entry->is_sub_map) && | |||
1211 | ((object = entry->object.vm_object) == vm_submap_object) && | |||
1212 | (object->resident_page_count == 0) && | |||
1213 | (object->copy == VM_OBJECT_NULL((vm_object_t) 0)) && | |||
1214 | (object->shadow == VM_OBJECT_NULL((vm_object_t) 0)) && | |||
1215 | (!object->pager_created)) { | |||
1216 | entry->object.vm_object = VM_OBJECT_NULL((vm_object_t) 0); | |||
1217 | vm_object_deallocate(object); | |||
1218 | entry->is_sub_map = TRUE((boolean_t) 1); | |||
1219 | vm_map_reference(entry->object.sub_map = submap); | |||
1220 | result = KERN_SUCCESS0; | |||
1221 | } | |||
1222 | vm_map_unlock(map)lock_done(&(map)->lock); | |||
1223 | ||||
1224 | return(result); | |||
1225 | } | |||
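| /* | |||
|  * Usage sketch (illustrative, not in the original source), following | |||
|  * the protocol described above: the caller first reserves the range | |||
|  * with vm_map_find using vm_submap_object, and only then installs | |||
|  * the submap: | |||
|  * | |||
|  *	kr = vm_map_submap(map, start, end, submap); | |||
|  * | |||
|  * The call succeeds only if the range exactly matches one entry that | |||
|  * still carries the untouched vm_submap_object, as checked above. | |||
|  */ | |||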
1226 | ||||
1227 | /* | |||
1228 | * vm_map_protect: | |||
1229 | * | |||
1230 | * Sets the protection of the specified address | |||
1231 | * region in the target map. If "set_max" is | |||
1232 | * specified, the maximum protection is to be set; | |||
1233 | * otherwise, only the current protection is affected. | |||
1234 | */ | |||
1235 | kern_return_t vm_map_protect(map, start, end, new_prot, set_max) | |||
1236 | register vm_map_t map; | |||
1237 | register vm_offset_t start; | |||
1238 | register vm_offset_t end; | |||
1239 | register vm_prot_t new_prot; | |||
1240 | register boolean_t set_max; | |||
1241 | { | |||
1242 | register vm_map_entry_t current; | |||
1243 | vm_map_entry_t entry; | |||
1244 | ||||
1245 | vm_map_lock(map)({ lock_write(&(map)->lock); (map)->timestamp++; }); | |||
1246 | ||||
1247 | VM_MAP_RANGE_CHECK(map, start, end){ if (start < ((map)->hdr.links.start)) start = ((map)-> hdr.links.start); if (end > ((map)->hdr.links.end)) end = ((map)->hdr.links.end); if (start > end) start = end ; }; | |||
1248 | ||||
1249 | if (vm_map_lookup_entry(map, start, &entry)) { | |||
1250 | vm_map_clip_start(map, entry, start)({ if ((start) > (entry)->links.start) _vm_map_clip_start (&(map)->hdr,(entry),(start)); }); | |||
1251 | } | |||
1252 | else | |||
1253 | entry = entry->vme_nextlinks.next; | |||
1254 | ||||
1255 | /* | |||
1256 | * Make a first pass to check for protection | |||
1257 | * violations. | |||
1258 | */ | |||
1259 | ||||
1260 | current = entry; | |||
1261 | while ((current != vm_map_to_entry(map)((struct vm_map_entry *) &(map)->hdr.links)) && | |||
1262 | (current->vme_startlinks.start < end)) { | |||
1263 | ||||
1264 | if (current->is_sub_map) { | |||
1265 | vm_map_unlock(map)lock_done(&(map)->lock); | |||
1266 | return(KERN_INVALID_ARGUMENT4); | |||
1267 | } | |||
1268 | if ((new_prot & (VM_PROT_NOTIFY((vm_prot_t) 0x10) | current->max_protection)) | |||
1269 | != new_prot) { | |||
1270 | vm_map_unlock(map)lock_done(&(map)->lock); | |||
1271 | return(KERN_PROTECTION_FAILURE2); | |||
1272 | } | |||
1273 | ||||
1274 | current = current->vme_nextlinks.next; | |||
1275 | } | |||
1276 | ||||
1277 | /* | |||
1278 | * Go back and fix up protections. | |||
1279 | * [Note that clipping is not necessary the second time.] | |||
1280 | */ | |||
1281 | ||||
1282 | current = entry; | |||
1283 | ||||
1284 | while ((current != vm_map_to_entry(map)((struct vm_map_entry *) &(map)->hdr.links)) && | |||
1285 | (current->vme_startlinks.start < end)) { | |||
1286 | ||||
1287 | vm_prot_t old_prot; | |||
1288 | ||||
1289 | vm_map_clip_end(map, current, end)({ if ((end) < (current)->links.end) _vm_map_clip_end(& (map)->hdr,(current),(end)); }); | |||
1290 | ||||
1291 | old_prot = current->protection; | |||
1292 | if (set_max) | |||
1293 | current->protection = | |||
1294 | (current->max_protection = new_prot) & | |||
1295 | old_prot; | |||
1296 | else | |||
1297 | current->protection = new_prot; | |||
1298 | ||||
1299 | /* | |||
1300 | * Update physical map if necessary. | |||
1301 | */ | |||
1302 | ||||
1303 | if (current->protection != old_prot) { | |||
1304 | pmap_protect(map->pmap, current->vme_startlinks.start, | |||
1305 | current->vme_endlinks.end, | |||
1306 | current->protection); | |||
1307 | } | |||
1308 | current = current->vme_nextlinks.next; | |||
1309 | } | |||
1310 | ||||
1311 | vm_map_unlock(map)lock_done(&(map)->lock); | |||
1312 | return(KERN_SUCCESS0); | |||
1313 | } | |||
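| /* | |||
|  * Illustrative note (not in the original source): with set_max, the | |||
|  * new value becomes the maximum protection and the current protection | |||
|  * is intersected with it.  For example, an entry currently allowing | |||
|  * VM_PROT_READ | VM_PROT_WRITE that is given new_prot = VM_PROT_READ | |||
|  * with set_max ends up with max_protection == VM_PROT_READ and | |||
|  * protection == VM_PROT_READ, and pmap_protect() drops the write | |||
|  * mapping from the physical map. | |||
|  */ | |||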
1314 | ||||
1315 | /* | |||
1316 | * vm_map_inherit: | |||
1317 | * | |||
1318 | * Sets the inheritance of the specified address | |||
1319 | * range in the target map. Inheritance | |||
1320 | * affects how the map will be shared with | |||
1321 | * child maps at the time of vm_map_fork. | |||
1322 | */ | |||
1323 | kern_return_t vm_map_inherit(map, start, end, new_inheritance) | |||
1324 | register vm_map_t map; | |||
1325 | register vm_offset_t start; | |||
1326 | register vm_offset_t end; | |||
1327 | register vm_inherit_t new_inheritance; | |||
1328 | { | |||
1329 | register vm_map_entry_t entry; | |||
1330 | vm_map_entry_t temp_entry; | |||
1331 | ||||
1332 | vm_map_lock(map)({ lock_write(&(map)->lock); (map)->timestamp++; }); | |||
1333 | ||||
1334 | VM_MAP_RANGE_CHECK(map, start, end){ if (start < ((map)->hdr.links.start)) start = ((map)-> hdr.links.start); if (end > ((map)->hdr.links.end)) end = ((map)->hdr.links.end); if (start > end) start = end ; }; | |||
1335 | ||||
1336 | if (vm_map_lookup_entry(map, start, &temp_entry)) { | |||
1337 | entry = temp_entry; | |||
1338 | vm_map_clip_start(map, entry, start)({ if ((start) > (entry)->links.start) _vm_map_clip_start (&(map)->hdr,(entry),(start)); }); | |||
1339 | } | |||
1340 | else | |||
1341 | entry = temp_entry->vme_nextlinks.next; | |||
1342 | ||||
1343 | while ((entry != vm_map_to_entry(map)((struct vm_map_entry *) &(map)->hdr.links)) && (entry->vme_startlinks.start < end)) { | |||
1344 | vm_map_clip_end(map, entry, end)({ if ((end) < (entry)->links.end) _vm_map_clip_end(& (map)->hdr,(entry),(end)); }); | |||
1345 | ||||
1346 | entry->inheritance = new_inheritance; | |||
1347 | ||||
1348 | entry = entry->vme_nextlinks.next; | |||
1349 | } | |||
1350 | ||||
1351 | vm_map_unlock(map)lock_done(&(map)->lock); | |||
1352 | return(KERN_SUCCESS0); | |||
1353 | } | |||
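| /* | |||
|  * Usage sketch (illustrative, not in the original source; the | |||
|  * VM_INHERIT_* values are the standard Mach inheritance codes): | |||
|  * marking a range VM_INHERIT_NONE keeps it out of any child map | |||
|  * created by a later vm_map_fork, while VM_INHERIT_SHARE and | |||
|  * VM_INHERIT_COPY give the child a shared or copied view of it: | |||
|  * | |||
|  *	kr = vm_map_inherit(map, start, end, VM_INHERIT_NONE); | |||
|  */ | |||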
1354 | ||||
1355 | /* | |||
1356 | * vm_map_pageable_common: | |||
1357 | * | |||
1358 | * Sets the pageability of the specified address | |||
1359 | * range in the target map. Regions specified | |||
1360 | * as not pageable require locked-down physical | |||
1361 | * memory and physical page maps. access_type indicates | |||
1362 | * types of accesses that must not generate page faults. | |||
1363 | * This is checked against protection of memory being locked-down. | |||
1364 | * access_type of VM_PROT_NONE makes memory pageable. | |||
1365 | * | |||
1366 | * The map must not be locked, but a reference | |||
1367 | * must remain to the map throughout the call. | |||
1368 | * | |||
1369 | * Callers should use macros in vm/vm_map.h (i.e. vm_map_pageable, | |||
1370 | * or vm_map_pageable_user); don't call vm_map_pageable_common directly. | |||
1371 | */ | |||
1372 | kern_return_t vm_map_pageable_common(map, start, end, access_type, user_wire) | |||
1373 | register vm_map_t map; | |||
1374 | register vm_offset_t start; | |||
1375 | register vm_offset_t end; | |||
1376 | register vm_prot_t access_type; | |||
1377 | boolean_t user_wire; | |||
1378 | { | |||
1379 | register vm_map_entry_t entry; | |||
1380 | vm_map_entry_t start_entry; | |||
1381 | ||||
1382 | vm_map_lock(map)({ lock_write(&(map)->lock); (map)->timestamp++; }); | |||
1383 | ||||
1384 | VM_MAP_RANGE_CHECK(map, start, end){ if (start < ((map)->hdr.links.start)) start = ((map)-> hdr.links.start); if (end > ((map)->hdr.links.end)) end = ((map)->hdr.links.end); if (start > end) start = end ; }; | |||
1385 | ||||
1386 | if (vm_map_lookup_entry(map, start, &start_entry)) { | |||
1387 | entry = start_entry; | |||
1388 | /* | |||
1389 | * vm_map_clip_start will be done later. | |||
1390 | */ | |||
1391 | } | |||
1392 | else { | |||
1393 | /* | |||
1394 | * Start address is not in map; this is fatal. | |||
1395 | */ | |||
1396 | vm_map_unlock(map)lock_done(&(map)->lock); | |||
1397 | return(KERN_FAILURE5); | |||
1398 | } | |||
1399 | ||||
1400 | /* | |||
1401 | * Actions are rather different for wiring and unwiring, | |||
1402 | * so we have two separate cases. | |||
1403 | */ | |||
1404 | ||||
1405 | if (access_type == VM_PROT_NONE((vm_prot_t) 0x00)) { | |||
1406 | ||||
1407 | vm_map_clip_start(map, entry, start)({ if ((start) > (entry)->links.start) _vm_map_clip_start (&(map)->hdr,(entry),(start)); }); | |||
1408 | ||||
1409 | /* | |||
1410 | * Unwiring. First ensure that the range to be | |||
1411 | * unwired is really wired down. | |||
1412 | */ | |||
1413 | while ((entry != vm_map_to_entry(map)((struct vm_map_entry *) &(map)->hdr.links)) && | |||
1414 | (entry->vme_startlinks.start < end)) { | |||
1415 | ||||
1416 | if ((entry->wired_count == 0) || | |||
1417 | ((entry->vme_endlinks.end < end) && | |||
1418 | ((entry->vme_nextlinks.next == vm_map_to_entry(map)((struct vm_map_entry *) &(map)->hdr.links)) || | |||
1419 | (entry->vme_nextlinks.next->vme_startlinks.start > entry->vme_endlinks.end))) || | |||
1420 | (user_wire && (entry->user_wired_count == 0))) { | |||
1421 | vm_map_unlock(map)lock_done(&(map)->lock); | |||
1422 | return(KERN_INVALID_ARGUMENT4); | |||
1423 | } | |||
1424 | entry = entry->vme_nextlinks.next; | |||
1425 | } | |||
1426 | ||||
1427 | /* | |||
1428 | * Now decrement the wiring count for each region. | |||
1429 | * If a region becomes completely unwired, | |||
1430 | * unwire its physical pages and mappings. | |||
1431 | */ | |||
1432 | entry = start_entry; | |||
1433 | while ((entry != vm_map_to_entry(map)((struct vm_map_entry *) &(map)->hdr.links)) && | |||
1434 | (entry->vme_startlinks.start < end)) { | |||
1435 | vm_map_clip_end(map, entry, end)({ if ((end) < (entry)->links.end) _vm_map_clip_end(& (map)->hdr,(entry),(end)); }); | |||
1436 | ||||
1437 | if (user_wire) { | |||
1438 | if (--(entry->user_wired_count) == 0) | |||
1439 | entry->wired_count--; | |||
1440 | } | |||
1441 | else { | |||
1442 | entry->wired_count--; | |||
1443 | } | |||
1444 | ||||
1445 | if (entry->wired_count == 0) | |||
1446 | vm_fault_unwire(map, entry); | |||
1447 | ||||
1448 | entry = entry->vme_nextlinks.next; | |||
1449 | } | |||
1450 | } | |||
1451 | ||||
1452 | else { | |||
1453 | /* | |||
1454 | * Wiring. We must do this in two passes: | |||
1455 | * | |||
1456 | * 1. Holding the write lock, we create any shadow | |||
1457 | * or zero-fill objects that need to be created. | |||
1458 | * Then we clip each map entry to the region to be | |||
1459 | * wired and increment its wiring count. We | |||
1460 | * create objects before clipping the map entries | |||
1461 | * to avoid object proliferation. | |||
1462 | * | |||
1463 | * 2. We downgrade to a read lock, and call | |||
1464 | * vm_fault_wire to fault in the pages for any | |||
1465 | * newly wired area (wired_count is 1). | |||
1466 | * | |||
1467 | * Downgrading to a read lock for vm_fault_wire avoids | |||
1468 | * a possible deadlock with another thread that may have | |||
1469 | * faulted on one of the pages to be wired (it would mark | |||
1470 | * the page busy, blocking us, then in turn block on the | |||
1471 | * map lock that we hold). Because of problems in the | |||
1472 | * recursive lock package, we cannot upgrade to a write | |||
1473 | * lock in vm_map_lookup. Thus, any actions that require | |||
1474 | * the write lock must be done beforehand. Because we | |||
1475 | * keep the read lock on the map, the copy-on-write | |||
1476 | * status of the entries we modify here cannot change. | |||
1477 | */ | |||
1478 | ||||
1479 | /* | |||
1480 | * Pass 1. | |||
1481 | */ | |||
1482 | while ((entry != vm_map_to_entry(map)((struct vm_map_entry *) &(map)->hdr.links)) && | |||
1483 | (entry->vme_startlinks.start < end)) { | |||
1484 | vm_map_clip_end(map, entry, end)({ if ((end) < (entry)->links.end) _vm_map_clip_end(& (map)->hdr,(entry),(end)); }); | |||
1485 | ||||
1486 | if (entry->wired_count == 0) { | |||
1487 | ||||
1488 | /* | |||
1489 | * Perform actions of vm_map_lookup that need | |||
1490 | * the write lock on the map: create a shadow | |||
1491 | * object for a copy-on-write region, or an | |||
1492 | * object for a zero-fill region. | |||
1493 | */ | |||
1494 | if (entry->needs_copy && | |||
1495 | ((entry->protection & VM_PROT_WRITE((vm_prot_t) 0x02)) != 0)) { | |||
1496 | ||||
1497 | vm_object_shadow(&entry->object.vm_object, | |||
1498 | &entry->offset, | |||
1499 | (vm_size_t)(entry->vme_endlinks.end | |||
1500 | - entry->vme_startlinks.start)); | |||
1501 | entry->needs_copy = FALSE((boolean_t) 0); | |||
1502 | } | |||
1503 | if (entry->object.vm_object == VM_OBJECT_NULL((vm_object_t) 0)) { | |||
1504 | entry->object.vm_object = | |||
1505 | vm_object_allocate( | |||
1506 | (vm_size_t)(entry->vme_endlinks.end | |||
1507 | - entry->vme_startlinks.start)); | |||
1508 | entry->offset = (vm_offset_t)0; | |||
1509 | } | |||
1510 | } | |||
1511 | vm_map_clip_start(map, entry, start)({ if ((start) > (entry)->links.start) _vm_map_clip_start (&(map)->hdr,(entry),(start)); }); | |||
1512 | vm_map_clip_end(map, entry, end)({ if ((end) < (entry)->links.end) _vm_map_clip_end(& (map)->hdr,(entry),(end)); }); | |||
1513 | ||||
1514 | if (user_wire) { | |||
1515 | if ((entry->user_wired_count)++ == 0) | |||
1516 | entry->wired_count++; | |||
1517 | } | |||
1518 | else { | |||
1519 | entry->wired_count++; | |||
1520 | } | |||
1521 | ||||
1522 | /* | |||
1523 | * Check for holes and protection mismatch. | |||
1524 | * Holes: Next entry should be contiguous unless | |||
1525 | * this is the end of the region. | |||
1526 | * Protection: Access requested must be allowed. | |||
1527 | */ | |||
1528 | if (((entry->vme_endlinks.end < end) && | |||
1529 | ((entry->vme_nextlinks.next == vm_map_to_entry(map)((struct vm_map_entry *) &(map)->hdr.links)) || | |||
1530 | (entry->vme_nextlinks.next->vme_startlinks.start > entry->vme_endlinks.end))) || | |||
1531 | ((entry->protection & access_type) != access_type)) { | |||
1532 | /* | |||
1533 | * Found a hole or protection problem. | |||
1534 | * Object creation actions | |||
1535 | * do not need to be undone, but the | |||
1536 | * wired counts need to be restored. | |||
1537 | */ | |||
1538 | while ((entry != vm_map_to_entry(map)((struct vm_map_entry *) &(map)->hdr.links)) && | |||
1539 | (entry->vme_endlinks.end > start)) { | |||
1540 | if (user_wire) { | |||
1541 | if (--(entry->user_wired_count) == 0) | |||
1542 | entry->wired_count--; | |||
1543 | } | |||
1544 | else { | |||
1545 | entry->wired_count--; | |||
1546 | } | |||
1547 | ||||
1548 | entry = entry->vme_prevlinks.prev; | |||
1549 | } | |||
1550 | ||||
1551 | vm_map_unlock(map)lock_done(&(map)->lock); | |||
1552 | return(KERN_FAILURE5); | |||
1553 | } | |||
1554 | entry = entry->vme_nextlinks.next; | |||
1555 | } | |||
1556 | ||||
1557 | /* | |||
1558 | * Pass 2. | |||
1559 | */ | |||
1560 | ||||
1561 | /* | |||
1562 | * HACK HACK HACK HACK | |||
1563 | * | |||
1564 | * If we are wiring in the kernel map or a submap of it, | |||
1565 | * unlock the map to avoid deadlocks. We trust that the | |||
1566 | * kernel threads are well-behaved, and therefore will | |||
1567 | * not do anything destructive to this region of the map | |||
1568 | * while we have it unlocked. We cannot trust user threads | |||
1569 | * to do the same. | |||
1570 | * | |||
1571 | * HACK HACK HACK HACK | |||
1572 | */ | |||
1573 | if (vm_map_pmap(map)((map)->pmap) == kernel_pmap) { | |||
1574 | vm_map_unlock(map)lock_done(&(map)->lock); /* trust me ... */ | |||
1575 | } | |||
1576 | else { | |||
1577 | vm_map_lock_set_recursive(map)lock_set_recursive(&(map)->lock); | |||
1578 | vm_map_lock_write_to_read(map)lock_write_to_read(&(map)->lock); | |||
1579 | } | |||
1580 | ||||
1581 | entry = start_entry; | |||
1582 | while (entry != vm_map_to_entry(map)((struct vm_map_entry *) &(map)->hdr.links) && | |||
1583 | entry->vme_startlinks.start < end) { | |||
1584 | /* | |||
1585 | * Wiring cases: | |||
1586 | * Kernel: wired == 1 && user_wired == 0 | |||
1587 | * User: wired == 1 && user_wired == 1 | |||
1588 | * | |||
1589 | * Don't need to wire if either is > 1. wired == 0 && | |||
1590 | * user_wired == 1 can't happen. | |||
1591 | */ | |||
1592 | ||||
1593 | /* | |||
1594 | * XXX This assumes that the faults always succeed. | |||
1595 | */ | |||
1596 | if ((entry->wired_count == 1) && | |||
1597 | (entry->user_wired_count <= 1)) { | |||
1598 | vm_fault_wire(map, entry); | |||
1599 | } | |||
1600 | entry = entry->vme_nextlinks.next; | |||
1601 | } | |||
1602 | ||||
1603 | if (vm_map_pmap(map)((map)->pmap) == kernel_pmap) { | |||
1604 | vm_map_lock(map)({ lock_write(&(map)->lock); (map)->timestamp++; }); | |||
1605 | } | |||
1606 | else { | |||
1607 | vm_map_lock_clear_recursive(map)lock_clear_recursive(&(map)->lock); | |||
1608 | } | |||
1609 | } | |||
1610 | ||||
1611 | vm_map_unlock(map)lock_done(&(map)->lock); | |||
1612 | ||||
1613 | return(KERN_SUCCESS0); | |||
1614 | } | |||
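| /* | |||
|  * Illustrative summary (not in the original source) of the counting | |||
|  * scheme above: kernel wirings adjust wired_count directly, while | |||
|  * user wirings adjust user_wired_count and only its 0<->1 transitions | |||
|  * touch wired_count.  Pages are actually faulted in (vm_fault_wire) | |||
|  * when wired_count first reaches 1, and released (vm_fault_unwire) | |||
|  * when it drops back to 0. | |||
|  */ | |||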
1615 | ||||
1616 | /* | |||
1617 | * vm_map_entry_delete: [ internal use only ] | |||
1618 | * | |||
1619 | * Deallocate the given entry from the target map. | |||
1620 | */ | |||
1621 | void vm_map_entry_delete(map, entry) | |||
1622 | register vm_map_t map; | |||
1623 | register vm_map_entry_t entry; | |||
1624 | { | |||
1625 | register vm_offset_t s, e; | |||
1626 | register vm_object_t object; | |||
1627 | extern vm_object_t kernel_object; | |||
1628 | ||||
1629 | s = entry->vme_startlinks.start; | |||
1630 | e = entry->vme_endlinks.end; | |||
1631 | ||||
1632 | /*Check if projected buffer*/ | |||
1633 | if (map != kernel_map && entry->projected_on != 0) { | |||
1634 | /*Check if projected kernel entry is persistent; | |||
1635 | may only manipulate directly if it is*/ | |||
1636 | if (entry->projected_on->projected_on == 0) | |||
1637 | entry->wired_count = 0; /*Avoid unwire fault*/ | |||
1638 | else | |||
1639 | return; | |||
1640 | } | |||
1641 | ||||
1642 | /* | |||
1643 | * Get the object. Null objects cannot have pmap entries. | |||
1644 | */ | |||
1645 | ||||
1646 | if ((object = entry->object.vm_object) != VM_OBJECT_NULL((vm_object_t) 0)) { | |||
1647 | ||||
1648 | /* | |||
1649 | * Unwire before removing addresses from the pmap; | |||
1650 | * otherwise, unwiring will put the entries back in | |||
1651 | * the pmap. | |||
1652 | */ | |||
1653 | ||||
1654 | if (entry->wired_count != 0) { | |||
1655 | vm_fault_unwire(map, entry); | |||
1656 | entry->wired_count = 0; | |||
1657 | entry->user_wired_count = 0; | |||
1658 | } | |||
1659 | ||||
1660 | /* | |||
1661 | * If the object is shared, we must remove | |||
1662 | * *all* references to this data, since we can't | |||
1663 | * find all of the physical maps which are sharing | |||
1664 | * it. | |||
1665 | */ | |||
1666 | ||||
1667 | if (object == kernel_object) { | |||
1668 | vm_object_lock(object); | |||
1669 | vm_object_page_remove(object, entry->offset, | |||
1670 | entry->offset + (e - s)); | |||
1671 | vm_object_unlock(object); | |||
1672 | } else if (entry->is_shared) { | |||
1673 | vm_object_pmap_remove(object, | |||
1674 | entry->offset, | |||
1675 | entry->offset + (e - s)); | |||
1676 | } | |||
1677 | else { | |||
1678 | pmap_remove(map->pmap, s, e); | |||
1679 | } | |||
1680 | } | |||
1681 | ||||
1682 | /* | |||
1683 | * Deallocate the object only after removing all | |||
1684 | * pmap entries pointing to its pages. | |||
1685 | */ | |||
1686 | ||||
1687 | if (entry->is_sub_map) | |||
1688 | vm_map_deallocate(entry->object.sub_map); | |||
1689 | else | |||
1690 | vm_object_deallocate(entry->object.vm_object); | |||
1691 | ||||
1692 | vm_map_entry_unlink(map, entry)({ (&(map)->hdr)->nentries--; (entry)->links.next ->links.prev = (entry)->links.prev; (entry)->links.prev ->links.next = (entry)->links.next; rbtree_remove(& (&(map)->hdr)->tree, &(entry)->tree_node); } ); | |||
1693 | map->size -= e - s; | |||
1694 | ||||
1695 | vm_map_entry_dispose(map, entry)_vm_map_entry_dispose(&(map)->hdr, (entry)); | |||
1696 | } | |||
1697 | ||||
1698 | /* | |||
1699 | * vm_map_delete: [ internal use only ] | |||
1700 | * | |||
1701 | * Deallocates the given address range from the target | |||
1702 | * map. | |||
1703 | */ | |||
1704 | ||||
1705 | kern_return_t vm_map_delete(map, start, end) | |||
1706 | register vm_map_t map; | |||
1707 | register vm_offset_t start; | |||
1708 | register vm_offset_t end; | |||
1709 | { | |||
1710 | vm_map_entry_t entry; | |||
1711 | vm_map_entry_t first_entry; | |||
1712 | ||||
1713 | /* | |||
1714 | * Find the start of the region, and clip it | |||
1715 | */ | |||
1716 | ||||
1717 | if (!vm_map_lookup_entry(map, start, &first_entry)) | |||
1718 | entry = first_entry->vme_nextlinks.next; | |||
1719 | else { | |||
1720 | entry = first_entry; | |||
1721 | vm_map_clip_start(map, entry, start)({ if ((start) > (entry)->links.start) _vm_map_clip_start (&(map)->hdr,(entry),(start)); }); | |||
1722 | ||||
1723 | /* | |||
1724 | * Fix the lookup hint now, rather than each | |||
1725 | * time through the loop. | |||
1726 | */ | |||
1727 | ||||
1728 | SAVE_HINT(map, entry->vme_prev); (map)->hint = (entry->links.prev); ;; | |||
1729 | } | |||
1730 | ||||
1731 | /* | |||
1732 | * Save the free space hint | |||
1733 | */ | |||
1734 | ||||
1735 | if (map->first_free->vme_startlinks.start >= start) | |||
1736 | map->first_free = entry->vme_prevlinks.prev; | |||
1737 | ||||
1738 | /* | |||
1739 | * Step through all entries in this region | |||
1740 | */ | |||
1741 | ||||
1742 | while ((entry != vm_map_to_entry(map)((struct vm_map_entry *) &(map)->hdr.links)) && (entry->vme_startlinks.start < end)) { | |||
1743 | vm_map_entry_t next; | |||
1744 | ||||
1745 | vm_map_clip_end(map, entry, end)({ if ((end) < (entry)->links.end) _vm_map_clip_end(& (map)->hdr,(entry),(end)); }); | |||
1746 | ||||
1747 | /* | |||
1748 | * If the entry is in transition, we must wait | |||
1749 | * for it to exit that state. It could be clipped | |||
1750 | * while we leave the map unlocked. | |||
1751 | */ | |||
1752 | if(entry->in_transition) { | |||
1753 | /* | |||
1754 | * Say that we are waiting, and wait for entry. | |||
1755 | */ | |||
1756 | entry->needs_wakeup = TRUE((boolean_t) 1); | |||
1757 | vm_map_entry_wait(map, FALSE)({ assert_wait((event_t)&(map)->hdr, ((boolean_t) 0)); lock_done(&(map)->lock); thread_block((void (*)()) 0) ; }); | |||
1758 | vm_map_lock(map)({ lock_write(&(map)->lock); (map)->timestamp++; }); | |||
1759 | ||||
1760 | /* | |||
1761 | * The entry could have been clipped or it | |||
1762 | * may not exist anymore. look it up again. | |||
1763 | */ | |||
1764 | if(!vm_map_lookup_entry(map, start, &entry)) { | |||
1765 | entry = entry->vme_nextlinks.next; | |||
1766 | } | |||
1767 | continue; | |||
1768 | } | |||
1769 | ||||
1770 | next = entry->vme_nextlinks.next; | |||
1771 | ||||
1772 | vm_map_entry_delete(map, entry); | |||
1773 | entry = next; | |||
1774 | } | |||
1775 | ||||
1776 | if (map->wait_for_space) | |||
1777 | thread_wakeup((event_t) map)thread_wakeup_prim(((event_t) map), ((boolean_t) 0), 0); | |||
1778 | ||||
1779 | return(KERN_SUCCESS0); | |||
1780 | } | |||
1781 | ||||
1782 | /* | |||
1783 | * vm_map_remove: | |||
1784 | * | |||
1785 | * Remove the given address range from the target map. | |||
1786 | * This is the exported form of vm_map_delete. | |||
1787 | */ | |||
1788 | kern_return_t vm_map_remove(map, start, end) | |||
1789 | register vm_map_t map; | |||
1790 | register vm_offset_t start; | |||
1791 | register vm_offset_t end; | |||
1792 | { | |||
1793 | register kern_return_t result; | |||
1794 | ||||
1795 | vm_map_lock(map)({ lock_write(&(map)->lock); (map)->timestamp++; }); | |||
1796 | VM_MAP_RANGE_CHECK(map, start, end){ if (start < ((map)->hdr.links.start)) start = ((map)-> hdr.links.start); if (end > ((map)->hdr.links.end)) end = ((map)->hdr.links.end); if (start > end) start = end ; }; | |||
1797 | result = vm_map_delete(map, start, end); | |||
1798 | vm_map_unlock(map)lock_done(&(map)->lock); | |||
1799 | ||||
1800 | return(result); | |||
1801 | } | |||
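| /* | |||
|  * Usage sketch (illustrative, not in the original source): | |||
|  * | |||
|  *	kern_return_t kr = vm_map_remove(map, start, start + size); | |||
|  * | |||
|  * Unlike vm_map_delete, this takes and releases the map lock itself | |||
|  * and clamps the range with VM_MAP_RANGE_CHECK first. | |||
|  */ | |||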
1802 | ||||
1803 | ||||
1804 | /* | |||
1805 | * vm_map_copy_steal_pages: | |||
1806 | * | |||
1807 | * Steal all the pages from a vm_map_copy page_list by copying ones | |||
1808 | * that have not already been stolen. | |||
1809 | */ | |||
1810 | void | |||
1811 | vm_map_copy_steal_pages(copy) | |||
1812 | vm_map_copy_t copy; | |||
1813 | { | |||
1814 | register vm_page_t m, new_m; | |||
1815 | register int i; | |||
1816 | vm_object_t object; | |||
1817 | ||||
1818 | for (i = 0; i < copy->cpy_npagesc_u.c_p.npages; i++) { | |||
1819 | ||||
1820 | /* | |||
1821 | * If the page is not tabled, then it's already stolen. | |||
1822 | */ | |||
1823 | m = copy->cpy_page_listc_u.c_p.page_list[i]; | |||
1824 | if (!m->tabled) | |||
1825 | continue; | |||
1826 | ||||
1827 | /* | |||
1828 | * Page was not stolen, get a new | |||
1829 | * one and do the copy now. | |||
1830 | */ | |||
1831 | while ((new_m = vm_page_grab(FALSE((boolean_t) 0))) == VM_PAGE_NULL((vm_page_t) 0)) { | |||
1832 | VM_PAGE_WAIT((void(*)()) 0)vm_page_wait((void(*)()) 0); | |||
1833 | } | |||
1834 | ||||
1835 | vm_page_copy(m, new_m); | |||
1836 | ||||
1837 | object = m->object; | |||
1838 | vm_object_lock(object); | |||
1839 | vm_page_lock_queues(); | |||
1840 | if (!m->active && !m->inactive) | |||
1841 | vm_page_activate(m); | |||
1842 | vm_page_unlock_queues(); | |||
1843 | PAGE_WAKEUP_DONE(m)({ (m)->busy = ((boolean_t) 0); if ((m)->wanted) { (m)-> wanted = ((boolean_t) 0); thread_wakeup_prim((((event_t) m)), ((boolean_t) 0), 0); } }); | |||
1844 | vm_object_paging_end(object)({ ({ if (!((object)->paging_in_progress != 0)) Assert("(object)->paging_in_progress != 0" , "../vm/vm_map.c", 1844); }); if (--(object)->paging_in_progress == 0) { ({ if ((object)->all_wanted & (1 << (2) )) thread_wakeup_prim(((event_t)(((vm_offset_t) object) + (2) )), ((boolean_t) 0), 0); (object)->all_wanted &= ~(1 << (2)); }); } }); | |||
1845 | vm_object_unlock(object); | |||
1846 | ||||
1847 | copy->cpy_page_listc_u.c_p.page_list[i] = new_m; | |||
1848 | } | |||
1849 | } | |||
1850 | ||||
1851 | /* | |||
1852 | * vm_map_copy_page_discard: | |||
1853 | * | |||
1854 | * Get rid of the pages in a page_list copy. If the pages are | |||
1855 | * stolen, they are freed. If the pages are not stolen, they | |||
1856 | * are unbusied, and associated state is cleaned up. | |||
1857 | */ | |||
1858 | void vm_map_copy_page_discard(copy) | |||
1859 | vm_map_copy_t copy; | |||
1860 | { | |||
1861 | while (copy->cpy_npagesc_u.c_p.npages > 0) { | |||
1862 | vm_page_t m; | |||
1863 | ||||
1864 | if((m = copy->cpy_page_listc_u.c_p.page_list[--(copy->cpy_npagesc_u.c_p.npages)]) != | |||
1865 | VM_PAGE_NULL((vm_page_t) 0)) { | |||
1866 | ||||
1867 | /* | |||
1868 | * If it's not in the table, then it's | |||
1869 | * a stolen page that goes back | |||
1870 | * to the free list. Else it belongs | |||
1871 | * to some object, and we hold a | |||
1872 | * paging reference on that object. | |||
1873 | */ | |||
1874 | if (!m->tabled) { | |||
1875 | VM_PAGE_FREE(m)({ ; vm_page_free(m); ; }); | |||
1876 | } | |||
1877 | else { | |||
1878 | vm_object_t object; | |||
1879 | ||||
1880 | object = m->object; | |||
1881 | ||||
1882 | vm_object_lock(object); | |||
1883 | vm_page_lock_queues(); | |||
1884 | if (!m->active && !m->inactive) | |||
1885 | vm_page_activate(m); | |||
1886 | vm_page_unlock_queues(); | |||
1887 | ||||
1888 | PAGE_WAKEUP_DONE(m)({ (m)->busy = ((boolean_t) 0); if ((m)->wanted) { (m)-> wanted = ((boolean_t) 0); thread_wakeup_prim((((event_t) m)), ((boolean_t) 0), 0); } }); | |||
1889 | vm_object_paging_end(object)({ ({ if (!((object)->paging_in_progress != 0)) Assert("(object)->paging_in_progress != 0" , "../vm/vm_map.c", 1889); }); if (--(object)->paging_in_progress == 0) { ({ if ((object)->all_wanted & (1 << (2) )) thread_wakeup_prim(((event_t)(((vm_offset_t) object) + (2) )), ((boolean_t) 0), 0); (object)->all_wanted &= ~(1 << (2)); }); } }); | |||
1890 | vm_object_unlock(object); | |||
1891 | } | |||
1892 | } | |||
1893 | } | |||
1894 | } | |||
1895 | ||||
1896 | /* | |||
1897 | * Routine: vm_map_copy_discard | |||
1898 | * | |||
1899 | * Description: | |||
1900 | * Dispose of a map copy object (returned by | |||
1901 | * vm_map_copyin). | |||
1902 | */ | |||
1903 | void | |||
1904 | vm_map_copy_discard(copy) | |||
1905 | vm_map_copy_t copy; | |||
1906 | { | |||
1907 | free_next_copy: | |||
1908 | if (copy == VM_MAP_COPY_NULL((vm_map_copy_t) 0)) | |||
1909 | return; | |||
1910 | ||||
1911 | switch (copy->type) { | |||
1912 | case VM_MAP_COPY_ENTRY_LIST1: | |||
1913 | while (vm_map_copy_first_entry(copy)((copy)->c_u.hdr.links.next) != | |||
1914 | vm_map_copy_to_entry(copy)((struct vm_map_entry *) &(copy)->c_u.hdr.links)) { | |||
1915 | vm_map_entry_t entry = vm_map_copy_first_entry(copy)((copy)->c_u.hdr.links.next); | |||
1916 | ||||
1917 | vm_map_copy_entry_unlink(copy, entry)({ (&(copy)->c_u.hdr)->nentries--; (entry)->links .next->links.prev = (entry)->links.prev; (entry)->links .prev->links.next = (entry)->links.next; rbtree_remove( &(&(copy)->c_u.hdr)->tree, &(entry)->tree_node ); }); | |||
1918 | vm_object_deallocate(entry->object.vm_object); | |||
1919 | vm_map_copy_entry_dispose(copy, entry)_vm_map_entry_dispose(&(copy)->c_u.hdr, (entry)); | |||
1920 | } | |||
1921 | break; | |||
1922 | case VM_MAP_COPY_OBJECT2: | |||
1923 | vm_object_deallocate(copy->cpy_objectc_u.c_o.object); | |||
1924 | break; | |||
1925 | case VM_MAP_COPY_PAGE_LIST3: | |||
1926 | ||||
1927 | /* | |||
1928 | * To clean this up, we have to unbusy all the pages | |||
1929 | * and release the paging references in their objects. | |||
1930 | */ | |||
1931 | if (copy->cpy_npagesc_u.c_p.npages > 0) | |||
1932 | vm_map_copy_page_discard(copy); | |||
1933 | ||||
1934 | /* | |||
1935 | * If there's a continuation, abort it. The | |||
1936 | * abort routine releases any storage. | |||
1937 | */ | |||
1938 | if (vm_map_copy_has_cont(copy)(((copy)->c_u.c_p.cont) != (kern_return_t (*)()) 0)) { | |||
1939 | ||||
1940 | /* | |||
1941 | * Special case: recognize | |||
1942 | * vm_map_copy_discard_cont and optimize | |||
1943 | * here to avoid tail recursion. | |||
1944 | */ | |||
1945 | if (copy->cpy_contc_u.c_p.cont == vm_map_copy_discard_cont) { | |||
1946 | register vm_map_copy_t new_copy; | |||
1947 | ||||
1948 | new_copy = (vm_map_copy_t) copy->cpy_cont_argsc_u.c_p.cont_args; | |||
1949 | kmem_cache_free(&vm_map_copy_cache, (vm_offset_t) copy); | |||
1950 | copy = new_copy; | |||
1951 | goto free_next_copy; | |||
1952 | } | |||
1953 | else { | |||
1954 | vm_map_copy_abort_cont(copy)({ vm_map_copy_page_discard(copy); (*((copy)->c_u.c_p.cont ))((copy)->c_u.c_p.cont_args, (vm_map_copy_t *) 0); (copy) ->c_u.c_p.cont = (kern_return_t (*)()) 0; (copy)->c_u.c_p .cont_args = (char *) 0; }); | |||
1955 | } | |||
1956 | } | |||
1957 | ||||
1958 | break; | |||
1959 | } | |||
1960 | kmem_cache_free(&vm_map_copy_cache, (vm_offset_t) copy); | |||
1961 | } | |||
1962 | ||||
1963 | /* | |||
1964 | * Routine: vm_map_copy_copy | |||
1965 | * | |||
1966 | * Description: | |||
1967 | * Move the information in a map copy object to | |||
1968 | * a new map copy object, leaving the old one | |||
1969 | * empty. | |||
1970 | * | |||
1971 | * This is used by kernel routines that need | |||
1972 | * to look at out-of-line data (in copyin form) | |||
1973 | * before deciding whether to return SUCCESS. | |||
1974 | * If the routine returns FAILURE, the original | |||
1975 | * copy object will be deallocated; therefore, | |||
1976 | * these routines must make a copy of the copy | |||
1977 | * object and leave the original empty so that | |||
1978 | * deallocation will not fail. | |||
1979 | */ | |||
1980 | vm_map_copy_t | |||
1981 | vm_map_copy_copy(copy) | |||
1982 | vm_map_copy_t copy; | |||
1983 | { | |||
1984 | vm_map_copy_t new_copy; | |||
1985 | ||||
1986 | if (copy == VM_MAP_COPY_NULL((vm_map_copy_t) 0)) | |||
1987 | return VM_MAP_COPY_NULL((vm_map_copy_t) 0); | |||
1988 | ||||
1989 | /* | |||
1990 | * Allocate a new copy object, and copy the information | |||
1991 | * from the old one into it. | |||
1992 | */ | |||
1993 | ||||
1994 | new_copy = (vm_map_copy_t) kmem_cache_alloc(&vm_map_copy_cache); | |||
1995 | *new_copy = *copy; | |||
1996 | ||||
1997 | if (copy->type == VM_MAP_COPY_ENTRY_LIST1) { | |||
1998 | /* | |||
1999 | * The links in the entry chain must be | |||
2000 | * changed to point to the new copy object. | |||
2001 | */ | |||
2002 | vm_map_copy_first_entry(copy)((copy)->c_u.hdr.links.next)->vme_prevlinks.prev | |||
2003 | = vm_map_copy_to_entry(new_copy)((struct vm_map_entry *) &(new_copy)->c_u.hdr.links); | |||
2004 | vm_map_copy_last_entry(copy)((copy)->c_u.hdr.links.prev)->vme_nextlinks.next | |||
2005 | = vm_map_copy_to_entry(new_copy)((struct vm_map_entry *) &(new_copy)->c_u.hdr.links); | |||
2006 | } | |||
2007 | ||||
2008 | /* | |||
2009 | * Change the old copy object into one that contains | |||
2010 | * nothing to be deallocated. | |||
2011 | */ | |||
2012 | copy->type = VM_MAP_COPY_OBJECT2; | |||
2013 | copy->cpy_objectc_u.c_o.object = VM_OBJECT_NULL((vm_object_t) 0); | |||
2014 | ||||
2015 | /* | |||
2016 | * Return the new object. | |||
2017 | */ | |||
2018 | return new_copy; | |||
2019 | } | |||
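| /* | |||
|  * Usage sketch (illustrative, not in the original source), matching | |||
|  * the description above: a kernel routine that must inspect | |||
|  * out-of-line data before committing to success first moves it aside, | |||
|  * | |||
|  *	vm_map_copy_t mine = vm_map_copy_copy(copy); | |||
|  * | |||
|  * so that if the routine later returns an error and the original | |||
|  * (now empty) copy object is deallocated by its caller, the data held | |||
|  * in "mine" is not lost. | |||
|  */ | |||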
2020 | ||||
2021 | /* | |||
2022 | * Routine: vm_map_copy_discard_cont | |||
2023 | * | |||
2024 | * Description: | |||
2025 | * A version of vm_map_copy_discard that can be called | |||
2026 | * as a continuation from a vm_map_copy page list. | |||
2027 | */ | |||
2028 | kern_return_t vm_map_copy_discard_cont(cont_args, copy_result) | |||
2029 | vm_map_copyin_args_t cont_args; | |||
2030 | vm_map_copy_t *copy_result; /* OUT */ | |||
2031 | { | |||
2032 | vm_map_copy_discard((vm_map_copy_t) cont_args); | |||
2033 | if (copy_result != (vm_map_copy_t *)0) | |||
2034 | *copy_result = VM_MAP_COPY_NULL((vm_map_copy_t) 0); | |||
2035 | return(KERN_SUCCESS0); | |||
2036 | } | |||
2037 | ||||
2038 | /* | |||
2039 | * Routine: vm_map_copy_overwrite | |||
2040 | * | |||
2041 | * Description: | |||
2042 | * Copy the memory described by the map copy | |||
2043 | * object (copy; returned by vm_map_copyin) onto | |||
2044 | * the specified destination region (dst_map, dst_addr). | |||
2045 | * The destination must be writeable. | |||
2046 | * | |||
2047 | * Unlike vm_map_copyout, this routine actually | |||
2048 | * writes over previously-mapped memory. If the | |||
2049 | * previous mapping was to a permanent (user-supplied) | |||
2050 | * memory object, it is preserved. | |||
2051 | * | |||
2052 | * The attributes (protection and inheritance) of the | |||
2053 | * destination region are preserved. | |||
2054 | * | |||
2055 | * If successful, consumes the copy object. | |||
2056 | * Otherwise, the caller is responsible for it. | |||
2057 | * | |||
2058 | * Implementation notes: | |||
2059 | * To overwrite temporary virtual memory, it is | |||
2060 | * sufficient to remove the previous mapping and insert | |||
2061 | * the new copy. This replacement is done either on | |||
2062 | * the whole region (if no permanent virtual memory | |||
2063 | * objects are embedded in the destination region) or | |||
2064 | * in individual map entries. | |||
2065 | * | |||
2066 | * To overwrite permanent virtual memory, it is | |||
2067 | * necessary to copy each page, as the external | |||
2068 | * memory management interface currently does not | |||
2069 | * provide any optimizations. | |||
2070 | * | |||
2071 | * Once a page of permanent memory has been overwritten, | |||
2072 | * it is impossible to interrupt this function; otherwise, | |||
2073 | * the call would be neither atomic nor location-independent. | |||
2074 | * The kernel-state portion of a user thread must be | |||
2075 | * interruptible. | |||
2076 | * | |||
2077 | * It may be expensive to forward all requests that might | |||
2078 | * overwrite permanent memory (vm_write, vm_copy) to | |||
2079 | * uninterruptible kernel threads. This routine may be | |||
2080 | * called by interruptible threads; however, success is | |||
2081 | * not guaranteed -- if the request cannot be performed | |||
2082 | * atomically and interruptibly, an error indication is | |||
2083 | * returned. | |||
2084 | */ | |||
2085 | kern_return_t vm_map_copy_overwrite(dst_map, dst_addr, copy, interruptible) | |||
2086 | vm_map_t dst_map; | |||
2087 | vm_offset_t dst_addr; | |||
2088 | vm_map_copy_t copy; | |||
2089 | boolean_t interruptible; | |||
2090 | { | |||
2091 | vm_size_t size; | |||
2092 | vm_offset_t start; | |||
2093 | vm_map_entry_t tmp_entry; | |||
2094 | vm_map_entry_t entry; | |||
2095 | ||||
2096 | boolean_t contains_permanent_objects = FALSE((boolean_t) 0); | |||
2097 | ||||
2098 | interruptible = FALSE((boolean_t) 0); /* XXX */ | |||
2099 | ||||
2100 | /* | |||
2101 | * Check for null copy object. | |||
2102 | */ | |||
2103 | ||||
2104 | if (copy == VM_MAP_COPY_NULL((vm_map_copy_t) 0)) | |||
2105 | return(KERN_SUCCESS0); | |||
2106 | ||||
2107 | /* | |||
2108 | * Only works for entry lists at the moment. Will | |||
2109 | * support page lists LATER. | |||
2110 | */ | |||
2111 | ||||
2112 | assert(copy->type == VM_MAP_COPY_ENTRY_LIST)({ if (!(copy->type == 1)) Assert("copy->type == VM_MAP_COPY_ENTRY_LIST" , "../vm/vm_map.c", 2112); }); | |||
2113 | ||||
2114 | /* | |||
2115 | * Currently this routine only handles page-aligned | |||
2116 | * regions. Eventually, it should handle misalignments | |||
2117 | * by actually copying pages. | |||
2118 | */ | |||
2119 | ||||
2120 | if (!page_aligned(copy->offset)((((vm_offset_t) (copy->offset)) & ((1 << 12)-1) ) == 0) || | |||
2121 | !page_aligned(copy->size)((((vm_offset_t) (copy->size)) & ((1 << 12)-1)) == 0) || | |||
2122 | !page_aligned(dst_addr)((((vm_offset_t) (dst_addr)) & ((1 << 12)-1)) == 0)) | |||
2123 | return(KERN_INVALID_ARGUMENT4); | |||
2124 | ||||
2125 | size = copy->size; | |||
2126 | ||||
2127 | if (size == 0) { | |||
2128 | vm_map_copy_discard(copy); | |||
2129 | return(KERN_SUCCESS0); | |||
2130 | } | |||
2131 | ||||
2132 | /* | |||
2133 | * Verify that the destination is all writeable | |||
2134 | * initially. | |||
2135 | */ | |||
2136 | start_pass_1: | |||
2137 | vm_map_lock(dst_map)({ lock_write(&(dst_map)->lock); (dst_map)->timestamp ++; }); | |||
2138 | if (!vm_map_lookup_entry(dst_map, dst_addr, &tmp_entry)) { | |||
2139 | vm_map_unlock(dst_map)lock_done(&(dst_map)->lock); | |||
2140 | return(KERN_INVALID_ADDRESS1); | |||
2141 | } | |||
2142 | vm_map_clip_start(dst_map, tmp_entry, dst_addr)({ if ((dst_addr) > (tmp_entry)->links.start) _vm_map_clip_start (&(dst_map)->hdr,(tmp_entry),(dst_addr)); }); | |||
2143 | for (entry = tmp_entry;;) { | |||
2144 | vm_size_t sub_size = (entry->vme_endlinks.end - entry->vme_startlinks.start); | |||
2145 | vm_map_entry_t next = entry->vme_nextlinks.next; | |||
2146 | ||||
2147 | if ( ! (entry->protection & VM_PROT_WRITE((vm_prot_t) 0x02))) { | |||
2148 | vm_map_unlock(dst_map)lock_done(&(dst_map)->lock); | |||
2149 | return(KERN_PROTECTION_FAILURE2); | |||
2150 | } | |||
2151 | ||||
2152 | /* | |||
2153 | * If the entry is in transition, we must wait | |||
2154 | * for it to exit that state. Anything could happen | |||
2155 | * when we unlock the map, so start over. | |||
2156 | */ | |||
2157 | if (entry->in_transition) { | |||
2158 | ||||
2159 | /* | |||
2160 | * Say that we are waiting, and wait for entry. | |||
2161 | */ | |||
2162 | entry->needs_wakeup = TRUE((boolean_t) 1); | |||
2163 | vm_map_entry_wait(dst_map, FALSE)({ assert_wait((event_t)&(dst_map)->hdr, ((boolean_t) 0 )); lock_done(&(dst_map)->lock); thread_block((void (* )()) 0); }); | |||
2164 | ||||
2165 | goto start_pass_1; | |||
2166 | } | |||
2167 | ||||
2168 | if (size <= sub_size) | |||
2169 | break; | |||
2170 | ||||
2171 | if ((next == vm_map_to_entry(dst_map)((struct vm_map_entry *) &(dst_map)->hdr.links)) || | |||
2172 | (next->vme_startlinks.start != entry->vme_endlinks.end)) { | |||
2173 | vm_map_unlock(dst_map)lock_done(&(dst_map)->lock); | |||
2174 | return(KERN_INVALID_ADDRESS1); | |||
2175 | } | |||
2176 | ||||
2177 | ||||
2178 | /* | |||
2179 | * Check for permanent objects in the destination. | |||
2180 | */ | |||
2181 | ||||
2182 | if ((entry->object.vm_object != VM_OBJECT_NULL((vm_object_t) 0)) && | |||
2183 | !entry->object.vm_object->temporary) | |||
2184 | contains_permanent_objects = TRUE((boolean_t) 1); | |||
2185 | ||||
2186 | size -= sub_size; | |||
2187 | entry = next; | |||
2188 | } | |||
2189 | ||||
2190 | /* | |||
2191 | * If there are permanent objects in the destination, then | |||
2192 | * the copy cannot be interrupted. | |||
2193 | */ | |||
2194 | ||||
2195 | if (interruptible && contains_permanent_objects) { | |||
2196 | vm_map_unlock(dst_map)lock_done(&(dst_map)->lock); | |||
2197 | return(KERN_FAILURE5); /* XXX */ | |||
2198 | } | |||
2199 | ||||
2200 | /* | |||
2201 | * XXXO If there are no permanent objects in the destination, | |||
2202 | * XXXO and the source and destination map entry caches match, | |||
2203 | * XXXO and the destination map entry is not shared, | |||
2204 | * XXXO then the map entries can be deleted and replaced | |||
2205 | * XXXO with those from the copy. The following code is the | |||
2206 | * XXXO basic idea of what to do, but there are lots of annoying | |||
2207 | * XXXO little details about getting protection and inheritance | |||
2208 | * XXXO right. Should add protection, inheritance, and sharing checks | |||
2209 | * XXXO to the above pass and make sure that no wiring is involved. | |||
2210 | */ | |||
2211 | /* | |||
2212 | * if (!contains_permanent_objects && | |||
2213 | * copy->cpy_hdr.entries_pageable == dst_map->hdr.entries_pageable) { | |||
2214 | * | |||
2215 | * * | |||
2216 | * * Run over copy and adjust entries. Steal code | |||
2217 | * * from vm_map_copyout() to do this. | |||
2218 | * * | |||
2219 | * | |||
2220 | * tmp_entry = tmp_entry->vme_prev; | |||
2221 | * vm_map_delete(dst_map, dst_addr, dst_addr + copy->size); | |||
2222 | * vm_map_copy_insert(dst_map, tmp_entry, copy); | |||
2223 | * | |||
2224 | * vm_map_unlock(dst_map); | |||
2225 | * vm_map_copy_discard(copy); | |||
2226 | * } | |||
2227 | */ | |||
2228 | /* | |||
2229 | * | |||
2230 | * Make a second pass, overwriting the data. | |||
2231 | * At the beginning of each loop iteration, | |||
2232 | * the next entry to be overwritten is "tmp_entry" | |||
2233 | * (initially, the value returned from the lookup above), | |||
2234 | * and the starting address expected in that entry | |||
2235 | * is "start". | |||
2236 | */ | |||
2237 | ||||
2238 | start = dst_addr; | |||
2239 | ||||
2240 | while (vm_map_copy_first_entry(copy)((copy)->c_u.hdr.links.next) != vm_map_copy_to_entry(copy)((struct vm_map_entry *) &(copy)->c_u.hdr.links)) { | |||
2241 | vm_map_entry_t copy_entry = vm_map_copy_first_entry(copy)((copy)->c_u.hdr.links.next); | |||
2242 | vm_size_t copy_size = (copy_entry->vme_endlinks.end - copy_entry->vme_startlinks.start); | |||
2243 | vm_object_t object; | |||
2244 | ||||
2245 | entry = tmp_entry; | |||
2246 | size = (entry->vme_endlinks.end - entry->vme_startlinks.start); | |||
2247 | /* | |||
2248 | * Make sure that no holes popped up in the | |||
2249 | * address map, and that the protection is | |||
2250 | * still valid, in case the map was unlocked | |||
2251 | * earlier. | |||
2252 | */ | |||
2253 | ||||
2254 | if (entry->vme_startlinks.start != start) { | |||
2255 | vm_map_unlock(dst_map)lock_done(&(dst_map)->lock); | |||
2256 | return(KERN_INVALID_ADDRESS1); | |||
2257 | } | |||
2258 | assert(entry != vm_map_to_entry(dst_map))({ if (!(entry != ((struct vm_map_entry *) &(dst_map)-> hdr.links))) Assert("entry != vm_map_to_entry(dst_map)", "../vm/vm_map.c" , 2258); }); | |||
2259 | ||||
2260 | /* | |||
2261 | * Check protection again | |||
2262 | */ | |||
2263 | ||||
2264 | if ( ! (entry->protection & VM_PROT_WRITE((vm_prot_t) 0x02))) { | |||
2265 | vm_map_unlock(dst_map)lock_done(&(dst_map)->lock); | |||
2266 | return(KERN_PROTECTION_FAILURE2); | |||
2267 | } | |||
2268 | ||||
2269 | /* | |||
2270 | * Adjust to source size first | |||
2271 | */ | |||
2272 | ||||
2273 | if (copy_size < size) { | |||
2274 | vm_map_clip_end(dst_map, entry, entry->vme_start + copy_size)({ if ((entry->links.start + copy_size) < (entry)->links .end) _vm_map_clip_end(&(dst_map)->hdr,(entry),(entry-> links.start + copy_size)); }); | |||
2275 | size = copy_size; | |||
2276 | } | |||
2277 | ||||
2278 | /* | |||
2279 | * Adjust to destination size | |||
2280 | */ | |||
2281 | ||||
2282 | if (size < copy_size) { | |||
2283 | vm_map_copy_clip_end(copy, copy_entry,({ if ((copy_entry->links.start + size) < (copy_entry)-> links.end) _vm_map_clip_end(&(copy)->c_u.hdr,(copy_entry ),(copy_entry->links.start + size)); }) | |||
2284 | copy_entry->vme_start + size)({ if ((copy_entry->links.start + size) < (copy_entry)-> links.end) _vm_map_clip_end(&(copy)->c_u.hdr,(copy_entry ),(copy_entry->links.start + size)); }); | |||
2285 | copy_size = size; | |||
2286 | } | |||
2287 | ||||
2288 | assert((entry->vme_end - entry->vme_start) == size)({ if (!((entry->links.end - entry->links.start) == size )) Assert("(entry->vme_end - entry->vme_start) == size" , "../vm/vm_map.c", 2288); }); | |||
2289 | assert((tmp_entry->vme_end - tmp_entry->vme_start) == size)({ if (!((tmp_entry->links.end - tmp_entry->links.start ) == size)) Assert("(tmp_entry->vme_end - tmp_entry->vme_start) == size" , "../vm/vm_map.c", 2289); }); | |||
2290 | assert((copy_entry->vme_end - copy_entry->vme_start) == size)({ if (!((copy_entry->links.end - copy_entry->links.start ) == size)) Assert("(copy_entry->vme_end - copy_entry->vme_start) == size" , "../vm/vm_map.c", 2290); }); | |||
2291 | ||||
2292 | /* | |||
2293 | * If the destination contains temporary unshared memory, | |||
2294 | * we can perform the copy by throwing it away and | |||
2295 | * installing the source data. | |||
2296 | */ | |||
2297 | ||||
2298 | object = entry->object.vm_object; | |||
2299 | if (!entry->is_shared && | |||
2300 | ((object == VM_OBJECT_NULL((vm_object_t) 0)) || object->temporary)) { | |||
2301 | vm_object_t old_object = entry->object.vm_object; | |||
2302 | vm_offset_t old_offset = entry->offset; | |||
2303 | ||||
2304 | entry->object = copy_entry->object; | |||
2305 | entry->offset = copy_entry->offset; | |||
2306 | entry->needs_copy = copy_entry->needs_copy; | |||
2307 | entry->wired_count = 0; | |||
2308 | entry->user_wired_count = 0; | |||
2309 | ||||
2310 | vm_map_copy_entry_unlink(copy, copy_entry)({ (&(copy)->c_u.hdr)->nentries--; (copy_entry)-> links.next->links.prev = (copy_entry)->links.prev; (copy_entry )->links.prev->links.next = (copy_entry)->links.next ; rbtree_remove(&(&(copy)->c_u.hdr)->tree, & (copy_entry)->tree_node); }); | |||
2311 | vm_map_copy_entry_dispose(copy, copy_entry)_vm_map_entry_dispose(&(copy)->c_u.hdr, (copy_entry)); | |||
2312 | ||||
2313 | vm_object_pmap_protect( | |||
2314 | old_object, | |||
2315 | old_offset, | |||
2316 | size, | |||
2317 | dst_map->pmap, | |||
2318 | tmp_entry->vme_startlinks.start, | |||
2319 | VM_PROT_NONE((vm_prot_t) 0x00)); | |||
2320 | ||||
2321 | vm_object_deallocate(old_object); | |||
2322 | ||||
2323 | /* | |||
2324 | * Set up for the next iteration. The map | |||
2325 | * has not been unlocked, so the next | |||
2326 | * address should be at the end of this | |||
2327 | * entry, and the next map entry should be | |||
2328 | * the one following it. | |||
2329 | */ | |||
2330 | ||||
2331 | start = tmp_entry->vme_endlinks.end; | |||
2332 | tmp_entry = tmp_entry->vme_nextlinks.next; | |||
2333 | } else { | |||
2334 | vm_map_version_t version; | |||
2335 | vm_object_t dst_object = entry->object.vm_object; | |||
2336 | vm_offset_t dst_offset = entry->offset; | |||
2337 | kern_return_t r; | |||
2338 | ||||
2339 | /* | |||
2340 | * Take an object reference, and record | |||
2341 | * the map version information so that the | |||
2342 | * map can be safely unlocked. | |||
2343 | */ | |||
2344 | ||||
2345 | vm_object_reference(dst_object); | |||
2346 | ||||
2347 | version.main_timestamp = dst_map->timestamp; | |||
2348 | ||||
2349 | vm_map_unlock(dst_map)lock_done(&(dst_map)->lock); | |||
2350 | ||||
2351 | /* | |||
2352 | * Copy as much as possible in one pass | |||
2353 | */ | |||
2354 | ||||
2355 | copy_size = size; | |||
2356 | r = vm_fault_copy( | |||
2357 | copy_entry->object.vm_object, | |||
2358 | copy_entry->offset, | |||
2359 | ©_size, | |||
2360 | dst_object, | |||
2361 | dst_offset, | |||
2362 | dst_map, | |||
2363 | &version, | |||
2364 | FALSE((boolean_t) 0) /* XXX interruptible */ ); | |||
2365 | ||||
2366 | /* | |||
2367 | * Release the object reference | |||
2368 | */ | |||
2369 | ||||
2370 | vm_object_deallocate(dst_object); | |||
2371 | ||||
2372 | /* | |||
2373 | * If a hard error occurred, return it now | |||
2374 | */ | |||
2375 | ||||
2376 | if (r != KERN_SUCCESS0) | |||
2377 | return(r); | |||
2378 | ||||
2379 | if (copy_size != 0) { | |||
2380 | /* | |||
2381 | * Dispose of the copied region | |||
2382 | */ | |||
2383 | ||||
2384 | vm_map_copy_clip_end(copy, copy_entry,({ if ((copy_entry->links.start + copy_size) < (copy_entry )->links.end) _vm_map_clip_end(&(copy)->c_u.hdr,(copy_entry ),(copy_entry->links.start + copy_size)); }) | |||
2385 | copy_entry->vme_start + copy_size)({ if ((copy_entry->links.start + copy_size) < (copy_entry )->links.end) _vm_map_clip_end(&(copy)->c_u.hdr,(copy_entry ),(copy_entry->links.start + copy_size)); }); | |||
2386 | vm_map_copy_entry_unlink(copy, copy_entry)({ (&(copy)->c_u.hdr)->nentries--; (copy_entry)-> links.next->links.prev = (copy_entry)->links.prev; (copy_entry )->links.prev->links.next = (copy_entry)->links.next ; rbtree_remove(&(&(copy)->c_u.hdr)->tree, & (copy_entry)->tree_node); }); | |||
2387 | vm_object_deallocate(copy_entry->object.vm_object); | |||
2388 | vm_map_copy_entry_dispose(copy, copy_entry)_vm_map_entry_dispose(&(copy)->c_u.hdr, (copy_entry)); | |||
2389 | } | |||
2390 | ||||
2391 | /* | |||
2392 | * Pick up in the destination map where we left off. | |||
2393 | * | |||
2394 | * Use the version information to avoid a lookup | |||
2395 | * in the normal case. | |||
2396 | */ | |||
2397 | ||||
2398 | start += copy_size; | |||
2399 | vm_map_lock(dst_map)({ lock_write(&(dst_map)->lock); (dst_map)->timestamp ++; }); | |||
2400 | if ((version.main_timestamp + 1) == dst_map->timestamp) { | |||
2401 | /* We can safely use saved tmp_entry value */ | |||
2402 | ||||
2403 | vm_map_clip_end(dst_map, tmp_entry, start)({ if ((start) < (tmp_entry)->links.end) _vm_map_clip_end (&(dst_map)->hdr,(tmp_entry),(start)); }); | |||
2404 | tmp_entry = tmp_entry->vme_nextlinks.next; | |||
2405 | } else { | |||
2406 | /* Must do lookup of tmp_entry */ | |||
2407 | ||||
2408 | if (!vm_map_lookup_entry(dst_map, start, &tmp_entry)) { | |||
2409 | vm_map_unlock(dst_map)lock_done(&(dst_map)->lock); | |||
2410 | return(KERN_INVALID_ADDRESS1); | |||
2411 | } | |||
2412 | vm_map_clip_start(dst_map, tmp_entry, start)({ if ((start) > (tmp_entry)->links.start) _vm_map_clip_start (&(dst_map)->hdr,(tmp_entry),(start)); }); | |||
2413 | } | |||
2414 | } | |||
2415 | ||||
2416 | } | |||
2417 | vm_map_unlock(dst_map)lock_done(&(dst_map)->lock); | |||
2418 | ||||
2419 | /* | |||
2420 | * Throw away the vm_map_copy object | |||
2421 | */ | |||
2422 | vm_map_copy_discard(copy); | |||
2423 | ||||
2424 | return(KERN_SUCCESS0); | |||
2425 | } | |||
2426 | ||||
2427 | /* | |||
2428 | * Macro: vm_map_copy_insert | |||
2429 | * | |||
2430 | * Description: | |||
2431 | * Link a copy chain ("copy") into a map at the | |||
2432 | * specified location (after "where"). | |||
2433 | * Side effects: | |||
2434 | * The copy chain is destroyed. | |||
2435 | * Warning: | |||
2436 | * The arguments are evaluated multiple times. | |||
2437 | */ | |||
2438 | #define vm_map_copy_insert(map, where, copy)({ struct rbtree_node *node, *tmp; for (node = rbtree_postwalk_deepest (&(copy)->c_u.hdr.tree), tmp = rbtree_postwalk_unlink( node); node != ((void *) 0); node = tmp, tmp = rbtree_postwalk_unlink (node)) ({ struct rbtree_node *___cur, *___prev; int ___diff, ___index; ___prev = ((void *) 0); ___index = -1; ___cur = (& (map)->hdr.tree)->root; while (___cur != ((void *) 0)) { ___diff = vm_map_entry_cmp_insert(node, ___cur); ({ if (!(___diff != 0)) Assert("___diff != 0", "../vm/vm_map.c", 2438); }); ___prev = ___cur; ___index = rbtree_d2i(___diff); ___cur = ___cur-> children[___index]; } rbtree_insert_rebalance(&(map)-> hdr.tree, ___prev, ___index, node); }); (((where)->links.next )->links.prev = ((copy)->c_u.hdr.links.prev)) ->links .next = ((where)->links.next); ((where)->links.next = ( (copy)->c_u.hdr.links.next)) ->links.prev = (where); (map )->hdr.nentries += (copy)->c_u.hdr.nentries; kmem_cache_free (&vm_map_copy_cache, (vm_offset_t) copy); }) \ | |||
2439 | MACRO_BEGIN({ \ | |||
2440 | struct rbtree_node *node, *tmp; \ | |||
2441 | rbtree_for_each_remove(&(copy)->cpy_hdr.tree, node, tmp)for (node = rbtree_postwalk_deepest(&(copy)->c_u.hdr.tree ), tmp = rbtree_postwalk_unlink(node); node != ((void *) 0); node = tmp, tmp = rbtree_postwalk_unlink(node)) \ | |||
2442 | rbtree_insert(&(map)->hdr.tree, node, \({ struct rbtree_node *___cur, *___prev; int ___diff, ___index ; ___prev = ((void *) 0); ___index = -1; ___cur = (&(map) ->hdr.tree)->root; while (___cur != ((void *) 0)) { ___diff = vm_map_entry_cmp_insert(node, ___cur); ({ if (!(___diff != 0)) Assert("___diff != 0", "../vm/vm_map.c", 2443); }); ___prev = ___cur; ___index = rbtree_d2i(___diff); ___cur = ___cur-> children[___index]; } rbtree_insert_rebalance(&(map)-> hdr.tree, ___prev, ___index, node); }) | |||
2443 | vm_map_entry_cmp_insert)({ struct rbtree_node *___cur, *___prev; int ___diff, ___index ; ___prev = ((void *) 0); ___index = -1; ___cur = (&(map) ->hdr.tree)->root; while (___cur != ((void *) 0)) { ___diff = vm_map_entry_cmp_insert(node, ___cur); ({ if (!(___diff != 0)) Assert("___diff != 0", "../vm/vm_map.c", 2443); }); ___prev = ___cur; ___index = rbtree_d2i(___diff); ___cur = ___cur-> children[___index]; } rbtree_insert_rebalance(&(map)-> hdr.tree, ___prev, ___index, node); }); \ | |||
2444 | (((where)->vme_nextlinks.next)->vme_prevlinks.prev = vm_map_copy_last_entry(copy)((copy)->c_u.hdr.links.prev)) \ | |||
2445 | ->vme_nextlinks.next = ((where)->vme_nextlinks.next); \ | |||
2446 | ((where)->vme_nextlinks.next = vm_map_copy_first_entry(copy)((copy)->c_u.hdr.links.next)) \ | |||
2447 | ->vme_prevlinks.prev = (where); \ | |||
2448 | (map)->hdr.nentries += (copy)->cpy_hdrc_u.hdr.nentries; \ | |||
2449 | kmem_cache_free(&vm_map_copy_cache, (vm_offset_t) copy); \ | |||
2450 | MACRO_END}) | |||
2451 | ||||
2452 | /* | |||
2453 | * Routine: vm_map_copyout | |||
2454 | * | |||
2455 | * Description: | |||
2456 | * Copy out a copy chain ("copy") into newly-allocated | |||
2457 | * space in the destination map. | |||
2458 | * | |||
2459 | * If successful, consumes the copy object. | |||
2460 | * Otherwise, the caller is responsible for it. | |||
2461 | */ | |||
2462 | kern_return_t vm_map_copyout(dst_map, dst_addr, copy) | |||
2463 | register | |||
2464 | vm_map_t dst_map; | |||
2465 | vm_offset_t *dst_addr; /* OUT */ | |||
2466 | register | |||
2467 | vm_map_copy_t copy; | |||
2468 | { | |||
2469 | vm_size_t size; | |||
2470 | vm_size_t adjustment; | |||
2471 | vm_offset_t start; | |||
2472 | vm_offset_t vm_copy_start; | |||
2473 | vm_map_entry_t last; | |||
2474 | register | |||
2475 | vm_map_entry_t entry; | |||
2476 | ||||
2477 | /* | |||
2478 | * Check for null copy object. | |||
2479 | */ | |||
2480 | ||||
2481 | if (copy == VM_MAP_COPY_NULL((vm_map_copy_t) 0)) { | |||
2482 | *dst_addr = 0; | |||
2483 | return(KERN_SUCCESS0); | |||
2484 | } | |||
2485 | ||||
2486 | /* | |||
2487 | * Check for special copy object, created | |||
2488 | * by vm_map_copyin_object. | |||
2489 | */ | |||
2490 | ||||
2491 | if (copy->type == VM_MAP_COPY_OBJECT2) { | |||
2492 | vm_object_t object = copy->cpy_objectc_u.c_o.object; | |||
2493 | vm_size_t offset = copy->offset; | |||
2494 | vm_size_t tmp_size = copy->size; | |||
2495 | kern_return_t kr; | |||
2496 | ||||
2497 | *dst_addr = 0; | |||
2498 | kr = vm_map_enter(dst_map, dst_addr, tmp_size, | |||
2499 | (vm_offset_t) 0, TRUE((boolean_t) 1), | |||
2500 | object, offset, FALSE((boolean_t) 0), | |||
2501 | VM_PROT_DEFAULT(((vm_prot_t) 0x01)|((vm_prot_t) 0x02)), VM_PROT_ALL(((vm_prot_t) 0x01)|((vm_prot_t) 0x02)|((vm_prot_t) 0x04)), | |||
2502 | VM_INHERIT_DEFAULT((vm_inherit_t) 1)); | |||
2503 | if (kr != KERN_SUCCESS0) | |||
2504 | return(kr); | |||
2505 | kmem_cache_free(&vm_map_copy_cache, (vm_offset_t) copy); | |||
2506 | return(KERN_SUCCESS0); | |||
2507 | } | |||
2508 | ||||
2509 | if (copy->type == VM_MAP_COPY_PAGE_LIST3) | |||
2510 | return(vm_map_copyout_page_list(dst_map, dst_addr, copy)); | |||
2511 | ||||
2512 | /* | |||
2513 | * Find space for the data | |||
2514 | */ | |||
2515 | ||||
2516 | vm_copy_start = trunc_page(copy->offset)((vm_offset_t)(((vm_offset_t)(copy->offset)) & ~((1 << 12)-1))); | |||
2517 | size = round_page(copy->offset + copy->size)((vm_offset_t)((((vm_offset_t)(copy->offset + copy->size )) + ((1 << 12)-1)) & ~((1 << 12)-1))) - vm_copy_start; | |||
2518 | ||||
2519 | StartAgain: ; | |||
2520 | ||||
2521 | vm_map_lock(dst_map)({ lock_write(&(dst_map)->lock); (dst_map)->timestamp ++; }); | |||
2522 | start = ((last = dst_map->first_free) == vm_map_to_entry(dst_map)((struct vm_map_entry *) &(dst_map)->hdr.links)) ? | |||
2523 | vm_map_min(dst_map)((dst_map)->hdr.links.start) : last->vme_endlinks.end; | |||
2524 | ||||
2525 | while (TRUE((boolean_t) 1)) { | |||
2526 | vm_map_entry_t next = last->vme_nextlinks.next; | |||
2527 | vm_offset_t end = start + size; | |||
2528 | ||||
2529 | if ((end > dst_map->max_offsethdr.links.end) || (end < start)) { | |||
2530 | if (dst_map->wait_for_space) { | |||
2531 | if (size <= (dst_map->max_offsethdr.links.end - dst_map->min_offsethdr.links.start)) { | |||
2532 | assert_wait((event_t) dst_map, TRUE((boolean_t) 1)); | |||
2533 | vm_map_unlock(dst_map)lock_done(&(dst_map)->lock); | |||
2534 | thread_block((void (*)()) 0); | |||
2535 | goto StartAgain; | |||
2536 | } | |||
2537 | } | |||
2538 | vm_map_unlock(dst_map)lock_done(&(dst_map)->lock); | |||
2539 | printf_once("no more room for vm_map_copyout in %p\n", dst_map)({ static int __once = 0; if (!__once) { printf("no more room for vm_map_copyout in %p\n" , dst_map); __once = 1; } }); | |||
2540 | return(KERN_NO_SPACE3); | |||
2541 | } | |||
2542 | ||||
2543 | if ((next == vm_map_to_entry(dst_map)((struct vm_map_entry *) &(dst_map)->hdr.links)) || | |||
2544 | (next->vme_startlinks.start >= end)) | |||
2545 | break; | |||
2546 | ||||
2547 | last = next; | |||
2548 | start = last->vme_endlinks.end; | |||
2549 | } | |||
2550 | ||||
2551 | /* | |||
2552 | * Since we're going to just drop the map | |||
2553 | * entries from the copy into the destination | |||
2554 | * map, they must come from the same pool. | |||
2555 | */ | |||
2556 | ||||
2557 | if (copy->cpy_hdrc_u.hdr.entries_pageable != dst_map->hdr.entries_pageable) { | |||
2558 | /* | |||
2559 | * Mismatches occur when dealing with the default | |||
2560 | * pager. | |||
2561 | */ | |||
2562 | kmem_cache_t old_cache; | |||
2563 | vm_map_entry_t next, new; | |||
2564 | ||||
2565 | /* | |||
2566 | * Find the cache that the copies were allocated from | |||
2567 | */ | |||
2568 | old_cache = (copy->cpy_hdrc_u.hdr.entries_pageable) | |||
2569 | ? &vm_map_entry_cache | |||
2570 | : &vm_map_kentry_cache; | |||
2571 | entry = vm_map_copy_first_entry(copy)((copy)->c_u.hdr.links.next); | |||
2572 | ||||
2573 | /* | |||
2574 | * Reinitialize the copy so that vm_map_copy_entry_link | |||
2575 | * will work. | |||
2576 | */ | |||
2577 | copy->cpy_hdrc_u.hdr.nentries = 0; | |||
2578 | copy->cpy_hdrc_u.hdr.entries_pageable = dst_map->hdr.entries_pageable; | |||
2579 | vm_map_copy_first_entry(copy)((copy)->c_u.hdr.links.next) = | |||
2580 | vm_map_copy_last_entry(copy)((copy)->c_u.hdr.links.prev) = | |||
2581 | vm_map_copy_to_entry(copy)((struct vm_map_entry *) &(copy)->c_u.hdr.links); | |||
2582 | ||||
2583 | /* | |||
2584 | * Copy each entry. | |||
2585 | */ | |||
2586 | while (entry != vm_map_copy_to_entry(copy)((struct vm_map_entry *) &(copy)->c_u.hdr.links)) { | |||
2587 | new = vm_map_copy_entry_create(copy)_vm_map_entry_create(&(copy)->c_u.hdr); | |||
2588 | vm_map_entry_copy_full(new, entry)(*(new) = *(entry)); | |||
2589 | vm_map_copy_entry_link(copy,({ (&(copy)->c_u.hdr)->nentries++; (new)->links. prev = (((copy)->c_u.hdr.links.prev)); (new)->links.next = (((copy)->c_u.hdr.links.prev))->links.next; (new)-> links.prev->links.next = (new)->links.next->links.prev = (new); ({ struct rbtree_node *___cur, *___prev; int ___diff , ___index; ___prev = ((void *) 0); ___index = -1; ___cur = ( &(&(copy)->c_u.hdr)->tree)->root; while (___cur != ((void *) 0)) { ___diff = vm_map_entry_cmp_insert(&(new )->tree_node, ___cur); ({ if (!(___diff != 0)) Assert("___diff != 0" , "../vm/vm_map.c", 2591); }); ___prev = ___cur; ___index = rbtree_d2i (___diff); ___cur = ___cur->children[___index]; } rbtree_insert_rebalance (&(&(copy)->c_u.hdr)->tree, ___prev, ___index, & (new)->tree_node); }); }) | |||
2590 | vm_map_copy_last_entry(copy),({ (&(copy)->c_u.hdr)->nentries++; (new)->links. prev = (((copy)->c_u.hdr.links.prev)); (new)->links.next = (((copy)->c_u.hdr.links.prev))->links.next; (new)-> links.prev->links.next = (new)->links.next->links.prev = (new); ({ struct rbtree_node *___cur, *___prev; int ___diff , ___index; ___prev = ((void *) 0); ___index = -1; ___cur = ( &(&(copy)->c_u.hdr)->tree)->root; while (___cur != ((void *) 0)) { ___diff = vm_map_entry_cmp_insert(&(new )->tree_node, ___cur); ({ if (!(___diff != 0)) Assert("___diff != 0" , "../vm/vm_map.c", 2591); }); ___prev = ___cur; ___index = rbtree_d2i (___diff); ___cur = ___cur->children[___index]; } rbtree_insert_rebalance (&(&(copy)->c_u.hdr)->tree, ___prev, ___index, & (new)->tree_node); }); }) | |||
2591 | new)({ (&(copy)->c_u.hdr)->nentries++; (new)->links. prev = (((copy)->c_u.hdr.links.prev)); (new)->links.next = (((copy)->c_u.hdr.links.prev))->links.next; (new)-> links.prev->links.next = (new)->links.next->links.prev = (new); ({ struct rbtree_node *___cur, *___prev; int ___diff , ___index; ___prev = ((void *) 0); ___index = -1; ___cur = ( &(&(copy)->c_u.hdr)->tree)->root; while (___cur != ((void *) 0)) { ___diff = vm_map_entry_cmp_insert(&(new )->tree_node, ___cur); ({ if (!(___diff != 0)) Assert("___diff != 0" , "../vm/vm_map.c", 2591); }); ___prev = ___cur; ___index = rbtree_d2i (___diff); ___cur = ___cur->children[___index]; } rbtree_insert_rebalance (&(&(copy)->c_u.hdr)->tree, ___prev, ___index, & (new)->tree_node); }); }); | |||
2592 | next = entry->vme_nextlinks.next; | |||
2593 | kmem_cache_free(old_cache, (vm_offset_t) entry); | |||
2594 | entry = next; | |||
2595 | } | |||
2596 | } | |||
2597 | ||||
2598 | /* | |||
2599 | * Adjust the addresses in the copy chain, and | |||
2600 | * reset the region attributes. | |||
2601 | */ | |||
2602 | ||||
2603 | adjustment = start - vm_copy_start; | |||
2604 | for (entry = vm_map_copy_first_entry(copy)((copy)->c_u.hdr.links.next); | |||
2605 | entry != vm_map_copy_to_entry(copy)((struct vm_map_entry *) &(copy)->c_u.hdr.links); | |||
2606 | entry = entry->vme_nextlinks.next) { | |||
2607 | entry->vme_startlinks.start += adjustment; | |||
2608 | entry->vme_endlinks.end += adjustment; | |||
2609 | ||||
2610 | entry->inheritance = VM_INHERIT_DEFAULT((vm_inherit_t) 1); | |||
2611 | entry->protection = VM_PROT_DEFAULT(((vm_prot_t) 0x01)|((vm_prot_t) 0x02)); | |||
2612 | entry->max_protection = VM_PROT_ALL(((vm_prot_t) 0x01)|((vm_prot_t) 0x02)|((vm_prot_t) 0x04)); | |||
2613 | entry->projected_on = 0; | |||
2614 | ||||
2615 | /* | |||
2616 | * If the entry is now wired, | |||
2617 | * map the pages into the destination map. | |||
2618 | */ | |||
2619 | if (entry->wired_count != 0) { | |||
2620 | register vm_offset_t va; | |||
2621 | vm_offset_t offset; | |||
2622 | register vm_object_t object; | |||
2623 | ||||
2624 | object = entry->object.vm_object; | |||
2625 | offset = entry->offset; | |||
2626 | va = entry->vme_startlinks.start; | |||
2627 | ||||
2628 | pmap_pageable(dst_map->pmap, | |||
2629 | entry->vme_startlinks.start, | |||
2630 | entry->vme_endlinks.end, | |||
2631 | TRUE((boolean_t) 1)); | |||
2632 | ||||
2633 | while (va < entry->vme_endlinks.end) { | |||
2634 | register vm_page_t m; | |||
2635 | ||||
2636 | /* | |||
2637 | * Look up the page in the object. | |||
2638 | * Assert that the page will be found in the | |||
2639 | * top object: | |||
2640 | * either | |||
2641 | * the object was newly created by | |||
2642 | * vm_object_copy_slowly, and has | |||
2643 | * copies of all of the pages from | |||
2644 | * the source object | |||
2645 | * or | |||
2646 | * the object was moved from the old | |||
2647 | * map entry; because the old map | |||
2648 | * entry was wired, all of the pages | |||
2649 | * were in the top-level object. | |||
2650 | * (XXX not true if we wire pages for | |||
2651 | * reading) | |||
2652 | */ | |||
2653 | vm_object_lock(object); | |||
2654 | vm_object_paging_begin(object)((object)->paging_in_progress++); | |||
2655 | ||||
2656 | m = vm_page_lookup(object, offset); | |||
2657 | if (m == VM_PAGE_NULL((vm_page_t) 0) || m->wire_count == 0 || | |||
2658 | m->absent) | |||
2659 | panic("vm_map_copyout: wiring 0x%x", m); | |||
2660 | ||||
2661 | m->busy = TRUE((boolean_t) 1); | |||
2662 | vm_object_unlock(object); | |||
2663 | ||||
2664 | PMAP_ENTER(dst_map->pmap, va, m,({ pmap_enter( (dst_map->pmap), (va), (m)->phys_addr, ( entry->protection) & ~(m)->page_lock, (((boolean_t) 1)) ); }) | |||
2665 | entry->protection, TRUE)({ pmap_enter( (dst_map->pmap), (va), (m)->phys_addr, ( entry->protection) & ~(m)->page_lock, (((boolean_t) 1)) ); }); | |||
2666 | ||||
2667 | vm_object_lock(object); | |||
2668 | PAGE_WAKEUP_DONE(m)({ (m)->busy = ((boolean_t) 0); if ((m)->wanted) { (m)-> wanted = ((boolean_t) 0); thread_wakeup_prim((((event_t) m)), ((boolean_t) 0), 0); } }); | |||
2669 | /* the page is wired, so we don't have to activate */ | |||
2670 | vm_object_paging_end(object)({ ({ if (!((object)->paging_in_progress != 0)) Assert("(object)->paging_in_progress != 0" , "../vm/vm_map.c", 2670); }); if (--(object)->paging_in_progress == 0) { ({ if ((object)->all_wanted & (1 << (2) )) thread_wakeup_prim(((event_t)(((vm_offset_t) object) + (2) )), ((boolean_t) 0), 0); (object)->all_wanted &= ~(1 << (2)); }); } }); | |||
2671 | vm_object_unlock(object); | |||
2672 | ||||
2673 | offset += PAGE_SIZE(1 << 12); | |||
2674 | va += PAGE_SIZE(1 << 12); | |||
2675 | } | |||
2676 | } | |||
2677 | ||||
2678 | ||||
2679 | } | |||
2680 | ||||
2681 | /* | |||
2682 | * Correct the page alignment for the result | |||
2683 | */ | |||
2684 | ||||
2685 | *dst_addr = start + (copy->offset - vm_copy_start); | |||
2686 | ||||
2687 | /* | |||
2688 | * Update the hints and the map size | |||
2689 | */ | |||
2690 | ||||
2691 | if (dst_map->first_free == last) | |||
2692 | dst_map->first_free = vm_map_copy_last_entry(copy)((copy)->c_u.hdr.links.prev); | |||
2693 | SAVE_HINT(dst_map, vm_map_copy_last_entry(copy)); (dst_map)->hint = (((copy)->c_u.hdr.links.prev)); ;; | |||
2694 | ||||
2695 | dst_map->size += size; | |||
2696 | ||||
2697 | /* | |||
2698 | * Link in the copy | |||
2699 | */ | |||
2700 | ||||
2701 | vm_map_copy_insert(dst_map, last, copy)({ struct rbtree_node *node, *tmp; for (node = rbtree_postwalk_deepest (&(copy)->c_u.hdr.tree), tmp = rbtree_postwalk_unlink( node); node != ((void *) 0); node = tmp, tmp = rbtree_postwalk_unlink (node)) ({ struct rbtree_node *___cur, *___prev; int ___diff, ___index; ___prev = ((void *) 0); ___index = -1; ___cur = (& (dst_map)->hdr.tree)->root; while (___cur != ((void *) 0 )) { ___diff = vm_map_entry_cmp_insert(node, ___cur); ({ if ( !(___diff != 0)) Assert("___diff != 0", "../vm/vm_map.c", 2701 ); }); ___prev = ___cur; ___index = rbtree_d2i(___diff); ___cur = ___cur->children[___index]; } rbtree_insert_rebalance(& (dst_map)->hdr.tree, ___prev, ___index, node); }); (((last )->links.next)->links.prev = ((copy)->c_u.hdr.links. prev)) ->links.next = ((last)->links.next); ((last)-> links.next = ((copy)->c_u.hdr.links.next)) ->links.prev = (last); (dst_map)->hdr.nentries += (copy)->c_u.hdr.nentries ; kmem_cache_free(&vm_map_copy_cache, (vm_offset_t) copy) ; }); | |||
2702 | ||||
2703 | vm_map_unlock(dst_map)lock_done(&(dst_map)->lock); | |||
2704 | ||||
2705 | /* | |||
2706 | * XXX If wiring_required, call vm_map_pageable | |||
2707 | */ | |||
2708 | ||||
2709 | return(KERN_SUCCESS0); | |||
2710 | } | |||
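Before the entries can be linked in, vm_map_copyout has to find a hole in the destination map large enough for the page-rounded copy. The scan starts at first_free, rejects any candidate range that would run past the map's end or wrap around the address space, and otherwise skips past whichever entry blocks it. The fragment below is a user-space model of that scan over a sorted array of ranges; struct entry, find_space and the array representation are invented for the illustration (the kernel walks the map's linked entry list and can also sleep when wait_for_space is set):

    #include <stdint.h>
    #include <stdio.h>

    struct entry { uint64_t start, end; };   /* sorted, non-overlapping ranges */

    static uint64_t find_space(const struct entry *e, size_t n,
                               uint64_t vmin, uint64_t vmax, uint64_t size)
    {
        uint64_t start = vmin;

        for (size_t i = 0; ; i++) {
            uint64_t end = start + size;

            if (end > vmax || end < start)    /* past the map's end, or wrapped */
                return UINT64_MAX;
            if (i == n || e[i].start >= end)  /* the hole before the next entry fits */
                return start;
            if (e[i].end > start)             /* skip past the blocking entry */
                start = e[i].end;
        }
    }

    int main(void)
    {
        struct entry map[] = { { 0x1000, 0x3000 }, { 0x5000, 0x6000 } };

        printf("0x%llx\n", (unsigned long long)
               find_space(map, 2, 0x1000, 0x10000, 0x2000));
        return 0;
    }

Compiled and run, this prints 0x3000: the first 0x2000-byte hole at or above the minimum address.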
2711 | ||||
2712 | /* | |||
2713 | * | |||
2714 | * vm_map_copyout_page_list: | |||
2715 | * | |||
2716 | * Version of vm_map_copyout() for page list vm map copies. | |||
2717 | * | |||
2718 | */ | |||
2719 | kern_return_t vm_map_copyout_page_list(dst_map, dst_addr, copy) | |||
2720 | register | |||
2721 | vm_map_t dst_map; | |||
2722 | vm_offset_t *dst_addr; /* OUT */ | |||
2723 | register | |||
2724 | vm_map_copy_t copy; | |||
2725 | { | |||
2726 | vm_size_t size; | |||
2727 | vm_offset_t start; | |||
2728 | vm_offset_t end; | |||
2729 | vm_offset_t offset; | |||
2730 | vm_map_entry_t last; | |||
2731 | register | |||
2732 | vm_object_t object; | |||
2733 | vm_page_t *page_list, m; | |||
2734 | vm_map_entry_t entry; | |||
2735 | vm_offset_t old_last_offset; | |||
2736 | boolean_t cont_invoked, needs_wakeup = FALSE((boolean_t) 0); | |||
2737 | kern_return_t result = KERN_SUCCESS0; | |||
2738 | vm_map_copy_t orig_copy; | |||
2739 | vm_offset_t dst_offset; | |||
2740 | boolean_t must_wire; | |||
2741 | ||||
2742 | /* | |||
2743 | * Make sure the pages are stolen, because we are | |||
2744 | * going to put them in a new object. Assume that | |||
2745 | * all pages are identical to the first in this regard. | |||

2746 | */ | |||
2747 | ||||
2748 | page_list = ©->cpy_page_listc_u.c_p.page_list[0]; | |||
2749 | if ((*page_list)->tabled) | |||
2750 | vm_map_copy_steal_pages(copy); | |||
2751 | ||||
2752 | /* | |||
2753 | * Find space for the data | |||
2754 | */ | |||
2755 | ||||
2756 | size = round_page(copy->offset + copy->size)((vm_offset_t)((((vm_offset_t)(copy->offset + copy->size )) + ((1 << 12)-1)) & ~((1 << 12)-1))) - | |||
2757 | trunc_page(copy->offset)((vm_offset_t)(((vm_offset_t)(copy->offset)) & ~((1 << 12)-1))); | |||
2758 | StartAgain: | |||
2759 | vm_map_lock(dst_map)({ lock_write(&(dst_map)->lock); (dst_map)->timestamp ++; }); | |||
2760 | must_wire = dst_map->wiring_required; | |||
2761 | ||||
2762 | last = dst_map->first_free; | |||
2763 | if (last == vm_map_to_entry(dst_map)((struct vm_map_entry *) &(dst_map)->hdr.links)) { | |||
2764 | start = vm_map_min(dst_map)((dst_map)->hdr.links.start); | |||
2765 | } else { | |||
2766 | start = last->vme_endlinks.end; | |||
2767 | } | |||
2768 | ||||
2769 | while (TRUE((boolean_t) 1)) { | |||
2770 | vm_map_entry_t next = last->vme_nextlinks.next; | |||
2771 | end = start + size; | |||
2772 | ||||
2773 | if ((end > dst_map->max_offsethdr.links.end) || (end < start)) { | |||
2774 | if (dst_map->wait_for_space) { | |||
2775 | if (size <= (dst_map->max_offsethdr.links.end - | |||
2776 | dst_map->min_offsethdr.links.start)) { | |||
2777 | assert_wait((event_t) dst_map, TRUE((boolean_t) 1)); | |||
2778 | vm_map_unlock(dst_map)lock_done(&(dst_map)->lock); | |||
2779 | thread_block((void (*)()) 0); | |||
2780 | goto StartAgain; | |||
2781 | } | |||
2782 | } | |||
2783 | vm_map_unlock(dst_map)lock_done(&(dst_map)->lock); | |||
2784 | printf_once("no more room for vm_map_copyout_page_list in %p\n", dst_map)({ static int __once = 0; if (!__once) { printf("no more room for vm_map_copyout_page_list in %p\n" , dst_map); __once = 1; } }); | |||
2785 | return(KERN_NO_SPACE3); | |||
2786 | } | |||
2787 | ||||
2788 | if ((next == vm_map_to_entry(dst_map)((struct vm_map_entry *) &(dst_map)->hdr.links)) || | |||
2789 | (next->vme_startlinks.start >= end)) { | |||
2790 | break; | |||
2791 | } | |||
2792 | ||||
2793 | last = next; | |||
2794 | start = last->vme_endlinks.end; | |||
2795 | } | |||
2796 | ||||
2797 | /* | |||
2798 | * See whether we can avoid creating a new entry (and object) by | |||
2799 | * extending one of our neighbors. [So far, we only attempt to | |||
2800 | * extend from below.] | |||
2801 | * | |||
2802 | * The code path below here is a bit twisted. If any of the | |||
2803 | * extension checks fails, we branch to create_object. If | |||
2804 | * it all works, we fall out the bottom and goto insert_pages. | |||
2805 | */ | |||
2806 | if (last == vm_map_to_entry(dst_map)((struct vm_map_entry *) &(dst_map)->hdr.links) || | |||
2807 | last->vme_endlinks.end != start || | |||
2808 | last->is_shared != FALSE((boolean_t) 0) || | |||
2809 | last->is_sub_map != FALSE((boolean_t) 0) || | |||
2810 | last->inheritance != VM_INHERIT_DEFAULT((vm_inherit_t) 1) || | |||
2811 | last->protection != VM_PROT_DEFAULT(((vm_prot_t) 0x01)|((vm_prot_t) 0x02)) || | |||
2812 | last->max_protection != VM_PROT_ALL(((vm_prot_t) 0x01)|((vm_prot_t) 0x02)|((vm_prot_t) 0x04)) || | |||
2813 | (must_wire ? (last->wired_count != 1 || | |||
2814 | last->user_wired_count != 1) : | |||
2815 | (last->wired_count != 0))) { | |||
2816 | goto create_object; | |||
2817 | } | |||
2818 | ||||
2819 | /* | |||
2820 | * If this entry needs an object, make one. | |||
2821 | */ | |||
2822 | if (last->object.vm_object == VM_OBJECT_NULL((vm_object_t) 0)) { | |||
2823 | object = vm_object_allocate( | |||
2824 | (vm_size_t)(last->vme_endlinks.end - last->vme_startlinks.start + size)); | |||
2825 | last->object.vm_object = object; | |||
2826 | last->offset = 0; | |||
2827 | vm_object_lock(object); | |||
2828 | } | |||
2829 | else { | |||
2830 | vm_offset_t prev_offset = last->offset; | |||
2831 | vm_size_t prev_size = start - last->vme_startlinks.start; | |||
2832 | vm_size_t new_size; | |||
2833 | ||||
2834 | /* | |||
2835 | * This is basically vm_object_coalesce. | |||
2836 | */ | |||
2837 | ||||
2838 | object = last->object.vm_object; | |||
2839 | vm_object_lock(object); | |||
2840 | ||||
2841 | /* | |||
2842 | * Try to collapse the object first | |||
2843 | */ | |||
2844 | vm_object_collapse(object); | |||
2845 | ||||
2846 | /* | |||
2847 | * Can't coalesce if pages not mapped to | |||
2848 | * last may be in use anyway: | |||
2849 | * . more than one reference | |||
2850 | * . paged out | |||
2851 | * . shadows another object | |||
2852 | * . has a copy elsewhere | |||
2853 | * . paging references (pages might be in page-list) | |||
2854 | */ | |||
2855 | ||||
2856 | if ((object->ref_count > 1) || | |||
2857 | object->pager_created || | |||
2858 | (object->shadow != VM_OBJECT_NULL((vm_object_t) 0)) || | |||
2859 | (object->copy != VM_OBJECT_NULL((vm_object_t) 0)) || | |||
2860 | (object->paging_in_progress != 0)) { | |||
2861 | vm_object_unlock(object); | |||
2862 | goto create_object; | |||
2863 | } | |||
2864 | ||||
2865 | /* | |||
2866 | * Extend the object if necessary. Don't have to call | |||
2867 | * vm_object_page_remove because the pages aren't mapped, | |||
2868 | * and vm_page_replace will free up any old ones it encounters. | |||
2869 | */ | |||
2870 | new_size = prev_offset + prev_size + size; | |||
2871 | if (new_size > object->size) | |||
2872 | object->size = new_size; | |||
2873 | } | |||
2874 | ||||
2875 | /* | |||
2876 | * Coalesced the two objects - can extend | |||
2877 | * the previous map entry to include the | |||
2878 | * new range. | |||
2879 | */ | |||
2880 | dst_map->size += size; | |||
2881 | last->vme_endlinks.end = end; | |||
2882 | ||||
2883 | SAVE_HINT(dst_map, last); (dst_map)->hint = (last); ;; | |||
2884 | ||||
2885 | goto insert_pages; | |||
2886 | ||||
2887 | create_object: | |||
2888 | ||||
2889 | /* | |||
2890 | * Create object | |||
2891 | */ | |||
2892 | object = vm_object_allocate(size); | |||
2893 | ||||
2894 | /* | |||
2895 | * Create entry | |||
2896 | */ | |||
2897 | ||||
2898 | entry = vm_map_entry_create(dst_map)_vm_map_entry_create(&(dst_map)->hdr); | |||
2899 | ||||
2900 | entry->object.vm_object = object; | |||
2901 | entry->offset = 0; | |||
2902 | ||||
2903 | entry->is_shared = FALSE((boolean_t) 0); | |||
2904 | entry->is_sub_map = FALSE((boolean_t) 0); | |||
2905 | entry->needs_copy = FALSE((boolean_t) 0); | |||
2906 | ||||
2907 | if (must_wire) { | |||
2908 | entry->wired_count = 1; | |||
2909 | entry->user_wired_count = 1; | |||
2910 | } else { | |||
2911 | entry->wired_count = 0; | |||
2912 | entry->user_wired_count = 0; | |||
2913 | } | |||
2914 | ||||
2915 | entry->in_transition = TRUE((boolean_t) 1); | |||
2916 | entry->needs_wakeup = FALSE((boolean_t) 0); | |||
2917 | ||||
2918 | entry->vme_startlinks.start = start; | |||
2919 | entry->vme_endlinks.end = start + size; | |||
2920 | ||||
2921 | entry->inheritance = VM_INHERIT_DEFAULT((vm_inherit_t) 1); | |||
2922 | entry->protection = VM_PROT_DEFAULT(((vm_prot_t) 0x01)|((vm_prot_t) 0x02)); | |||
2923 | entry->max_protection = VM_PROT_ALL(((vm_prot_t) 0x01)|((vm_prot_t) 0x02)|((vm_prot_t) 0x04)); | |||
2924 | entry->projected_on = 0; | |||
2925 | ||||
2926 | vm_object_lock(object); | |||
2927 | ||||
2928 | /* | |||
2929 | * Update the hints and the map size | |||
2930 | */ | |||
2931 | if (dst_map->first_free == last) { | |||
2932 | dst_map->first_free = entry; | |||
2933 | } | |||
2934 | SAVE_HINT(dst_map, entry); (dst_map)->hint = (entry); ;; | |||
2935 | dst_map->size += size; | |||
2936 | ||||
2937 | /* | |||
2938 | * Link in the entry | |||
2939 | */ | |||
2940 | vm_map_entry_link(dst_map, last, entry)({ (&(dst_map)->hdr)->nentries++; (entry)->links .prev = (last); (entry)->links.next = (last)->links.next ; (entry)->links.prev->links.next = (entry)->links.next ->links.prev = (entry); ({ struct rbtree_node *___cur, *___prev ; int ___diff, ___index; ___prev = ((void *) 0); ___index = - 1; ___cur = (&(&(dst_map)->hdr)->tree)->root ; while (___cur != ((void *) 0)) { ___diff = vm_map_entry_cmp_insert (&(entry)->tree_node, ___cur); ({ if (!(___diff != 0)) Assert("___diff != 0", "../vm/vm_map.c", 2940); }); ___prev = ___cur; ___index = rbtree_d2i(___diff); ___cur = ___cur-> children[___index]; } rbtree_insert_rebalance(&(&(dst_map )->hdr)->tree, ___prev, ___index, &(entry)->tree_node ); }); }); | |||
2941 | last = entry; | |||
2942 | ||||
2943 | /* | |||
2944 | * Transfer pages into new object. | |||
2945 | * Scan page list in vm_map_copy. | |||
2946 | */ | |||
2947 | insert_pages: | |||
2948 | dst_offset = copy->offset & PAGE_MASK((1 << 12)-1); | |||
2949 | cont_invoked = FALSE((boolean_t) 0); | |||
2950 | orig_copy = copy; | |||
2951 | last->in_transition = TRUE((boolean_t) 1); | |||
2952 | old_last_offset = last->offset | |||
2953 | + (start - last->vme_startlinks.start); | |||
2954 | ||||
2955 | vm_page_lock_queues(); | |||
2956 | ||||
2957 | for (offset = 0; offset < size; offset += PAGE_SIZE(1 << 12)) { | |||
2958 | m = *page_list; | |||
2959 | assert(m && !m->tabled)({ if (!(m && !m->tabled)) Assert("m && !m->tabled" , "../vm/vm_map.c", 2959); }); | |||
2960 | ||||
2961 | /* | |||
2962 | * Must clear busy bit in page before inserting it. | |||
2963 | * Ok to skip wakeup logic because nobody else | |||
2964 | * can possibly know about this page. | |||
2965 | * The page is dirty in its new object. | |||
2966 | */ | |||
2967 | ||||
2968 | assert(!m->wanted)({ if (!(!m->wanted)) Assert("!m->wanted", "../vm/vm_map.c" , 2968); }); | |||
2969 | ||||
2970 | m->busy = FALSE((boolean_t) 0); | |||
2971 | m->dirty = TRUE((boolean_t) 1); | |||
2972 | vm_page_replace(m, object, old_last_offset + offset); | |||
2973 | if (must_wire) { | |||
2974 | vm_page_wire(m); | |||
2975 | PMAP_ENTER(dst_map->pmap,({ pmap_enter( (dst_map->pmap), (last->links.start + m-> offset - last->offset), (m)->phys_addr, (last->protection ) & ~(m)->page_lock, (((boolean_t) 1)) ); }) | |||
2976 | last->vme_start + m->offset - last->offset,({ pmap_enter( (dst_map->pmap), (last->links.start + m-> offset - last->offset), (m)->phys_addr, (last->protection ) & ~(m)->page_lock, (((boolean_t) 1)) ); }) | |||
2977 | m, last->protection, TRUE)({ pmap_enter( (dst_map->pmap), (last->links.start + m-> offset - last->offset), (m)->phys_addr, (last->protection ) & ~(m)->page_lock, (((boolean_t) 1)) ); }); | |||
2978 | } else { | |||
2979 | vm_page_activate(m); | |||
2980 | } | |||
2981 | ||||
2982 | *page_list++ = VM_PAGE_NULL((vm_page_t) 0); | |||
2983 | if (--(copy->cpy_npagesc_u.c_p.npages) == 0 && | |||
2984 | vm_map_copy_has_cont(copy)(((copy)->c_u.c_p.cont) != (kern_return_t (*)()) 0)) { | |||
2985 | vm_map_copy_t new_copy; | |||
2986 | ||||
2987 | /* | |||
2988 | * Ok to unlock map because entry is | |||
2989 | * marked in_transition. | |||
2990 | */ | |||
2991 | cont_invoked = TRUE((boolean_t) 1); | |||
2992 | vm_page_unlock_queues(); | |||
2993 | vm_object_unlock(object); | |||
2994 | vm_map_unlock(dst_map)lock_done(&(dst_map)->lock); | |||
2995 | vm_map_copy_invoke_cont(copy, &new_copy, &result)({ vm_map_copy_page_discard(copy); *&result = (*((copy)-> c_u.c_p.cont))((copy)->c_u.c_p.cont_args, &new_copy); ( copy)->c_u.c_p.cont = (kern_return_t (*)()) 0; }); | |||
2996 | ||||
2997 | if (result == KERN_SUCCESS0) { | |||
2998 | ||||
2999 | /* | |||
3000 | * If we got back a copy with real pages, | |||
3001 | * steal them now. Either all of the | |||
3002 | * pages in the list are tabled or none | |||
3003 | * of them are; mixtures are not possible. | |||
3004 | * | |||
3005 | * Save original copy for consume on | |||
3006 | * success logic at end of routine. | |||
3007 | */ | |||
3008 | if (copy != orig_copy) | |||
3009 | vm_map_copy_discard(copy); | |||
3010 | ||||
3011 | if ((copy = new_copy) != VM_MAP_COPY_NULL((vm_map_copy_t) 0)) { | |||
3012 | page_list = ©->cpy_page_listc_u.c_p.page_list[0]; | |||
3013 | if ((*page_list)->tabled) | |||
3014 | vm_map_copy_steal_pages(copy); | |||
3015 | } | |||
3016 | } | |||
3017 | else { | |||
3018 | /* | |||
3019 | * Continuation failed. | |||
3020 | */ | |||
3021 | vm_map_lock(dst_map)({ lock_write(&(dst_map)->lock); (dst_map)->timestamp ++; }); | |||
3022 | goto error; | |||
3023 | } | |||
3024 | ||||
3025 | vm_map_lock(dst_map)({ lock_write(&(dst_map)->lock); (dst_map)->timestamp ++; }); | |||
3026 | vm_object_lock(object); | |||
3027 | vm_page_lock_queues(); | |||
3028 | } | |||
3029 | } | |||
3030 | ||||
3031 | vm_page_unlock_queues(); | |||
3032 | vm_object_unlock(object); | |||
3033 | ||||
3034 | *dst_addr = start + dst_offset; | |||
3035 | ||||
3036 | /* | |||
3037 | * Clear the in transition bits. This is easy if we | |||
3038 | * didn't have a continuation. | |||
3039 | */ | |||
3040 | error: | |||
3041 | if (!cont_invoked) { | |||
3042 | /* | |||
3043 | * We didn't unlock the map, so nobody could | |||
3044 | * be waiting. | |||
3045 | */ | |||
3046 | last->in_transition = FALSE((boolean_t) 0); | |||
3047 | assert(!last->needs_wakeup)({ if (!(!last->needs_wakeup)) Assert("!last->needs_wakeup" , "../vm/vm_map.c", 3047); }); | |||
3048 | needs_wakeup = FALSE((boolean_t) 0); | |||
3049 | } | |||
3050 | else { | |||
3051 | if (!vm_map_lookup_entry(dst_map, start, &entry)) | |||
3052 | panic("vm_map_copyout_page_list: missing entry"); | |||
3053 | ||||
3054 | /* | |||
3055 | * Clear transition bit for all constituent entries that | |||
3056 | * were in the original entry. Also check for waiters. | |||
3057 | */ | |||
3058 | while((entry != vm_map_to_entry(dst_map)((struct vm_map_entry *) &(dst_map)->hdr.links)) && | |||
3059 | (entry->vme_startlinks.start < end)) { | |||
3060 | assert(entry->in_transition)({ if (!(entry->in_transition)) Assert("entry->in_transition" , "../vm/vm_map.c", 3060); }); | |||
3061 | entry->in_transition = FALSE((boolean_t) 0); | |||
3062 | if(entry->needs_wakeup) { | |||
3063 | entry->needs_wakeup = FALSE((boolean_t) 0); | |||
3064 | needs_wakeup = TRUE((boolean_t) 1); | |||
3065 | } | |||
3066 | entry = entry->vme_nextlinks.next; | |||
3067 | } | |||
3068 | } | |||
3069 | ||||
3070 | if (result != KERN_SUCCESS0) | |||
3071 | vm_map_delete(dst_map, start, end); | |||
3072 | ||||
3073 | vm_map_unlock(dst_map)lock_done(&(dst_map)->lock); | |||
3074 | ||||
3075 | if (needs_wakeup) | |||
3076 | vm_map_entry_wakeup(dst_map)thread_wakeup_prim(((event_t)&(dst_map)->hdr), ((boolean_t ) 0), 0); | |||
3077 | ||||
3078 | /* | |||
3079 | * Consume on success logic. | |||
3080 | */ | |||
3081 | if (copy != orig_copy) { | |||
3082 | kmem_cache_free(&vm_map_copy_cache, (vm_offset_t) copy); | |||
3083 | } | |||
3084 | if (result == KERN_SUCCESS0) { | |||
3085 | kmem_cache_free(&vm_map_copy_cache, (vm_offset_t) orig_copy); | |||
3086 | } | |||
3087 | ||||
3088 | return(result); | |||
3089 | } | |||
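The delicate part of this routine is the insert_pages loop. When the current page list is exhausted and the copy carries a continuation, the map and object locks are dropped, the continuation is invoked, and whatever copy it returns (possibly none) becomes the new current copy before the loop resumes. The code relies on the continuation never coming back empty while pages remain to be placed; the analyzer cannot see that invariant, which is presumably why it reports the decrement of copy->cpy_npages on source line 2983 as a possible null dereference. The self-contained sketch below models the pattern with invented names and shows the kind of guard that would make the assumption explicit:

    /* "batch" stands in for the page-list copy and get_next_batch() for
     * invoking its continuation; all names are invented for illustration. */
    #include <stdio.h>
    #include <stdlib.h>

    struct batch { int npages; };

    /* A continuation that legitimately reports "no more pages". */
    static struct batch *get_next_batch(void)
    {
        return NULL;
    }

    int main(void)
    {
        int total = 8, consumed = 0;
        struct batch *cur = malloc(sizeof *cur);

        if (cur == NULL)
            return 1;
        cur->npages = 4;                      /* first page list holds 4 pages */

        while (consumed < total) {
            consumed++;
            if (--cur->npages == 0) {         /* the decrement the report points at */
                free(cur);
                cur = get_next_batch();
                if (cur == NULL)              /* guard: stop rather than loop again */
                    break;                    /* and dereference a null "cur"       */
            }
        }
        free(cur);                            /* free(NULL) is a no-op */
        printf("consumed %d of %d pages\n", consumed, total);
        return 0;
    }

Whether such a guard is actually needed in the kernel depends on whether vm_map_copyin_page_list_cont can ever hand back a null copy with KERN_SUCCESS while the destination loop still has pages to place; if it cannot, the report is a false positive that a guard or an assertion would document.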
3090 | ||||
3091 | /* | |||
3092 | * Routine: vm_map_copyin | |||
3093 | * | |||
3094 | * Description: | |||
3095 | * Copy the specified region (src_addr, len) from the | |||
3096 | * source address space (src_map), possibly removing | |||
3097 | * the region from the source address space (src_destroy). | |||
3098 | * | |||
3099 | * Returns: | |||
3100 | * A vm_map_copy_t object (copy_result), suitable for | |||
3101 | * insertion into another address space (using vm_map_copyout), | |||
3102 | * copying over another address space region (using | |||
3103 | * vm_map_copy_overwrite). If the copy is unused, it | |||
3104 | * should be destroyed (using vm_map_copy_discard). | |||
3105 | * | |||
3106 | * In/out conditions: | |||
3107 | * The source map should not be locked on entry. | |||
3108 | */ | |||
3109 | kern_return_t vm_map_copyin(src_map, src_addr, len, src_destroy, copy_result) | |||
3110 | vm_map_t src_map; | |||
3111 | vm_offset_t src_addr; | |||
3112 | vm_size_t len; | |||
3113 | boolean_t src_destroy; | |||
3114 | vm_map_copy_t *copy_result; /* OUT */ | |||
3115 | { | |||
3116 | vm_map_entry_t tmp_entry; /* Result of last map lookup -- | |||
3117 | * in multi-level lookup, this | |||
3118 | * entry contains the actual | |||
3119 | * vm_object/offset. | |||
3120 | */ | |||
3121 | ||||
3122 | vm_offset_t src_start; /* Start of current entry -- | |||
3123 | * where copy is taking place now | |||
3124 | */ | |||
3125 | vm_offset_t src_end; /* End of entire region to be | |||
3126 | * copied */ | |||
3127 | ||||
3128 | register | |||
3129 | vm_map_copy_t copy; /* Resulting copy */ | |||
3130 | ||||
3131 | /* | |||
3132 | * Check for copies of zero bytes. | |||
3133 | */ | |||
3134 | ||||
3135 | if (len == 0) { | |||
3136 | *copy_result = VM_MAP_COPY_NULL((vm_map_copy_t) 0); | |||
3137 | return(KERN_SUCCESS0); | |||
3138 | } | |||
3139 | ||||
3140 | /* | |||
3141 | * Compute start and end of region | |||
3142 | */ | |||
3143 | ||||
3144 | src_start = trunc_page(src_addr)((vm_offset_t)(((vm_offset_t)(src_addr)) & ~((1 << 12 )-1))); | |||
3145 | src_end = round_page(src_addr + len)((vm_offset_t)((((vm_offset_t)(src_addr + len)) + ((1 << 12)-1)) & ~((1 << 12)-1))); | |||
3146 | ||||
3147 | /* | |||
3148 | * Check that the end address doesn't overflow | |||
3149 | */ | |||
3150 | ||||
3151 | if (src_end <= src_start) | |||
3152 | if ((src_end < src_start) || (src_start != 0)) | |||
3153 | return(KERN_INVALID_ADDRESS1); | |||
3154 | ||||
3155 | /* | |||
3156 | * Allocate a header element for the list. | |||
3157 | * | |||
3158 | * Use the start and end in the header to | |||
3159 | * remember the endpoints prior to rounding. | |||
3160 | */ | |||
3161 | ||||
3162 | copy = (vm_map_copy_t) kmem_cache_alloc(&vm_map_copy_cache); | |||
3163 | vm_map_copy_first_entry(copy)((copy)->c_u.hdr.links.next) = | |||
3164 | vm_map_copy_last_entry(copy)((copy)->c_u.hdr.links.prev) = vm_map_copy_to_entry(copy)((struct vm_map_entry *) &(copy)->c_u.hdr.links); | |||
3165 | copy->type = VM_MAP_COPY_ENTRY_LIST1; | |||
3166 | copy->cpy_hdrc_u.hdr.nentries = 0; | |||
3167 | copy->cpy_hdrc_u.hdr.entries_pageable = TRUE((boolean_t) 1); | |||
3168 | rbtree_init(©->cpy_hdrc_u.hdr.tree); | |||
3169 | ||||
3170 | copy->offset = src_addr; | |||
3171 | copy->size = len; | |||
3172 | ||||
3173 | #define RETURN(x) \ | |||
3174 | MACRO_BEGIN({ \ | |||
3175 | vm_map_unlock(src_map)lock_done(&(src_map)->lock); \ | |||
3176 | vm_map_copy_discard(copy); \ | |||
3177 | MACRO_RETURNif (((boolean_t) 1)) return(x); \ | |||
3178 | MACRO_END}) | |||
3179 | ||||
3180 | /* | |||
3181 | * Find the beginning of the region. | |||
3182 | */ | |||
3183 | ||||
3184 | vm_map_lock(src_map)({ lock_write(&(src_map)->lock); (src_map)->timestamp ++; }); | |||
3185 | ||||
3186 | if (!vm_map_lookup_entry(src_map, src_start, &tmp_entry)) | |||
3187 | RETURN(KERN_INVALID_ADDRESS1); | |||
3188 | vm_map_clip_start(src_map, tmp_entry, src_start)({ if ((src_start) > (tmp_entry)->links.start) _vm_map_clip_start (&(src_map)->hdr,(tmp_entry),(src_start)); }); | |||
3189 | ||||
3190 | /* | |||
3191 | * Go through entries until we get to the end. | |||
3192 | */ | |||
3193 | ||||
3194 | while (TRUE((boolean_t) 1)) { | |||
3195 | register | |||
3196 | vm_map_entry_t src_entry = tmp_entry; /* Top-level entry */ | |||
3197 | vm_size_t src_size; /* Size of source | |||
3198 | * map entry (in both | |||
3199 | * maps) | |||
3200 | */ | |||
3201 | ||||
3202 | register | |||
3203 | vm_object_t src_object; /* Object to copy */ | |||
3204 | vm_offset_t src_offset; | |||
3205 | ||||
3206 | boolean_t src_needs_copy; /* Should source map | |||
3207 | * be made read-only | |||
3208 | * for copy-on-write? | |||
3209 | */ | |||
3210 | ||||
3211 | register | |||
3212 | vm_map_entry_t new_entry; /* Map entry for copy */ | |||
3213 | boolean_t new_entry_needs_copy; /* Will new entry be COW? */ | |||
3214 | ||||
3215 | boolean_t was_wired; /* Was source wired? */ | |||
3216 | vm_map_version_t version; /* Version before locks | |||
3217 | * dropped to make copy | |||
3218 | */ | |||
3219 | ||||
3220 | /* | |||
3221 | * Verify that the region can be read. | |||
3222 | */ | |||
3223 | ||||
3224 | if (! (src_entry->protection & VM_PROT_READ((vm_prot_t) 0x01))) | |||
3225 | RETURN(KERN_PROTECTION_FAILURE2); | |||
3226 | ||||
3227 | /* | |||
3228 | * Clip against the endpoints of the entire region. | |||
3229 | */ | |||
3230 | ||||
3231 | vm_map_clip_end(src_map, src_entry, src_end)({ if ((src_end) < (src_entry)->links.end) _vm_map_clip_end (&(src_map)->hdr,(src_entry),(src_end)); }); | |||
3232 | ||||
3233 | src_size = src_entry->vme_endlinks.end - src_start; | |||
3234 | src_object = src_entry->object.vm_object; | |||
3235 | src_offset = src_entry->offset; | |||
3236 | was_wired = (src_entry->wired_count != 0); | |||
3237 | ||||
3238 | /* | |||
3239 | * Create a new address map entry to | |||
3240 | * hold the result. Fill in the fields from | |||
3241 | * the appropriate source entries. | |||
3242 | */ | |||
3243 | ||||
3244 | new_entry = vm_map_copy_entry_create(copy)_vm_map_entry_create(&(copy)->c_u.hdr); | |||
3245 | vm_map_entry_copy(new_entry, src_entry)({ *(new_entry) = *(src_entry); (new_entry)->is_shared = ( (boolean_t) 0); (new_entry)->needs_wakeup = ((boolean_t) 0 ); (new_entry)->in_transition = ((boolean_t) 0); (new_entry )->wired_count = 0; (new_entry)->user_wired_count = 0; } ); | |||
3246 | ||||
3247 | /* | |||
3248 | * Attempt non-blocking copy-on-write optimizations. | |||
3249 | */ | |||
3250 | ||||
3251 | if (src_destroy && | |||
3252 | (src_object == VM_OBJECT_NULL((vm_object_t) 0) || | |||
3253 | (src_object->temporary && !src_object->use_shared_copy))) | |||
3254 | { | |||
3255 | /* | |||
3256 | * If we are destroying the source, and the object | |||
3257 | * is temporary, and not shared writable, | |||
3258 | * we can move the object reference | |||
3259 | * from the source to the copy. The copy is | |||
3260 | * copy-on-write only if the source is. | |||
3261 | * We make another reference to the object, because | |||
3262 | * destroying the source entry will deallocate it. | |||
3263 | */ | |||
3264 | vm_object_reference(src_object); | |||
3265 | ||||
3266 | /* | |||
3267 | * Copy is always unwired. vm_map_entry_copy | |||
3268 | * set its wired count to zero. | |||
3269 | */ | |||
3270 | ||||
3271 | goto CopySuccessful; | |||
3272 | } | |||
3273 | ||||
3274 | if (!was_wired && | |||
3275 | vm_object_copy_temporary( | |||
3276 | &new_entry->object.vm_object, | |||
3277 | &new_entry->offset, | |||
3278 | &src_needs_copy, | |||
3279 | &new_entry_needs_copy)) { | |||
3280 | ||||
3281 | new_entry->needs_copy = new_entry_needs_copy; | |||
3282 | ||||
3283 | /* | |||
3284 | * Handle copy-on-write obligations | |||
3285 | */ | |||
3286 | ||||
3287 | if (src_needs_copy && !tmp_entry->needs_copy) { | |||
3288 | vm_object_pmap_protect( | |||
3289 | src_object, | |||
3290 | src_offset, | |||
3291 | src_size, | |||
3292 | (src_entry->is_shared ? PMAP_NULL((pmap_t) 0) | |||
3293 | : src_map->pmap), | |||
3294 | src_entry->vme_startlinks.start, | |||
3295 | src_entry->protection & | |||
3296 | ~VM_PROT_WRITE((vm_prot_t) 0x02)); | |||
3297 | ||||
3298 | tmp_entry->needs_copy = TRUE((boolean_t) 1); | |||
3299 | } | |||
3300 | ||||
3301 | /* | |||
3302 | * The map has never been unlocked, so it's safe to | |||
3303 | * move to the next entry rather than doing another | |||
3304 | * lookup. | |||
3305 | */ | |||
3306 | ||||
3307 | goto CopySuccessful; | |||
3308 | } | |||
3309 | ||||
3310 | new_entry->needs_copy = FALSE((boolean_t) 0); | |||
3311 | ||||
3312 | /* | |||
3313 | * Take an object reference, so that we may | |||
3314 | * release the map lock(s). | |||
3315 | */ | |||
3316 | ||||
3317 | assert(src_object != VM_OBJECT_NULL)({ if (!(src_object != ((vm_object_t) 0))) Assert("src_object != VM_OBJECT_NULL" , "../vm/vm_map.c", 3317); }); | |||
3318 | vm_object_reference(src_object); | |||
3319 | ||||
3320 | /* | |||
3321 | * Record the timestamp for later verification. | |||
3322 | * Unlock the map. | |||
3323 | */ | |||
3324 | ||||
3325 | version.main_timestamp = src_map->timestamp; | |||
3326 | vm_map_unlock(src_map)lock_done(&(src_map)->lock); | |||
3327 | ||||
3328 | /* | |||
3329 | * Perform the copy | |||
3330 | */ | |||
3331 | ||||
3332 | if (was_wired) { | |||
3333 | vm_object_lock(src_object); | |||
3334 | (void) vm_object_copy_slowly( | |||
3335 | src_object, | |||
3336 | src_offset, | |||
3337 | src_size, | |||
3338 | FALSE((boolean_t) 0), | |||
3339 | &new_entry->object.vm_object); | |||
3340 | new_entry->offset = 0; | |||
3341 | new_entry->needs_copy = FALSE((boolean_t) 0); | |||
3342 | } else { | |||
3343 | kern_return_t result; | |||
3344 | ||||
3345 | result = vm_object_copy_strategically(src_object, | |||
3346 | src_offset, | |||
3347 | src_size, | |||
3348 | &new_entry->object.vm_object, | |||
3349 | &new_entry->offset, | |||
3350 | &new_entry_needs_copy); | |||
3351 | ||||
3352 | new_entry->needs_copy = new_entry_needs_copy; | |||
3353 | ||||
3354 | ||||
3355 | if (result != KERN_SUCCESS0) { | |||
3356 | vm_map_copy_entry_dispose(copy, new_entry)_vm_map_entry_dispose(&(copy)->c_u.hdr, (new_entry)); | |||
3357 | ||||
3358 | vm_map_lock(src_map)({ lock_write(&(src_map)->lock); (src_map)->timestamp ++; }); | |||
3359 | RETURN(result); | |||
3360 | } | |||
3361 | ||||
3362 | } | |||
3363 | ||||
3364 | /* | |||
3365 | * Throw away the extra reference | |||
3366 | */ | |||
3367 | ||||
3368 | vm_object_deallocate(src_object); | |||
3369 | ||||
3370 | /* | |||
3371 | * Verify that the map has not substantially | |||
3372 | * changed while the copy was being made. | |||
3373 | */ | |||
3374 | ||||
3375 | vm_map_lock(src_map)({ lock_write(&(src_map)->lock); (src_map)->timestamp ++; }); /* Increments timestamp once! */ | |||
3376 | ||||
3377 | if ((version.main_timestamp + 1) == src_map->timestamp) | |||
3378 | goto CopySuccessful; | |||
3379 | ||||
3380 | /* | |||
3381 | * Simple version comparison failed. | |||
3382 | * | |||
3383 | * Retry the lookup and verify that the | |||
3384 | * same object/offset are still present. | |||
3385 | * | |||
3386 | * [Note: a memory manager that colludes with | |||
3387 | * the calling task can detect that we have | |||
3388 | * cheated. While the map was unlocked, the | |||
3389 | * mapping could have been changed and restored.] | |||
3390 | */ | |||
3391 | ||||
3392 | if (!vm_map_lookup_entry(src_map, src_start, &tmp_entry)) { | |||
3393 | vm_map_copy_entry_dispose(copy, new_entry)_vm_map_entry_dispose(&(copy)->c_u.hdr, (new_entry)); | |||
3394 | RETURN(KERN_INVALID_ADDRESS1); | |||
3395 | } | |||
3396 | ||||
3397 | src_entry = tmp_entry; | |||
3398 | vm_map_clip_start(src_map, src_entry, src_start)({ if ((src_start) > (src_entry)->links.start) _vm_map_clip_start (&(src_map)->hdr,(src_entry),(src_start)); }); | |||
3399 | ||||
3400 | if ((src_entry->protection & VM_PROT_READ((vm_prot_t) 0x01)) == VM_PROT_NONE((vm_prot_t) 0x00)) | |||
3401 | goto VerificationFailed; | |||
3402 | ||||
3403 | if (src_entry->vme_endlinks.end < new_entry->vme_endlinks.end) | |||
3404 | src_size = (new_entry->vme_endlinks.end = src_entry->vme_endlinks.end) - src_start; | |||
3405 | ||||
3406 | if ((src_entry->object.vm_object != src_object) || | |||
3407 | (src_entry->offset != src_offset) ) { | |||
3408 | ||||
3409 | /* | |||
3410 | * Verification failed. | |||
3411 | * | |||
3412 | * Start over with this top-level entry. | |||
3413 | */ | |||
3414 | ||||
3415 | VerificationFailed: ; | |||
3416 | ||||
3417 | vm_object_deallocate(new_entry->object.vm_object); | |||
3418 | vm_map_copy_entry_dispose(copy, new_entry)_vm_map_entry_dispose(&(copy)->c_u.hdr, (new_entry)); | |||
3419 | tmp_entry = src_entry; | |||
3420 | continue; | |||
3421 | } | |||
3422 | ||||
3423 | /* | |||
3424 | * Verification succeeded. | |||
3425 | */ | |||
3426 | ||||
3427 | CopySuccessful: ; | |||
3428 | ||||
3429 | /* | |||
3430 | * Link in the new copy entry. | |||
3431 | */ | |||
3432 | ||||
3433 | vm_map_copy_entry_link(copy, vm_map_copy_last_entry(copy),({ (&(copy)->c_u.hdr)->nentries++; (new_entry)-> links.prev = (((copy)->c_u.hdr.links.prev)); (new_entry)-> links.next = (((copy)->c_u.hdr.links.prev))->links.next ; (new_entry)->links.prev->links.next = (new_entry)-> links.next->links.prev = (new_entry); ({ struct rbtree_node *___cur, *___prev; int ___diff, ___index; ___prev = ((void * ) 0); ___index = -1; ___cur = (&(&(copy)->c_u.hdr) ->tree)->root; while (___cur != ((void *) 0)) { ___diff = vm_map_entry_cmp_insert(&(new_entry)->tree_node, ___cur ); ({ if (!(___diff != 0)) Assert("___diff != 0", "../vm/vm_map.c" , 3434); }); ___prev = ___cur; ___index = rbtree_d2i(___diff) ; ___cur = ___cur->children[___index]; } rbtree_insert_rebalance (&(&(copy)->c_u.hdr)->tree, ___prev, ___index, & (new_entry)->tree_node); }); }) | |||
3434 | new_entry)({ (&(copy)->c_u.hdr)->nentries++; (new_entry)-> links.prev = (((copy)->c_u.hdr.links.prev)); (new_entry)-> links.next = (((copy)->c_u.hdr.links.prev))->links.next ; (new_entry)->links.prev->links.next = (new_entry)-> links.next->links.prev = (new_entry); ({ struct rbtree_node *___cur, *___prev; int ___diff, ___index; ___prev = ((void * ) 0); ___index = -1; ___cur = (&(&(copy)->c_u.hdr) ->tree)->root; while (___cur != ((void *) 0)) { ___diff = vm_map_entry_cmp_insert(&(new_entry)->tree_node, ___cur ); ({ if (!(___diff != 0)) Assert("___diff != 0", "../vm/vm_map.c" , 3434); }); ___prev = ___cur; ___index = rbtree_d2i(___diff) ; ___cur = ___cur->children[___index]; } rbtree_insert_rebalance (&(&(copy)->c_u.hdr)->tree, ___prev, ___index, & (new_entry)->tree_node); }); }); | |||
3435 | ||||
3436 | /* | |||
3437 | * Determine whether the entire region | |||
3438 | * has been copied. | |||
3439 | */ | |||
3440 | src_start = new_entry->vme_endlinks.end; | |||
3441 | if ((src_start >= src_end) && (src_end != 0)) | |||
3442 | break; | |||
3443 | ||||
3444 | /* | |||
3445 | * Verify that there are no gaps in the region | |||
3446 | */ | |||
3447 | ||||
3448 | tmp_entry = src_entry->vme_nextlinks.next; | |||
3449 | if (tmp_entry->vme_startlinks.start != src_start) | |||
3450 | RETURN(KERN_INVALID_ADDRESS1); | |||
3451 | } | |||
3452 | ||||
3453 | /* | |||
3454 | * If the source should be destroyed, do it now, since the | |||
3455 | * copy was successful. | |||
3456 | */ | |||
3457 | if (src_destroy) | |||
3458 | (void) vm_map_delete(src_map, trunc_page(src_addr)((vm_offset_t)(((vm_offset_t)(src_addr)) & ~((1 << 12 )-1))), src_end); | |||
3459 | ||||
3460 | vm_map_unlock(src_map)lock_done(&(src_map)->lock); | |||
3461 | ||||
3462 | *copy_result = copy; | |||
3463 | return(KERN_SUCCESS0); | |||
3464 | ||||
3465 | #undef RETURN | |||
3466 | } | |||
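Two details of the loop above are worth calling out. Wired source entries are copied eagerly with vm_object_copy_slowly, while unwired ones first try the copy-on-write fast path and otherwise fall back to vm_object_copy_strategically; both slow paths require dropping the map lock. Correctness after the unlock rests on the timestamp check: vm_map_lock bumps the timestamp, so if exactly one bump has happened since the version was recorded, nobody else touched the map and the cached lookup is still valid. The snippet below is a minimal model of that optimistic check; struct map, map_lock and map_unlock are invented stand-ins, not the kernel's lock package:

    #include <stdbool.h>
    #include <stdio.h>

    struct map { unsigned timestamp; };

    static void map_lock(struct map *m)   { m->timestamp++; }  /* locking bumps the version */
    static void map_unlock(struct map *m) { (void)m; }

    int main(void)
    {
        struct map m = { 0 };

        map_lock(&m);
        unsigned version = m.timestamp;   /* remember the version before unlocking    */
        map_unlock(&m);

        /* ... the slow object copy would run here with the map unlocked ...          */

        map_lock(&m);                     /* re-locking increments the timestamp once  */
        bool unchanged = (version + 1 == m.timestamp);
        printf(unchanged ? "lookup still valid\n" : "must redo the lookup\n");
        map_unlock(&m);
        return 0;
    }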
3467 | ||||
3468 | /* | |||
3469 | * vm_map_copyin_object: | |||
3470 | * | |||
3471 | * Create a copy object from an object. | |||
3472 | * Our caller donates an object reference. | |||
3473 | */ | |||
3474 | ||||
3475 | kern_return_t vm_map_copyin_object(object, offset, size, copy_result) | |||
3476 | vm_object_t object; | |||
3477 | vm_offset_t offset; /* offset of region in object */ | |||
3478 | vm_size_t size; /* size of region in object */ | |||
3479 | vm_map_copy_t *copy_result; /* OUT */ | |||
3480 | { | |||
3481 | vm_map_copy_t copy; /* Resulting copy */ | |||
3482 | ||||
3483 | /* | |||
3484 | * We drop the object into a special copy object | |||
3485 | * that contains the object directly. These copy objects | |||
3486 | * are distinguished by entries_pageable == FALSE | |||
3487 | * and null links. | |||
3488 | */ | |||
3489 | ||||
3490 | copy = (vm_map_copy_t) kmem_cache_alloc(&vm_map_copy_cache); | |||
3491 | vm_map_copy_first_entry(copy)((copy)->c_u.hdr.links.next) = | |||
3492 | vm_map_copy_last_entry(copy)((copy)->c_u.hdr.links.prev) = VM_MAP_ENTRY_NULL((vm_map_entry_t) 0); | |||
3493 | copy->type = VM_MAP_COPY_OBJECT2; | |||
3494 | copy->cpy_objectc_u.c_o.object = object; | |||
3495 | copy->offset = offset; | |||
3496 | copy->size = size; | |||
3497 | ||||
3498 | *copy_result = copy; | |||
3499 | return(KERN_SUCCESS0); | |||
3500 | } | |||
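Object copies like the one built here are one of three copy flavours the rest of the file dispatches on: entry-list copies are spliced into the target map, object copies are mapped directly with vm_map_enter, and page-list copies go through vm_map_copyout_page_list. The sketch below models that dispatch with a simplified stand-in for the copy header; only the three type values (1, 2, 3) are taken from the listing, everything else is invented:

    #include <stdio.h>

    enum copy_type { COPY_ENTRY_LIST = 1, COPY_OBJECT = 2, COPY_PAGE_LIST = 3 };

    struct copy {
        enum copy_type type;
        unsigned long  offset, size;
    };

    static const char *copyout_strategy(const struct copy *c)
    {
        switch (c->type) {
        case COPY_OBJECT:     return "map the donated object directly";
        case COPY_PAGE_LIST:  return "hand off to the page-list variant";
        case COPY_ENTRY_LIST: return "splice the entry chain into the map";
        default:              return "unknown copy type";
        }
    }

    int main(void)
    {
        struct copy c = { COPY_OBJECT, 0, 4096 };
        printf("%s\n", copyout_strategy(&c));
        return 0;
    }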
3501 | ||||
3502 | /* | |||
3503 | * vm_map_copyin_page_list_cont: | |||
3504 | * | |||
3505 | * Continuation routine for vm_map_copyin_page_list. | |||
3506 | * | |||
3507 | * If vm_map_copyin_page_list can't fit the entire vm range | |||
3508 | * into a single page list object, it creates a continuation. | |||
3509 | * When the target of the operation has used the pages in the | |||
3510 | * initial page list, it invokes the continuation, which calls | |||
3511 | * this routine. If an error happens, the continuation is aborted | |||
3512 | * (abort arg to this routine is TRUE). To avoid deadlocks, the | |||
3513 | * pages are discarded from the initial page list before invoking | |||
3514 | * the continuation. | |||
3515 | * | |||
3516 | * NOTE: This is not the same sort of continuation used by | |||
3517 | * the scheduler. | |||
3518 | */ | |||
3519 | ||||
3520 | kern_return_t vm_map_copyin_page_list_cont(cont_args, copy_result) | |||
3521 | vm_map_copyin_args_t cont_args; | |||
3522 | vm_map_copy_t *copy_result; /* OUT */ | |||
3523 | { | |||
3524 | kern_return_t result = 0; /* '=0' to quiet gcc warnings */ | |||
3525 | register boolean_t do_abort, src_destroy, src_destroy_only; | |||
3526 | ||||
3527 | /* | |||
3528 | * Check for cases that only require memory destruction. | |||
3529 | */ | |||
3530 | do_abort = (copy_result == (vm_map_copy_t *) 0); | |||
3531 | src_destroy = (cont_args->destroy_len != (vm_size_t) 0); | |||
3532 | src_destroy_only = (cont_args->src_len == (vm_size_t) 0); | |||
3533 | ||||
3534 | if (do_abort || src_destroy_only) { | |||
3535 | if (src_destroy) | |||
3536 | result = vm_map_remove(cont_args->map, | |||
3537 | cont_args->destroy_addr, | |||
3538 | cont_args->destroy_addr + cont_args->destroy_len); | |||
3539 | if (!do_abort) | |||
3540 | *copy_result = VM_MAP_COPY_NULL((vm_map_copy_t) 0); | |||
3541 | } | |||
3542 | else { | |||
3543 | result = vm_map_copyin_page_list(cont_args->map, | |||
3544 | cont_args->src_addr, cont_args->src_len, src_destroy, | |||
3545 | cont_args->steal_pages, copy_result, TRUE((boolean_t) 1)); | |||
3546 | ||||
3547 | if (src_destroy && !cont_args->steal_pages && | |||
3548 | vm_map_copy_has_cont(*copy_result)(((*copy_result)->c_u.c_p.cont) != (kern_return_t (*)()) 0 )) { | |||
3549 | vm_map_copyin_args_t new_args; | |||
3550 | /* | |||
3551 | * Transfer old destroy info. | |||
3552 | */ | |||
3553 | new_args = (vm_map_copyin_args_t) | |||
3554 | (*copy_result)->cpy_cont_argsc_u.c_p.cont_args; | |||
3555 | new_args->destroy_addr = cont_args->destroy_addr; | |||
3556 | new_args->destroy_len = cont_args->destroy_len; | |||
3557 | } | |||
3558 | } | |||
3559 | ||||
3560 | vm_map_deallocate(cont_args->map); | |||
3561 | kfree((vm_offset_t)cont_args, sizeof(vm_map_copyin_args_data_t)); | |||
3562 | ||||
3563 | return(result); | |||
3564 | } | |||
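The continuation protocol exercised by this routine is just a (function, argument) pair stored in the copy: the consumer either calls it with a result pointer to obtain the next batch of pages, or with a null result pointer to abort and release the source resources, and each new copy may itself carry a further continuation. The stand-alone example below reproduces that hand-off pattern in plain C with invented names; it mirrors the shape of the protocol, not the kernel's data structures:

    #include <stdio.h>
    #include <stdlib.h>

    struct chunk;
    typedef int (*cont_fn)(void *args, struct chunk **out);

    struct chunk {
        int     first, count;    /* range covered by this chunk */
        cont_fn cont;            /* NULL when the chunk is the last one */
        void   *cont_args;
    };

    struct range_args { int next, end; };

    static int range_cont(void *argp, struct chunk **out)
    {
        struct range_args *a = argp;

        if (out == NULL || a->next >= a->end) {  /* abort, or nothing left */
            if (out != NULL)
                *out = NULL;
            free(a);
            return 0;
        }

        struct chunk *c = malloc(sizeof *c);
        c->first = a->next;
        c->count = (a->end - a->next > 4) ? 4 : (a->end - a->next);
        a->next += c->count;

        if (a->next < a->end) {                  /* more remains: pass the baton */
            c->cont = range_cont;
            c->cont_args = a;
        } else {
            c->cont = NULL;
            c->cont_args = NULL;
            free(a);
        }
        *out = c;
        return 0;
    }

    int main(void)
    {
        struct range_args *a = malloc(sizeof *a);
        a->next = 0;
        a->end  = 10;

        struct chunk *c;
        range_cont(a, &c);                       /* initial chunk */
        while (c != NULL) {
            printf("pages %d..%d\n", c->first, c->first + c->count - 1);
            cont_fn  f    = c->cont;
            void    *args = c->cont_args;
            free(c);
            c = NULL;
            if (f != NULL)
                f(args, &c);                     /* ask the continuation for more */
        }
        return 0;
    }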
3565 | ||||
3566 | /* | |||
3567 | * vm_map_copyin_page_list: | |||
3568 | * | |||
3569 | * This is a variant of vm_map_copyin that copies in a list of pages. | |||
3570 | * If steal_pages is TRUE, the pages are only in the returned list. | |||
3571 | * If steal_pages is FALSE, the pages are busy and still in their | |||
3572 | * objects. A continuation may be returned if not all the pages fit: | |||
3573 | * the recipient of this copy_result must be prepared to deal with it. | |||
3574 | */ | |||
3575 | ||||
3576 | kern_return_t vm_map_copyin_page_list(src_map, src_addr, len, src_destroy, | |||
3577 | steal_pages, copy_result, is_cont) | |||
3578 | vm_map_t src_map; | |||
3579 | vm_offset_t src_addr; | |||
3580 | vm_size_t len; | |||
3581 | boolean_t src_destroy; | |||
3582 | boolean_t steal_pages; | |||
3583 | vm_map_copy_t *copy_result; /* OUT */ | |||
3584 | boolean_t is_cont; | |||
3585 | { | |||
3586 | vm_map_entry_t src_entry; | |||
3587 | vm_page_t m; | |||
3588 | vm_offset_t src_start; | |||
3589 | vm_offset_t src_end; | |||
3590 | vm_size_t src_size; | |||
3591 | register | |||
3592 | vm_object_t src_object; | |||
3593 | register | |||
3594 | vm_offset_t src_offset; | |||
3595 | vm_offset_t src_last_offset; | |||
3596 | register | |||
3597 | vm_map_copy_t copy; /* Resulting copy */ | |||
3598 | kern_return_t result = KERN_SUCCESS0; | |||
3599 | boolean_t need_map_lookup; | |||
3600 | vm_map_copyin_args_t cont_args; | |||
3601 | ||||
3602 | /* | |||
3603 | * If steal_pages is FALSE, this leaves busy pages in | |||
3604 | * the object. A continuation must be used if src_destroy | |||
3605 | * is true in this case (!steal_pages && src_destroy). | |||
3606 | * | |||
3607 | * XXX Still have a more general problem of what happens | |||
3608 | * XXX if the same page occurs twice in a list. Deadlock | |||
3609 | * XXX can happen if vm_fault_page was called. A | |||
3610 | * XXX possible solution is to use a continuation if vm_fault_page | |||
3611 | * XXX is called and we cross a map entry boundary. | |||
3612 | */ | |||
3613 | ||||
3614 | /* | |||
3615 | * Check for copies of zero bytes. | |||
3616 | */ | |||
3617 | ||||
3618 | if (len == 0) { | |||
3619 | *copy_result = VM_MAP_COPY_NULL((vm_map_copy_t) 0); | |||
3620 | return(KERN_SUCCESS0); | |||
3621 | } | |||
3622 | ||||
3623 | /* | |||
3624 | * Compute start and end of region | |||
3625 | */ | |||
3626 | ||||
3627 | src_start = trunc_page(src_addr)((vm_offset_t)(((vm_offset_t)(src_addr)) & ~((1 << 12 )-1))); | |||
3628 | src_end = round_page(src_addr + len)((vm_offset_t)((((vm_offset_t)(src_addr + len)) + ((1 << 12)-1)) & ~((1 << 12)-1))); | |||
3629 | ||||
3630 | /* | |||
3631 | * Check that the end address doesn't overflow | |||
3632 | */ | |||
3633 | ||||
3634 | if (src_end <= src_start && (src_end < src_start || src_start != 0)) { | |||
3635 | return KERN_INVALID_ADDRESS1; | |||
3636 | } | |||
3637 | ||||
3638 | /* | |||
3639 | * Allocate a header element for the page list. | |||
3640 | * | |||
3641 | * Record original offset and size, as caller may not | |||
3642 | * be page-aligned. | |||
3643 | */ | |||
3644 | ||||
3645 | copy = (vm_map_copy_t) kmem_cache_alloc(&vm_map_copy_cache); | |||
3646 | copy->type = VM_MAP_COPY_PAGE_LIST3; | |||
3647 | copy->cpy_npagesc_u.c_p.npages = 0; | |||
3648 | copy->offset = src_addr; | |||
3649 | copy->size = len; | |||
3650 | copy->cpy_contc_u.c_p.cont = ((kern_return_t (*)()) 0); | |||
3651 | copy->cpy_cont_argsc_u.c_p.cont_args = (char *) VM_MAP_COPYIN_ARGS_NULL((vm_map_copyin_args_t) 0); | |||
3652 | ||||
3653 | /* | |||
3654 | * Find the beginning of the region. | |||
3655 | */ | |||
3656 | ||||
3657 | do_map_lookup: | |||
3658 | ||||
3659 | vm_map_lock(src_map)({ lock_write(&(src_map)->lock); (src_map)->timestamp ++; }); | |||
3660 | ||||
3661 | if (!vm_map_lookup_entry(src_map, src_start, &src_entry)) { | |||
3662 | result = KERN_INVALID_ADDRESS1; | |||
3663 | goto error; | |||
3664 | } | |||
3665 | need_map_lookup = FALSE((boolean_t) 0); | |||
3666 | ||||
3667 | /* | |||
3668 | * Go through entries until we get to the end. | |||
3669 | */ | |||
3670 | ||||
3671 | while (TRUE((boolean_t) 1)) { | |||
3672 | ||||
3673 | if (! (src_entry->protection & VM_PROT_READ((vm_prot_t) 0x01))) { | |||
3674 | result = KERN_PROTECTION_FAILURE2; | |||
3675 | goto error; | |||
3676 | } | |||
3677 | ||||
3678 | if (src_end > src_entry->vme_endlinks.end) | |||
3679 | src_size = src_entry->vme_endlinks.end - src_start; | |||
3680 | else | |||
3681 | src_size = src_end - src_start; | |||
3682 | ||||
3683 | src_object = src_entry->object.vm_object; | |||
3684 | src_offset = src_entry->offset + | |||
3685 | (src_start - src_entry->vme_startlinks.start); | |||
3686 | ||||
3687 | /* | |||
3688 | * If src_object is NULL, allocate it now; | |||
3689 | * we're going to fault on it shortly. | |||
3690 | */ | |||
3691 | if (src_object == VM_OBJECT_NULL((vm_object_t) 0)) { | |||
3692 | src_object = vm_object_allocate((vm_size_t) | |||
3693 | src_entry->vme_endlinks.end - | |||
3694 | src_entry->vme_startlinks.start); | |||
3695 | src_entry->object.vm_object = src_object; | |||
3696 | } | |||
3697 | ||||
3698 | /* | |||
3699 | * Iterate over pages. Fault in ones that aren't present. | |||
3700 | */ | |||
3701 | src_last_offset = src_offset + src_size; | |||
3702 | for (; (src_offset < src_last_offset && !need_map_lookup); | |||
3703 | src_offset += PAGE_SIZE(1 << 12), src_start += PAGE_SIZE(1 << 12)) { | |||
3704 | ||||
3705 | if (copy->cpy_npagesc_u.c_p.npages == VM_MAP_COPY_PAGE_LIST_MAX64) { | |||
3706 | make_continuation: | |||
3707 | /* | |||
3708 | * At this point we have the max number of | |||
3709 | * pages busy for this thread that we're | |||
3710 | * willing to allow. Stop here and record | |||
3711 | * arguments for the remainder. Note: | |||
3712 | * this means that this routine isn't atomic, | |||
3713 | * but that's the breaks. Note that only | |||
3714 | * the first vm_map_copy_t that comes back | |||
3715 | * from this routine has the right offset | |||
3716 | * and size; those from continuations are | |||
3717 | * page rounded, and short by the amount | |||
3718 | * already done. | |||
3719 | * | |||
3720 | * Reset src_end so the src_destroy | |||
3721 | * code at the bottom doesn't do | |||
3722 | * something stupid. | |||
3723 | */ | |||
3724 | ||||
3725 | cont_args = (vm_map_copyin_args_t) | |||
3726 | kalloc(sizeof(vm_map_copyin_args_data_t)); | |||
3727 | cont_args->map = src_map; | |||
3728 | vm_map_reference(src_map); | |||
3729 | cont_args->src_addr = src_start; | |||
3730 | cont_args->src_len = len - (src_start - src_addr); | |||
3731 | if (src_destroy) { | |||
3732 | cont_args->destroy_addr = cont_args->src_addr; | |||
3733 | cont_args->destroy_len = cont_args->src_len; | |||
3734 | } | |||
3735 | else { | |||
3736 | cont_args->destroy_addr = (vm_offset_t) 0; | |||
3737 | cont_args->destroy_len = (vm_offset_t) 0; | |||
3738 | } | |||
3739 | cont_args->steal_pages = steal_pages; | |||
3740 | ||||
3741 | copy->cpy_cont_argsc_u.c_p.cont_args = (char *) cont_args; | |||
3742 | copy->cpy_contc_u.c_p.cont = vm_map_copyin_page_list_cont; | |||
3743 | ||||
3744 | src_end = src_start; | |||
3745 | vm_map_clip_end(src_map, src_entry, src_end)({ if ((src_end) < (src_entry)->links.end) _vm_map_clip_end (&(src_map)->hdr,(src_entry),(src_end)); }); | |||
3746 | break; | |||
3747 | } | |||
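| /* | |||
| * The remainder of the region is copied only when this | |||
| * continuation is later invoked (by the consumer of the copy, | |||
| * typically through vm_map_copy_invoke_cont); it re-enters | |||
| * vm_map_copyin_page_list with the arguments saved above. | |||
| */ | |||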
3748 | ||||
3749 | /* | |||
3750 | * Try to find the page of data. | |||
3751 | */ | |||
3752 | vm_object_lock(src_object); | |||
3753 | vm_object_paging_begin(src_object)((src_object)->paging_in_progress++); | |||
3754 | if (((m = vm_page_lookup(src_object, src_offset)) != | |||
3755 | VM_PAGE_NULL((vm_page_t) 0)) && !m->busy && !m->fictitious && | |||
3756 | !m->absent && !m->error) { | |||
3757 | ||||
3758 | /* | |||
3759 | * This is the page. Mark it busy | |||
3760 | * and keep the paging reference on | |||
3761 | * the object whilst we do our thing. | |||
3762 | */ | |||
3763 | m->busy = TRUE((boolean_t) 1); | |||
3764 | ||||
3765 | /* | |||
3766 | * Also write-protect the page, so | |||
3767 | * that the map`s owner cannot change | |||
3768 | * the data. The busy bit will prevent | |||
3769 | * faults on the page from succeeding | |||
3770 | * until the copy is released; after | |||
3771 | * that, the page can be re-entered | |||
3772 | * as writable, since we didn`t alter | |||
3773 | * the map entry. This scheme is a | |||
3774 | * cheap copy-on-write. | |||
3775 | * | |||
3776 | * Don`t forget the protection and | |||
3777 | * the page_lock value! | |||
3778 | * | |||
3779 | * If the source is being destroyed | |||
3780 | * AND not shared writable, we don`t | |||
3781 | * have to protect the page, since | |||
3782 | * we will destroy the (only) | |||
3783 | * writable mapping later. | |||
3784 | */ | |||
3785 | if (!src_destroy || | |||
3786 | src_object->use_shared_copy) | |||
3787 | { | |||
3788 | pmap_page_protect(m->phys_addr, | |||
3789 | src_entry->protection | |||
3790 | & ~m->page_lock | |||
3791 | & ~VM_PROT_WRITE((vm_prot_t) 0x02)); | |||
3792 | } | |||
3793 | ||||
3794 | } | |||
3795 | else { | |||
3796 | vm_prot_t result_prot; | |||
3797 | vm_page_t top_page; | |||
3798 | kern_return_t kr; | |||
3799 | ||||
3800 | /* | |||
3801 | * Have to fault the page in; must | |||
3802 | * unlock the map to do so. While | |||
3803 | * the map is unlocked, anything | |||
3804 | * can happen, so we must look up the | |||
3805 | * map entry again before continuing. | |||
3806 | */ | |||
3807 | vm_map_unlock(src_map)lock_done(&(src_map)->lock); | |||
3808 | need_map_lookup = TRUE((boolean_t) 1); | |||
3809 | retry: | |||
3810 | result_prot = VM_PROT_READ((vm_prot_t) 0x01); | |||
3811 | ||||
3812 | kr = vm_fault_page(src_object, src_offset, | |||
3813 | VM_PROT_READ((vm_prot_t) 0x01), FALSE((boolean_t) 0), FALSE((boolean_t) 0), | |||
3814 | &result_prot, &m, &top_page, | |||
3815 | FALSE((boolean_t) 0), (void (*)()) 0); | |||
3816 | /* | |||
3817 | * Cope with what happened. | |||
3818 | */ | |||
3819 | switch (kr) { | |||
3820 | case VM_FAULT_SUCCESS0: | |||
3821 | break; | |||
3822 | case VM_FAULT_INTERRUPTED2: /* ??? */ | |||
3823 | case VM_FAULT_RETRY1: | |||
3824 | vm_object_lock(src_object); | |||
3825 | vm_object_paging_begin(src_object)((src_object)->paging_in_progress++); | |||
3826 | goto retry; | |||
3827 | case VM_FAULT_MEMORY_SHORTAGE3: | |||
3828 | VM_PAGE_WAIT((void (*)()) 0)vm_page_wait((void (*)()) 0); | |||
3829 | vm_object_lock(src_object); | |||
3830 | vm_object_paging_begin(src_object)((src_object)->paging_in_progress++); | |||
3831 | goto retry; | |||
3832 | case VM_FAULT_FICTITIOUS_SHORTAGE4: | |||
3833 | vm_page_more_fictitious(); | |||
3834 | vm_object_lock(src_object); | |||
3835 | vm_object_paging_begin(src_object)((src_object)->paging_in_progress++); | |||
3836 | goto retry; | |||
3837 | case VM_FAULT_MEMORY_ERROR5: | |||
3838 | /* | |||
3839 | * Something broke. If this | |||
3840 | * is a continuation, return | |||
3841 | * a partial result if possible, | |||
3842 | * else fail the whole thing. | |||
3843 | * In the continuation case, the | |||
3844 | * next continuation call will | |||
3845 | * get this error if it persists. | |||
3846 | */ | |||
3847 | vm_map_lock(src_map)({ lock_write(&(src_map)->lock); (src_map)->timestamp ++; }); | |||
3848 | if (is_cont && | |||
3849 | copy->cpy_npagesc_u.c_p.npages != 0) | |||
3850 | goto make_continuation; | |||
3851 | ||||
3852 | result = KERN_MEMORY_ERROR10; | |||
3853 | goto error; | |||
3854 | } | |||
3855 | ||||
3856 | if (top_page != VM_PAGE_NULL((vm_page_t) 0)) { | |||
3857 | vm_object_lock(src_object); | |||
3858 | VM_PAGE_FREE(top_page)({ ; vm_page_free(top_page); ; }); | |||
3859 | vm_object_paging_end(src_object)({ ({ if (!((src_object)->paging_in_progress != 0)) Assert ("(src_object)->paging_in_progress != 0", "../vm/vm_map.c" , 3859); }); if (--(src_object)->paging_in_progress == 0) { ({ if ((src_object)->all_wanted & (1 << (2))) thread_wakeup_prim (((event_t)(((vm_offset_t) src_object) + (2))), ((boolean_t) 0 ), 0); (src_object)->all_wanted &= ~(1 << (2)); } ); } }); | |||
3860 | vm_object_unlock(src_object); | |||
3861 | } | |||
3862 | ||||
3863 | /* | |||
3864 | * We do not need to write-protect | |||
3865 | * the page, since it cannot have | |||
3866 | * been in the pmap (and we did not | |||
3867 | * enter it above). The busy bit | |||
3868 | * will protect the page from being | |||
3869 | * entered as writable until it is | |||
3870 | * unlocked. | |||
3871 | */ | |||
3872 | ||||
3873 | } | |||
3874 | ||||
3875 | /* | |||
3876 | * The page is busy, its object is locked, and | |||
3877 | * we have a paging reference on it. Either | |||
3878 | * the map is locked, or need_map_lookup is | |||
3879 | * TRUE. | |||
3880 | * | |||
3881 | * Put the page in the page list. | |||
3882 | */ | |||
3883 | copy->cpy_page_listc_u.c_p.page_list[copy->cpy_npagesc_u.c_p.npages++] = m; | |||
3884 | vm_object_unlock(m->object); | |||
3885 | } | |||
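| /* | |||
| * The page loop above ends in one of three ways: the page list | |||
| * filled up (a continuation was recorded and src_end was reset), | |||
| * the map lock had to be dropped to fault (need_map_lookup is | |||
| * TRUE), or src_offset reached src_last_offset for this entry. | |||
| */ | |||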
3886 | ||||
3887 | /* | |||
3888 | * DETERMINE whether the entire region | |||
3889 | * has been copied. | |||
3890 | */ | |||
3891 | if (src_start >= src_end && src_end != 0) { | |||
3892 | if (need_map_lookup) | |||
3893 | vm_map_lock(src_map)({ lock_write(&(src_map)->lock); (src_map)->timestamp ++; }); | |||
3894 | break; | |||
3895 | } | |||
3896 | ||||
3897 | /* | |||
3898 | * If need_map_lookup is TRUE, have to start over with | |||
3899 | * another map lookup. Note that we dropped the map | |||
3900 | * lock (to call vm_fault_page) above only in this case. | |||
3901 | */ | |||
3902 | if (need_map_lookup) | |||
3903 | goto do_map_lookup; | |||
3904 | ||||
3905 | /* | |||
3906 | * Verify that there are no gaps in the region | |||
3907 | */ | |||
3908 | ||||
3909 | src_start = src_entry->vme_endlinks.end; | |||
3910 | src_entry = src_entry->vme_nextlinks.next; | |||
3911 | if (src_entry->vme_startlinks.start != src_start) { | |||
3912 | result = KERN_INVALID_ADDRESS1; | |||
3913 | goto error; | |||
3914 | } | |||
3915 | } | |||
3916 | ||||
3917 | /* | |||
3918 | * If steal_pages is true, make sure all | |||
3919 | * pages in the copy are not in any object. | |||
3920 | * We try to remove them from the original | |||
3921 | * object, but we may have to copy them. | |||
3922 | * | |||
3923 | * At this point every page in the list is busy | |||
3924 | * and holds a paging reference to its object. | |||
3925 | * When we're done stealing, every page is busy, | |||
3926 | * and in no object (m->tabled == FALSE). | |||
3927 | */ | |||
3928 | src_start = trunc_page(src_addr)((vm_offset_t)(((vm_offset_t)(src_addr)) & ~((1 << 12 )-1))); | |||
3929 | if (steal_pages) { | |||
3930 | register int i; | |||
3931 | vm_offset_t unwire_end; | |||
3932 | ||||
3933 | unwire_end = src_start; | |||
3934 | for (i = 0; i < copy->cpy_npagesc_u.c_p.npages; i++) { | |||
3935 | ||||
3936 | /* | |||
3937 | * Remove the page from its object if it | |||
3938 | * can be stolen. It can be stolen if: | |||
3939 | * | |||
3940 | * (1) The source is being destroyed, | |||
3941 | * the object is temporary, and | |||
3942 | * not shared. | |||
3943 | * (2) The page is not precious. | |||
3944 | * | |||
3945 | * The not shared check consists of two | |||
3946 | * parts: (a) there are no objects that | |||
3947 | * shadow this object. (b) it is not the | |||
3948 | * object in any shared map entries (i.e., | |||
3949 | * use_shared_copy is not set). | |||
3950 | * | |||
3951 | * The first check (a) means that we can't | |||
3952 | * steal pages from objects that are not | |||
3953 | * at the top of their shadow chains. This | |||
3954 | * should not be a frequent occurrence. | |||
3955 | * | |||
3956 | * Stealing wired pages requires telling the | |||
3957 | * pmap module to let go of them. | |||
3958 | * | |||
3959 | * NOTE: stealing clean pages from objects | |||
3960 | * whose mappings survive requires a call to | |||
3961 | * the pmap module. Maybe later. | |||
3962 | */ | |||
3963 | m = copy->cpy_page_listc_u.c_p.page_list[i]; | |||
3964 | src_object = m->object; | |||
3965 | vm_object_lock(src_object); | |||
3966 | ||||
3967 | if (src_destroy && | |||
3968 | src_object->temporary && | |||
3969 | (!src_object->shadowed) && | |||
3970 | (!src_object->use_shared_copy) && | |||
3971 | !m->precious) { | |||
3972 | vm_offset_t page_vaddr; | |||
3973 | ||||
3974 | page_vaddr = src_start + (i * PAGE_SIZE(1 << 12)); | |||
3975 | if (m->wire_count > 0) { | |||
3976 | ||||
3977 | assert(m->wire_count == 1)({ if (!(m->wire_count == 1)) Assert("m->wire_count == 1" , "../vm/vm_map.c", 3977); }); | |||
3978 | /* | |||
3979 | * In order to steal a wired | |||
3980 | * page, we have to unwire it | |||
3981 | * first. We do this inline | |||
3982 | * here because we have the page. | |||
3983 | * | |||
3984 | * Step 1: Unwire the map entry. | |||
3985 | * Also tell the pmap module | |||
3986 | * that this piece of the | |||
3987 | * pmap is pageable. | |||
3988 | */ | |||
3989 | vm_object_unlock(src_object); | |||
3990 | if (page_vaddr >= unwire_end) { | |||
3991 | if (!vm_map_lookup_entry(src_map, | |||
3992 | page_vaddr, &src_entry)) | |||
3993 | panic("vm_map_copyin_page_list: missing wired map entry"); | |||
3994 | ||||
3995 | vm_map_clip_start(src_map, src_entry,({ if ((page_vaddr) > (src_entry)->links.start) _vm_map_clip_start (&(src_map)->hdr,(src_entry),(page_vaddr)); }) | |||
3996 | page_vaddr)({ if ((page_vaddr) > (src_entry)->links.start) _vm_map_clip_start (&(src_map)->hdr,(src_entry),(page_vaddr)); }); | |||
3997 | vm_map_clip_end(src_map, src_entry,({ if ((src_start + src_size) < (src_entry)->links.end) _vm_map_clip_end(&(src_map)->hdr,(src_entry),(src_start + src_size)); }) | |||
3998 | src_start + src_size)({ if ((src_start + src_size) < (src_entry)->links.end) _vm_map_clip_end(&(src_map)->hdr,(src_entry),(src_start + src_size)); }); | |||
3999 | ||||
4000 | assert(src_entry->wired_count > 0)({ if (!(src_entry->wired_count > 0)) Assert("src_entry->wired_count > 0" , "../vm/vm_map.c", 4000); }); | |||
4001 | src_entry->wired_count = 0; | |||
4002 | src_entry->user_wired_count = 0; | |||
4003 | unwire_end = src_entry->vme_endlinks.end; | |||
4004 | pmap_pageable(vm_map_pmap(src_map)((src_map)->pmap), | |||
4005 | page_vaddr, unwire_end, TRUE((boolean_t) 1)); | |||
4006 | } | |||
4007 | ||||
4008 | /* | |||
4009 | * Step 2: Unwire the page. | |||
4010 | * pmap_remove handles this for us. | |||
4011 | */ | |||
4012 | vm_object_lock(src_object); | |||
4013 | } | |||
4014 | ||||
4015 | /* | |||
4016 | * Don't need to remove the mapping; | |||
4017 | * vm_map_delete will handle it. | |||
4018 | * | |||
4019 | * Steal the page. Setting the wire count | |||
4020 | * to zero is vm_page_unwire without | |||
4021 | * activating the page. | |||
4022 | */ | |||
4023 | vm_page_lock_queues(); | |||
4024 | vm_page_remove(m); | |||
4025 | if (m->wire_count > 0) { | |||
4026 | m->wire_count = 0; | |||
4027 | vm_page_wire_count--; | |||
4028 | } else { | |||
4029 | VM_PAGE_QUEUES_REMOVE(m)({ if (m->active) { { register queue_entry_t next, prev; next = (m)->pageq.next; prev = (m)->pageq.prev; if ((&vm_page_queue_active ) == next) (&vm_page_queue_active)->prev = prev; else ( (vm_page_t)next)->pageq.prev = prev; if ((&vm_page_queue_active ) == prev) (&vm_page_queue_active)->next = next; else ( (vm_page_t)prev)->pageq.next = next; }; m->active = ((boolean_t ) 0); vm_page_active_count--; } if (m->inactive) { { register queue_entry_t next, prev; next = (m)->pageq.next; prev = ( m)->pageq.prev; if ((&vm_page_queue_inactive) == next) (&vm_page_queue_inactive)->prev = prev; else ((vm_page_t )next)->pageq.prev = prev; if ((&vm_page_queue_inactive ) == prev) (&vm_page_queue_inactive)->next = next; else ((vm_page_t)prev)->pageq.next = next; }; m->inactive = ((boolean_t) 0); vm_page_inactive_count--; } }); | |||
4030 | } | |||
4031 | vm_page_unlock_queues(); | |||
4032 | } | |||
4033 | else { | |||
4034 | /* | |||
4035 | * Have to copy this page, which | |||
4036 | * requires unlocking the map while | |||
4037 | * copying. Once the map lock is | |||
4038 | * dropped, no further pages can be | |||
4039 | * stolen, so just copy all the | |||
4040 | * pages instead. | |||
4041 | */ | |||
4042 | vm_object_unlock(src_object); | |||
4043 | vm_map_unlock(src_map)lock_done(&(src_map)->lock); | |||
4044 | ||||
4045 | vm_map_copy_steal_pages(copy); | |||
4046 | ||||
4047 | vm_map_lock(src_map)({ lock_write(&(src_map)->lock); (src_map)->timestamp ++; }); | |||
4048 | break; | |||
4049 | } | |||
4050 | ||||
4051 | vm_object_paging_end(src_object)({ ({ if (!((src_object)->paging_in_progress != 0)) Assert ("(src_object)->paging_in_progress != 0", "../vm/vm_map.c" , 4051); }); if (--(src_object)->paging_in_progress == 0) { ({ if ((src_object)->all_wanted & (1 << (2))) thread_wakeup_prim (((event_t)(((vm_offset_t) src_object) + (2))), ((boolean_t) 0 ), 0); (src_object)->all_wanted &= ~(1 << (2)); } ); } }); | |||
4052 | vm_object_unlock(src_object); | |||
4053 | } | |||
4054 | ||||
4055 | /* | |||
4056 | * If the source should be destroyed, do it now, since the | |||
4057 | * copy was successful. | |||
4058 | */ | |||
4059 | ||||
4060 | if (src_destroy) { | |||
4061 | (void) vm_map_delete(src_map, src_start, src_end); | |||
4062 | } | |||
4063 | } | |||
4064 | else { | |||
4065 | /* | |||
4066 | * !steal_pages leaves busy pages in the map. | |||
4067 | * This will cause src_destroy to hang. Use | |||
4068 | * a continuation to prevent this. | |||
4069 | */ | |||
4070 | if (src_destroy && !vm_map_copy_has_cont(copy)(((copy)->c_u.c_p.cont) != (kern_return_t (*)()) 0)) { | |||
4071 | cont_args = (vm_map_copyin_args_t) | |||
4072 | kalloc(sizeof(vm_map_copyin_args_data_t)); | |||
4073 | vm_map_reference(src_map); | |||
4074 | cont_args->map = src_map; | |||
4075 | cont_args->src_addr = (vm_offset_t) 0; | |||
4076 | cont_args->src_len = (vm_size_t) 0; | |||
4077 | cont_args->destroy_addr = src_start; | |||
4078 | cont_args->destroy_len = src_end - src_start; | |||
4079 | cont_args->steal_pages = FALSE((boolean_t) 0); | |||
4080 | ||||
4081 | copy->cpy_cont_argsc_u.c_p.cont_args = (char *) cont_args; | |||
4082 | copy->cpy_contc_u.c_p.cont = vm_map_copyin_page_list_cont; | |||
4083 | } | |||
4084 | ||||
4085 | } | |||
4086 | ||||
4087 | vm_map_unlock(src_map)lock_done(&(src_map)->lock); | |||
4088 | ||||
4089 | *copy_result = copy; | |||
4090 | return(result); | |||
4091 | ||||
4092 | error: | |||
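| /* | |||
| * Drop the map lock and discard the partially built copy; for a | |||
| * page-list copy this is expected to release the busy pages and | |||
| * paging references accumulated so far. | |||
| */ | |||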
4093 | vm_map_unlock(src_map)lock_done(&(src_map)->lock); | |||
4094 | vm_map_copy_discard(copy); | |||
4095 | return(result); | |||
4096 | } | |||
4097 | ||||
4098 | /* | |||
4099 | * vm_map_fork: | |||
4100 | * | |||
4101 | * Create and return a new map based on the old | |||
4102 | * map, according to the inheritance values on the | |||
4103 | * regions in that map. | |||
4104 | * | |||
4105 | * The source map must not be locked. | |||
4106 | */ | |||
4107 | vm_map_t vm_map_fork(old_map) | |||
4108 | vm_map_t old_map; | |||
4109 | { | |||
4110 | vm_map_t new_map; | |||
4111 | register | |||
4112 | vm_map_entry_t old_entry; | |||
4113 | register | |||
4114 | vm_map_entry_t new_entry; | |||
4115 | pmap_t new_pmap = pmap_create((vm_size_t) 0); | |||
4116 | vm_size_t new_size = 0; | |||
4117 | vm_size_t entry_size; | |||
4118 | register | |||
4119 | vm_object_t object; | |||
4120 | ||||
4121 | vm_map_lock(old_map)({ lock_write(&(old_map)->lock); (old_map)->timestamp ++; }); | |||
4122 | ||||
4123 | new_map = vm_map_create(new_pmap, | |||
4124 | old_map->min_offsethdr.links.start, | |||
4125 | old_map->max_offsethdr.links.end, | |||
4126 | old_map->hdr.entries_pageable); | |||
4127 | ||||
4128 | for ( | |||
4129 | old_entry = vm_map_first_entry(old_map)((old_map)->hdr.links.next); | |||
4130 | old_entry != vm_map_to_entry(old_map)((struct vm_map_entry *) &(old_map)->hdr.links); | |||
4131 | ) { | |||
4132 | if (old_entry->is_sub_map) | |||
4133 | panic("vm_map_fork: encountered a submap"); | |||
4134 | ||||
4135 | entry_size = (old_entry->vme_endlinks.end - old_entry->vme_startlinks.start); | |||
4136 | ||||
4137 | switch (old_entry->inheritance) { | |||
4138 | case VM_INHERIT_NONE((vm_inherit_t) 2): | |||
4139 | break; | |||
4140 | ||||
4141 | case VM_INHERIT_SHARE((vm_inherit_t) 0): | |||
4142 | /* | |||
4143 | * New sharing code. New map entry | |||
4144 | * references original object. Temporary | |||
4145 | * objects use asynchronous copy algorithm for | |||
4146 | * future copies. First make sure we have | |||
4147 | * the right object. If we need a shadow, | |||
4148 | * or someone else already has one, then | |||
4149 | * make a new shadow and share it. | |||
4150 | */ | |||
4151 | ||||
4152 | object = old_entry->object.vm_object; | |||
4153 | if (object == VM_OBJECT_NULL((vm_object_t) 0)) { | |||
4154 | object = vm_object_allocate( | |||
4155 | (vm_size_t)(old_entry->vme_endlinks.end - | |||
4156 | old_entry->vme_startlinks.start)); | |||
4157 | old_entry->offset = 0; | |||
4158 | old_entry->object.vm_object = object; | |||
4159 | assert(!old_entry->needs_copy)({ if (!(!old_entry->needs_copy)) Assert("!old_entry->needs_copy" , "../vm/vm_map.c", 4159); }); | |||
4160 | } | |||
4161 | else if (old_entry->needs_copy || object->shadowed || | |||
4162 | (object->temporary && !old_entry->is_shared && | |||
4163 | object->size > (vm_size_t)(old_entry->vme_endlinks.end - | |||
4164 | old_entry->vme_startlinks.start))) { | |||
4165 | ||||
4166 | assert(object->temporary)({ if (!(object->temporary)) Assert("object->temporary" , "../vm/vm_map.c", 4166); }); | |||
4167 | assert(!(object->shadowed && old_entry->is_shared))({ if (!(!(object->shadowed && old_entry->is_shared ))) Assert("!(object->shadowed && old_entry->is_shared)" , "../vm/vm_map.c", 4167); }); | |||
4168 | vm_object_shadow( | |||
4169 | &old_entry->object.vm_object, | |||
4170 | &old_entry->offset, | |||
4171 | (vm_size_t) (old_entry->vme_endlinks.end - | |||
4172 | old_entry->vme_startlinks.start)); | |||
4173 | ||||
4174 | /* | |||
4175 | * If we're making a shadow for other than | |||
4176 | * copy on write reasons, then we have | |||
4177 | * to remove write permission. | |||
4178 | */ | |||
4179 | ||||
4180 | if (!old_entry->needs_copy && | |||
4181 | (old_entry->protection & VM_PROT_WRITE((vm_prot_t) 0x02))) { | |||
4182 | pmap_protect(vm_map_pmap(old_map)((old_map)->pmap), | |||
4183 | old_entry->vme_startlinks.start, | |||
4184 | old_entry->vme_endlinks.end, | |||
4185 | old_entry->protection & | |||
4186 | ~VM_PROT_WRITE((vm_prot_t) 0x02)); | |||
4187 | } | |||
4188 | old_entry->needs_copy = FALSE((boolean_t) 0); | |||
4189 | object = old_entry->object.vm_object; | |||
4190 | } | |||
4191 | ||||
4192 | /* | |||
4193 | * Set use_shared_copy to indicate that | |||
4194 | * object must use shared (delayed) copy-on | |||
4195 | * write. This is ignored for permanent objects. | |||
4196 | * Bump the reference count for the new entry | |||
4197 | */ | |||
4198 | ||||
4199 | vm_object_lock(object); | |||
4200 | object->use_shared_copy = TRUE((boolean_t) 1); | |||
4201 | object->ref_count++; | |||
4202 | vm_object_unlock(object); | |||
4203 | ||||
4204 | new_entry = vm_map_entry_create(new_map)_vm_map_entry_create(&(new_map)->hdr); | |||
4205 | ||||
4206 | if (old_entry->projected_on != 0) { | |||
4207 | /* | |||
4208 | * If entry is projected buffer, clone the | |||
4209 | * entry exactly. | |||
4210 | */ | |||
4211 | ||||
4212 | vm_map_entry_copy_full(new_entry, old_entry)(*(new_entry) = *(old_entry)); | |||
4213 | ||||
4214 | } else { | |||
4215 | /* | |||
4216 | * Clone the entry, using object ref from above. | |||
4217 | * Mark both entries as shared. | |||
4218 | */ | |||
4219 | ||||
4220 | vm_map_entry_copy(new_entry, old_entry)({ *(new_entry) = *(old_entry); (new_entry)->is_shared = ( (boolean_t) 0); (new_entry)->needs_wakeup = ((boolean_t) 0 ); (new_entry)->in_transition = ((boolean_t) 0); (new_entry )->wired_count = 0; (new_entry)->user_wired_count = 0; } ); | |||
4221 | old_entry->is_shared = TRUE((boolean_t) 1); | |||
4222 | new_entry->is_shared = TRUE((boolean_t) 1); | |||
4223 | } | |||
4224 | ||||
4225 | /* | |||
4226 | * Insert the entry into the new map -- we | |||
4227 | * know we're inserting at the end of the new | |||
4228 | * map. | |||
4229 | */ | |||
4230 | ||||
4231 | vm_map_entry_link(({ (&(new_map)->hdr)->nentries++; (new_entry)->links .prev = (((new_map)->hdr.links.prev)); (new_entry)->links .next = (((new_map)->hdr.links.prev))->links.next; (new_entry )->links.prev->links.next = (new_entry)->links.next-> links.prev = (new_entry); ({ struct rbtree_node *___cur, *___prev ; int ___diff, ___index; ___prev = ((void *) 0); ___index = - 1; ___cur = (&(&(new_map)->hdr)->tree)->root ; while (___cur != ((void *) 0)) { ___diff = vm_map_entry_cmp_insert (&(new_entry)->tree_node, ___cur); ({ if (!(___diff != 0)) Assert("___diff != 0", "../vm/vm_map.c", 4234); }); ___prev = ___cur; ___index = rbtree_d2i(___diff); ___cur = ___cur-> children[___index]; } rbtree_insert_rebalance(&(&(new_map )->hdr)->tree, ___prev, ___index, &(new_entry)-> tree_node); }); }) | |||
4232 | new_map,({ (&(new_map)->hdr)->nentries++; (new_entry)->links .prev = (((new_map)->hdr.links.prev)); (new_entry)->links .next = (((new_map)->hdr.links.prev))->links.next; (new_entry )->links.prev->links.next = (new_entry)->links.next-> links.prev = (new_entry); ({ struct rbtree_node *___cur, *___prev ; int ___diff, ___index; ___prev = ((void *) 0); ___index = - 1; ___cur = (&(&(new_map)->hdr)->tree)->root ; while (___cur != ((void *) 0)) { ___diff = vm_map_entry_cmp_insert (&(new_entry)->tree_node, ___cur); ({ if (!(___diff != 0)) Assert("___diff != 0", "../vm/vm_map.c", 4234); }); ___prev = ___cur; ___index = rbtree_d2i(___diff); ___cur = ___cur-> children[___index]; } rbtree_insert_rebalance(&(&(new_map )->hdr)->tree, ___prev, ___index, &(new_entry)-> tree_node); }); }) | |||
4233 | vm_map_last_entry(new_map),({ (&(new_map)->hdr)->nentries++; (new_entry)->links .prev = (((new_map)->hdr.links.prev)); (new_entry)->links .next = (((new_map)->hdr.links.prev))->links.next; (new_entry )->links.prev->links.next = (new_entry)->links.next-> links.prev = (new_entry); ({ struct rbtree_node *___cur, *___prev ; int ___diff, ___index; ___prev = ((void *) 0); ___index = - 1; ___cur = (&(&(new_map)->hdr)->tree)->root ; while (___cur != ((void *) 0)) { ___diff = vm_map_entry_cmp_insert (&(new_entry)->tree_node, ___cur); ({ if (!(___diff != 0)) Assert("___diff != 0", "../vm/vm_map.c", 4234); }); ___prev = ___cur; ___index = rbtree_d2i(___diff); ___cur = ___cur-> children[___index]; } rbtree_insert_rebalance(&(&(new_map )->hdr)->tree, ___prev, ___index, &(new_entry)-> tree_node); }); }) | |||
4234 | new_entry)({ (&(new_map)->hdr)->nentries++; (new_entry)->links .prev = (((new_map)->hdr.links.prev)); (new_entry)->links .next = (((new_map)->hdr.links.prev))->links.next; (new_entry )->links.prev->links.next = (new_entry)->links.next-> links.prev = (new_entry); ({ struct rbtree_node *___cur, *___prev ; int ___diff, ___index; ___prev = ((void *) 0); ___index = - 1; ___cur = (&(&(new_map)->hdr)->tree)->root ; while (___cur != ((void *) 0)) { ___diff = vm_map_entry_cmp_insert (&(new_entry)->tree_node, ___cur); ({ if (!(___diff != 0)) Assert("___diff != 0", "../vm/vm_map.c", 4234); }); ___prev = ___cur; ___index = rbtree_d2i(___diff); ___cur = ___cur-> children[___index]; } rbtree_insert_rebalance(&(&(new_map )->hdr)->tree, ___prev, ___index, &(new_entry)-> tree_node); }); }); | |||
4235 | ||||
4236 | /* | |||
4237 | * Update the physical map | |||
4238 | */ | |||
4239 | ||||
4240 | pmap_copy(new_map->pmap, old_map->pmap, | |||
4241 | new_entry->vme_start, | |||
4242 | entry_size, | |||
4243 | old_entry->vme_start); | |||
4244 | ||||
4245 | new_size += entry_size; | |||
4246 | break; | |||
4247 | ||||
4248 | case VM_INHERIT_COPY((vm_inherit_t) 1): | |||
4249 | if (old_entry->wired_count == 0) { | |||
4250 | boolean_t src_needs_copy; | |||
4251 | boolean_t new_entry_needs_copy; | |||
4252 | ||||
4253 | new_entry = vm_map_entry_create(new_map)_vm_map_entry_create(&(new_map)->hdr); | |||
4254 | vm_map_entry_copy(new_entry, old_entry)({ *(new_entry) = *(old_entry); (new_entry)->is_shared = ( (boolean_t) 0); (new_entry)->needs_wakeup = ((boolean_t) 0 ); (new_entry)->in_transition = ((boolean_t) 0); (new_entry )->wired_count = 0; (new_entry)->user_wired_count = 0; } ); | |||
4255 | ||||
4256 | if (vm_object_copy_temporary( | |||
4257 | &new_entry->object.vm_object, | |||
4258 | &new_entry->offset, | |||
4259 | &src_needs_copy, | |||
4260 | &new_entry_needs_copy)) { | |||
4261 | ||||
4262 | /* | |||
4263 | * Handle copy-on-write obligations | |||
4264 | */ | |||
4265 | ||||
4266 | if (src_needs_copy && !old_entry->needs_copy) { | |||
4267 | vm_object_pmap_protect( | |||
4268 | old_entry->object.vm_object, | |||
4269 | old_entry->offset, | |||
4270 | entry_size, | |||
4271 | (old_entry->is_shared ? | |||
4272 | PMAP_NULL((pmap_t) 0) : | |||
4273 | old_map->pmap), | |||
4274 | old_entry->vme_startlinks.start, | |||
4275 | old_entry->protection & | |||
4276 | ~VM_PROT_WRITE((vm_prot_t) 0x02)); | |||
4277 | ||||
4278 | old_entry->needs_copy = TRUE((boolean_t) 1); | |||
4279 | } | |||
4280 | ||||
4281 | new_entry->needs_copy = new_entry_needs_copy; | |||
4282 | ||||
4283 | /* | |||
4284 | * Insert the entry at the end | |||
4285 | * of the map. | |||
4286 | */ | |||
4287 | ||||
4288 | vm_map_entry_link(new_map,({ (&(new_map)->hdr)->nentries++; (new_entry)->links .prev = (((new_map)->hdr.links.prev)); (new_entry)->links .next = (((new_map)->hdr.links.prev))->links.next; (new_entry )->links.prev->links.next = (new_entry)->links.next-> links.prev = (new_entry); ({ struct rbtree_node *___cur, *___prev ; int ___diff, ___index; ___prev = ((void *) 0); ___index = - 1; ___cur = (&(&(new_map)->hdr)->tree)->root ; while (___cur != ((void *) 0)) { ___diff = vm_map_entry_cmp_insert (&(new_entry)->tree_node, ___cur); ({ if (!(___diff != 0)) Assert("___diff != 0", "../vm/vm_map.c", 4290); }); ___prev = ___cur; ___index = rbtree_d2i(___diff); ___cur = ___cur-> children[___index]; } rbtree_insert_rebalance(&(&(new_map )->hdr)->tree, ___prev, ___index, &(new_entry)-> tree_node); }); }) | |||
4289 | vm_map_last_entry(new_map),({ (&(new_map)->hdr)->nentries++; (new_entry)->links .prev = (((new_map)->hdr.links.prev)); (new_entry)->links .next = (((new_map)->hdr.links.prev))->links.next; (new_entry )->links.prev->links.next = (new_entry)->links.next-> links.prev = (new_entry); ({ struct rbtree_node *___cur, *___prev ; int ___diff, ___index; ___prev = ((void *) 0); ___index = - 1; ___cur = (&(&(new_map)->hdr)->tree)->root ; while (___cur != ((void *) 0)) { ___diff = vm_map_entry_cmp_insert (&(new_entry)->tree_node, ___cur); ({ if (!(___diff != 0)) Assert("___diff != 0", "../vm/vm_map.c", 4290); }); ___prev = ___cur; ___index = rbtree_d2i(___diff); ___cur = ___cur-> children[___index]; } rbtree_insert_rebalance(&(&(new_map )->hdr)->tree, ___prev, ___index, &(new_entry)-> tree_node); }); }) | |||
4290 | new_entry)({ (&(new_map)->hdr)->nentries++; (new_entry)->links .prev = (((new_map)->hdr.links.prev)); (new_entry)->links .next = (((new_map)->hdr.links.prev))->links.next; (new_entry )->links.prev->links.next = (new_entry)->links.next-> links.prev = (new_entry); ({ struct rbtree_node *___cur, *___prev ; int ___diff, ___index; ___prev = ((void *) 0); ___index = - 1; ___cur = (&(&(new_map)->hdr)->tree)->root ; while (___cur != ((void *) 0)) { ___diff = vm_map_entry_cmp_insert (&(new_entry)->tree_node, ___cur); ({ if (!(___diff != 0)) Assert("___diff != 0", "../vm/vm_map.c", 4290); }); ___prev = ___cur; ___index = rbtree_d2i(___diff); ___cur = ___cur-> children[___index]; } rbtree_insert_rebalance(&(&(new_map )->hdr)->tree, ___prev, ___index, &(new_entry)-> tree_node); }); }); | |||
4291 | ||||
4292 | ||||
4293 | new_size += entry_size; | |||
4294 | break; | |||
4295 | } | |||
4296 | ||||
4297 | vm_map_entry_dispose(new_map, new_entry)_vm_map_entry_dispose(&(new_map)->hdr, (new_entry)); | |||
4298 | } | |||
4299 | ||||
4300 | /* INNER BLOCK (copy cannot be optimized) */ { | |||
4301 | ||||
4302 | vm_offset_t start = old_entry->vme_startlinks.start; | |||
4303 | vm_map_copy_t copy; | |||
4304 | vm_map_entry_t last = vm_map_last_entry(new_map)((new_map)->hdr.links.prev); | |||
4305 | ||||
4306 | vm_map_unlock(old_map)lock_done(&(old_map)->lock); | |||
4307 | if (vm_map_copyin(old_map, | |||
4308 | start, | |||
4309 | entry_size, | |||
4310 | FALSE((boolean_t) 0), | |||
4311 | ©) | |||
4312 | != KERN_SUCCESS0) { | |||
4313 | vm_map_lock(old_map)({ lock_write(&(old_map)->lock); (old_map)->timestamp ++; }); | |||
4314 | if (!vm_map_lookup_entry(old_map, start, &last)) | |||
4315 | last = last->vme_nextlinks.next; | |||
4316 | old_entry = last; | |||
4317 | /* | |||
4318 | * For some error returns, we want to | |||
4319 | * skip to the next element. | |||
4320 | */ | |||
4321 | ||||
4322 | continue; | |||
4323 | } | |||
4324 | ||||
4325 | /* | |||
4326 | * Insert the copy into the new map | |||
4327 | */ | |||
4328 | ||||
4329 | vm_map_copy_insert(new_map, last, copy)({ struct rbtree_node *node, *tmp; for (node = rbtree_postwalk_deepest (&(copy)->c_u.hdr.tree), tmp = rbtree_postwalk_unlink( node); node != ((void *) 0); node = tmp, tmp = rbtree_postwalk_unlink (node)) ({ struct rbtree_node *___cur, *___prev; int ___diff, ___index; ___prev = ((void *) 0); ___index = -1; ___cur = (& (new_map)->hdr.tree)->root; while (___cur != ((void *) 0 )) { ___diff = vm_map_entry_cmp_insert(node, ___cur); ({ if ( !(___diff != 0)) Assert("___diff != 0", "../vm/vm_map.c", 4329 ); }); ___prev = ___cur; ___index = rbtree_d2i(___diff); ___cur = ___cur->children[___index]; } rbtree_insert_rebalance(& (new_map)->hdr.tree, ___prev, ___index, node); }); (((last )->links.next)->links.prev = ((copy)->c_u.hdr.links. prev)) ->links.next = ((last)->links.next); ((last)-> links.next = ((copy)->c_u.hdr.links.next)) ->links.prev = (last); (new_map)->hdr.nentries += (copy)->c_u.hdr.nentries ; kmem_cache_free(&vm_map_copy_cache, (vm_offset_t) copy) ; }); | |||
4330 | new_size += entry_size; | |||
4331 | ||||
4332 | /* | |||
4333 | * Pick up the traversal at the end of | |||
4334 | * the copied region. | |||
4335 | */ | |||
4336 | ||||
4337 | vm_map_lock(old_map)({ lock_write(&(old_map)->lock); (old_map)->timestamp ++; }); | |||
4338 | start += entry_size; | |||
4339 | if (!vm_map_lookup_entry(old_map, start, &last)) | |||
4340 | last = last->vme_nextlinks.next; | |||
4341 | else | |||
4342 | vm_map_clip_start(old_map, last, start)({ if ((start) > (last)->links.start) _vm_map_clip_start (&(old_map)->hdr,(last),(start)); }); | |||
4343 | old_entry = last; | |||
4344 | ||||
4345 | continue; | |||
4346 | /* INNER BLOCK (copy cannot be optimized) */ } | |||
4347 | } | |||
4348 | old_entry = old_entry->vme_nextlinks.next; | |||
4349 | } | |||
4350 | ||||
4351 | new_map->size = new_size; | |||
4352 | vm_map_unlock(old_map)lock_done(&(old_map)->lock); | |||
4353 | ||||
4354 | return(new_map); | |||
4355 | } | |||
4356 | ||||
4357 | /* | |||
4358 | * vm_map_lookup: | |||
4359 | * | |||
4360 | * Finds the VM object, offset, and | |||
4361 | * protection for a given virtual address in the | |||
4362 | * specified map, assuming a page fault of the | |||
4363 | * type specified. | |||
4364 | * | |||
4365 | * Returns the (object, offset, protection) for | |||
4366 | * this address, whether it is wired down, and whether | |||
4367 | * this map has the only reference to the data in question. | |||
4368 | * In order to later verify this lookup, a "version" | |||
4369 | * is returned. | |||
4370 | * | |||
4371 | * The map should not be locked; it will not be | |||
4372 | * locked on exit. In order to guarantee the | |||
4373 | * existence of the returned object, it is returned | |||
4374 | * locked. | |||
4375 | * | |||
4376 | * If a lookup is requested with "write protection" | |||
4377 | * specified, the map may be changed to perform virtual | |||
4378 | * copying operations, although the data referenced will | |||
4379 | * remain the same. | |||
4380 | */ | |||
4381 | kern_return_t vm_map_lookup(var_map, vaddr, fault_type, out_version, | |||
4382 | object, offset, out_prot, wired) | |||
4383 | vm_map_t *var_map; /* IN/OUT */ | |||
4384 | register vm_offset_t vaddr; | |||
4385 | register vm_prot_t fault_type; | |||
4386 | ||||
4387 | vm_map_version_t *out_version; /* OUT */ | |||
4388 | vm_object_t *object; /* OUT */ | |||
4389 | vm_offset_t *offset; /* OUT */ | |||
4390 | vm_prot_t *out_prot; /* OUT */ | |||
4391 | boolean_t *wired; /* OUT */ | |||
4392 | { | |||
4393 | register vm_map_entry_t entry; | |||
4394 | register vm_map_t map = *var_map; | |||
4395 | register vm_prot_t prot; | |||
4396 | ||||
4397 | RetryLookup: ; | |||
4398 | ||||
4399 | /* | |||
4400 | * Lookup the faulting address. | |||
4401 | */ | |||
4402 | ||||
4403 | vm_map_lock_read(map)lock_read(&(map)->lock); | |||
4404 | ||||
4405 | #define RETURN(why) \ | |||
4406 | { \ | |||
4407 | vm_map_unlock_read(map)lock_done(&(map)->lock); \ | |||
4408 | return(why); \ | |||
4409 | } | |||
4410 | ||||
4411 | /* | |||
4412 | * If the map has an interesting hint, try it before calling | |||
4413 | * the full-blown lookup routine. | |||
4414 | */ | |||
4415 | ||||
4416 | simple_lock(&map->hint_lock); | |||
4417 | entry = map->hint; | |||
4418 | simple_unlock(&map->hint_lock); | |||
4419 | ||||
4420 | if ((entry == vm_map_to_entry(map)((struct vm_map_entry *) &(map)->hdr.links)) || | |||
4421 | (vaddr < entry->vme_startlinks.start) || (vaddr >= entry->vme_endlinks.end)) { | |||
4422 | vm_map_entry_t tmp_entry; | |||
4423 | ||||
4424 | /* | |||
4425 | * Entry was either not a valid hint, or the vaddr | |||
4426 | * was not contained in the entry, so do a full lookup. | |||
4427 | */ | |||
4428 | if (!vm_map_lookup_entry(map, vaddr, &tmp_entry)) | |||
4429 | RETURN(KERN_INVALID_ADDRESS1); | |||
4430 | ||||
4431 | entry = tmp_entry; | |||
4432 | } | |||
4433 | ||||
4434 | /* | |||
4435 | * Handle submaps. | |||
4436 | */ | |||
4437 | ||||
4438 | if (entry->is_sub_map) { | |||
4439 | vm_map_t old_map = map; | |||
4440 | ||||
4441 | *var_map = map = entry->object.sub_map; | |||
4442 | vm_map_unlock_read(old_map)lock_done(&(old_map)->lock); | |||
4443 | goto RetryLookup; | |||
4444 | } | |||
4445 | ||||
4446 | /* | |||
4447 | * Check whether this task is allowed to have | |||
4448 | * this page. | |||
4449 | */ | |||
4450 | ||||
4451 | prot = entry->protection; | |||
4452 | ||||
4453 | if ((fault_type & (prot)) != fault_type) { | |||
4454 | if ((prot & VM_PROT_NOTIFY((vm_prot_t) 0x10)) && (fault_type & VM_PROT_WRITE((vm_prot_t) 0x02))) { | |||
4455 | RETURN(KERN_WRITE_PROTECTION_FAILURE24); | |||
4456 | } else { | |||
4457 | RETURN(KERN_PROTECTION_FAILURE2); | |||
4458 | } | |||
4459 | } | |||
4460 | ||||
4461 | /* | |||
4462 | * If this page is not pageable, we have to get | |||
4463 | * it for all possible accesses. | |||
4464 | */ | |||
4465 | ||||
4466 | if ((*wired = (entry->wired_count != 0))) | |||
4467 | prot = fault_type = entry->protection; | |||
4468 | ||||
4469 | /* | |||
4470 | * If the entry was copy-on-write, we either ... | |||
4471 | */ | |||
4472 | ||||
4473 | if (entry->needs_copy) { | |||
4474 | /* | |||
4475 | * If we want to write the page, we may as well | |||
4476 | * handle that now since we've got the map locked. | |||
4477 | * | |||
4478 | * If we don't need to write the page, we just | |||
4479 | * demote the permissions allowed. | |||
4480 | */ | |||
4481 | ||||
4482 | if (fault_type & VM_PROT_WRITE((vm_prot_t) 0x02)) { | |||
4483 | /* | |||
4484 | * Make a new object, and place it in the | |||
4485 | * object chain. Note that no new references | |||
4486 | * have appeared -- one just moved from the | |||
4487 | * map to the new object. | |||
4488 | */ | |||
4489 | ||||
4490 | if (vm_map_lock_read_to_write(map)(lock_read_to_write(&(map)->lock) || (((map)->timestamp ++), 0))) { | |||
4491 | goto RetryLookup; | |||
4492 | } | |||
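| /* | |||
| * (If the read-to-write upgrade fails, the read lock has been | |||
| * dropped, so the whole lookup is retried from the top; on | |||
| * success the map is now write-locked.) | |||
| */ | |||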
4493 | map->timestamp++; | |||
4494 | ||||
4495 | vm_object_shadow( | |||
4496 | &entry->object.vm_object, | |||
4497 | &entry->offset, | |||
4498 | (vm_size_t) (entry->vme_endlinks.end - entry->vme_startlinks.start)); | |||
4499 | ||||
4500 | entry->needs_copy = FALSE((boolean_t) 0); | |||
4501 | ||||
4502 | vm_map_lock_write_to_read(map)lock_write_to_read(&(map)->lock); | |||
4503 | } | |||
4504 | else { | |||
4505 | /* | |||
4506 | * We're attempting to read a copy-on-write | |||
4507 | * page -- don't allow writes. | |||
4508 | */ | |||
4509 | ||||
4510 | prot &= (~VM_PROT_WRITE((vm_prot_t) 0x02)); | |||
4511 | } | |||
4512 | } | |||
4513 | ||||
4514 | /* | |||
4515 | * Create an object if necessary. | |||
4516 | */ | |||
4517 | if (entry->object.vm_object == VM_OBJECT_NULL((vm_object_t) 0)) { | |||
4518 | ||||
4519 | if (vm_map_lock_read_to_write(map)(lock_read_to_write(&(map)->lock) || (((map)->timestamp ++), 0))) { | |||
4520 | goto RetryLookup; | |||
4521 | } | |||
4522 | ||||
4523 | entry->object.vm_object = vm_object_allocate( | |||
4524 | (vm_size_t)(entry->vme_endlinks.end - entry->vme_startlinks.start)); | |||
4525 | entry->offset = 0; | |||
4526 | vm_map_lock_write_to_read(map)lock_write_to_read(&(map)->lock); | |||
4527 | } | |||
4528 | ||||
4529 | /* | |||
4530 | * Return the object/offset from this entry. If the entry | |||
4531 | * was copy-on-write or empty, it has been fixed up. Also | |||
4532 | * return the protection. | |||
4533 | */ | |||
4534 | ||||
4535 | *offset = (vaddr - entry->vme_startlinks.start) + entry->offset; | |||
4536 | *object = entry->object.vm_object; | |||
4537 | *out_prot = prot; | |||
4538 | ||||
4539 | /* | |||
4540 | * Lock the object to prevent it from disappearing | |||
4541 | */ | |||
4542 | ||||
4543 | vm_object_lock(*object); | |||
4544 | ||||
4545 | /* | |||
4546 | * Save the version number and unlock the map. | |||
4547 | */ | |||
4548 | ||||
4549 | out_version->main_timestamp = map->timestamp; | |||
4550 | ||||
4551 | RETURN(KERN_SUCCESS0); | |||
4552 | ||||
4553 | #undef RETURN | |||
4554 | } | |||
4555 | ||||
4556 | /* | |||
4557 | * vm_map_verify: | |||
4558 | * | |||
4559 | * Verifies that the map in question has not changed | |||
4560 | * since the given version. If successful, the map | |||
4561 | * will not change until vm_map_verify_done() is called. | |||
4562 | */ | |||
4563 | boolean_t vm_map_verify(map, version) | |||
4564 | register | |||
4565 | vm_map_t map; | |||
4566 | register | |||
4567 | vm_map_version_t *version; /* REF */ | |||
4568 | { | |||
4569 | boolean_t result; | |||
4570 | ||||
4571 | vm_map_lock_read(map)lock_read(&(map)->lock); | |||
4572 | result = (map->timestamp == version->main_timestamp); | |||
4573 | ||||
4574 | if (!result) | |||
4575 | vm_map_unlock_read(map)lock_done(&(map)->lock); | |||
4576 | ||||
4577 | return(result); | |||
4578 | } | |||
4579 | ||||
4580 | /* | |||
4581 | * vm_map_verify_done: | |||
4582 | * | |||
4583 | * Releases locks acquired by a vm_map_verify. | |||
4584 | * | |||
4585 | * This is now a macro in vm/vm_map.h. It does a | |||
4586 | * vm_map_unlock_read on the map. | |||
4587 | */ | |||
4588 | ||||
4589 | /* | |||
4590 | * vm_region: | |||
4591 | * | |||
4592 | * User call to obtain information about a region in | |||
4593 | * a task's address map. | |||
4594 | */ | |||
4595 | ||||
4596 | kern_return_t vm_region(map, address, size, | |||
4597 | protection, max_protection, | |||
4598 | inheritance, is_shared, | |||
4599 | object_name, offset_in_object) | |||
4600 | vm_map_t map; | |||
4601 | vm_offset_t *address; /* IN/OUT */ | |||
4602 | vm_size_t *size; /* OUT */ | |||
4603 | vm_prot_t *protection; /* OUT */ | |||
4604 | vm_prot_t *max_protection; /* OUT */ | |||
4605 | vm_inherit_t *inheritance; /* OUT */ | |||
4606 | boolean_t *is_shared; /* OUT */ | |||
4607 | ipc_port_t *object_name; /* OUT */ | |||
4608 | vm_offset_t *offset_in_object; /* OUT */ | |||
4609 | { | |||
4610 | vm_map_entry_t tmp_entry; | |||
4611 | register | |||
4612 | vm_map_entry_t entry; | |||
4613 | register | |||
4614 | vm_offset_t tmp_offset; | |||
4615 | vm_offset_t start; | |||
4616 | ||||
4617 | if (map == VM_MAP_NULL((vm_map_t) 0)) | |||
4618 | return(KERN_INVALID_ARGUMENT4); | |||
4619 | ||||
4620 | start = *address; | |||
4621 | ||||
4622 | vm_map_lock_read(map)lock_read(&(map)->lock); | |||
4623 | if (!vm_map_lookup_entry(map, start, &tmp_entry)) { | |||
4624 | if ((entry = tmp_entry->vme_nextlinks.next) == vm_map_to_entry(map)((struct vm_map_entry *) &(map)->hdr.links)) { | |||
4625 | vm_map_unlock_read(map)lock_done(&(map)->lock); | |||
4626 | return(KERN_NO_SPACE3); | |||
4627 | } | |||
4628 | } else { | |||
4629 | entry = tmp_entry; | |||
4630 | } | |||
4631 | ||||
4632 | start = entry->vme_startlinks.start; | |||
4633 | *protection = entry->protection; | |||
4634 | *max_protection = entry->max_protection; | |||
4635 | *inheritance = entry->inheritance; | |||
4636 | *address = start; | |||
4637 | *size = (entry->vme_endlinks.end - start); | |||
4638 | ||||
4639 | tmp_offset = entry->offset; | |||
4640 | ||||
4641 | ||||
4642 | if (entry->is_sub_map) { | |||
4643 | *is_shared = FALSE((boolean_t) 0); | |||
4644 | *object_name = IP_NULL((ipc_port_t) ((ipc_object_t) 0)); | |||
4645 | *offset_in_object = tmp_offset; | |||
4646 | } else { | |||
4647 | *is_shared = entry->is_shared; | |||
4648 | *object_name = vm_object_name(entry->object.vm_object); | |||
4649 | *offset_in_object = tmp_offset; | |||
4650 | } | |||
4651 | ||||
4652 | vm_map_unlock_read(map)lock_done(&(map)->lock); | |||
4653 | ||||
4654 | return(KERN_SUCCESS0); | |||
4655 | } | |||
4656 | ||||
4657 | /* | |||
4658 | * Routine: vm_map_simplify | |||
4659 | * | |||
4660 | * Description: | |||
4661 | * Attempt to simplify the map representation in | |||
4662 | * the vicinity of the given starting address. | |||
4663 | * Note: | |||
4664 | * This routine is intended primarily to keep the | |||
4665 | * kernel maps more compact -- they generally don't | |||
4666 | * benefit from the "expand a map entry" technology | |||
4667 | * at allocation time because the adjacent entry | |||
4668 | * is often wired down. | |||
4669 | */ | |||
4670 | void vm_map_simplify(map, start) | |||
4671 | vm_map_t map; | |||
4672 | vm_offset_t start; | |||
4673 | { | |||
4674 | vm_map_entry_t this_entry; | |||
4675 | vm_map_entry_t prev_entry; | |||
4676 | ||||
4677 | vm_map_lock(map)({ lock_write(&(map)->lock); (map)->timestamp++; }); | |||
4678 | if ( | |||
4679 | (vm_map_lookup_entry(map, start, &this_entry)) && | |||
4680 | ((prev_entry = this_entry->vme_prevlinks.prev) != vm_map_to_entry(map)((struct vm_map_entry *) &(map)->hdr.links)) && | |||
4681 | ||||
4682 | (prev_entry->vme_endlinks.end == start) && | |||
4683 | ||||
4684 | (prev_entry->is_shared == FALSE((boolean_t) 0)) && | |||
4685 | (prev_entry->is_sub_map == FALSE((boolean_t) 0)) && | |||
4686 | ||||
4687 | (this_entry->is_shared == FALSE((boolean_t) 0)) && | |||
4688 | (this_entry->is_sub_map == FALSE((boolean_t) 0)) && | |||
4689 | ||||
4690 | (prev_entry->inheritance == this_entry->inheritance) && | |||
4691 | (prev_entry->protection == this_entry->protection) && | |||
4692 | (prev_entry->max_protection == this_entry->max_protection) && | |||
4693 | (prev_entry->wired_count == this_entry->wired_count) && | |||
4694 | (prev_entry->user_wired_count == this_entry->user_wired_count) && | |||
4695 | ||||
4696 | (prev_entry->needs_copy == this_entry->needs_copy) && | |||
4697 | ||||
4698 | (prev_entry->object.vm_object == this_entry->object.vm_object) && | |||
4699 | ((prev_entry->offset + (prev_entry->vme_endlinks.end - prev_entry->vme_startlinks.start)) | |||
4700 | == this_entry->offset) && | |||
4701 | (prev_entry->projected_on == 0) && | |||
4702 | (this_entry->projected_on == 0) | |||
4703 | ) { | |||
4704 | if (map->first_free == this_entry) | |||
4705 | map->first_free = prev_entry; | |||
4706 | ||||
4707 | SAVE_HINT(map, prev_entry); (map)->hint = (prev_entry); ;; | |||
4708 | vm_map_entry_unlink(map, this_entry)({ (&(map)->hdr)->nentries--; (this_entry)->links .next->links.prev = (this_entry)->links.prev; (this_entry )->links.prev->links.next = (this_entry)->links.next ; rbtree_remove(&(&(map)->hdr)->tree, &(this_entry )->tree_node); }); | |||
4709 | prev_entry->vme_endlinks.end = this_entry->vme_endlinks.end; | |||
4710 | vm_object_deallocate(this_entry->object.vm_object); | |||
4711 | vm_map_entry_dispose(map, this_entry)_vm_map_entry_dispose(&(map)->hdr, (this_entry)); | |||
4712 | } | |||
4713 | vm_map_unlock(map)lock_done(&(map)->lock); | |||
4714 | } | |||
4715 | ||||
4716 | ||||
4717 | /* | |||
4718 | * Routine: vm_map_machine_attribute | |||
4719 | * Purpose: | |||
4720 | * Provide machine-specific attributes to mappings, | |||
4721 | * such as cacheability etc. for machines that provide | |||
4722 | * them. NUMA architectures and machines with big/strange | |||
4723 | * caches will use this. | |||
4724 | * Note: | |||
4725 | * Responsibilities for locking and checking are handled here, | |||
4726 | * everything else in the pmap module. If any non-volatile | |||
4727 | * information must be kept, the pmap module should handle | |||
4728 | * it itself. [This assumes that attributes do not | |||
4729 | * need to be inherited, which seems ok to me] | |||
4730 | */ | |||
4731 | kern_return_t vm_map_machine_attribute(map, address, size, attribute, value) | |||
4732 | vm_map_t map; | |||
4733 | vm_offset_t address; | |||
4734 | vm_size_t size; | |||
4735 | vm_machine_attribute_t attribute; | |||
4736 | vm_machine_attribute_val_t* value; /* IN/OUT */ | |||
4737 | { | |||
4738 | kern_return_t ret; | |||
4739 | ||||
4740 | if (address < vm_map_min(map)((map)->hdr.links.start) || | |||
4741 | (address + size) > vm_map_max(map)((map)->hdr.links.end)) | |||
4742 | return KERN_INVALID_ARGUMENT4; | |||
4743 | ||||
4744 | vm_map_lock(map)({ lock_write(&(map)->lock); (map)->timestamp++; }); | |||
4745 | ||||
4746 | ret = pmap_attribute(map->pmap, address, size, attribute, value)(1); | |||
4747 | ||||
4748 | vm_map_unlock(map)lock_done(&(map)->lock); | |||
4749 | ||||
4750 | return ret; | |||
4751 | } | |||
4752 | ||||
4753 | ||||
4754 | #if MACH_KDB0 | |||
4755 | ||||
4756 | #define printf kdbprintf | |||
4757 | ||||
4758 | /* | |||
4759 | * vm_map_print: [ debug ] | |||
4760 | */ | |||
4761 | void vm_map_print(map) | |||
4762 | register vm_map_t map; | |||
4763 | { | |||
4764 | register vm_map_entry_t entry; | |||
4765 | ||||
4766 | iprintf("Task map 0x%X: pmap=0x%X,", | |||
4767 | (vm_offset_t) map, (vm_offset_t) (map->pmap)); | |||
4768 | printf("ref=%d,nentries=%d,", map->ref_count, map->hdr.nentries); | |||
4769 | printf("version=%d\n", map->timestamp); | |||
4770 | indent += 2; | |||
4771 | for (entry = vm_map_first_entry(map)((map)->hdr.links.next); | |||
4772 | entry != vm_map_to_entry(map)((struct vm_map_entry *) &(map)->hdr.links); | |||
4773 | entry = entry->vme_nextlinks.next) { | |||
4774 | static char *inheritance_name[3] = { "share", "copy", "none"}; | |||
4775 | ||||
4776 | iprintf("map entry 0x%X: ", (vm_offset_t) entry); | |||
4777 | printf("start=0x%X, end=0x%X, ", | |||
4778 | (vm_offset_t) entry->vme_startlinks.start, (vm_offset_t) entry->vme_endlinks.end); | |||
4779 | printf("prot=%X/%X/%s, ", | |||
4780 | entry->protection, | |||
4781 | entry->max_protection, | |||
4782 | inheritance_name[entry->inheritance]); | |||
4783 | if (entry->wired_count != 0) { | |||
4784 | printf("wired("); | |||
4785 | if (entry->user_wired_count != 0) | |||
4786 | printf("u"); | |||
4787 | if (entry->wired_count > | |||
4788 | ((entry->user_wired_count == 0) ? 0 : 1)) | |||
4789 | printf("k"); | |||
4790 | printf(") "); | |||
4791 | } | |||
4792 | if (entry->in_transition) { | |||
4793 | printf("in transition"); | |||
4794 | if (entry->needs_wakeup) | |||
4795 | printf("(wake request)"); | |||
4796 | printf(", "); | |||
4797 | } | |||
4798 | if (entry->is_sub_map) { | |||
4799 | printf("submap=0x%X, offset=0x%X\n", | |||
4800 | (vm_offset_t) entry->object.sub_map, | |||
4801 | (vm_offset_t) entry->offset); | |||
4802 | } else { | |||
4803 | printf("object=0x%X, offset=0x%X", | |||
4804 | (vm_offset_t) entry->object.vm_object, | |||
4805 | (vm_offset_t) entry->offset); | |||
4806 | if (entry->is_shared) | |||
4807 | printf(", shared"); | |||
4808 | if (entry->needs_copy) | |||
4809 | printf(", copy needed"); | |||
4810 | printf("\n"); | |||
4811 | ||||
4812 | if ((entry->vme_prevlinks.prev == vm_map_to_entry(map)((struct vm_map_entry *) &(map)->hdr.links)) || | |||
4813 | (entry->vme_prevlinks.prev->object.vm_object != entry->object.vm_object)) { | |||
4814 | indent += 2; | |||
4815 | vm_object_print(entry->object.vm_object); | |||
4816 | indent -= 2; | |||
4817 | } | |||
4818 | } | |||
4819 | } | |||
4820 | indent -= 2; | |||
4821 | } | |||
4822 | ||||
4823 | /* | |||
4824 | * Routine: vm_map_copy_print | |||
4825 | * Purpose: | |||
4826 | * Pretty-print a copy object for ddb. | |||
4827 | */ | |||
4828 | ||||
4829 | void vm_map_copy_print(copy) | |||
4830 | vm_map_copy_t copy; | |||
4831 | { | |||
4832 | int i, npages; | |||
4833 | ||||
4834 | printf("copy object 0x%x\n", copy); | |||
4835 | ||||
4836 | indent += 2; | |||
4837 | ||||
4838 | iprintf("type=%d", copy->type); | |||
4839 | switch (copy->type) { | |||
4840 | case VM_MAP_COPY_ENTRY_LIST1: | |||
4841 | printf("[entry_list]"); | |||
4842 | break; | |||
4843 | ||||
4844 | case VM_MAP_COPY_OBJECT2: | |||
4845 | printf("[object]"); | |||
4846 | break; | |||
4847 | ||||
4848 | case VM_MAP_COPY_PAGE_LIST3: | |||
4849 | printf("[page_list]"); | |||
4850 | break; | |||
4851 | ||||
4852 | default: | |||
4853 | printf("[bad type]"); | |||
4854 | break; | |||
4855 | } | |||
4856 | printf(", offset=0x%x", copy->offset); | |||
4857 | printf(", size=0x%x\n", copy->size); | |||
4858 | ||||
4859 | switch (copy->type) { | |||
4860 | case VM_MAP_COPY_ENTRY_LIST1: | |||
4861 | /* XXX add stuff here */ | |||
4862 | break; | |||
4863 | ||||
4864 | case VM_MAP_COPY_OBJECT2: | |||
4865 | iprintf("object=0x%x\n", copy->cpy_objectc_u.c_o.object); | |||
4866 | break; | |||
4867 | ||||
4868 | case VM_MAP_COPY_PAGE_LIST3: | |||
4869 | iprintf("npages=%d", copy->cpy_npagesc_u.c_p.npages); | |||
4870 | printf(", cont=%x", copy->cpy_contc_u.c_p.cont); | |||
4871 | printf(", cont_args=%x\n", copy->cpy_cont_argsc_u.c_p.cont_args); | |||
4872 | if (copy->cpy_npagesc_u.c_p.npages < 0) { | |||
4873 | npages = 0; | |||
4874 | } else if (copy->cpy_npagesc_u.c_p.npages > VM_MAP_COPY_PAGE_LIST_MAX64) { | |||
4875 | npages = VM_MAP_COPY_PAGE_LIST_MAX64; | |||
4876 | } else { | |||
4877 | npages = copy->cpy_npagesc_u.c_p.npages; | |||
4878 | } | |||
4879 | iprintf("copy->cpy_page_list[0..%d] = {", npages); | |||
4880 | for (i = 0; i < npages - 1; i++) { | |||
4881 | printf("0x%x, ", copy->cpy_page_listc_u.c_p.page_list[i]); | |||
4882 | } | |||
4883 | if (npages > 0) { | |||
4884 | printf("0x%x", copy->cpy_page_listc_u.c_p.page_list[npages - 1]); | |||
4885 | } | |||
4886 | printf("}\n"); | |||
4887 | break; | |||
4888 | } | |||
4889 | ||||
4890 | indent -=2; | |||
4891 | } | |||
4892 | #endif /* MACH_KDB */ |