Bug Summary

File: obj-scan-build/../vm/memory_object.c
Location: line 149, column 3
Description: Dereference of null pointer
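
Reading the path below: the entry point is memory_object_data_provided (line 322), which forwards its data argument to memory_object_data_supply as a vm_map_copy_t. In the pagein loop, once the current page-list copy is exhausted its continuation is invoked and data_copy is reassigned from new_copy; if that assignment yields VM_MAP_COPY_NULL while data_cnt is still positive, the next iteration evaluates assert(data_copy->cpy_npages > 0) through a null pointer at line 149. The fragment below is a simplified, self-contained reconstruction of that control flow for illustration only; the structure, the invoke_cont helper, and the constants are stand-ins, not the kernel's definitions.

    #include <assert.h>
    #include <stddef.h>

    /* Illustrative stand-ins: the real vm_map_copy_t carries a page list,
     * a page count (cpy_npages) and an optional continuation that hands
     * over the next batch of pages. */
    struct page_list_copy {
        int npages;                             /* pages left in this batch */
        struct page_list_copy *(*cont)(void);   /* next batch, may be NULL  */
    };

    /* Mirrors vm_map_copy_invoke_cont(): the continuation can succeed yet
     * hand back no further copy object. */
    static struct page_list_copy *invoke_cont(struct page_list_copy *copy)
    {
        return copy->cont ? copy->cont() : NULL;
    }

    /* Simplified shape of the pagein loop in memory_object_data_supply():
     * data_cnt counts bytes still expected while the copy chain is
     * consumed page by page in parallel. */
    static void supply_loop(struct page_list_copy *data_copy,
                            unsigned data_cnt, unsigned page_size)
    {
        for (; data_cnt > 0; data_cnt -= page_size) {
            /* Corresponds to line 149: if the previous iteration left
             * data_copy NULL, this is the flagged dereference. */
            assert(data_copy->npages > 0);

            /* ... steal one page from the copy and insert it ... */

            if (--data_copy->npages == 0)
                data_copy = invoke_cont(data_copy);  /* may become NULL */
        }
    }

    int main(void)
    {
        /* Consistent case: one two-page batch, two pages' worth of data. */
        struct page_list_copy batch = { 2, NULL };
        supply_loop(&batch, 2 * 4096, 4096);
        /* Had data_cnt claimed a third page, the loop would re-enter with
         * data_copy == NULL and crash on the assert line. */
        return 0;
    }

With the byte count and the copy chain consistent the loop exits cleanly; the analyzer's report corresponds to the case where the byte count outlives the copy chain.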

Annotated Source Code

1/*
2 * Mach Operating System
3 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University.
4 * Copyright (c) 1993,1994 The University of Utah and
5 * the Computer Systems Laboratory (CSL).
6 * All rights reserved.
7 *
8 * Permission to use, copy, modify and distribute this software and its
9 * documentation is hereby granted, provided that both the copyright
10 * notice and this permission notice appear in all copies of the
11 * software, derivative works or modified versions, and any portions
12 * thereof, and that both notices appear in supporting documentation.
13 *
14 * CARNEGIE MELLON, THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF
15 * THIS SOFTWARE IN ITS "AS IS" CONDITION, AND DISCLAIM ANY LIABILITY
16 * OF ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF
17 * THIS SOFTWARE.
18 *
19 * Carnegie Mellon requests users of this software to return to
20 *
21 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
22 * School of Computer Science
23 * Carnegie Mellon University
24 * Pittsburgh PA 15213-3890
25 *
26 * any improvements or extensions that they make and grant Carnegie Mellon
27 * the rights to redistribute these changes.
28 */
29/*
30 * File: vm/memory_object.c
31 * Author: Michael Wayne Young
32 *
33 * External memory management interface control functions.
34 */
35
36/*
37 * Interface dependencies:
38 */
39
40#include <mach/std_types.h> /* For pointer_t */
41#include <mach/mach_types.h>
42
43#include <mach/kern_return.h>
44#include <vm/vm_map.h>
45#include <vm/vm_object.h>
46#include <mach/memory_object.h>
47#include <mach/boolean.h>
48#include <mach/vm_prot.h>
49#include <mach/message.h>
50
51#include <vm/memory_object_user.user.h>
52#include <vm/memory_object_default.user.h>
53
54/*
55 * Implementation dependencies:
56 */
57#include <vm/memory_object.h>
58#include <vm/vm_page.h>
59#include <vm/vm_pageout.h>
60#include <vm/pmap.h> /* For copy_to_phys, pmap_clear_modify */
61#include <kern/debug.h> /* For panic() */
62#include <kern/thread.h> /* For current_thread() */
63#include <kern/host.h>
64#include <vm/vm_kern.h> /* For kernel_map, vm_move */
65#include <vm/vm_map.h> /* For vm_map_pageable */
66#include <ipc/ipc_port.h>
67
68#if MACH_PAGEMAP
69#include <vm/vm_external.h>
70#endif /* MACH_PAGEMAP */
71
72typedef int memory_object_lock_result_t; /* moved from below */
73
74
75ipc_port_t memory_manager_default = IP_NULL;
76decl_simple_lock_data(,memory_manager_default_lock)
77
78/*
79 * Important note:
80 * All of these routines gain a reference to the
81 * object (first argument) as part of the automatic
82 * argument conversion. Explicit deallocation is necessary.
83 */
84
85kern_return_t memory_object_data_supply(object, offset, data_copy, data_cnt,
86 lock_value, precious, reply_to, reply_to_type)
87 register
88 vm_object_t object;
89 register
90 vm_offset_t offset;
91 vm_map_copy_t data_copy;
92 unsigned int data_cnt;
93 vm_prot_t lock_value;
94 boolean_t precious;
95 ipc_port_t reply_to;
96 mach_msg_type_name_t reply_to_type;
97{
98 kern_return_t result = KERN_SUCCESS;
99 vm_offset_t error_offset = 0;
100 register
101 vm_page_t m;
102 register
103 vm_page_t data_m;
104 vm_size_t original_length;
105 vm_offset_t original_offset;
106 vm_page_t *page_list;
107 boolean_t was_absent;
108 vm_map_copy_t orig_copy = data_copy;
109
110 /*
111 * Look for bogus arguments
112 */
113
114 if (object == VM_OBJECT_NULL) {
2
Assuming 'object' is not equal to null
3
Taking false branch
115 return(KERN_INVALID_ARGUMENT);
116 }
117
118 if (lock_value & ~VM_PROT_ALL) {
4
Taking false branch
119 vm_object_deallocate(object);
120 return(KERN_INVALID_ARGUMENT);
121 }
122
123 if ((data_cnt % PAGE_SIZE) != 0) {
5
Taking false branch
124 vm_object_deallocate(object);
125 return(KERN_INVALID_ARGUMENT);
126 }
127
128 /*
129 * Adjust the offset from the memory object to the offset
130 * within the vm_object.
131 */
132
133 original_length = data_cnt;
134 original_offset = offset;
135
136 assert(data_copy->type == VM_MAP_COPY_PAGE_LIST);
137 page_list = &data_copy->cpy_page_list[0];
138
139 vm_object_lock(object);
140 vm_object_paging_begin(object);
141 offset -= object->paging_offset;
142
143 /*
144 * Loop over copy stealing pages for pagein.
145 */
146
147 for (; data_cnt > 0 ; data_cnt -= PAGE_SIZE, offset += PAGE_SIZE) {
6
Assuming 'data_cnt' is > 0
7
Loop condition is true. Entering loop body
22
Assuming 'data_cnt' is > 0
23
Loop condition is true. Entering loop body
148
149 assert(data_copy->cpy_npages > 0);
24
Within the expansion of the macro 'assert':
a
Dereference of null pointer
150 data_m = *page_list;
151
152 if (data_m == VM_PAGE_NULL || data_m->tabled ||
8
Assuming 'data_m' is not equal to null
9
Taking false branch
153 data_m->error || data_m->absent || data_m->fictitious) {
154
155 panic("Data_supply: bad page");
156 }
157
158 /*
159 * Look up target page and check its state.
160 */
161
162retry_lookup:
163 m = vm_page_lookup(object,offset);
164 if (m == VM_PAGE_NULL) {
10
Assuming 'm' is not equal to null
11
Taking false branch
14
Assuming 'm' is equal to null
15
Taking true branch
165 was_absent = FALSE;
166 }
167 else {
168 if (m->absent && m->busy) {
169
170 /*
171 * Page was requested. Free the busy
172 * page waiting for it. Insertion
173 * of new page happens below.
174 */
175
176 VM_PAGE_FREE(m);
177 was_absent = TRUE;
178 }
179 else {
180
181 /*
182 * Have to wait for page that is busy and
183 * not absent. This is probably going to
184 * be an error, but go back and check.
185 */
186 if (m->busy) {
12
Taking true branch
187 PAGE_ASSERT_WAIT(m, FALSE);
188 vm_object_unlock(object);
189 thread_block((void (*)()) 0);
190 vm_object_lock(object);
191 goto retry_lookup;
13
Control jumps to line 163
192 }
193
194 /*
195 * Page already present; error.
196 * This is an error if data is precious.
197 */
198 result = KERN_MEMORY_PRESENT;
199 error_offset = offset + object->paging_offset;
200
201 break;
202 }
203 }
204
205 /*
206 * Ok to pagein page. Target object now has no page
207 * at offset. Set the page parameters, then drop
208 * in new page and set up pageout state. Object is
209 * still locked here.
210 *
211 * Must clear busy bit in page before inserting it.
212 * Ok to skip wakeup logic because nobody else
213 * can possibly know about this page.
214 */
215
216 data_m->busy = FALSE;
217 data_m->dirty = FALSE;
218 pmap_clear_modify(data_m->phys_addr);
219
220 data_m->page_lock = lock_value;
221 data_m->unlock_request = VM_PROT_NONE;
222 data_m->precious = precious;
223
224 vm_page_lock_queues();
225 vm_page_insert(data_m, object, offset);
226
227 if (was_absent)
16
Taking false branch
228 vm_page_activate(data_m);
229 else
230 vm_page_deactivate(data_m);
231
232 vm_page_unlock_queues();
233
234 /*
235 * Null out this page list entry, and advance to next
236 * page.
237 */
238
239 *page_list++ = VM_PAGE_NULL;
240
241 if (--(data_copy->cpy_npages) == 0 &&
17
Taking true branch
242 vm_map_copy_has_cont(data_copy)) {
243 vm_map_copy_t new_copy;
244
245 vm_object_unlock(object);
246
247 vm_map_copy_invoke_cont(data_copy, &new_copy, &result);
248
249 if (result == KERN_SUCCESS) {
18
Assuming 'result' is equal to 0
19
Taking true branch
250
251 /*
252 * Consume on success requires that
253 * we keep the original vm_map_copy
254 * around in case something fails.
255 * Free the old copy if it's not the original
256 */
257 if (data_copy != orig_copy) {
20
Taking false branch
258 vm_map_copy_discard(data_copy);
259 }
260
261 if ((data_copy = new_copy) != VM_MAP_COPY_NULL)
21
Taking false branch
262 page_list = &data_copy->cpy_page_list[0];
263
264 vm_object_lock(object);
265 }
266 else {
267 vm_object_lock(object);
268 error_offset = offset + object->paging_offset +
269 PAGE_SIZE;
270 break;
271 }
272 }
273 }
274
275 /*
276 * Send reply if one was requested.
277 */
278 vm_object_paging_end(object);
279 vm_object_unlock(object);
280
281 if (vm_map_copy_has_cont(data_copy))
282 vm_map_copy_abort_cont(data_copy);
283
284 if (IP_VALID(reply_to)) {
285 memory_object_supply_completed(
286 reply_to, reply_to_type,
287 object->pager_request,
288 original_offset,
289 original_length,
290 result,
291 error_offset);
292 }
293
294 vm_object_deallocate(object);
295
296 /*
297 * Consume on success: The final data copy must be
298 * be discarded if it is not the original. The original
299 * gets discarded only if this routine succeeds.
300 */
301 if (data_copy != orig_copy)
302 vm_map_copy_discard(data_copy);
303 if (result == KERN_SUCCESS)
304 vm_map_copy_discard(orig_copy);
305
306
307 return(result);
308}
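
If the path above is considered reachable in practice (a continuation that reports KERN_SUCCESS yet returns VM_MAP_COPY_NULL while data_cnt is still positive), one defensive option is to stop the loop instead of re-entering it with a null data_copy. The fragment below is only a sketch of that idea against the code shown above, not an actual or proposed change to the file; note that a null data_copy would also reach vm_map_copy_has_cont() after the loop (line 281), so a real fix would have to cover that exit path as well.

    if (result == KERN_SUCCESS) {
        if (data_copy != orig_copy)
            vm_map_copy_discard(data_copy);

        if ((data_copy = new_copy) != VM_MAP_COPY_NULL) {
            page_list = &data_copy->cpy_page_list[0];
        } else if (data_cnt > PAGE_SIZE) {
            /* Sketch of a guard: the continuation ran dry before
             * data_cnt was satisfied; report the failure rather than
             * dereference a null data_copy on the next pass. */
            vm_object_lock(object);
            result = KERN_FAILURE;
            error_offset = offset + object->paging_offset + PAGE_SIZE;
            break;
        }

        vm_object_lock(object);
    }
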
309
310
311/*
312 * If successful, destroys the map copy object.
313 */
314kern_return_t memory_object_data_provided(object, offset, data, data_cnt,
315 lock_value)
316 vm_object_t object;
317 vm_offset_t offset;
318 pointer_t data;
319 unsigned int data_cnt;
320 vm_prot_t lock_value;
321{
322 return memory_object_data_supply(object, offset, (vm_map_copy_t) data,
1
Calling 'memory_object_data_supply'
323 data_cnt, lock_value, FALSE, IP_NULL,
324 0);
325}
326
327
328kern_return_t memory_object_data_error(object, offset, size, error_value)
329 vm_object_t object;
330 vm_offset_t offset;
331 vm_size_t size;
332 kern_return_t error_value;
333{
334 if (object == VM_OBJECT_NULL)
335 return(KERN_INVALID_ARGUMENT);
336
337 if (size != round_page(size))
338 return(KERN_INVALID_ARGUMENT);
339
340 vm_object_lock(object);
341 offset -= object->paging_offset;
342
343 while (size != 0) {
344 register vm_page_t m;
345
346 m = vm_page_lookup(object, offset);
347 if ((m != VM_PAGE_NULL) && m->busy && m->absent) {
348 m->error = TRUE;
349 m->absent = FALSE;
350 vm_object_absent_release(object);
351
352 PAGE_WAKEUP_DONE(m);
353
354 vm_page_lock_queues();
355 vm_page_activate(m);
356 vm_page_unlock_queues();
357 }
358
359 size -= PAGE_SIZE;
360 offset += PAGE_SIZE;
361 }
362 vm_object_unlock(object);
363
364 vm_object_deallocate(object);
365 return(KERN_SUCCESS);
366}
367
368kern_return_t memory_object_data_unavailable(object, offset, size)
369 vm_object_t object;
370 vm_offset_t offset;
371 vm_size_t size;
372{
373#if MACH_PAGEMAP
374 vm_external_t existence_info = VM_EXTERNAL_NULL;
375#endif /* MACH_PAGEMAP */
376
377 if (object == VM_OBJECT_NULL)
378 return(KERN_INVALID_ARGUMENT);
379
380 if (size != round_page(size))
381 return(KERN_INVALID_ARGUMENT);
382
383#if MACH_PAGEMAP
384 if ((offset == 0) && (size > VM_EXTERNAL_LARGE_SIZE) &&
385 (object->existence_info == VM_EXTERNAL_NULL)) {
386 existence_info = vm_external_create(VM_EXTERNAL_SMALL_SIZE);
387 }
388#endif /* MACH_PAGEMAP */
389
390 vm_object_lock(object);
391#if MACH_PAGEMAP
392 if (existence_info != VM_EXTERNAL_NULL) {
393 object->existence_info = existence_info;
394 }
395 if ((offset == 0) && (size > VM_EXTERNAL_LARGE_SIZE)) {
396 vm_object_unlock(object);
397 vm_object_deallocate(object);
398 return(KERN_SUCCESS);
399 }
400#endif /* MACH_PAGEMAP */
401 offset -= object->paging_offset;
402
403 while (size != 0) {
404 register vm_page_t m;
405
406 /*
407 * We're looking for pages that are both busy and
408 * absent (waiting to be filled), converting them
409 * to just absent.
410 *
411 * Pages that are just busy can be ignored entirely.
412 */
413
414 m = vm_page_lookup(object, offset);
415 if ((m != VM_PAGE_NULL) && m->busy && m->absent) {
416 PAGE_WAKEUP_DONE(m);
417
418 vm_page_lock_queues();
419 vm_page_activate(m);
420 vm_page_unlock_queues();
421 }
422 size -= PAGE_SIZE;
423 offset += PAGE_SIZE;
424 }
425
426 vm_object_unlock(object);
427
428 vm_object_deallocate(object);
429 return(KERN_SUCCESS);
430}
431
432/*
433 * Routine: memory_object_lock_page
434 *
435 * Description:
436 * Perform the appropriate lock operations on the
437 * given page. See the description of
438 * "memory_object_lock_request" for the meanings
439 * of the arguments.
440 *
441 * Returns an indication that the operation
442 * completed, blocked, or that the page must
443 * be cleaned.
444 */
445
446#define MEMORY_OBJECT_LOCK_RESULT_DONE 0
447#define MEMORY_OBJECT_LOCK_RESULT_MUST_BLOCK 1
448#define MEMORY_OBJECT_LOCK_RESULT_MUST_CLEAN 2
449#define MEMORY_OBJECT_LOCK_RESULT_MUST_RETURN 3
450
451memory_object_lock_result_t memory_object_lock_page(m, should_return,
452 should_flush, prot)
453 vm_page_t m;
454 memory_object_return_t should_return;
455 boolean_t should_flush;
456 vm_prot_t prot;
457{
458 /*
459 * Don't worry about pages for which the kernel
460 * does not have any data.
461 */
462
463 if (m->absent)
464 return(MEMORY_OBJECT_LOCK_RESULT_DONE);
465
466 /*
467 * If we cannot change access to the page,
468 * either because a mapping is in progress
469 * (busy page) or because a mapping has been
470 * wired, then give up.
471 */
472
473 if (m->busy)
474 return(MEMORY_OBJECT_LOCK_RESULT_MUST_BLOCK);
475
476 assert(!m->fictitious);
477
478 if (m->wire_count != 0) {
479 /*
480 * If no change would take place
481 * anyway, return successfully.
482 *
483 * No change means:
484 * Not flushing AND
485 * No change to page lock [2 checks] AND
486 * Don't need to send page to manager
487 *
488 * Don't need to send page to manager means:
489 * No clean or return request OR (
490 * Page is not dirty [2 checks] AND (
491 * Page is not precious OR
492 * No request to return precious pages ))
493 *
494 * Now isn't that straightforward and obvious ?? ;-)
495 *
496 * XXX This doesn't handle sending a copy of a wired
497 * XXX page to the pager, but that will require some
498 * XXX significant surgery.
499 */
500
501 if (!should_flush &&
502 ((m->page_lock == prot) || (prot == VM_PROT_NO_CHANGE)) &&
503 ((should_return == MEMORY_OBJECT_RETURN_NONE) ||
504 (!m->dirty && !pmap_is_modified(m->phys_addr) &&
505 (!m->precious ||
506 should_return != MEMORY_OBJECT_RETURN_ALL)))) {
507 /*
508 * Restart page unlock requests,
509 * even though no change took place.
510 * [Memory managers may be expecting
511 * to see new requests.]
512 */
513 m->unlock_request = VM_PROT_NONE;
514 PAGE_WAKEUP(m);
515
516 return(MEMORY_OBJECT_LOCK_RESULT_DONE);
517 }
518
519 return(MEMORY_OBJECT_LOCK_RESULT_MUST_BLOCK);
520 }
521
522 /*
523 * If the page is to be flushed, allow
524 * that to be done as part of the protection.
525 */
526
527 if (should_flush)
528 prot = VM_PROT_ALL;
529
530 /*
531 * Set the page lock.
532 *
533 * If we are decreasing permission, do it now;
534 * let the fault handler take care of increases
535 * (pmap_page_protect may not increase protection).
536 */
537
538 if (prot != VM_PROT_NO_CHANGE) {
539 if ((m->page_lock ^ prot) & prot) {
540 pmap_page_protect(m->phys_addr, VM_PROT_ALL & ~prot);
541 }
542 m->page_lock = prot;
543
544 /*
545 * Restart any past unlock requests, even if no
546 * change resulted. If the manager explicitly
547 * requested no protection change, then it is assumed
548 * to be remembering past requests.
549 */
550
551 m->unlock_request = VM_PROT_NONE;
552 PAGE_WAKEUP(m);
553 }
554
555 /*
556 * Handle cleaning.
557 */
558
559 if (should_return != MEMORY_OBJECT_RETURN_NONE) {
560 /*
561 * Check whether the page is dirty. If
562 * write permission has not been removed,
563 * this may have unpredictable results.
564 */
565
566 if (!m->dirty)
567 m->dirty = pmap_is_modified(m->phys_addr);
568
569 if (m->dirty || (m->precious &&
570 should_return == MEMORY_OBJECT_RETURN_ALL)) {
571 /*
572 * If we weren't planning
573 * to flush the page anyway,
574 * we may need to remove the
575 * page from the pageout
576 * system and from physical
577 * maps now.
578 */
579
580 vm_page_lock_queues();
581 VM_PAGE_QUEUES_REMOVE(m);
582 vm_page_unlock_queues();
583
584 if (!should_flush)
585 pmap_page_protect(m->phys_addr,
586 VM_PROT_NONE);
587
588 /*
589 * Cleaning a page will cause
590 * it to be flushed.
591 */
592
593 if (m->dirty)
594 return(MEMORY_OBJECT_LOCK_RESULT_MUST_CLEAN);
595 else
596 return(MEMORY_OBJECT_LOCK_RESULT_MUST_RETURN);
597 }
598 }
599
600 /*
601 * Handle flushing
602 */
603
604 if (should_flush) {
605 VM_PAGE_FREE(m);
606 } else {
607 extern boolean_t vm_page_deactivate_hint;
608
609 /*
610 * XXX Make clean but not flush a paging hint,
611 * and deactivate the pages. This is a hack
612 * because it overloads flush/clean with
613 * implementation-dependent meaning. This only
614 * happens to pages that are already clean.
615 */
616
617 if (vm_page_deactivate_hint &&
618 (should_return != MEMORY_OBJECT_RETURN_NONE)) {
619 vm_page_lock_queues();
620 vm_page_deactivate(m);
621 vm_page_unlock_queues();
622 }
623 }
624
625 return(MEMORY_OBJECT_LOCK_RESULT_DONE);
626}
627
628/*
629 * Routine: memory_object_lock_request [user interface]
630 *
631 * Description:
632 * Control use of the data associated with the given
633 * memory object. For each page in the given range,
634 * perform the following operations, in order:
635 * 1) restrict access to the page (disallow
636 * forms specified by "prot");
637 * 2) return data to the manager (if "should_return"
638 * is RETURN_DIRTY and the page is dirty, or
639 * "should_return" is RETURN_ALL and the page
640 * is either dirty or precious); and,
641 * 3) flush the cached copy (if "should_flush"
642 * is asserted).
643 * The set of pages is defined by a starting offset
644 * ("offset") and size ("size"). Only pages with the
645 * same page alignment as the starting offset are
646 * considered.
647 *
648 * A single acknowledgement is sent (to the "reply_to"
649 * port) when these actions are complete. If successful,
650 * the naked send right for reply_to is consumed.
651 */
652
653kern_return_t
654memory_object_lock_request(object, offset, size,
655 should_return, should_flush, prot,
656 reply_to, reply_to_type)
657 register vm_object_t object;
658 register vm_offset_t offset;
659 register vm_size_t size;
660 memory_object_return_t should_return;
661 boolean_t should_flush;
662 vm_prot_t prot;
663 ipc_port_t reply_to;
664 mach_msg_type_name_t reply_to_type;
665{
666 register vm_page_t m;
667 vm_offset_t original_offset = offset;
668 vm_size_t original_size = size;
669 vm_offset_t paging_offset = 0;
670 vm_object_t new_object = VM_OBJECT_NULL;
671 vm_offset_t new_offset = 0;
672 vm_offset_t last_offset = offset;
673 int page_lock_result;
674 int pageout_action = 0; /* '=0' to quiet lint */
675
676#define DATA_WRITE_MAX 32
677 vm_page_t holding_pages[DATA_WRITE_MAX];
678
679 /*
680 * Check for bogus arguments.
681 */
682 if (object == VM_OBJECT_NULL ||
683 ((prot & ~VM_PROT_ALL) != 0 && prot != VM_PROT_NO_CHANGE))
684 return (KERN_INVALID_ARGUMENT);
685
686 size = round_page(size);
687
688 /*
689 * Lock the object, and acquire a paging reference to
690 * prevent the memory_object and control ports from
691 * being destroyed.
692 */
693
694 vm_object_lock(object);
695 vm_object_paging_begin(object);
696 offset -= object->paging_offset;
697
698 /*
699 * To avoid blocking while scanning for pages, save
700 * dirty pages to be cleaned all at once.
701 *
702 * XXXO A similar strategy could be used to limit the
703 * number of times that a scan must be restarted for
704 * other reasons. Those pages that would require blocking
705 * could be temporarily collected in another list, or
706 * their offsets could be recorded in a small array.
707 */
708
709 /*
710 * XXX NOTE: May want to consider converting this to a page list
711 * XXX vm_map_copy interface. Need to understand object
712 * XXX coalescing implications before doing so.
713 */
714
715#define PAGEOUT_PAGES \
716MACRO_BEGIN \
717 vm_map_copy_t copy; \
718 register int i; \
719 register vm_page_t hp; \
720 \
721 vm_object_unlock(object); \
722 \
723 (void) vm_map_copyin_object(new_object, 0, new_offset, &copy); \
724 \
725 if (object->use_old_pageout) { \
726 assert(pageout_action == MEMORY_OBJECT_LOCK_RESULT_MUST_CLEAN); \
727 (void) memory_object_data_write( \
728 object->pager, \
729 object->pager_request, \
730 paging_offset, \
731 (pointer_t) copy, \
732 new_offset); \
733 } \
734 else { \
735 (void) memory_object_data_return( \
736 object->pager, \
737 object->pager_request, \
738 paging_offset, \
739 (pointer_t) copy, \
740 new_offset, \
741 (pageout_action == MEMORY_OBJECT_LOCK_RESULT_MUST_CLEAN), \
742 !should_flush); \
743 } \
744 \
745 vm_object_lock(object); \
746 \
747 for (i = 0; i < atop(new_offset); i++) { \
748 hp = holding_pages[i]; \
749 if (hp != VM_PAGE_NULL) \
750 VM_PAGE_FREE(hp); \
751 } \
752 \
753 new_object = VM_OBJECT_NULL; \
754MACRO_END
755
756 for (;
757 size != 0;
758 size -= PAGE_SIZE(1 << 12), offset += PAGE_SIZE(1 << 12))
759 {
760 /*
761 * Limit the number of pages to be cleaned at once.
762 */
763 if (new_object != VM_OBJECT_NULL &&
764 new_offset >= PAGE_SIZE * DATA_WRITE_MAX)
765 {
766 PAGEOUT_PAGES;
767 }
768
769 while ((m = vm_page_lookup(object, offset)) != VM_PAGE_NULL) {
770 switch ((page_lock_result = memory_object_lock_page(m,
771 should_return,
772 should_flush,
773 prot)))
774 {
775 case MEMORY_OBJECT_LOCK_RESULT_DONE:
776 /*
777 * End of a cluster of dirty pages.
778 */
779 if (new_object != VM_OBJECT_NULL) {
780 PAGEOUT_PAGES;
781 continue;
782 }
783 break;
784
785 case MEMORY_OBJECT_LOCK_RESULT_MUST_BLOCK:
786 /*
787 * Since it is necessary to block,
788 * clean any dirty pages now.
789 */
790 if (new_object != VM_OBJECT_NULL) {
791 PAGEOUT_PAGES;
792 continue;
793 }
794
795 PAGE_ASSERT_WAIT(m, FALSE);
796 vm_object_unlock(object);
797 thread_block((void (*)()) 0);
798 vm_object_lock(object);
799 continue;
800
801 case MEMORY_OBJECT_LOCK_RESULT_MUST_CLEAN:
802 case MEMORY_OBJECT_LOCK_RESULT_MUST_RETURN:
803 /*
804 * The clean and return cases are similar.
805 *
806 * Mark the page busy since we unlock the
807 * object below.
808 */
809 m->busy = TRUE;
810
811 /*
812 * if this would form a discontiguous block,
813 * clean the old pages and start anew.
814 *
815 * NOTE: The first time through here, new_object
816 * is null, hiding the fact that pageout_action
817 * is not initialized.
818 */
819 if (new_object != VM_OBJECT_NULL &&
820 (last_offset != offset ||
821 pageout_action != page_lock_result)) {
822 PAGEOUT_PAGES;
823 }
824
825 vm_object_unlock(object);
826
827 /*
828 * If we have not already allocated an object
829 * for a range of pages to be written, do so
830 * now.
831 */
832 if (new_object == VM_OBJECT_NULL) {
833 new_object = vm_object_allocate(original_size);
834 new_offset = 0;
835 paging_offset = m->offset +
836 object->paging_offset;
837 pageout_action = page_lock_result;
838 }
839
840 /*
841 * Move or copy the dirty page into the
842 * new object.
843 */
844 m = vm_pageout_setup(m,
845 m->offset + object->paging_offset,
846 new_object,
847 new_offset,
848 should_flush);
849
850 /*
851 * Save the holding page if there is one.
852 */
853 holding_pages[atop(new_offset)] = m;
854 new_offset += PAGE_SIZE;
855 last_offset = offset + PAGE_SIZE;
856
857 vm_object_lock(object);
858 break;
859 }
860 break;
861 }
862 }
863
864 /*
865 * We have completed the scan for applicable pages.
866 * Clean any pages that have been saved.
867 */
868 if (new_object != VM_OBJECT_NULL) {
869 PAGEOUT_PAGES;
870 }
871
872 if (IP_VALID(reply_to)) {
873 vm_object_unlock(object);
874
875 /* consumes our naked send-once/send right for reply_to */
876 (void) memory_object_lock_completed(reply_to, reply_to_type,
877 object->pager_request, original_offset, original_size);
878
879 vm_object_lock(object);
880 }
881
882 vm_object_paging_end(object);
883 vm_object_unlock(object);
884 vm_object_deallocate(object);
885
886 return (KERN_SUCCESS);
887}
888
889kern_return_t
890memory_object_set_attributes_common(object, object_ready, may_cache,
891 copy_strategy, use_old_pageout)
892 vm_object_t object;
893 boolean_t object_ready;
894 boolean_t may_cache;
895 memory_object_copy_strategy_t copy_strategy;
896 boolean_t use_old_pageout;
897{
898 if (object == VM_OBJECT_NULL)
899 return(KERN_INVALID_ARGUMENT);
900
901 /*
902 * Verify the attributes of importance
903 */
904
905 switch(copy_strategy) {
906 case MEMORY_OBJECT_COPY_NONE:
907 case MEMORY_OBJECT_COPY_CALL:
908 case MEMORY_OBJECT_COPY_DELAY:
909 case MEMORY_OBJECT_COPY_TEMPORARY:
910 break;
911 default:
912 vm_object_deallocate(object);
913 return(KERN_INVALID_ARGUMENT);
914 }
915
916 if (object_ready)
917 object_ready = TRUE;
918 if (may_cache)
919 may_cache = TRUE;
920
921 vm_object_lock(object);
922
923 /*
924 * Wake up anyone waiting for the ready attribute
925 * to become asserted.
926 */
927
928 if (object_ready && !object->pager_ready) {
929 object->use_old_pageout = use_old_pageout;
930 vm_object_wakeup(object, VM_OBJECT_EVENT_PAGER_READY);
931 }
932
933 /*
934 * Copy the attributes
935 */
936
937 object->can_persist = may_cache;
938 object->pager_ready = object_ready;
939 if (copy_strategy == MEMORY_OBJECT_COPY_TEMPORARY) {
940 object->temporary = TRUE;
941 } else {
942 object->copy_strategy = copy_strategy;
943 }
944
945 vm_object_unlock(object);
946
947 vm_object_deallocate(object);
948
949 return(KERN_SUCCESS);
950}
951
952/*
953 * XXX rpd claims that reply_to could be obviated in favor of a client
954 * XXX stub that made change_attributes an RPC. Need investigation.
955 */
956
957kern_return_t memory_object_change_attributes(object, may_cache,
958 copy_strategy, reply_to, reply_to_type)
959 vm_object_t object;
960 boolean_t may_cache;
961 memory_object_copy_strategy_t copy_strategy;
962 ipc_port_t reply_to;
963 mach_msg_type_name_t reply_to_type;
964{
965 kern_return_t result;
966
967 /*
968 * Do the work and throw away our object reference. It
969 * is important that the object reference be deallocated
970 * BEFORE sending the reply. The whole point of the reply
971 * is that it shows up after the terminate message that
972 * may be generated by setting the object uncacheable.
973 *
974 * XXX may_cache may become a tri-valued variable to handle
975 * XXX uncache if not in use.
976 */
977 result = memory_object_set_attributes_common(object, TRUE,
978 may_cache, copy_strategy,
979 FALSE);
980
981 if (IP_VALID(reply_to)) {
982
983 /* consumes our naked send-once/send right for reply_to */
984 (void) memory_object_change_completed(reply_to, reply_to_type,
985 may_cache, copy_strategy);
986
987 }
988
989 return(result);
990}
991
992kern_return_t
993memory_object_set_attributes(object, object_ready, may_cache, copy_strategy)
994 vm_object_t object;
995 boolean_t object_ready;
996 boolean_t may_cache;
997 memory_object_copy_strategy_t copy_strategy;
998{
999 return memory_object_set_attributes_common(object, object_ready,
1000 may_cache, copy_strategy,
1001 TRUE);
1002}
1003
1004kern_return_t memory_object_ready(object, may_cache, copy_strategy)
1005 vm_object_t object;
1006 boolean_t may_cache;
1007 memory_object_copy_strategy_t copy_strategy;
1008{
1009 return memory_object_set_attributes_common(object, TRUE,
1010 may_cache, copy_strategy,
1011 FALSE);
1012}
1013
1014kern_return_t memory_object_get_attributes(object, object_ready,
1015 may_cache, copy_strategy)
1016 vm_object_t object;
1017 boolean_t *object_ready;
1018 boolean_t *may_cache;
1019 memory_object_copy_strategy_t *copy_strategy;
1020{
1021 if (object == VM_OBJECT_NULL)
1022 return(KERN_INVALID_ARGUMENT);
1023
1024 vm_object_lock(object);
1025 *may_cache = object->can_persist;
1026 *object_ready = object->pager_ready;
1027 *copy_strategy = object->copy_strategy;
1028 vm_object_unlock(object);
1029
1030 vm_object_deallocate(object);
1031
1032 return(KERN_SUCCESS);
1033}
1034
1035/*
1036 * If successful, consumes the supplied naked send right.
1037 */
1038kern_return_t vm_set_default_memory_manager(host, default_manager)
1039 host_t host;
1040 ipc_port_t *default_manager;
1041{
1042 ipc_port_t current_manager;
1043 ipc_port_t new_manager;
1044 ipc_port_t returned_manager;
1045
1046 if (host == HOST_NULL)
1047 return(KERN_INVALID_HOST);
1048
1049 new_manager = *default_manager;
1050 simple_lock(&memory_manager_default_lock);
1051 current_manager = memory_manager_default;
1052
1053 if (new_manager == IP_NULL) {
1054 /*
1055 * Retrieve the current value.
1056 */
1057
1058 returned_manager = ipc_port_copy_send(current_manager);
1059 } else {
1060 /*
1061 * Retrieve the current value,
1062 * and replace it with the supplied value.
1063 * We consume the supplied naked send right.
1064 */
1065
1066 returned_manager = current_manager;
1067 memory_manager_default = new_manager;
1068
1069 /*
1070 * In case anyone's been waiting for a memory
1071 * manager to be established, wake them up.
1072 */
1073
1074 thread_wakeup((event_t) &memory_manager_default);
1075 }
1076
1077 simple_unlock(&memory_manager_default_lock);
1078
1079 *default_manager = returned_manager;
1080 return(KERN_SUCCESS);
1081}
1082
1083/*
1084 * Routine: memory_manager_default_reference
1085 * Purpose:
1086 * Returns a naked send right for the default
1087 * memory manager. The returned right is always
1088 * valid (not IP_NULL or IP_DEAD).
1089 */
1090
1091ipc_port_t memory_manager_default_reference(void)
1092{
1093 ipc_port_t current_manager;
1094
1095 simple_lock(&memory_manager_default_lock);
1096
1097 while (current_manager = ipc_port_copy_send(memory_manager_default),
1098 !IP_VALID(current_manager)) {
1099 thread_sleep((event_t) &memory_manager_default,
1100 simple_lock_addr(memory_manager_default_lock),
1101 FALSE);
1102 simple_lock(&memory_manager_default_lock);
1103 }
1104
1105 simple_unlock(&memory_manager_default_lock);
1106
1107 return current_manager;
1108}
1109
1110/*
1111 * Routine: memory_manager_default_port
1112 * Purpose:
1113 * Returns true if the receiver for the port
1114 * is the default memory manager.
1115 *
1116 * This is a hack to let ds_read_done
1117 * know when it should keep memory wired.
1118 */
1119
1120boolean_t memory_manager_default_port(port)
1121 ipc_port_t port;
1122{
1123 ipc_port_t current;
1124 boolean_t result;
1125
1126 simple_lock(&memory_manager_default_lock);
1127 current = memory_manager_default;
1128 if (IP_VALID(current)) {
1129 /*
1130 * There is no point in bothering to lock
1131 * both ports, which would be painful to do.
1132 * If the receive rights are moving around,
1133 * we might be inaccurate.
1134 */
1135
1136 result = port->ip_receiver == current->ip_receiver;
1137 } else
1138 result = FALSE;
1139 simple_unlock(&memory_manager_default_lock);
1140
1141 return result;
1142}
1143
1144void memory_manager_default_init(void)
1145{
1146 memory_manager_default = IP_NULL;
1147 simple_lock_init(&memory_manager_default_lock);
1148}