Bug Summary

File: obj-scan-build/mach-defpager/../../mach-defpager/default_pager.c
Location: line 2016, column 2
Description: Dereference of undefined pointer value
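
The flagged line (2016) lies past the excerpt reproduced below, so the offending statement itself is not shown here. For context, "dereference of undefined pointer value" is the checker's report for reading through a pointer that is never assigned on at least one feasible path. A minimal sketch of the bug class (hypothetical code, not the flagged function):

    int deref_undef(int flag)
    {
        int x = 0;
        int *p;           /* p starts out undefined...               */
        if (flag)
            p = &x;       /* ...and is assigned only on this path    */
        return *p;        /* flag == 0: dereference of undefined p   */
    }

The usual fix is to initialize the pointer at its declaration or to make every path perform an assignment before the dereference.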

Annotated Source Code

1/*
2 * Mach Operating System
3 * Copyright (c) 1993-1989 Carnegie Mellon University
4 * All Rights Reserved.
5 *
6 * Permission to use, copy, modify and distribute this software and its
7 * documentation is hereby granted, provided that both the copyright
8 * notice and this permission notice appear in all copies of the
9 * software, derivative works or modified versions, and any portions
10 * thereof, and that both notices appear in supporting documentation.
11 *
12 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
13 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
14 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
15 *
16 * Carnegie Mellon requests users of this software to return to
17 *
18 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
19 * School of Computer Science
20 * Carnegie Mellon University
21 * Pittsburgh PA 15213-3890
22 *
23 * any improvements or extensions that they make and grant Carnegie Mellon
24 * the rights to redistribute these changes.
25 */
26/*
27 * Default pager. Pages to paging partition.
28 *
29 * MUST BE ABLE TO ALLOCATE WIRED-DOWN MEMORY!!!
30 */
31
32#include <mach.h>
33#include <mach/message.h>
34#include <mach/notify.h>
35#include <mach/mig_errors.h>
36#include <mach/thread_switch.h>
37#include <mach/task_info.h>
38#include <mach/default_pager_types.h>
39
40#include <pthread.h>
41
42#include <device/device_types.h>
43#include <device/device.h>
44
45#include <queue.h>
46#include <wiring.h>
47#include <kalloc.h>
48#include <default_pager.h>
49
50#include <assert.h>
51#include <errno.h>
52#include <stdio.h>
53#include <string.h>
54
55#include <file_io.h>
56
57#include "default_pager_S.h"
58
59#define debug 0
60
61static char my_name[] = "(default pager):";
62
63static pthread_mutex_t printf_lock = PTHREAD_MUTEX_INITIALIZER;
64
65#if 0
66#define dprintf(f, x...) \
67 ({ pthread_mutex_lock (&printf_lock); \
68 printf (f , ##x); \
69 fflush (stdout); \
70 pthread_mutex_unlock (&printf_lock); })
71#else
72#define dprintf(f, x...)
73#endif
74
75#if 0
76#define ddprintf(f, x...) \
77 ({ pthread_mutex_lock (&printf_lock); \
78 printf (f , ##x); \
79 fflush (stdout); \
80 pthread_mutex_unlock (&printf_lock); })
81#else
82#define ddprintf(f, x...)
83#endif
84
85/*
86 * parallel vs serial switch
87 */
88#define PARALLEL 1
89
90#if 0
91#define CHECKSUM 1
92#endif
93
94#define USE_PRECIOUS 1
95
96#define ptoa(p) ((p)*vm_page_size)
97#define atop(a) ((a)/vm_page_size)
98
99/*
100
101 */
102/*
103 * Bitmap allocation.
104 */
105typedef unsigned int bm_entry_t;
106#define NB_BM 32
107#define BM_MASK 0xffffffff
108
109#define howmany(a,b) (((a) + (b) - 1)/(b))
110
111/*
112 * Value to indicate no block assigned
113 */
114#define NO_BLOCK ((vm_offset_t)-1)
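
As a worked illustration of the bitmap macros above (a sketch, not code from this file): page p of a partition is tracked by bit p % NB_BM of word p / NB_BM, and howmany() sizes the map in bm_entry_t words, mirroring the idiom used by the functions below:

    bm_entry_t bitmap[howmany(1024, NB_BM)] = {0};  /* map for a 1024-page partition */
    unsigned p = 70;
    bitmap[p / NB_BM] |= 1 << (p % NB_BM);          /* mark page 70 in use */
    int used = (bitmap[p / NB_BM] >> (p % NB_BM)) & 1;  /* test it */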
115
116/*
117 * 'Partition' structure for each paging area.
118 * Controls allocation of blocks within paging area.
119 */
120struct part {
121 pthread_mutex_t p_lock; /* for bitmap/free */
122 vm_size_t total_size; /* total number of blocks */
123 vm_size_t free; /* number of blocks free */
124 unsigned int id; /* named lookup */
125 bm_entry_t *bitmap; /* allocation map */
126 boolean_t going_away; /* destroy attempt in progress */
127 struct file_direct *file; /* file paged to */
128};
129typedef struct part *partition_t;
130
131struct {
132 pthread_mutex_t lock;
133 int n_partitions;
134 partition_t *partition_list;/* array, for quick mapping */
135} all_partitions; /* list of all such */
136
137typedef unsigned char p_index_t;
138
139#define P_INDEX_INVALID ((p_index_t)-1)
140
141#define no_partition(x) ((x) == P_INDEX_INVALID)
142
143partition_t partition_of(x)
144 int x;
145{
146 if (x >= all_partitions.n_partitions || x < 0)
147 panic("partition_of x%x", x);
148 return all_partitions.partition_list[x];
149}
150
151void set_partition_of(x, p)
152 int x;
153 partition_t p;
154{
155 if (x >= all_partitions.n_partitions || x < 0)
156 panic("set_partition_of x%x", x);
157 all_partitions.partition_list[x] = p;
158}
159
160/*
161 * Simple mapping from (file)NAME to id
162 * Saves space, filenames can be long.
163 */
164unsigned int
165part_id(const char *name)
166{
167 unsigned int id, xorid;
168 size_t len;
169
170 len = strlen(name);
171 id = xorid = 0;
172 while (len--) {
173 xorid ^= *name;
174 id += *name++;
175 }
176 return (id << 8) | xorid;
177}
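
For illustration (not part of the source), the id packs the byte sum of the name, shifted left eight bits, over the byte XOR, so distinct names can collide but ids are cheap to compare:

    /* part_id("sd0"): sum = 's'+'d'+'0' = 0x107, xor = 's'^'d'^'0' = 0x27 */
    unsigned int id = (0x107 << 8) | 0x27;   /* == 0x10727 */

new_partition() below relies on this id when it refuses to add a partition whose id matches one already in use.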
178
179void
180partition_init()
181{
182 pthread_mutex_init(&all_partitions.lock, NULL);
183 all_partitions.n_partitions = 0;
184}
185
186static partition_t
187new_partition (const char *name, struct file_direct *fdp,
188 int check_linux_signature)
189{
190 partition_t part;
191 vm_size_t size, bmsize;
192 vm_offset_t raddr;
193 mach_msg_type_number_t rsize;
194 int rc;
195 unsigned int id = part_id(name);
196
197 pthread_mutex_lock(&all_partitions.lock);
198 {
199 unsigned int i;
200 for (i = 0; i < all_partitions.n_partitions; i++)
201 {
202 part = partition_of(i);
203 if (part && part->id == id)
204 {
205 printf ("(default pager): Already paging to partition %s!\n",
206 name);
207 pthread_mutex_unlock(&all_partitions.lock);
208 return 0;
209 }
210 }
211 }
212 pthread_mutex_unlock(&all_partitions.lock);
213
214 size = atop(fdp->fd_size * fdp->fd_bsize);
215 bmsize = howmany(size, NB_BM) * sizeof(bm_entry_t);
216
217 part = (partition_t) kalloc(sizeof(struct part));
218 pthread_mutex_init(&part->p_lock, NULL);
219 part->total_size = size;
220 part->free = size;
221 part->id = id;
222 part->bitmap = (bm_entry_t *)kalloc(bmsize);
223 part->going_away= FALSE;
224 part->file = fdp;
225
226 bzero((char *)part->bitmap, bmsize);
227
228 if (check_linux_signature < 0)
229 {
230 if (check_linux_signature != -3)
231 printf("(default pager): "
232 "Paging to raw partition %s (%uk paging space)\n",
233 name, part->total_size * (vm_page_size / 1024));
234 return part;
235 }
236
237#define LINUX_PAGE_SIZE 4096 /* size of pages in Linux swap partitions */
238 rc = page_read_file_direct(part->file,
239 0, LINUX_PAGE_SIZE,
240 &raddr,
241 &rsize);
242 if (rc)
243 panic("(default pager): cannot read first page of %s! rc=%#x\n",
244 name, rc);
245 while (rsize < LINUX_PAGE_SIZE)
246 {
247 /* Filesystem block size is smaller than page size,
248 so we must do several reads to get the whole page. */
249 vm_address_t baddr, bsize;
250 rc = page_read_file_direct(part->file,
251 rsize, LINUX_PAGE_SIZE-rsize,
252 &baddr,
253 &bsize);
254 if (rc)
255 panic("(default pager): "
256 "cannot read first page of %s! rc=%#x at %#x\n",
257 name, rc, rsize);
258
259 memcpy ((char *) raddr + rsize, (void *) baddr, bsize);
260 rsize += bsize;
261 vm_deallocate (mach_task_self (), baddr, bsize);
262 }
263
264 if (!memcmp("SWAP-SPACE", (char *) raddr + LINUX_PAGE_SIZE-10, 10))
265 {
266 /* The partition's first page has a Linux swap signature.
267 This means the beginning of the page contains a bitmap
268 of good pages, and all others are bad. */
269 unsigned int i, j, bad, max;
270 int waste;
271
272 printf("(default pager): Found Linux 2.0 swap signature in %s\n",
273 name);
274
275 /* The first page, and the pages corresponding to the bits
276 occupied by the signature in the final 10 bytes of the page,
277 are always unavailable ("bad"). */
278 *(u_int32_t *)raddr &= ~(u_int32_t) 1;
279 memset((char *) raddr + LINUX_PAGE_SIZE-10, 0, 10);
280
281 max = LINUX_PAGE_SIZE / sizeof(u_int32_t);
282 if (max > (part->total_size + 31) / 32)
283 max = (part->total_size + 31) / 32;
284
285 bad = 0;
286 for (i = 0; i < max; ++i)
287 {
288 u_int32_t bm = ((u_int32_t *) raddr)[i];
289 if (bm == ~(u_int32_t) 0)
290 continue;
291 /* There are some zero bits in this word. */
292 for (j = 0; j < 32; ++j)
293 if ((bm & (1 << j)) == 0)
294 {
295 unsigned int p = i*32 + j;
296 if (p >= part->total_size)
297 break;
298 ++bad;
299 part->bitmap[p / NB_BM] |= 1 << (p % NB_BM);
300 }
301 }
302 part->free -= bad;
303
304 --bad; /* Don't complain about first page. */
305 waste = part->total_size - (8 * (LINUX_PAGE_SIZE-10));
306 if (waste > 0)
307 {
308 /* The wasted pages were already marked "bad". */
309 bad -= waste;
310 if (bad > 0)
311 printf("\
312(default pager): Paging to %s, %dk swap-space (%dk bad, %dk wasted at end)\n",
313 name,
314 part->free * (LINUX_PAGE_SIZE / 1024),
315 bad * (LINUX_PAGE_SIZE / 1024),
316 waste * (LINUX_PAGE_SIZE / 1024));
317 else
318 printf("\
319(default pager): Paging to %s, %dk swap-space (%dk wasted at end)\n",
320 name,
321 part->free * (LINUX_PAGE_SIZE / 1024),
322 waste * (LINUX_PAGE_SIZE / 1024));
323 }
324 else if (bad > 0)
325 printf("\
326(default pager): Paging to %s, %dk swap-space (excludes %dk marked bad)\n",
327 name,
328 part->free * (LINUX_PAGE_SIZE / 1024),
329 bad * (LINUX_PAGE_SIZE / 1024));
330 else
331 printf("\
332(default pager): Paging to %s, %dk swap-space\n",
333 name,
334 part->free * (LINUX_PAGE_SIZE / 1024));
335 }
336 else if (!memcmp("SWAPSPACE2",
337 (char *) raddr + LINUX_PAGE_SIZE-10, 10))
338 {
339 struct
340 {
341 u_int8_t bootbits[1024];
342 u_int32_t version;
343 u_int32_t last_page;
344 u_int32_t nr_badpages;
345 u_int32_t padding[125];
346 u_int32_t badpages[1];
347 } *hdr = (void *) raddr;
348
349 printf("\
350(default pager): Found Linux 2.2 swap signature (v%u) in %s...",
351 hdr->version, name);
352
353 part->bitmap[0] |= 1; /* first page unusable */
354 part->free--;
355
356 switch (hdr->version)
357 {
358 default:
359 if (check_linux_signature)
360 {
361 printf ("version %u unknown! SKIPPING %s!\n",
362 hdr->version,
363 name);
364 vm_deallocate(mach_task_self(), raddr, rsize);
365 kfree(part->bitmap, bmsize);
366 kfree(part, sizeof *part);
367 return 0;
368 }
369 else
370 printf ("version %u unknown! IGNORING SIGNATURE PAGE!"
371 " %dk swap-space\n",
372 hdr->version,
373 part->free * (LINUX_PAGE_SIZE / 1024));
374 break;
375
376 case 1:
377 {
378 unsigned int waste, i;
379 if (hdr->last_page > part->total_size)
380 {
381 printf ("signature says %uk, partition has only %uk! ",
382 hdr->last_page * (LINUX_PAGE_SIZE / 1024),
383 part->total_size * (LINUX_PAGE_SIZE / 1024));
384 waste = 0;
385 }
386 else
387 {
388 waste = part->total_size - hdr->last_page;
389 part->total_size = hdr->last_page;
390 part->free = part->total_size - 1;
391 }
392 for (i = 0; i < hdr->nr_badpages; ++i)
393 {
394 const u_int32_t bad = hdr->badpages[i];
395 part->bitmap[bad / NB_BM] |= 1 << (bad % NB_BM);
396 part->free--;
397 }
398 printf ("%uk swap-space",
399 part->free * (LINUX_PAGE_SIZE / 1024));
400 if (hdr->nr_badpages != 0)
401 printf (" (excludes %uk marked bad)",
402 hdr->nr_badpages * (LINUX_PAGE_SIZE / 1024));
403 if (waste != 0)
404 printf (" (excludes %uk at end of partition)",
405 waste * (LINUX_PAGE_SIZE / 1024));
406 printf ("\n");
407 }
408 }
409 }
410 else if (check_linux_signature)
411 {
412 printf ("(default pager): "
413 "Cannot find Linux swap signature page! "
414 "SKIPPING %s (%uk partition)!",
415 name, part->total_size * (vm_page_size / 1024));
416 kfree(part->bitmap, bmsize);
417 kfree(part, sizeof *part);
418 part = 0;
419 }
420 else
421 printf("(default pager): "
422 "Paging to raw partition %s (%uk paging space)\n",
423 name, part->total_size * (vm_page_size / 1024));
424
425 vm_deallocate(mach_task_self(), raddr, rsize);
426
427 return part;
428}
429
430/*
431 * Create a partition descriptor,
432 * add it to the list of all such.
433 * size is in BYTES.
434 */
435void
436create_paging_partition(const char *name,
437 struct file_direct *fdp, int isa_file,
438 int linux_signature)
439{
440 partition_t part;
441
442 part = new_partition (name, fdp, linux_signature);
443 if (!part)
444 return;
445
446 pthread_mutex_lock(&all_partitions.lock);
447 {
448 int i;
449
450 for (i = 0; i < all_partitions.n_partitions; i++)
451 if (partition_of(i) == 0) break;
452
453 if (i == all_partitions.n_partitions) {
454 partition_t *new_list, *old_list;
455 int n;
456
457 n = i ? (i<<1) : 2;
458 new_list = (partition_t *)
459 kalloc( n * sizeof(partition_t) );
460 if (new_list == 0) no_paging_space(TRUE);
461 bzero(new_list, n*sizeof(partition_t));
462 if (i) {
463 old_list = all_partitions.partition_list;
464 bcopy(old_list, new_list, i*sizeof(partition_t));
465 }
466 all_partitions.partition_list = new_list;
467 all_partitions.n_partitions = n;
468 if (i) kfree(old_list, i*sizeof(partition_t));
469 }
470 set_partition_of(i, part);
471 }
472 pthread_mutex_unlock(&all_partitions.lock);
473
474#if 0
475 dprintf("%s Added paging %s %s\n", my_name,
476 (isa_file) ? "file" : "device", name);
477#endif
478 overcommitted(TRUE, part->free);
479}
480
481/*
482 * Choose the most appropriate default partition
483 * for an object of SIZE bytes.
484 * Return the partition locked, unless
485 * the object has no CUR_PARTition.
486 */
487p_index_t
488choose_partition(size, cur_part)
489 unsigned int size;
490 p_index_t cur_part;
491{
492 partition_t part;
493 boolean_t found = FALSE;
494 int i;
495
496 pthread_mutex_lock(&all_partitions.lock);
497 for (i = 0; i < all_partitions.n_partitions; i++) {
498
499 /* the undesirable one ? */
500 if (i == cur_part)
501 continue;
502
503ddprintf ("choose_partition(%x,%d,%d)\n",size,cur_part,i);
504 /* one that was removed ? */
505 if ((part = partition_of(i)) == 0)
506 continue;
507
508 /* one that is being removed ? */
509 if (part->going_away)
510 continue;
511
512 /* is it big enough ? */
513 pthread_mutex_lock(&part->p_lock);
514 if (ptoa(part->free) >= size) {
515 if (cur_part != P_INDEX_INVALID) {
516 pthread_mutex_unlock(&all_partitions.lock);
517 return (p_index_t)i;
518 } else
519 found = TRUE;
520 }
521 pthread_mutex_unlock(&part->p_lock);
522
523 if (found) break;
524 }
525 pthread_mutex_unlock(&all_partitions.lock);
526 return (found) ? (p_index_t)i : P_INDEX_INVALID;
527}
528
529/*
530 * Allocate a page in a paging partition
531 * The partition is returned unlocked.
532 */
533vm_offset_t
534pager_alloc_page(pindex, lock_it)
535 p_index_t pindex;
536 boolean_t lock_it;
537{
538 int bm_e;
539 int bit;
540 int limit;
541 bm_entry_t *bm;
542 partition_t part;
543 static char here[] = "%spager_alloc_page";
544
545 if (no_partition(pindex))
546 return (NO_BLOCK);
547ddprintf ("pager_alloc_page(%d,%d)\n",pindex,lock_it);
548 part = partition_of(pindex);
549
550 /* unlikely, but possible deadlock against destroy_partition */
551 if (!part || part->going_away)
552 return (NO_BLOCK);
553
554 if (lock_it)
555 pthread_mutex_lock(&part->p_lock);
556
557 if (part->free == 0) {
558 /* out of paging space */
559 pthread_mutex_unlock(&part->p_lock);
560 return (NO_BLOCK);
561 }
562
563 limit = howmany(part->total_size, NB_BM);
564 bm = part->bitmap;
565 for (bm_e = 0; bm_e < limit; bm_e++, bm++)
566 if (*bm != BM_MASK)
567 break;
568
569 if (bm_e == limit)
570 panic(here,my_name);
571
572 /*
573 * Find and set the proper bit
574 */
575 {
576 bm_entry_t b = *bm;
577
578 for (bit = 0; bit < NB_BM; bit++)
579 if ((b & (1<<bit)) == 0)
580 break;
581 if (bit == NB_BM)
582 panic(here,my_name);
583
584 *bm = b | (1<<bit);
585 part->free--;
586
587 }
588
589 pthread_mutex_unlock(&part->p_lock);
590
591 return (bm_e*NB_BM+bit);
592}
593
594/*
595 * Deallocate a page in a paging partition
596 */
597void
598pager_dealloc_page(pindex, page, lock_it)
599 p_index_t pindex;
600 vm_offset_t page;
601 boolean_t lock_it;
602{
603 partition_t part;
604 int bit, bm_e;
605
606 /* be paranoid */
607 if (no_partition(pindex))
608 panic("%sdealloc_page",my_name);
609ddprintf ("pager_dealloc_page(%d,%x,%d)\n",pindex,page,lock_it);
610 part = partition_of(pindex);
611
612 if (page >= part->total_size)
613 panic("%sdealloc_page",my_name);
614
615 bm_e = page / NB_BM;
616 bit = page % NB_BM;
617
618 if (lock_it)
619 pthread_mutex_lock(&part->p_lock);
620
621 part->bitmap[bm_e] &= ~(1<<bit);
622 part->free++;
623
624 if (lock_it)
625 pthread_mutex_unlock(&part->p_lock);
626}
627
628/*
629
630 */
631/*
632 * Allocation info for each paging object.
633 *
634 * Most operations, even pager_write_offset and pager_put_checksum,
635 * just need a read lock. Higher-level considerations prevent
636 * conflicting operations on a single page. The lock really protects
637 * the underlying size and block map memory, so pager_extend needs a
638 * write lock.
639 *
640 * An object can now span multiple paging partitions. The allocation
641 * info we keep is a pair (offset,p_index) where the index is in the
642 * array of all partition ptrs, and the offset is partition-relative.
643 * Size wise we are doing ok fitting the pair into a single integer:
644 * the offset really is in pages so we have vm_page_size bits available
645 * for the partition index.
646 */
647#define DEBUG_READER_CONFLICTS 0
648
649#if DEBUG_READER_CONFLICTS
650int default_pager_read_conflicts = 0;
651#endif
652
653union dp_map {
654
655 struct {
656 unsigned int p_offset : 24,
657 p_index : 8;
658 } block;
659
660 union dp_map *indirect;
661};
662typedef union dp_map *dp_map_t;
663
664/* quick check for part==block==invalid */
665#define no_block(e) ((e).indirect == (dp_map_t)NO_BLOCK)
666#define invalidate_block(e) ((e).indirect = (dp_map_t)NO_BLOCK)
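
A small sketch (not from the source) of how one entry is used: 24 bits of partition-relative page offset plus an 8-bit partition index share storage with the indirect pointer, and the all-ones pattern written through indirect doubles as the "no block" sentinel:

    union dp_map e;
    invalidate_block(e);          /* e.indirect == (dp_map_t)NO_BLOCK */
    e.block.p_offset = 123;       /* page 123...                      */
    e.block.p_index  = 2;         /* ...within partition 2            */
    if (!no_block(e))
        printf("page %u on partition %u\n",
               e.block.p_offset, e.block.p_index);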
667
668struct dpager {
669 pthread_mutex_t lock; /* lock for extending block map */
670 /* XXX should be read-write lock */
671#if DEBUG_READER_CONFLICTS
672 int readers;
673 boolean_t writer;
674#endif
675 dp_map_t map; /* block map */
676 vm_size_t size; /* size of paging object, in pages */
677 vm_size_t limit; /* limit (bytes) allowed to grow to */
678 vm_size_t byte_limit; /* limit, which wasn't
679 rounded to page boundary */
680 p_index_t cur_partition;
681#ifdef CHECKSUM
682 vm_offset_t *checksum; /* checksum - parallel to block map */
683#define NO_CHECKSUM ((vm_offset_t)-1)
684#endif /* CHECKSUM */
685};
686typedef struct dpager *dpager_t;
687
688/*
689 * A paging object uses either a one- or a two-level map of offsets
690 * into a paging partition.
691 */
692#define PAGEMAP_ENTRIES 64
693 /* number of pages in a second-level map */
694#define PAGEMAP_SIZE(npgs) ((npgs)*sizeof(vm_offset_t))
695
696#define INDIRECT_PAGEMAP_ENTRIES(npgs) \
697 ((((npgs)-1)/PAGEMAP_ENTRIES) + 1)
698#define INDIRECT_PAGEMAP_SIZE(npgs) \
699 (INDIRECT_PAGEMAP_ENTRIES(npgs) * sizeof(vm_offset_t *))
700#define INDIRECT_PAGEMAP(size) \
701 (size > PAGEMAP_ENTRIES)
702
703#define ROUNDUP_TO_PAGEMAP(npgs) \
704 (((npgs) + PAGEMAP_ENTRIES - 1) & ~(PAGEMAP_ENTRIES - 1))
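
A worked example of these macros (illustrative numbers): a 40-page object keeps a one-level map, since INDIRECT_PAGEMAP(40) is false, occupying PAGEMAP_SIZE(40) bytes. A 1000-page object goes two-level: INDIRECT_PAGEMAP_ENTRIES(1000) == ((1000-1)/64)+1 == 16 second-level blocks of PAGEMAP_ENTRIES == 64 entries each, and ROUNDUP_TO_PAGEMAP(1000) == 1024 rounds the object size up to a whole number of second-level blocks.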
705
706/*
707 * Object sizes are rounded up to the next power of 2,
708 * unless they are bigger than a given maximum size.
709 */
710vm_size_t max_doubled_size = 4 * 1024 * 1024; /* 4 meg */
711
712/*
713 * Return first level map for pager.
714 * If there is no such map, then allocate it.
715 */
716dp_map_t pager_get_direct_map(pager)
717 dpager_t pager;
718{
719 dp_map_t mapptr, emapptr;
720 vm_size_t size = pager->size;
721
722 if (pager->map)
723 return pager->map;
724 /*
725 * Allocate and initialize the block map
726 */
727 {
728 vm_size_t alloc_size;
729 dp_map_t init_value;
730
731 if (INDIRECT_PAGEMAP(size)) {
732 alloc_size = INDIRECT_PAGEMAP_SIZE(size);
733 init_value = (dp_map_t)0;
734 } else {
735 alloc_size = PAGEMAP_SIZE(size);
736 init_value = (dp_map_t)NO_BLOCK;
737 }
738
739 mapptr = (dp_map_t) kalloc(alloc_size);
740 for (emapptr = &mapptr[(alloc_size-1) / sizeof(vm_offset_t)];
741 emapptr >= mapptr;
742 emapptr--)
743 emapptr->indirect = init_value;
744 }
745 pager->map = mapptr;
746 return mapptr;
747}
748
749/*
750 * Attach a new paging object to a paging partition
751 */
752void
753pager_alloc(pager, part, size)
754 dpager_t pager;
755 p_index_t part;
756 vm_size_t size; /* in BYTES */
757{
758 int i;
759 dp_map_t mapptr, emapptr;
760
761 pthread_mutex_init(&pager->lock, NULL);
762#if DEBUG_READER_CONFLICTS
763 pager->readers = 0;
764 pager->writer = FALSE;
765#endif
766 pager->cur_partition = part;
767
768 /*
769 * Convert byte size to number of pages, then increase to the nearest
770 * power of 2.
771 */
772 size = atop(size);
773 if (size <= atop(max_doubled_size)) {
774 i = 1;
775 while (i < size)
776 i <<= 1;
777 size = i;
778 } else
779 size = ROUNDUP_TO_PAGEMAP(size);
780
781 pager->map = NULL;
782 pager->size = size;
783 pager->limit = (vm_size_t)-1;
784
785#ifdef CHECKSUM
786 if (INDIRECT_PAGEMAP(size)) {
787 mapptr = (vm_offset_t *)
788 kalloc(INDIRECT_PAGEMAP_SIZE(size));
789 for (i = INDIRECT_PAGEMAP_ENTRIES(size); --i >= 0;)
790 mapptr[i] = 0;
791 } else {
792 mapptr = (vm_offset_t *) kalloc(PAGEMAP_SIZE(size));
793 for (i = 0; i < size; i++)
794 mapptr[i] = NO_CHECKSUM;
795 }
796 pager->checksum = mapptr;
797#endif /* CHECKSUM */
798}
799
800/*
801 * Return size (in bytes) of space actually allocated to this pager.
802 * The pager is read-locked.
803 */
804
805vm_size_t
806pager_allocated(pager)
807 dpager_t pager;
808{
809 vm_size_t size;
810 dp_map_t map, emap;
811 vm_size_t asize;
812
813 size = pager->size; /* in pages */
814 asize = 0; /* allocated, in pages */
815 map = pager_get_direct_map(pager);
816
817 if (INDIRECT_PAGEMAP(size)) {
818 for (emap = &map[INDIRECT_PAGEMAP_ENTRIES(size)];
819 map < emap; map++) {
820
821 dp_map_t map2, emap2;
822
823 if ((map2 = map->indirect) == 0)
824 continue;
825
826 for (emap2 = &map2[PAGEMAP_ENTRIES];
827 map2 < emap2; map2++)
828 if ( ! no_block(*map2) )
829 asize++;
830
831 }
832 } else {
833 for (emap = &map[size]; map < emap; map++)
834 if ( ! no_block(*map) )
835 asize++;
836 }
837
838 return ptoa(asize);
839}
840
841/*
842 * Find offsets (in the object) of pages actually allocated to this pager.
843 * Returns the number of allocated pages, whether or not they all fit.
844 * The pager is read-locked.
845 */
846
847unsigned int
848pager_pages(pager, pages, numpages)
849 dpager_t pager;
850 default_pager_page_t *pages;
851 unsigned int numpages;
852{
853 vm_size_t size;
854 dp_map_t map, emap;
855 unsigned int actual;
856 vm_offset_t offset;
857
858 size = pager->size; /* in pages */
859 map = pager_get_direct_map(pager);
860 actual = 0;
861 offset = 0;
862
863 if (INDIRECT_PAGEMAP(size)) {
864 for (emap = &map[INDIRECT_PAGEMAP_ENTRIES(size)];
865 map < emap; map++) {
866
867 dp_map_t map2, emap2;
868
869 if ((map2 = map->indirect) == 0) {
870 offset += vm_page_size * PAGEMAP_ENTRIES;
871 continue;
872 }
873 for (emap2 = &map2[PAGEMAP_ENTRIES];
874 map2 < emap2; map2++)
875 if ( ! no_block(*map2) ) {
876 if (actual++ < numpages)
877 pages++->dpp_offset = offset;
878 }
879 offset += vm_page_size;
880 }
881 } else {
882 for (emap = &map[size]; map < emap; map++) {
883 if ( ! no_block(*map) ) {
884 if (actual++ < numpages)
885 pages++->dpp_offset = offset;
886 }
887 offset += vm_page_size;
888 }
889 }
890 return actual;
891}
892
893/*
894 * Extend the map for a paging object.
895 *
896 * XXX This implementation can allocate an arbitrary large amount
897 * of wired memory when extending a big block map. Because vm-privileged
898 * threads call pager_extend, this can crash the system by exhausting
899 * system memory.
900 */
901void
902pager_extend(pager, new_size)
903 dpager_t pager;
904 vm_size_t new_size; /* in pages */
905{
906 dp_map_t new_mapptr;
907 dp_map_t old_mapptr;
908 int i;
909 vm_size_t old_size;
910
911 pthread_mutex_lock(&pager->lock); /* XXX lock_write */
912#if DEBUG_READER_CONFLICTS
913 pager->writer = TRUE;
914#endif
915 /*
916 * Double current size until we cover new size.
917 * If object is 'too big' just use new size.
918 */
919 old_size = pager->size;
920
921 if (new_size <= atop(max_doubled_size)) {
922 /* New size cannot be less than 1 */
923 i = old_size ? old_size : 1;
924 while (i < new_size)
925 i <<= 1;
926 new_size = i;
927 } else
928 new_size = ROUNDUP_TO_PAGEMAP(new_size);
929
930 if (INDIRECT_PAGEMAP(old_size)) {
931 /*
932 * Pager already uses two levels. Allocate
933 * a larger indirect block.
934 */
935 new_mapptr = (dp_map_t)
936 kalloc(INDIRECT_PAGEMAP_SIZE(new_size));
937 old_mapptr = pager_get_direct_map(pager);
938 for (i = 0; i < INDIRECT_PAGEMAP_ENTRIES(old_size); i++)
939 new_mapptr[i] = old_mapptr[i];
940 for (; i < INDIRECT_PAGEMAP_ENTRIES(new_size); i++)
941 new_mapptr[i].indirect = (dp_map_t)0;
942 kfree((char *)old_mapptr, INDIRECT_PAGEMAP_SIZE(old_size));
943 pager->map = new_mapptr;
944 pager->size = new_size;
945#ifdef CHECKSUM
946 new_mapptr = (vm_offset_t *)
947 kalloc(INDIRECT_PAGEMAP_SIZE(new_size));
948 old_mapptr = pager->checksum;
949 for (i = 0; i < INDIRECT_PAGEMAP_ENTRIES(old_size); i++)
950 new_mapptr[i] = old_mapptr[i];
951 for (; i < INDIRECT_PAGEMAP_ENTRIES(new_size); i++)
952 new_mapptr[i] = 0;
953 kfree((char *)old_mapptr, INDIRECT_PAGEMAP_SIZE(old_size));
954 pager->checksum = new_mapptr;
955#endif /* CHECKSUM */
956#if DEBUG_READER_CONFLICTS
957 pager->writer = FALSE;
958#endif
959 pthread_mutex_unlock(&pager->lock);
960#if 0
961 ddprintf ("pager_extend 1 mapptr %x [3b] = %x\n", new_mapptr,
962 new_mapptr[0x3b]);
963 if (new_mapptr[0x3b].indirect > 0x10000
964 && new_mapptr[0x3b].indirect != NO_BLOCK)
965 panic ("debug panic");
966#endif
967 return;
968 }
969
970 if (INDIRECT_PAGEMAP(new_size)) {
971 /*
972 * Changing from direct map to indirect map.
973 * Allocate both indirect and direct map blocks,
974 * since second-level (direct) block must be
975 * full size (PAGEMAP_SIZE(PAGEMAP_ENTRIES)).
976 */
977
978 /*
979 * Allocate new second-level map first.
980 */
981 new_mapptr = (dp_map_t) kalloc(PAGEMAP_SIZE(PAGEMAP_ENTRIES));
982 old_mapptr = pager_get_direct_map(pager);
983 for (i = 0; i < old_size; i++)
984 new_mapptr[i] = old_mapptr[i];
985 for (; i < PAGEMAP_ENTRIES; i++)
986 invalidate_block(new_mapptr[i]);
987 kfree((char *)old_mapptr, PAGEMAP_SIZE(old_size));
988 old_mapptr = new_mapptr;
989
990#if 0
991 ddprintf ("pager_extend 2 mapptr %x [3b] = %x\n", new_mapptr,
992 new_mapptr[0x3b]);
993 if (new_mapptr[0x3b].indirect > 0x10000
994 && new_mapptr[0x3b].indirect != NO_BLOCK)
995 panic ("debug panic");
996#endif
997
998 /*
999 * Now allocate indirect map.
1000 */
1001 new_mapptr = (dp_map_t)
1002 kalloc(INDIRECT_PAGEMAP_SIZE(new_size));
1003 new_mapptr[0].indirect = old_mapptr;
1004 for (i = 1; i < INDIRECT_PAGEMAP_ENTRIES(new_size); i++)
1005 new_mapptr[i].indirect = 0;
1006 pager->map = new_mapptr;
1007 pager->size = new_size;
1008#ifdef CHECKSUM
1009 /*
1010 * Allocate new second-level map first.
1011 */
1012 new_mapptr = (vm_offset_t *)kalloc(PAGEMAP_SIZE(PAGEMAP_ENTRIES));
1013 old_mapptr = pager->checksum;
1014 for (i = 0; i < old_size; i++)
1015 new_mapptr[i] = old_mapptr[i];
1016 for (; i < PAGEMAP_ENTRIES; i++)
1017 new_mapptr[i] = NO_CHECKSUM;
1018 kfree((char *)old_mapptr, PAGEMAP_SIZE(old_size));
1019 old_mapptr = new_mapptr;
1020
1021 /*
1022 * Now allocate indirect map.
1023 */
1024 new_mapptr = (vm_offset_t *)
1025 kalloc(INDIRECT_PAGEMAP_SIZE(new_size));
1026 new_mapptr[0] = (vm_offset_t) old_mapptr;
1027 for (i = 1; i < INDIRECT_PAGEMAP_ENTRIES(new_size); i++)
1028 new_mapptr[i] = 0;
1029 pager->checksum = new_mapptr;
1030#endif /* CHECKSUM */
1031#if DEBUG_READER_CONFLICTS
1032 pager->writer = FALSE;
1033#endif
1034 pthread_mutex_unlock(&pager->lock);
1035 return;
1036 }
1037 /*
1038 * Enlarging a direct block.
1039 */
1040 new_mapptr = (dp_map_t) kalloc(PAGEMAP_SIZE(new_size));
1041 old_mapptr = pager_get_direct_map(pager);
1042 for (i = 0; i < old_size; i++)
1043 new_mapptr[i] = old_mapptr[i];
1044 for (; i < new_size; i++)
1045 invalidate_block(new_mapptr[i]);
1046 kfree((char *)old_mapptr, PAGEMAP_SIZE(old_size));
1047 pager->map = new_mapptr;
1048 pager->size = new_size;
1049#ifdef CHECKSUM
1050 new_mapptr = (vm_offset_t *)
1051 kalloc(PAGEMAP_SIZE(new_size));
1052 old_mapptr = pager->checksum;
1053 for (i = 0; i < old_size; i++)
1054 new_mapptr[i] = old_mapptr[i];
1055 for (; i < new_size; i++)
1056 new_mapptr[i] = NO_CHECKSUM;
1057 kfree((char *)old_mapptr, PAGEMAP_SIZE(old_size));
1058 pager->checksum = new_mapptr;
1059#endif /* CHECKSUM */
1060#if DEBUG_READER_CONFLICTS
1061 pager->writer = FALSE;
1062#endif
1063 pthread_mutex_unlock(&pager->lock);
1064}
1065
1066/* This deallocates the pages necessary to truncate a direct map
1067 previously of size NEW_SIZE to the smaller size OLD_SIZE. */
1068static void
1069dealloc_direct (dp_map_t mapptr,
1070 vm_size_t old_size, vm_size_t new_size)
1071{
1072 vm_size_t i;
1073
1074 if (!mapptr)
1075 return;
1076
1077 for (i = new_size; i < old_size; ++i)
1078 {
1079 const union dp_map entry = mapptr[i];
1080 if (!no_block(entry))
1081 {
1082 pager_dealloc_page(entry.block.p_index, entry.block.p_offset,
1083 TRUE);
1084 invalidate_block(mapptr[i]);
1085 }
1086 }
1087}
1088
1089/* Truncate a memory object. First, any pages between the new size
1090 and the (larger) old size are deallocated. Then, the size of
1091 the pagemap may be reduced, an indirect map may be turned into
1092 a direct map.
1093
1094 The pager must be locked by the caller. */
1095static void
1096pager_truncate(dpager_t pager, vm_size_t new_size) /* in pages */
1097{
1098 dp_map_t new_mapptr;
1099 dp_map_t old_mapptr;
1100 int i;
1101 vm_size_t old_size;
1102
1103 pthread_mutex_lock(&pager->lock); /* XXX lock_write */
1104
1105 if (!pager->map)
1106 goto done;
1107
1108 old_size = pager->size;
1109
1110 if (INDIRECT_PAGEMAP(old_size))
1111 {
1112 /* First handle the entire second-levels blocks that are being freed. */
1113 for (i = INDIRECT_PAGEMAP_ENTRIES(new_size);
1114 i < INDIRECT_PAGEMAP_ENTRIES(old_size);
1115 ++i)
1116 {
1117 const dp_map_t mapptr = pager->map[i].indirect;
1118 pager->map[i].indirect = (dp_map_t)0;
1119 dealloc_direct (mapptr, PAGEMAP_ENTRIES, 0);
1120 kfree ((char *)mapptr, PAGEMAP_SIZE(PAGEMAP_ENTRIES));
1121 }
1122
1123 /* Now truncate what's now the final nonempty direct block. */
1124 dealloc_direct (pager->map[(new_size - 1) / PAGEMAP_ENTRIES].indirect,
1125 old_size & (PAGEMAP_ENTRIES - 1),
1126 new_size & (PAGEMAP_ENTRIES - 1));
1127
1128 if (INDIRECT_PAGEMAP (new_size))
1129 {
1130 const dp_map_t old_mapptr = pager->map;
1131 pager->map = (dp_map_t) kalloc (INDIRECT_PAGEMAP_SIZE(new_size));
1132 memcpy (pager->map, old_mapptr, INDIRECT_PAGEMAP_SIZE(new_size));
1133 kfree ((char *) old_mapptr, INDIRECT_PAGEMAP_SIZE (old_size));
1134 }
1135 else
1136 {
1137 /* We are truncating to a size small enough that it goes to using
1138 a one-level map. We already have that map, as the first and only
1139 nonempty element in our indirect map. */
1140 const dp_map_t mapptr = pager->map[0].indirect;
1141 kfree((char *)pager->map, INDIRECT_PAGEMAP_SIZE(old_size));
1142 pager->map = mapptr;
1143 }
1144 }
1145
1146 if (! INDIRECT_PAGEMAP(old_size))
1147 {
1148 /* First deallocate pages in the truncated region. */
1149 dealloc_direct (pager->map, old_size, new_size);
1150 /* Now reduce the size of the direct map itself. We don't bother
1151 with kalloc/kfree if it's not shrinking enough that kalloc.c
1152 would actually use less. */
1153 if (PAGEMAP_SIZE (new_size) <= PAGEMAP_SIZE (old_size) / 2)
1154 {
1155 const dp_map_t old_mapptr = pager->map;
1156 pager->map = (dp_map_t) kalloc (PAGEMAP_SIZE (new_size));
1157 memcpy (pager->map, old_mapptr, PAGEMAP_SIZE (new_size));
1158 kfree ((char *) old_mapptr, PAGEMAP_SIZE (old_size));
1159 }
1160 }
1161
1162 done:
1163 pager->size = new_size;
1164 pthread_mutex_unlock(&pager->lock);
1165
1166#ifdef CHECKSUM
1167#error write me
1168#endif /* CHECKSUM */
1169}
1170
1171
1172/*
1173 * Given an offset within a paging object, find the
1174 * corresponding block within the paging partition.
1175 * Return NO_BLOCK if none allocated.
1176 */
1177union dp_map
1178pager_read_offset(pager, offset)
1179 dpager_t pager;
1180 vm_offset_t offset;
1181{
1182 vm_offset_t f_page;
1183 union dp_map pager_offset;
1184
1185 f_page = atop(offset);
1186
1187#if DEBUG_READER_CONFLICTS
1188 if (pager->readers > 0)
1189 default_pager_read_conflicts++; /* would have proceeded with
1190 read/write lock */
1191#endif
1192 pthread_mutex_lock(&pager->lock); /* XXX lock_read */
1193#if DEBUG_READER_CONFLICTS
1194 pager->readers++;
1195#endif
1196 if (f_page >= pager->size)
1197 {
1198 ddprintf ("%spager_read_offset pager %x: bad page %d >= size %d",
1199 my_name, pager, f_page, pager->size);
1200 pthread_mutex_unlock(&pager->lock);
1201 return (union dp_map) (union dp_map *) NO_BLOCK;
1202#if 0
1203 panic("%spager_read_offset",my_name);
1204#endif
1205 }
1206
1207 invalidate_block(pager_offset);
1208 if (INDIRECT_PAGEMAP(pager->size)) {
1209 dp_map_t mapptr;
1210
1211 if (pager->map) {
1212 mapptr = pager->map[f_page/PAGEMAP_ENTRIES].indirect;
1213 if (mapptr)
1214 pager_offset = mapptr[f_page%PAGEMAP_ENTRIES];
1215 }
1216 }
1217 else {
1218 if (pager->map)
1219 pager_offset = pager->map[f_page];
1220 }
1221
1222#if DEBUG_READER_CONFLICTS
1223 pager->readers--;
1224#endif
1225 pthread_mutex_unlock(&pager->lock);
1226 return (pager_offset);
1227}
1228
1229#if USE_PRECIOUS
1230/*
1231 * Release a single disk block.
1232 */
1233void pager_release_offset(pager, offset)
1234 dpager_t pager;
1235 vm_offset_t offset;
1236{
1237 union dp_map entry;
1238
1239 offset = atop(offset);
1240
1241 pthread_mutex_lock(&pager->lock); /* XXX lock_read */
1242
1243 assert (pager->map);
1244 if (INDIRECT_PAGEMAP(pager->size)) {
1245 dp_map_t mapptr;
1246
1247 mapptr = pager->map[offset / PAGEMAP_ENTRIES].indirect;
1248 entry = mapptr[offset % PAGEMAP_ENTRIES];
1249 invalidate_block(mapptr[offset % PAGEMAP_ENTRIES]);
1250 } else {
1251 entry = pager->map[offset];
1252 invalidate_block(pager->map[offset]);
1253 }
1254
1255 pthread_mutex_unlock(&pager->lock);
1256
1257 pager_dealloc_page(entry.block.p_index, entry.block.p_offset, TRUE);
1258}
1259#endif /*USE_PRECIOUS*/
1260
1261
1262/*
1263 * Move a page from one partition to another
1264 * New partition is locked, old partition is
1265 * locked unless LOCK_OLD sez otherwise.
1266 */
1267union dp_map
1268pager_move_page(block)
1269 union dp_map block;
1270{
1271 partition_t old_part, new_part;
1272 p_index_t old_pindex, new_pindex;
1273 union dp_map ret;
1274 vm_size_t size;
1275 vm_offset_t raddr, offset, new_offset;
1276 kern_return_t rc;
1277 static char here[] = "%spager_move_page";
1278
1279 old_pindex = block.block.p_index;
1280 invalidate_block(ret);
1281
1282 /* See if we have room to put it anywhere else */
1283 new_pindex = choose_partition( ptoa(1), old_pindex);
1284 if (no_partition(new_pindex))
1285 return ret;
1286
1287 /* this unlocks the new partition */
1288 new_offset = pager_alloc_page(new_pindex, FALSE);
1289 if (new_offset == NO_BLOCK)
1290 panic(here,my_name);
1291
1292 /*
1293 * Got the resources, now move the data
1294 */
1295ddprintf ("pager_move_page(%x,%d,%d)\n",block.block.p_offset,old_pindex,new_pindex);
1296 old_part = partition_of(old_pindex);
1297 offset = ptoa(block.block.p_offset);
1298 rc = page_read_file_direct (old_part->file,
1299 offset,
1300 vm_page_size,
1301 &raddr,
1302 &size);
1303 if (rc != 0)
1304 panic(here,my_name);
1305
1306 /* release old */
1307 pager_dealloc_page(old_pindex, block.block.p_offset, FALSE);
1308
1309 new_part = partition_of(new_pindex);
1310 offset = ptoa(new_offset);
1311 rc = page_write_file_direct (new_part->file,
1312 offset,
1313 raddr,
1314 size,
1315 &size);
1316 if (rc != 0)
1317 panic(here,my_name);
1318
1319 (void) vm_deallocate( mach_task_self(), raddr, size);
1320
1321 ret.block.p_offset = new_offset;
1322 ret.block.p_index = new_pindex;
1323
1324 return ret;
1325}
1326
1327#ifdef CHECKSUM
1328/*
1329 * Return the checksum for a block.
1330 */
1331int
1332pager_get_checksum(pager, offset)
1333 dpager_t pager;
1334 vm_offset_t offset;
1335{
1336 vm_offset_t f_page;
1337 int checksum;
1338
1339 f_page = atop(offset);
1340
1341 pthread_mutex_lock(&pager->lock); /* XXX lock_read */
1342 if (f_page >= pager->size)
1343 panic("%spager_get_checksum",my_name);
1344
1345 if (INDIRECT_PAGEMAP(pager->size)) {
1346 vm_offset_t *mapptr;
1347
1348 mapptr = (vm_offset_t *)pager->checksum[f_page/PAGEMAP_ENTRIES];
1349 if (mapptr == 0)
1350 checksum = NO_CHECKSUM;
1351 else
1352 checksum = mapptr[f_page%PAGEMAP_ENTRIES];
1353 }
1354 else {
1355 checksum = pager->checksum[f_page];
1356 }
1357
1358 pthread_mutex_unlock(&pager->lock);
1359 return (checksum);
1360}
1361
1362/*
1363 * Remember the checksum for a block.
1364 */
1365int
1366pager_put_checksum(pager, offset, checksum)
1367 dpager_t pager;
1368 vm_offset_t offset;
1369 int checksum;
1370{
1371 vm_offset_t f_page;
1372 static char here[] = "%spager_put_checksum";
1373
1374 f_page = atop(offset);
1375
1376 pthread_mutex_lock(&pager->lock); /* XXX lock_read */
1377 if (f_page >= pager->size)
1378 panic(here,my_name);
1379
1380 if (INDIRECT_PAGEMAP(pager->size)) {
1381 vm_offset_t *mapptr;
1382
1383 mapptr = (vm_offset_t *)pager->checksum[f_page/PAGEMAP_ENTRIES];
1384 if (mapptr == 0)
1385 panic(here,my_name);
1386
1387 mapptr[f_page%PAGEMAP_ENTRIES] = checksum;
1388 }
1389 else {
1390 pager->checksum[f_page] = checksum;
1391 }
1392 pthread_mutex_unlock(&pager->lock);
1393}
1394
1395/*
1396 * Compute a checksum - XOR each 32-bit word.
1397 */
1398int
1399compute_checksum(addr, size)
1400 vm_offset_t addr;
1401 vm_size_t size;
1402{
1403 int checksum = NO_CHECKSUM;
1404 int *ptr;
1405 int count;
1406
1407 ptr = (int *)addr;
1408 count = size / sizeof(int);
1409
1410 while (--count >= 0)
1411 checksum ^= *ptr++;
1412
1413 return (checksum);
1414}
1415#endif /* CHECKSUM */
1416
1417/*
1418 * Given an offset within a paging object, find the
1419 * corresponding block within the paging partition.
1420 * Allocate a new block if necessary.
1421 *
1422 * WARNING: paging objects apparently may be extended
1423 * without notice!
1424 */
1425union dp_map
1426pager_write_offset(pager, offset)
1427 dpager_t pager;
1428 vm_offset_t offset;
1429{
1430 vm_offset_t f_page;
1431 dp_map_t mapptr;
1432 union dp_map block;
1433
1434 invalidate_block(block);
1435
1436 f_page = atop(offset);
1437
1438#if DEBUG_READER_CONFLICTS
1439 if (pager->readers > 0)
1440 default_pager_read_conflicts++; /* would have proceeded with
1441 read/write lock */
1442#endif
1443 pthread_mutex_lock(&pager->lock); /* XXX lock_read */
1444#if DEBUG_READER_CONFLICTS
1445 pager->readers++;
1446#endif
1447
1448 /* Catch the case where we had no initial fit partition
1449 for this object, but one was added later on */
1450 if (no_partition(pager->cur_partition)) {
1451 p_index_t new_part;
1452 vm_size_t size;
1453
1454 size = (f_page > pager->size) ? f_page : pager->size;
1455 new_part = choose_partition(ptoa(size), P_INDEX_INVALID);
1456 if (no_partition(new_part))
1457 new_part = choose_partition(ptoa(1), P_INDEX_INVALID);
1458 if (no_partition(new_part))
1459 /* give up right now to avoid confusion */
1460 goto out;
1461 else
1462 pager->cur_partition = new_part;
1463 }
1464
1465 while (f_page >= pager->size) {
1466 ddprintf ("pager_write_offset: extending: %x %x\n", f_page, pager->size);
1467
1468 /*
1469 * Paging object must be extended.
1470 * Remember that offset is 0-based, but size is 1-based.
1471 */
1472#if DEBUG_READER_CONFLICTS
1473 pager->readers--;
1474#endif
1475 pthread_mutex_unlock(&pager->lock);
1476 pager_extend(pager, f_page + 1);
1477#if DEBUG_READER_CONFLICTS
1478 if (pager->readers > 0)
1479 default_pager_read_conflicts++; /* would have proceeded with
1480 read/write lock */
1481#endif
1482 pthread_mutex_lock(&pager->lock); /* XXX lock_read */
1483#if DEBUG_READER_CONFLICTS
1484 pager->readers++;
1485#endif
1486 ddprintf ("pager_write_offset: done extending: %x %x\n", f_page, pager->size);
1487 }
1488
1489 if (INDIRECT_PAGEMAP(pager->size)) {
1490 ddprintf ("pager_write_offset: indirect\n");
1491 mapptr = pager_get_direct_map(pager);
1492 mapptr = mapptr[f_page/PAGEMAP_ENTRIES].indirect;
1493 if (mapptr == 0) {
1494 /*
1495 * Allocate the indirect block
1496 */
1497 int i;
1498 ddprintf ("pager_write_offset: allocating indirect\n");
1499
1500 mapptr = (dp_map_t) kalloc(PAGEMAP_SIZE(PAGEMAP_ENTRIES));
1501 if (mapptr == 0) {
1502 /* out of space! */
1503 no_paging_space(TRUE);
1504 goto out;
1505 }
1506 pager->map[f_page/PAGEMAP_ENTRIES].indirect = mapptr;
1507 for (i = 0; i < PAGEMAP_ENTRIES; i++)
1508 invalidate_block(mapptr[i]);
1509#ifdef CHECKSUM
1510 {
1511 vm_offset_t *cksumptr;
1512 int j;
1513
1514 cksumptr = (vm_offset_t *)
1515 kalloc(PAGEMAP_SIZE(PAGEMAP_ENTRIES));
1516 if (cksumptr == 0) {
1517 /* out of space! */
1518 no_paging_space(TRUE);
1519 goto out;
1520 }
1521 pager->checksum[f_page/PAGEMAP_ENTRIES]
1522 = (vm_offset_t)cksumptr;
1523 for (j = 0; j < PAGEMAP_ENTRIES; j++)
1524 cksumptr[j] = NO_CHECKSUM;
1525 }
1526#endif /* CHECKSUM */
1527 }
1528 f_page %= PAGEMAP_ENTRIES;
1529 }
1530 else {
1531 mapptr = pager_get_direct_map(pager);
1532 }
1533
1534 block = mapptr[f_page];
1535 ddprintf ("pager_write_offset: block starts as %x[%x] %x\n", mapptr, f_page, block);
1536 if (no_block(block)) {
1537 vm_offset_t off;
1538
1539 /* get room now */
1540 off = pager_alloc_page(pager->cur_partition, TRUE);
1541 if (off == NO_BLOCK) {
1542 /*
1543 * Before giving up, try all other partitions.
1544 */
1545 p_index_t new_part;
1546
1547 ddprintf ("pager_write_offset: could not allocate block\n");
1548 /* returns it locked (if any one is non-full) */
1549 new_part = choose_partition( ptoa(1), pager->cur_partition);
1550 if ( ! no_partition(new_part) ) {
1551
1552#if debug
1553dprintf("%s partition %x filled,", my_name, pager->cur_partition);
1554dprintf("extending object %x (size %x) to %x.\n",
1555 pager, pager->size, new_part);
1556#endif
1557
1558 /* this one tastes better */
1559 pager->cur_partition = new_part;
1560
1561 /* this unlocks the partition too */
1562 off = pager_alloc_page(pager->cur_partition, FALSE);
1563
1564 }
1565
1566 if (off == NO_BLOCK) {
1567 /*
1568 * Oh well.
1569 */
1570 overcommitted(FALSE, 1);
1571 goto out;
1572 }
1573 ddprintf ("pager_write_offset: decided to allocate block\n");
1574 }
1575 block.block.p_offset = off;
1576 block.block.p_index = pager->cur_partition;
1577 mapptr[f_page] = block;
1578 }
1579
1580out:
1581
1582#if DEBUG_READER_CONFLICTS
1583 pager->readers--;
1584#endif
1585 pthread_mutex_unlock(&pager->lock);
1586 return (block);
1587}
1588
1589/*
1590 * Deallocate all of the blocks belonging to a paging object.
1591 * No locking needed because no other operations can be in progress.
1592 */
1593void
1594pager_dealloc(pager)
1595 dpager_t pager;
1596{
1597 int i, j;
1598 dp_map_t mapptr;
1599 union dp_map block;
1600
1601 if (!pager->map)
1602 return;
1603
1604 if (INDIRECT_PAGEMAP(pager->size)) {
1605 for (i = INDIRECT_PAGEMAP_ENTRIES(pager->size); --i >= 0; ) {
1606 mapptr = pager->map[i].indirect;
1607 if (mapptr != 0) {
1608 for (j = 0; j < PAGEMAP_ENTRIES; j++) {
1609 block = mapptr[j];
1610 if ( ! no_block(block) )
1611 pager_dealloc_page(block.block.p_index,
1612 block.block.p_offset, TRUE);
1613 }
1614 kfree((char *)mapptr, PAGEMAP_SIZE(PAGEMAP_ENTRIES));
1615 pager->map[i].indirect = (dp_map_t) 0;
1616 }
1617 }
1618 kfree((char *)pager->map, INDIRECT_PAGEMAP_SIZE(pager->size));
1619 pager->map = (dp_map_t) 0;
1620#ifdef CHECKSUM
1621 for (i = INDIRECT_PAGEMAP_ENTRIES(pager->size); --i >= 0; ) {
1622 mapptr = (vm_offset_t *)pager->checksum[i];
1623 if (mapptr) {
1624 kfree((char *)mapptr, PAGEMAP_SIZE(PAGEMAP_ENTRIES));
1625 }
1626 }
1627 kfree((char *)pager->checksum,
1628 INDIRECT_PAGEMAP_SIZE(pager->size));
1629#endif /* CHECKSUM */
1630 }
1631 else {
1632 mapptr = pager->map;
1633 for (i = 0; i < pager->size; i++ ) {
1634 block = mapptr[i];
1635 if ( ! no_block(block) )
1636 pager_dealloc_page(block.block.p_index,
1637 block.block.p_offset, TRUE);
1638 }
1639 kfree((char *)pager->map, PAGEMAP_SIZE(pager->size));
1640 pager->map = (dp_map_t) 0;
1641#ifdef CHECKSUM
1642 kfree((char *)pager->checksum, PAGEMAP_SIZE(pager->size));
1643#endif /* CHECKSUM */
1644 }
1645}
1646
1647/*
1648 * Move all the pages of a PAGER that live in a
1649 * partition PINDEX somewhere else.
1650 * Pager should be write-locked, partition too.
1651 * Returns FALSE if it could not do it, but
1652 * some pages might have been moved nonetheless.
1653 */
1654boolean_t
1655pager_realloc(pager, pindex)
1656 dpager_t pager;
1657 p_index_t pindex;
1658{
1659 dp_map_t map, emap;
1660 vm_size_t size;
1661 union dp_map block;
1662
1663 if (!pager->map)
1664 return TRUE;
1665
1666 size = pager->size; /* in pages */
1667 map = pager->map;
1668
1669 if (INDIRECT_PAGEMAP(size)) {
1670 for (emap = &map[INDIRECT_PAGEMAP_ENTRIES(size)];
1671 map < emap; map++) {
1672
1673 dp_map_t map2, emap2;
1674
1675 if ((map2 = map->indirect) == 0)
1676 continue;
1677
1678 for (emap2 = &map2[PAGEMAP_ENTRIES];
1679 map2 < emap2; map2++)
1680 if ( map2->block.p_index == pindex) {
1681
1682 block = pager_move_page(*map2);
1683 if (!no_block(block))
1684 *map2 = block;
1685 else
1686 return FALSE;
1687 }
1688
1689 }
1690 goto ok;
1691 }
1692
1693 /* A small one */
1694 for (emap = &map[size]; map < emap; map++)
1695 if (map->block.p_index == pindex) {
1696 block = pager_move_page(*map);
1697 if (!no_block(block))
1698 *map = block;
1699 else
1700 return FALSE;
1701 }
1702ok:
1703 pager->cur_partition = choose_partition(0, P_INDEX_INVALID);
1704 return TRUE;
1705}
1706
1707/*
1708
1709 */
1710
1711/*
1712 * Read/write routines.
1713 */
1714#define PAGER_SUCCESS 0
1715#define PAGER_ABSENT 1
1716#define PAGER_ERROR 2
1717
1718/*
1719 * Read data from a default pager. Addr is the address of a buffer
1720 * to fill. Out_addr returns the buffer that contains the data;
1721 * if it is different from <addr>, it must be deallocated after use.
1722 */
1723int
1724default_read(ds, addr, size, offset, out_addr, deallocate, external)
1725 dpager_t ds;
1726 vm_offset_t addr; /* pointer to block to fill */
1727 vm_size_t size;
1728 vm_offset_t offset;
1729 vm_offset_t *out_addr;
1730 /* returns pointer to data */
1731 boolean_t deallocate;
1732 boolean_t external;
1733{
1734 union dp_map block;
1735 vm_offset_t raddr;
1736 vm_size_t rsize;
1737 int rc;
1738 boolean_t first_time;
1739 partition_t part;
1740#ifdef CHECKSUM
1741 vm_size_t original_size = size;
1742#endif /* CHECKSUM */
1743 vm_offset_t original_offset = offset;
1744
1745 /*
1746 * Find the block in the paging partition
1747 */
1748 block = pager_read_offset(ds, offset);
1749 if ( no_block(block) ) {
1750 if (external) {
1751 /*
1752 * An external object is requesting unswapped data,
1753 * zero fill the page and return.
1754 */
1755 bzero((char *) addr, vm_page_size);
1756 *out_addr = addr;
1757 return (PAGER_SUCCESS);
1758 }
1759 return (PAGER_ABSENT);
1760 }
1761
1762 /*
1763 * Read it, trying for the entire page.
1764 */
1765 offset = ptoa(block.block.p_offset);
1766ddprintf ("default_read(%x,%x,%x,%d)\n",addr,size,offset,block.block.p_index);
1767 part = partition_of(block.block.p_index);
1768 first_time = TRUE;
1769 *out_addr = addr;
1770
1771 do {
1772 rc = page_read_file_direct(part->file,
1773 offset,
1774 size,
1775 &raddr,
1776 &rsize);
1777 if (rc != 0)
1778 return (PAGER_ERROR);
1779
1780 /*
1781 * If we got the entire page on the first read, return it.
1782 */
1783 if (first_time && rsize == size) {
1784 *out_addr = raddr;
1785 break;
1786 }
1787 /*
1788 * Otherwise, copy the data into the
1789 * buffer we were passed, and try for
1790 * the next piece.
1791 */
1792 first_time = FALSE;
1793 bcopy((char *)raddr, (char *)addr, rsize);
1794 addr += rsize;
1795 offset += rsize;
1796 size -= rsize;
1797 } while (size != 0);
1798
1799#if USE_PRECIOUS
1800 if (deallocate)
1801 pager_release_offset(ds, original_offset);
1802#endif /*USE_PRECIOUS*/
1803
1804#ifdef CHECKSUM
1805 {
1806 int write_checksum,
1807 read_checksum;
1808
1809 write_checksum = pager_get_checksum(ds, original_offset);
1810 read_checksum = compute_checksum(*out_addr, original_size);
1811 if (write_checksum != read_checksum) {
1812 panic(
1813 "PAGER CHECKSUM ERROR: offset 0x%x, written 0x%x, read 0x%x",
1814 original_offset, write_checksum, read_checksum);
1815 }
1816 }
1817#endif /* CHECKSUM */
1818 return (PAGER_SUCCESS);
1819}
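
A hypothetical caller of default_read() (illustration only; ds, offset, and the flag values are assumed), showing the contract from the comment above -- if the whole page arrives in one read, *out_addr points at a freshly returned buffer that must be deallocated after use; otherwise the caller's own buffer is filled in place:

    vm_offset_t buf, data;
    (void) vm_allocate(mach_task_self(), &buf, vm_page_size, TRUE);
    if (default_read(ds, buf, vm_page_size, offset,
                     &data, FALSE, FALSE) == PAGER_SUCCESS) {
        /* ... consume the page at data ... */
        if (data != buf)    /* page came back in the pager's own buffer */
            (void) vm_deallocate(mach_task_self(), data, vm_page_size);
    }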
1820
1821int
1822default_write(ds, addr, size, offset)
1823 dpager_t ds;
1824 vm_offset_t addr;
1825 vm_size_t size;
1826 vm_offset_t offset;
1827{
1828 union dp_map block;
1829 partition_t part;
1830 vm_size_t wsize;
1831 int rc;
1832
1833 ddprintf ("default_write: pager offset %x\n", offset);
1834
1835 /*
1836 * Find block in paging partition
1837 */
1838 block = pager_write_offset(ds, offset);
1839 if ( no_block(block) )
1840 return (PAGER_ERROR);
1841
1842#ifdef CHECKSUM
1843 /*
1844 * Save checksum
1845 */
1846 {
1847 int checksum;
1848
1849 checksum = compute_checksum(addr, size);
1850 pager_put_checksum(ds, offset, checksum);
1851 }
1852#endif /* CHECKSUM */
1853 offset = ptoa(block.block.p_offset);
1854ddprintf ("default_write(%x,%x,%x,%d)\n",addr,size,offset,block.block.p_index);
1855 part = partition_of(block.block.p_index);
1856
1857 /*
1858 * There are various assumptions made here: we
1859 * will not get into the next disk 'block' by
1860 * accident. It might well be non-contiguous.
1861 */
1862 do {
1863 rc = page_write_file_direct(part->file,
1864 offset,
1865 addr,
1866 size,
1867 &wsize);
1868 if (rc != 0) {
1869 dprintf("*** PAGER ERROR: default_write: ");
1870 dprintf("ds=0x%x addr=0x%x size=0x%x offset=0x%x resid=0x%x\n",
1871 ds, addr, size, offset, wsize);
1872 return (PAGER_ERROR);
1873 }
1874 addr += wsize;
1875 offset += wsize;
1876 size -= wsize;
1877 } while (size != 0);
1878 return (PAGER_SUCCESS);
1879}
1880
1881boolean_t
1882default_has_page(ds, offset)
1883 dpager_t ds;
1884 vm_offset_t offset;
1885{
1886 return ( ! no_block(pager_read_offset(ds, offset)) );
1887}
1888/*
1889
1890 */
1891
1892/*
1893 * Mapping between pager port and paging object.
1894 */
1895struct dstruct {
1896 queue_chain_t links; /* Link in pager-port list */
1897
1898 pthread_mutex_t lock; /* Lock for the structure */
1899 pthread_cond_t
1900 waiting_seqno, /* someone waiting on seqno */
1901 waiting_read, /* someone waiting on readers */
1902 waiting_write, /* someone waiting on writers */
1903 waiting_refs; /* someone waiting on refs */
1904
1905 memory_object_t pager; /* Pager port */
1906 mach_port_seqno_t seqno; /* Pager port sequence number */
1907 mach_port_t pager_request; /* Request port */
1908 mach_port_urefs_t request_refs; /* Request port user-refs */
1909 mach_port_t pager_name; /* Name port */
1910 mach_port_urefs_t name_refs; /* Name port user-refs */
1911 boolean_t external; /* Is an external object? */
1912
1913 unsigned int readers; /* Reads in progress */
1914 unsigned int writers; /* Writes in progress */
1915
1916 /* This is the reply port of an outstanding
1917 default_pager_object_set_size call. */
1918 mach_port_t lock_request;
1919
1920 unsigned int errors; /* Pageout error count */
1921 struct dpager dpager; /* Actual pager */
1922};
1923typedef struct dstruct * default_pager_t;
1924#define DEFAULT_PAGER_NULL ((default_pager_t)0)
1925
1926#if PARALLEL
1927#define dstruct_lock_init(ds) pthread_mutex_init(&ds->lock, NULL)
1928#define dstruct_lock(ds) pthread_mutex_lock(&ds->lock)
1929#define dstruct_unlock(ds) pthread_mutex_unlock(&ds->lock)
1930#else /* PARALLEL */
1931#define dstruct_lock_init(ds)
1932#define dstruct_lock(ds)
1933#define dstruct_unlock(ds)
1934#endif /* PARALLEL */
1935
1936/*
1937 * List of all pagers. A specific pager is
1938 * found directly via its port, this list is
1939 * only used for monitoring purposes by the
1940 * default_pager_object* calls
1941 */
1942struct pager_port {
1943 queue_head_t queue;
1944 pthread_mutex_t lock;
1945 int count; /* saves code */
1946 queue_head_t leak_queue;
1947} all_pagers;
1948
1949#define pager_port_list_init() \
1950{ \
1951	pthread_mutex_init(&all_pagers.lock, NULL); \
1952	queue_init(&all_pagers.queue); \
1953	queue_init(&all_pagers.leak_queue); \
1954	all_pagers.count = 0; \
1955}
1956
1957void pager_port_list_insert(port, ds)
1958 mach_port_t port;
1959 default_pager_t ds;
1960{
1961 pthread_mutex_lock(&all_pagers.lock);
1962	queue_enter(&all_pagers.queue, ds, default_pager_t, links);
1963 all_pagers.count++;
1964 pthread_mutex_unlock(&all_pagers.lock);
1965}
1966
1967/* given a data structure return a good port-name to associate it to */
1968#define pnameof(_x_)(((vm_offset_t)(_x_))+1) (((vm_offset_t)(_x_))+1)
1969/* reverse, assumes no-odd-pointers */
1970#define dnameof(_x_)(((vm_offset_t)(_x_))&~1) (((vm_offset_t)(_x_))&~1)
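Example: kalloc'ed dstructs are at least word-aligned, so their
addresses are even. For ds == 0x1000, pnameof(ds) yields the odd port
name 0x1001, and dnameof(0x1001) masks the low bit off again to recover
0x1000. pager_port_lookup below depends on this round trip, plus a check
that the recovered structure really owns the port.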
1971
1972/* The magic typecast */
1973#define pager_port_lookup(_port_) \
1974	((! MACH_PORT_VALID(_port_) || \
1975	  ((default_pager_t)dnameof(_port_))->pager != (_port_)) ? \
1976	 DEFAULT_PAGER_NULL : (default_pager_t)dnameof(_port_))
1977
1978void pager_port_list_delete(ds)
1979 default_pager_t ds;
1980{
1981 pthread_mutex_lock(&all_pagers.lock);
1982	queue_remove(&all_pagers.queue, ds, default_pager_t, links);
1983 all_pagers.count--;
1984 pthread_mutex_unlock(&all_pagers.lock);
1985}
1986
1987/*
1988 * Destroy a paging partition.
1989 * XXX this is not re-entrant XXX
1990 */
1991kern_return_t
1992destroy_paging_partition(name, pp_private)
1993 char *name;
1994 void **pp_private;
1995{
1996 unsigned int id = part_id(name);
1997 partition_t part;
1
Variable 'part' declared without an initial value
1998 boolean_t all_ok = TRUE((boolean_t) 1);
1999 default_pager_t entry;
2000 int pindex;
2001
2002 /*
2003 * Find and take partition out of list
2004 * This prevents choose_partition from
2005 * getting in the way.
2006 */
2007 pthread_mutex_lock(&all_partitions.lock);
2008 for (pindex = 0; pindex < all_partitions.n_partitions; pindex++) {
2
Loop condition is false. Execution continues on line 2012
2009 part = partition_of(pindex);
2010 if (part && (part->id == id)) break;
2011 }
2012 if (pindex == all_partitions.n_partitions) {
3
Taking false branch
2013 pthread_mutex_unlock(&all_partitions.lock);
2014 return KERN_INVALID_ARGUMENT4;
2015 }
2016 part->going_away = TRUE((boolean_t) 1);
4
Dereference of undefined pointer value
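The reported path requires the loop above never to run its body while
pindex != all_partitions.n_partitions still holds afterwards, leaving
part uninitialized at this dereference. A defensive rewrite, sketched
here (a hypothetical fix, not in the source), initializes part and keys
the not-found test on it instead of on pindex:

	partition_t	part = 0;	/* instead of the uninitialized decl */
	...
	for (pindex = 0; pindex < all_partitions.n_partitions; pindex++) {
		part = partition_of(pindex);
		if (part && (part->id == id))
			break;
		part = 0;		/* 'part' non-null only when found */
	}
	if (part == 0) {
		pthread_mutex_unlock(&all_partitions.lock);
		return KERN_INVALID_ARGUMENT;
	}
	part->going_away = TRUE;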
2017 pthread_mutex_unlock(&all_partitions.lock);
2018
2019 /*
2020 * This might take a while..
2021 */
2022all_over_again:
2023#if debug0
2024dprintf("Partition x%x (id x%x) for %s, all_ok %d\n", part, id, name, all_ok);
2025#endif
2026 all_ok = TRUE((boolean_t) 1);
2027 pthread_mutex_lock(&part->p_lock);
2028
2029 pthread_mutex_lock(&all_pagers.lock);
2030	queue_iterate(&all_pagers.queue, entry, default_pager_t, links) {
2031
2032 dstruct_lock(entry)pthread_mutex_lock(&entry->lock);
2033
2034 if (pthread_mutex_trylock(&entry->dpager.lock)) {
2035
2036 dstruct_unlock(entry)pthread_mutex_unlock(&entry->lock);
2037 pthread_mutex_unlock(&all_pagers.lock);
2038 pthread_mutex_unlock(&part->p_lock);
2039
2040 /* yield the processor */
2041 (void) thread_switch(MACH_PORT_NULL((mach_port_t) 0),
2042 SWITCH_OPTION_NONE0, 0);
2043
2044 goto all_over_again;
2045
2046 }
2047
2048 /*
2049 * See if we can relocate all the pages of this object
2050 * currently on this partition on some other partition
2051 */
2052 all_ok = pager_realloc(&entry->dpager, pindex);
2053
2054 pthread_mutex_unlock(&entry->dpager.lock);
2055 dstruct_unlock(entry)pthread_mutex_unlock(&entry->lock);
2056
2057 if (!all_ok) break;
2058
2059 }
2060 pthread_mutex_unlock(&all_pagers.lock);
2061
2062 if (all_ok) {
2063 /* No need to unlock partition, there are no refs left */
2064
2065 set_partition_of(pindex, 0);
2066 *pp_private = part->file;
2067 kfree(part->bitmap, howmany(part->total_size, NB_BM)(((part->total_size) + (32) - 1)/(32)) * sizeof(bm_entry_t));
2068 kfree(part, sizeof(struct part));
2069 dprintf("%s Removed paging partition %s\n", my_name, name);
2070 return KERN_SUCCESS0;
2071 }
2072
2073 /*
2074 * Put partition back in.
2075 */
2076 part->going_away = FALSE((boolean_t) 0);
2077
2078 return KERN_FAILURE5;
2079}
2080
2081
2082/*
2083 * We use the sequence numbers on requests to regulate
2084 * our parallelism. In general, we allow multiple reads and writes
2085 * to proceed in parallel, with the exception that reads must
2086 * wait for previous writes to finish. (Because the kernel might
2087 * generate a data-request for a page on the heels of a data-write
2088 * for the same page, and we must avoid returning stale data.)
2089	 * Terminate requests wait for in-progress reads and writes to finish.
2090 */
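A condensed sketch of the discipline the request handlers below follow,
using the helpers defined next (reads wait for writers, never the other
way around):

	pager_port_lock(ds, seqno);		/* serialize on port seqno */
	pager_port_wait_for_writers(ds);	/* reads must not pass writes */
	pager_port_start_read(ds);		/* ds->readers++ */
	pager_port_unlock(ds);			/* bump seqno, wake waiters */
	/* ... perform the actual disk read, unlocked ... */
	pager_port_finish_read(ds);		/* ds->readers--, wake waiters */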
2091
2092unsigned int default_pager_total = 0; /* debugging */
2093unsigned int default_pager_wait_seqno = 0; /* debugging */
2094unsigned int default_pager_wait_read = 0; /* debugging */
2095unsigned int default_pager_wait_write = 0; /* debugging */
2096unsigned int default_pager_wait_refs = 0; /* debugging */
2097
2098#if PARALLEL1
2099/*
2100 * Waits for correct sequence number. Leaves pager locked.
2101 */
2102void pager_port_lock(ds, seqno)
2103 default_pager_t ds;
2104 mach_port_seqno_t seqno;
2105{
2106 default_pager_total++;
2107 dstruct_lock(ds)pthread_mutex_lock(&ds->lock);
2108 while (ds->seqno != seqno) {
2109 default_pager_wait_seqno++;
2110 pthread_cond_wait(&ds->waiting_seqno, &ds->lock);
2111 }
2112}
2113
2114/*
2115 * Increments sequence number and unlocks pager.
2116 */
2117void pager_port_unlock(ds)
2118 default_pager_t ds;
2119{
2120 ds->seqno++;
2121 dstruct_unlock(ds)pthread_mutex_unlock(&ds->lock);
2122 pthread_cond_broadcast(&ds->waiting_seqno);
2123}
2124
2125/*
2126 * Start a read - one more reader. Pager must be locked.
2127 */
2128void pager_port_start_read(ds)
2129 default_pager_t ds;
2130{
2131 ds->readers++;
2132}
2133
2134/*
2135 * Wait for readers. Unlocks and relocks pager if wait needed.
2136 */
2137void pager_port_wait_for_readers(ds)
2138 default_pager_t ds;
2139{
2140 while (ds->readers != 0) {
2141 default_pager_wait_read++;
2142 pthread_cond_wait(&ds->waiting_read, &ds->lock);
2143 }
2144}
2145
2146/*
2147 * Finish a read. Pager is unlocked and returns unlocked.
2148 */
2149void pager_port_finish_read(ds)
2150 default_pager_t ds;
2151{
2152 dstruct_lock(ds)pthread_mutex_lock(&ds->lock);
2153 if (--ds->readers == 0) {
2154 dstruct_unlock(ds)pthread_mutex_unlock(&ds->lock);
2155 pthread_cond_broadcast(&ds->waiting_read);
2156 }
2157 else {
2158 dstruct_unlock(ds)pthread_mutex_unlock(&ds->lock);
2159 }
2160}
2161
2162/*
2163 * Start a write - one more writer. Pager must be locked.
2164 */
2165void pager_port_start_write(ds)
2166 default_pager_t ds;
2167{
2168 ds->writers++;
2169}
2170
2171/*
2172 * Wait for writers. Unlocks and relocks pager if wait needed.
2173 */
2174void pager_port_wait_for_writers(ds)
2175 default_pager_t ds;
2176{
2177 while (ds->writers != 0) {
2178 default_pager_wait_write++;
2179 pthread_cond_wait(&ds->waiting_write, &ds->lock);
2180 }
2181}
2182
2183/*
2184 * Finish a write. Pager is unlocked and returns unlocked.
2185 */
2186void pager_port_finish_write(ds)
2187 default_pager_t ds;
2188{
2189 dstruct_lock(ds)pthread_mutex_lock(&ds->lock);
2190 if (--ds->writers == 0) {
2191 dstruct_unlock(ds)pthread_mutex_unlock(&ds->lock);
2192 pthread_cond_broadcast(&ds->waiting_write);
2193 }
2194 else {
2195 dstruct_unlock(ds)pthread_mutex_unlock(&ds->lock);
2196 }
2197}
2198
2199/*
2200 * Wait for concurrent default_pager_objects.
2201 * Unlocks and relocks pager if wait needed.
2202 */
2203void pager_port_wait_for_refs(ds)
2204 default_pager_t ds;
2205{
2206 while (ds->name_refs == 0) {
2207 default_pager_wait_refs++;
2208 pthread_cond_wait(&ds->waiting_refs, &ds->lock);
2209 }
2210}
2211
2212/*
2213 * Finished creating name refs - wake up waiters.
2214 */
2215void pager_port_finish_refs(ds)
2216 default_pager_t ds;
2217{
2218 pthread_cond_broadcast(&ds->waiting_refs);
2219}
2220
2221#else /* PARALLEL */
2222
2223#define pager_port_lock(ds,seqno)
2224#define pager_port_unlock(ds)
2225#define pager_port_start_read(ds)
2226#define pager_port_wait_for_readers(ds)
2227#define pager_port_finish_read(ds)
2228#define pager_port_start_write(ds)
2229#define pager_port_wait_for_writers(ds)
2230#define pager_port_finish_write(ds)
2231#define pager_port_wait_for_refs(ds)
2232#define pager_port_finish_refs(ds)
2233
2234#endif /* PARALLEL */
2235
2236/*
2237 * Default pager.
2238 */
2239task_t default_pager_self; /* Our task port. */
2240
2241mach_port_t default_pager_default_port; /* Port for memory_object_create. */
2242
2243/* We catch exceptions on ourself & startup using this port. */
2244mach_port_t default_pager_exception_port;
2245
2246mach_port_t default_pager_internal_set; /* Port set for internal objects. */
2247mach_port_t default_pager_external_set; /* Port set for external objects. */
2248mach_port_t default_pager_default_set; /* Port set for "default" thread. */
2249
2250typedef struct default_pager_thread {
2251 pthread_t dpt_thread; /* Server thread. */
2252 vm_offset_t dpt_buffer; /* Read buffer. */
2253 boolean_t dpt_internal; /* Do we handle internal objects? */
2254} default_pager_thread_t;
2255
2256#if PARALLEL1
2257 /* determine number of threads at run time */
2258#define DEFAULT_PAGER_INTERNAL_COUNT(0) (0)
2259
2260#else /* PARALLEL */
2261#define DEFAULT_PAGER_INTERNAL_COUNT(0) (1)
2262#endif /* PARALLEL */
2263
2264/* Memory created by default_pager_object_create should mostly be resident. */
2265#define DEFAULT_PAGER_EXTERNAL_COUNT(1) (1)
2266
2267unsigned int default_pager_internal_count = DEFAULT_PAGER_INTERNAL_COUNT(0);
2268 /* Number of "internal" threads. */
2269unsigned int default_pager_external_count = DEFAULT_PAGER_EXTERNAL_COUNT(1);
2270 /* Number of "external" threads. */
2271
2272default_pager_t pager_port_alloc(size)
2273 vm_size_t size;
2274{
2275 default_pager_t ds;
2276 p_index_t part;
2277
2278 ds = (default_pager_t) kalloc(sizeof *ds);
2279 if (ds == DEFAULT_PAGER_NULL((default_pager_t)0))
2280 panic("%spager_port_alloc",my_name);
2281 bzero((char *) ds, sizeof *ds);
2282
2283 dstruct_lock_init(ds)pthread_mutex_init(&ds->lock, ((void*)0));
2284
2285 /*
2286	 * Get a suitable partition. If none is big enough,
2287	 * just pick one and overcommit. If there are no
2288	 * partitions at all, fake one, so that we will
2289	 * kill specific objects on pageout rather than
2290	 * panicking the system right away.
2291 */
2292 part = choose_partition(size, P_INDEX_INVALID((p_index_t)-1));
2293 if (no_partition(part)((part) == ((p_index_t)-1))) {
2294 overcommitted(FALSE((boolean_t) 0), atop(size)((size)/vm_page_size));
2295 part = choose_partition(0,P_INDEX_INVALID((p_index_t)-1));
2296#if debug0
2297 if (no_partition(part)((part) == ((p_index_t)-1)))
2298 dprintf("%s No paging space at all !!\n", my_name);
2299#endif
2300 }
2301 pager_alloc(&ds->dpager, part, size);
2302
2303 return ds;
2304}
2305
2306mach_port_urefs_t default_pager_max_urefs = 10000;
2307
2308/*
2309 * Check user reference count on pager_request port.
2310 * Pager must be locked.
2311 * Unlocks and re-locks pager if needs to call kernel.
2312 */
2313void pager_port_check_request(ds, pager_request)
2314 default_pager_t ds;
2315 mach_port_t pager_request;
2316{
2317 mach_port_delta_t delta;
2318 kern_return_t kr;
2319
2320	assert(ds->pager_request == pager_request);
2321
2322 if (++ds->request_refs > default_pager_max_urefs) {
2323 delta = 1 - ds->request_refs;
2324 ds->request_refs = 1;
2325
2326 dstruct_unlock(ds)pthread_mutex_unlock(&ds->lock);
2327
2328 /*
2329 * Deallocate excess user references.
2330 */
2331
2332 kr = mach_port_mod_refs(default_pager_self, pager_request,
2333 MACH_PORT_RIGHT_SEND((mach_port_right_t) 0), delta);
2334 if (kr != KERN_SUCCESS0)
2335 panic("%spager_port_check_request",my_name);
2336
2337 dstruct_lock(ds)pthread_mutex_lock(&ds->lock);
2338 }
2339}
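Worked example: with default_pager_max_urefs == 10000, the 10001st
request on a port enters the branch above with delta = 1 - 10001 =
-10000; a single mach_port_mod_refs call strips the 10000 excess
send-right urefs and request_refs restarts at 1, so the uref count stays
bounded without paying a kernel call per request.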
2340
2341void default_pager_add(ds, internal)
2342 default_pager_t ds;
2343 boolean_t internal;
2344{
2345 mach_port_t pager = ds->pager;
2346 mach_port_t pset;
2347 mach_port_mscount_t sync;
2348 mach_port_t previous;
2349 kern_return_t kr;
2350 static char here[] = "%sdefault_pager_add";
2351
2352 /*
2353 * The port currently has a make-send count of zero,
2354 * because either we just created the port or we just
2355 * received the port in a memory_object_create request.
2356 */
2357
2358 if (internal) {
2359 /* possibly generate an immediate no-senders notification */
2360 sync = 0;
2361 pset = default_pager_internal_set;
2362 ds->external = FALSE((boolean_t) 0);
2363 } else {
2364 /* delay notification till send right is created */
2365 sync = 1;
2366 pset = default_pager_external_set;
2367 ds->external = TRUE((boolean_t) 1);
2368 }
2369
2370 kr = mach_port_request_notification(default_pager_self, pager,
2371 MACH_NOTIFY_NO_SENDERS(0100 + 006), sync,
2372 pager, MACH_MSG_TYPE_MAKE_SEND_ONCE21,
2373 &previous);
2374 if ((kr != KERN_SUCCESS0) || (previous != MACH_PORT_NULL((mach_port_t) 0)))
2375 panic(here,my_name);
2376
2377 kr = mach_port_move_member(default_pager_self, pager, pset);
2378 if (kr != KERN_SUCCESS0)
2379 panic(here,my_name);
2380}
2381
2382/*
2383 * Routine: memory_object_create
2384 * Purpose:
2385 * Handle requests for memory objects from the
2386 * kernel.
2387 * Notes:
2388 * Because we only give out the default memory
2389 * manager port to the kernel, we don't have to
2390 * be so paranoid about the contents.
2391 */
2392kern_return_t
2393seqnos_memory_object_create(old_pager, seqno, new_pager, new_size,
2394 new_pager_request, new_pager_name, new_page_size)
2395 mach_port_t old_pager;
2396 mach_port_seqno_t seqno;
2397 mach_port_t new_pager;
2398 vm_size_t new_size;
2399 mach_port_t new_pager_request;
2400 mach_port_t new_pager_name;
2401 vm_size_t new_page_size;
2402{
2403 default_pager_t ds;
2404 kern_return_t kr;
2405
2406	assert(old_pager == default_pager_default_port);
2407	assert(MACH_PORT_VALID(new_pager_request));
2408	assert(MACH_PORT_VALID(new_pager_name));
2409	assert(new_page_size == vm_page_size);
2410
2411 ds = pager_port_alloc(new_size);
2412rename_it:
2413 kr = mach_port_rename( default_pager_self,
2414 new_pager, (mach_port_t)pnameof(ds)(((vm_offset_t)(ds))+1));
2415 if (kr != KERN_SUCCESS0) {
2416 default_pager_t ds1;
2417
2418 if (kr != KERN_NAME_EXISTS13)
2419 panic("%s m_o_create", my_name);
2420 ds1 = (default_pager_t) kalloc(sizeof *ds1);
2421 *ds1 = *ds;
2422 pthread_mutex_lock(&all_pagers.lock);
2423		queue_enter(&all_pagers.leak_queue, ds, default_pager_t, links);
2424 pthread_mutex_unlock(&all_pagers.lock);
2425 ds = ds1;
2426 goto rename_it;
2427 }
2428
2429 new_pager = (mach_port_t) pnameof(ds)(((vm_offset_t)(ds))+1);
2430
2431 /*
2432 * Set up associations between these ports
2433 * and this default_pager structure
2434 */
2435
2436 ds->pager = new_pager;
2437 ds->pager_request = new_pager_request;
2438 ds->request_refs = 1;
2439 ds->pager_name = new_pager_name;
2440 ds->name_refs = 1;
2441
2442 /*
2443 * After this, other threads might receive requests
2444 * for this memory object or find it in the port list.
2445 */
2446
2447 pager_port_list_insert(new_pager, ds);
2448 default_pager_add(ds, TRUE((boolean_t) 1));
2449
2450 return(KERN_SUCCESS0);
2451}
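Note on the rename_it loop: if mach_port_rename fails with
KERN_NAME_EXISTS, the name pnameof(ds) is already taken, so a fresh
dstruct is kalloc'ed (a different address, hence a different name), the
old one is parked on all_pagers.leak_queue, and the rename is retried.
default_pager_no_senders later drains leak_queue to reclaim the parked
structures.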
2452
2453memory_object_copy_strategy_t default_pager_copy_strategy =
2454 MEMORY_OBJECT_COPY_DELAY2;
2455
2456kern_return_t
2457seqnos_memory_object_init(pager, seqno, pager_request, pager_name,
2458 pager_page_size)
2459 mach_port_t pager;
2460 mach_port_seqno_t seqno;
2461 mach_port_t pager_request;
2462 mach_port_t pager_name;
2463 vm_size_t pager_page_size;
2464{
2465 default_pager_t ds;
2466 kern_return_t kr;
2467 static char here[] = "%sinit";
2468
2469	assert(MACH_PORT_VALID(pager_request));
2470	assert(MACH_PORT_VALID(pager_name));
2471	assert(pager_page_size == vm_page_size);
2472
2473	ds = pager_port_lookup(pager);
2474 if (ds == DEFAULT_PAGER_NULL((default_pager_t)0))
2475 panic(here, my_name);
2476 pager_port_lock(ds, seqno);
2477
2478 if (ds->pager_request != MACH_PORT_NULL((mach_port_t) 0))
2479 panic(here, my_name);
2480
2481 ds->pager_request = pager_request;
2482 ds->request_refs = 1;
2483 ds->pager_name = pager_name;
2484 ds->name_refs = 1;
2485
2486 /*
2487 * Even if the kernel immediately terminates the object,
2488 * the pager_request port won't be destroyed until
2489 * we process the terminate request, which won't happen
2490 * until we unlock the object.
2491 */
2492
2493 kr = memory_object_ready(pager_request,
2494 FALSE((boolean_t) 0), /* Do not cache */
2495 default_pager_copy_strategy);
2496 if (kr != KERN_SUCCESS0)
2497 panic(here, my_name);
2498
2499 pager_port_unlock(ds);
2500
2501 return(KERN_SUCCESS0);
2502}
2503
2504kern_return_t
2505seqnos_memory_object_terminate(pager, seqno, pager_request, pager_name)
2506 mach_port_t pager;
2507 mach_port_seqno_t seqno;
2508 mach_port_t pager_request;
2509 mach_port_t pager_name;
2510{
2511 default_pager_t ds;
2512 kern_return_t kr;
2513 static char here[] = "%sterminate";
2514
2515 /*
2516 * pager_request and pager_name are receive rights,
2517 * not send rights.
2518 */
2519
2520	ds = pager_port_lookup(pager);
2521 if (ds == DEFAULT_PAGER_NULL((default_pager_t)0))
2522 panic(here, my_name);
2523ddprintf ("seqnos_memory_object_terminate <%p>: pager_port_lock: <%p>[s:%d,r:%d,w:%d,l:%d], %d\n",
2524 &kr, ds, ds->seqno, ds->readers, ds->writers, ds->lock.held, seqno);
2525 pager_port_lock(ds, seqno);
2526
2527 /*
2528 * Wait for read and write requests to terminate.
2529 */
2530
2531 pager_port_wait_for_readers(ds);
2532 pager_port_wait_for_writers(ds);
2533
2534 /*
2535 * After memory_object_terminate both memory_object_init
2536 * and a no-senders notification are possible, so we need
2537 * to clean up the request and name ports but leave
2538 * the pager port.
2539 *
2540 * A concurrent default_pager_objects might be allocating
2541 * more references for the name port. In this case,
2542 * we must first wait for it to finish.
2543 */
2544
2545 pager_port_wait_for_refs(ds);
2546
2547 if (ds->external)
2548 pager_request = ds->pager_request;
2549 ds->pager_request = MACH_PORT_NULL((mach_port_t) 0);
2550 ds->request_refs = 0;
2551	assert(ds->pager_name == pager_name);
2552 ds->pager_name = MACH_PORT_NULL((mach_port_t) 0);
2553 ds->name_refs = 0;
2554ddprintf ("seqnos_memory_object_terminate <%p>: pager_port_unlock: <%p>[s:%d,r:%d,w:%d,l:%d]\n",
2555 &kr, ds, ds->seqno, ds->readers, ds->writers, ds->lock.held);
2556 pager_port_unlock(ds);
2557
2558 /*
2559 * Now we destroy our port rights.
2560 */
2561
2562 mach_port_destroy(mach_task_self()((__mach_task_self_ + 0)), pager_request);
2563 mach_port_destroy(mach_task_self()((__mach_task_self_ + 0)), pager_name);
2564
2565 return (KERN_SUCCESS0);
2566}
2567
2568void default_pager_no_senders(pager, seqno, mscount)
2569 memory_object_t pager;
2570 mach_port_seqno_t seqno;
2571 mach_port_mscount_t mscount;
2572{
2573 default_pager_t ds;
2574 kern_return_t kr;
2575 static char here[] = "%sno_senders";
2576
2577 /*
2578 * Because we don't give out multiple send rights
2579 * for a memory object, there can't be a race
2580 * between getting a no-senders notification
2581 * and creating a new send right for the object.
2582 * Hence we don't keep track of mscount.
2583 */
2584
2585
2586	ds = pager_port_lookup(pager);
2587 if (ds == DEFAULT_PAGER_NULL((default_pager_t)0))
2588 panic(here,my_name);
2589 pager_port_lock(ds, seqno);
2590
2591 /*
2592 * We shouldn't get a no-senders notification
2593 * when the kernel has the object cached.
2594 */
2595
2596 if (ds->pager_request != MACH_PORT_NULL((mach_port_t) 0))
2597 panic(here,my_name);
2598
2599 /*
2600 * Unlock the pager (though there should be no one
2601 * waiting for it).
2602 */
2603 dstruct_unlock(ds)pthread_mutex_unlock(&ds->lock);
2604
2605 /*
2606	 * Remove the memory object port association, and then
2607	 * destroy the port itself. We must remove the object
2608 * from the port list before deallocating the pager,
2609 * because of default_pager_objects.
2610 */
2611
2612 pager_port_list_delete(ds);
2613 pager_dealloc(&ds->dpager);
2614
2615 kr = mach_port_mod_refs(default_pager_self, pager,
2616 MACH_PORT_RIGHT_RECEIVE((mach_port_right_t) 1), -1);
2617 if (kr != KERN_SUCCESS0)
2618 panic(here,my_name);
2619
2620 /*
2621 * Do this *after* deallocating the port name
2622 */
2623 kfree((char *) ds, sizeof(*ds));
2624
2625 /*
2626 * Recover memory that we might have wasted because
2627 * of name conflicts
2628 */
2629 pthread_mutex_lock(&all_pagers.lock);
2630
2631	while (!queue_empty(&all_pagers.leak_queue)) {
2632
2633 ds = (default_pager_t) queue_first(&all_pagers.leak_queue)((&all_pagers.leak_queue)->next);
2634		queue_remove_first(&all_pagers.leak_queue, ds, default_pager_t, links);
2635 kfree((char *) ds, sizeof(*ds));
2636 }
2637
2638 pthread_mutex_unlock(&all_pagers.lock);
2639}
2640
2641int default_pager_pagein_count = 0;
2642int default_pager_pageout_count = 0;
2643
2644static __thread default_pager_thread_t *dpt;
2645
2646kern_return_t
2647seqnos_memory_object_data_request(pager, seqno, reply_to, offset,
2648 length, protection_required)
2649 memory_object_t pager;
2650 mach_port_seqno_t seqno;
2651 mach_port_t reply_to;
2652 vm_offset_t offset;
2653 vm_size_t length;
2654 vm_prot_t protection_required;
2655{
2656 default_pager_t ds;
2657 vm_offset_t addr;
2658 unsigned int errors;
2659 kern_return_t rc;
2660 static char here[] = "%sdata_request";
2661
2662 if (length != vm_page_size)
2663 panic(here,my_name);
2664
2665	ds = pager_port_lookup(pager);
2666 if (ds == DEFAULT_PAGER_NULL((default_pager_t)0))
2667 panic(here,my_name);
2668ddprintf ("seqnos_memory_object_data_request <%p>: pager_port_lock: <%p>[s:%d,r:%d,w:%d,l:%d], %d\n",
2669 &ds, ds, ds->seqno, ds->readers, ds->writers, ds->lock.held, seqno);
2670 pager_port_lock(ds, seqno);
2671 pager_port_check_request(ds, reply_to);
2672 pager_port_wait_for_writers(ds);
2673 pager_port_start_read(ds);
2674
2675 /*
2676 * Get error count while pager locked.
2677 */
2678 errors = ds->errors;
2679
2680ddprintf ("seqnos_memory_object_data_request <%p>: pager_port_unlock: <%p>[s:%d,r:%d,w:%d,l:%d]\n",
2681 &ds, ds, ds->seqno, ds->readers, ds->writers, ds->lock.held);
2682 pager_port_unlock(ds);
2683
2684 if (errors) {
2685 dprintf("%s %s\n", my_name,
2686 "dropping data_request because of previous paging errors");
2687 (void) memory_object_data_error(reply_to,
2688 offset, vm_page_size,
2689 KERN_FAILURE5);
2690 goto done;
2691 }
2692
2693 if (offset >= ds->dpager.limit)
2694 rc = PAGER_ERROR2;
2695 else
2696 rc = default_read(&ds->dpager, dpt->dpt_buffer,
2697 vm_page_size, offset,
2698 &addr, protection_required & VM_PROT_WRITE((vm_prot_t) 0x02),
2699 ds->external);
2700
2701 switch (rc) {
2702 case PAGER_SUCCESS0:
2703 if (addr != dpt->dpt_buffer) {
2704 /*
2705 * Deallocates data buffer
2706 */
2707 (void) memory_object_data_supply(
2708 reply_to, offset,
2709 addr, vm_page_size, TRUE((boolean_t) 1),
2710 VM_PROT_NONE((vm_prot_t) 0x00),
2711 FALSE((boolean_t) 0), MACH_PORT_NULL((mach_port_t) 0));
2712 } else {
2713 (void) memory_object_data_supply(
2714 reply_to, offset,
2715 addr, vm_page_size, FALSE((boolean_t) 0),
2716 VM_PROT_NONE((vm_prot_t) 0x00),
2717 FALSE((boolean_t) 0), MACH_PORT_NULL((mach_port_t) 0));
2718 }
2719 break;
2720
2721 case PAGER_ABSENT1:
2722 (void) memory_object_data_unavailable(
2723 reply_to,
2724 offset,
2725 vm_page_size);
2726 break;
2727
2728 case PAGER_ERROR2:
2729 (void) memory_object_data_error(
2730 reply_to,
2731 offset,
2732 vm_page_size,
2733 KERN_FAILURE5);
2734 break;
2735 }
2736
2737 default_pager_pagein_count++;
2738
2739 done:
2740 pager_port_finish_read(ds);
2741 return(KERN_SUCCESS0);
2742}
2743
2744/*
2745 * memory_object_data_initialize: check whether we already have each page, and
2746 * write it if we do not. The implementation is far from optimized, and
2747 * also assumes that the default_pager is single-threaded.
2748 */
2749kern_return_t
2750seqnos_memory_object_data_initialize(pager, seqno, pager_request,
2751 offset, addr, data_cnt)
2752 memory_object_t pager;
2753 mach_port_seqno_t seqno;
2754 mach_port_t pager_request;
2755 register
2756 vm_offset_t offset;
2757 register
2758 pointer_t addr;
2759 vm_size_t data_cnt;
2760{
2761 vm_offset_t amount_sent;
2762 default_pager_t ds;
2763 static char here[] = "%sdata_initialize";
2764
2765#ifdef lint
2766 pager_request++;
2767#endif /* lint */
2768
2769	ds = pager_port_lookup(pager);
2770 if (ds == DEFAULT_PAGER_NULL((default_pager_t)0))
2771 panic(here,my_name);
2772ddprintf ("seqnos_memory_object_data_initialize <%p>: pager_port_lock: <%p>[s:%d,r:%d,w:%d,l:%d], %d\n",
2773 &ds, ds, ds->seqno, ds->readers, ds->writers, ds->lock.held, seqno);
2774 pager_port_lock(ds, seqno);
2775 pager_port_check_request(ds, pager_request);
2776 pager_port_start_write(ds);
2777ddprintf ("seqnos_memory_object_data_initialize <%p>: pager_port_unlock: <%p>[s:%d,r:%d,w:%d,l:%d]\n",
2778 &ds, ds, ds->seqno, ds->readers, ds->writers, ds->lock.held);
2779 pager_port_unlock(ds);
2780
2781 for (amount_sent = 0;
2782 amount_sent < data_cnt;
2783 amount_sent += vm_page_size) {
2784
2785 if (!default_has_page(&ds->dpager, offset + amount_sent)) {
2786 if (default_write(&ds->dpager,
2787 addr + amount_sent,
2788 vm_page_size,
2789 offset + amount_sent)
2790 != PAGER_SUCCESS0) {
2791 dprintf("%s%s write error\n", my_name, here);
2792 dstruct_lock(ds)pthread_mutex_lock(&ds->lock);
2793 ds->errors++;
2794 dstruct_unlock(ds)pthread_mutex_unlock(&ds->lock);
2795 }
2796 }
2797 }
2798
2799 pager_port_finish_write(ds);
2800 if (vm_deallocate(default_pager_self, addr, data_cnt) != KERN_SUCCESS0)
2801 panic(here,my_name);
2802
2803 return(KERN_SUCCESS0);
2804}
2805
2806/*
2807 * memory_object_data_write: split up the stuff coming in from
2808 * a memory_object_data_write call
2809 * into individual pages and pass them off to default_write.
2810 */
2811kern_return_t
2812seqnos_memory_object_data_write(pager, seqno, pager_request,
2813 offset, addr, data_cnt)
2814 memory_object_t pager;
2815 mach_port_seqno_t seqno;
2816 mach_port_t pager_request;
2817 register
2818 vm_offset_t offset;
2819 register
2820 pointer_t addr;
2821 vm_size_t data_cnt;
2822{
2823 register
2824 vm_size_t amount_sent;
2825 default_pager_t ds;
2826 static char here[] = "%sdata_write";
2827 int err;
2828
2829#ifdef lint
2830 pager_request++;
2831#endif /* lint */
2832
2833 if ((data_cnt % vm_page_size) != 0)
2834 panic(here,my_name);
2835
2836	ds = pager_port_lookup(pager);
2837 if (ds == DEFAULT_PAGER_NULL((default_pager_t)0))
2838 panic(here,my_name);
2839
2840 pager_port_lock(ds, seqno);
2841 pager_port_start_write(ds);
2842
2843 vm_size_t limit = ds->dpager.byte_limit;
2844 pager_port_unlock(ds);
2845	if ((limit != round_page(limit)) && (trunc_page(limit) == offset)) {
2846		assert(trunc_page(limit) == offset);
2847		assert(data_cnt == vm_page_size);
2848
2849		vm_offset_t tail = addr + limit - trunc_page(limit);
2850		vm_size_t tail_size = round_page(limit) - limit;
2851 memset((void *) tail, 0, tail_size);
2852
2853 unsigned *arr = (unsigned *)addr;
2854		memory_object_data_supply(pager_request, trunc_page(limit), addr,
2855 vm_page_size, TRUE((boolean_t) 1), VM_PROT_NONE((vm_prot_t) 0x00),
2856 TRUE((boolean_t) 1), MACH_PORT_NULL((mach_port_t) 0));
2857 dstruct_lock(ds)pthread_mutex_lock(&ds->lock);
2858		ds->dpager.byte_limit = round_page(limit);
2859 dstruct_unlock(ds)pthread_mutex_unlock(&ds->lock);
2860 pager_port_finish_write(ds);
2861
2862 return(KERN_SUCCESS0);
2863 }
2864
2865 for (amount_sent = 0;
2866 amount_sent < data_cnt;
2867 amount_sent += vm_page_size) {
2868
2869 int result;
2870
2871 result = default_write(&ds->dpager,
2872 addr + amount_sent,
2873 vm_page_size,
2874 offset + amount_sent);
2875 if (result != KERN_SUCCESS0) {
2876 dstruct_lock(ds)pthread_mutex_lock(&ds->lock);
2877 ds->errors++;
2878 dstruct_unlock(ds)pthread_mutex_unlock(&ds->lock);
2879 }
2880 default_pager_pageout_count++;
2881 }
2882
2883 pager_port_finish_write(ds);
2884 err = vm_deallocate(default_pager_self, addr, data_cnt);
2885 if (err != KERN_SUCCESS0)
2886 {
2887 panic(here,my_name);
2888 }
2889
2890 return(KERN_SUCCESS0);
2891}
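Worked example for the byte_limit branch above: with vm_page_size ==
0x1000 and byte_limit == 0x1200, a write for the page at offset
trunc_page(0x1200) == 0x1000 zeroes everything past the limit (tail =
addr + 0x200, tail_size = round_page(0x1200) - 0x1200 = 0xe00), hands
the cleaned page straight back with memory_object_data_supply instead
of writing it to disk, and rounds byte_limit up to the page boundary.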
2892
2893/*ARGSUSED*/
2894kern_return_t
2895seqnos_memory_object_copy(old_memory_object, seqno, old_memory_control,
2896 offset, length, new_memory_object)
2897 memory_object_t old_memory_object;
2898 mach_port_seqno_t seqno;
2899 memory_object_control_t
2900 old_memory_control;
2901 vm_offset_t offset;
2902 vm_size_t length;
2903 memory_object_t new_memory_object;
2904{
2905 panic("%scopy", my_name);
2906 return KERN_FAILURE5;
2907}
2908
2909/* We get this when our memory_object_lock_request has completed
2910 after we truncated an object. */
2911kern_return_t
2912seqnos_memory_object_lock_completed (memory_object_t pager,
2913 mach_port_seqno_t seqno,
2914 mach_port_t pager_request,
2915 vm_offset_t offset,
2916 vm_size_t length)
2917{
2918 panic("%slock_completed",my_name);
2919 return KERN_FAILURE5;
2920}
2921
2922kern_return_t
2923seqnos_memory_object_data_unlock(pager, seqno, pager_request,
2924 offset, addr, data_cnt)
2925 memory_object_t pager;
2926 mach_port_seqno_t seqno;
2927 mach_port_t pager_request;
2928 vm_offset_t offset;
2929 pointer_t addr;
2930 vm_size_t data_cnt;
2931{
2932 panic("%sdata_unlock",my_name);
2933 return(KERN_FAILURE5);
2934}
2935
2936kern_return_t
2937seqnos_memory_object_supply_completed(pager, seqno, pager_request,
2938 offset, length,
2939 result, error_offset)
2940 memory_object_t pager;
2941 mach_port_seqno_t seqno;
2942 mach_port_t pager_request;
2943 vm_offset_t offset;
2944 vm_size_t length;
2945 kern_return_t result;
2946 vm_offset_t error_offset;
2947{
2948 panic("%ssupply_completed",my_name);
2949 return(KERN_FAILURE5);
2950}
2951
2952/*
2953 * memory_object_data_return: split up the stuff coming in from
2954 * a memory_object_data_write call
2955 * into individual pages and pass them off to default_write.
2956 */
2957kern_return_t
2958seqnos_memory_object_data_return(pager, seqno, pager_request,
2959 offset, addr, data_cnt,
2960 dirty, kernel_copy)
2961 memory_object_t pager;
2962 mach_port_seqno_t seqno;
2963 mach_port_t pager_request;
2964 vm_offset_t offset;
2965 pointer_t addr;
2966 vm_size_t data_cnt;
2967 boolean_t dirty;
2968 boolean_t kernel_copy;
2969{
2970
2971 return seqnos_memory_object_data_write (pager, seqno, pager_request,
2972 offset, addr, data_cnt);
2973}
2974
2975kern_return_t
2976seqnos_memory_object_change_completed(pager, seqno, may_cache, copy_strategy)
2977 memory_object_t pager;
2978 mach_port_seqno_t seqno;
2979 boolean_t may_cache;
2980 memory_object_copy_strategy_t copy_strategy;
2981{
2982 panic("%schange_completed",my_name);
2983 return(KERN_FAILURE5);
2984}
2985
2986
2987boolean_t default_pager_notify_server(in, out)
2988 mach_msg_header_t *in, *out;
2989{
2990 mach_no_senders_notification_t *n =
2991 (mach_no_senders_notification_t *) in;
2992
2993 /*
2994 * The only send-once rights we create are for
2995 * receiving no-more-senders notifications.
2996 * Hence, if we receive a message directed to
2997 * a send-once right, we can assume it is
2998 * a genuine no-senders notification from the kernel.
2999 */
3000
3001 if ((n->not_header.msgh_bits !=
3002 MACH_MSGH_BITS(0, MACH_MSG_TYPE_PORT_SEND_ONCE)((0) | ((18) << 8))) ||
3003 (n->not_header.msgh_id != MACH_NOTIFY_NO_SENDERS(0100 + 006)))
3004 return FALSE((boolean_t) 0);
3005
3006	assert(n->not_header.msgh_size == sizeof *n);
3007	assert(n->not_header.msgh_remote_port == MACH_PORT_NULL);
3008
3009	assert(n->not_type.msgt_name == MACH_MSG_TYPE_INTEGER_32);
3010	assert(n->not_type.msgt_size == 32);
3011	assert(n->not_type.msgt_number == 1);
3012	assert(n->not_type.msgt_inline);
3013	assert(! n->not_type.msgt_longform);
3014
3015 default_pager_no_senders(n->not_header.msgh_local_port,
3016 n->not_header.msgh_seqno, n->not_count);
3017
3018 out->msgh_remote_port = MACH_PORT_NULL((mach_port_t) 0);
3019 return TRUE((boolean_t) 1);
3020}
3021
3022extern boolean_t seqnos_memory_object_server();
3023extern boolean_t seqnos_memory_object_default_server();
3024extern boolean_t default_pager_server();
3025extern boolean_t exc_server();
3026extern boolean_t bootstrap_server();
3027extern void bootstrap_compat();
3028
3029mach_msg_size_t default_pager_msg_size_object = 128;
3030
3031boolean_t
3032default_pager_demux_object(in, out)
3033 mach_msg_header_t *in;
3034 mach_msg_header_t *out;
3035{
3036 /*
3037 * We receive memory_object_data_initialize messages in
3038 * the memory_object_default interface.
3039 */
3040
3041int rval;
3042ddprintf ("DPAGER DEMUX OBJECT <%p>: %d\n", in, in->msgh_id);
3043rval =
3044 (seqnos_memory_object_server(in, out) ||
3045 seqnos_memory_object_default_server(in, out) ||
3046 default_pager_notify_server(in, out) ||
3047 default_pager_server(in, out));
3048ddprintf ("DPAGER DEMUX OBJECT DONE <%p>: %d\n", in, in->msgh_id);
3049return rval;
3050}
3051
3052mach_msg_size_t default_pager_msg_size_default = 8 * 1024;
3053
3054boolean_t
3055default_pager_demux_default(in, out)
3056 mach_msg_header_t *in;
3057 mach_msg_header_t *out;
3058{
3059 if (in->msgh_local_port == default_pager_default_port) {
3060 /*
3061 * We receive memory_object_create messages in
3062 * the memory_object_default interface.
3063 */
3064
3065int rval;
3066ddprintf ("DPAGER DEMUX DEFAULT <%p>: %d\n", in, in->msgh_id);
3067rval =
3068 (seqnos_memory_object_default_server(in, out) ||
3069 default_pager_server(in, out));
3070ddprintf ("DPAGER DEMUX DEFAULT DONE <%p>: %d\n", in, in->msgh_id);
3071return rval;
3072 } else if (in->msgh_local_port == default_pager_exception_port) {
3073 /*
3074 * We receive exception messages for
3075 * ourself and the startup task.
3076 */
3077
3078 return exc_server(in, out);
3079 } else {
3080 panic(my_name);
3081 return FALSE((boolean_t) 0);
3082 }
3083}
3084
3085/*
3086 * We use multiple threads, for two reasons.
3087 *
3088 * First, memory objects created by default_pager_object_create
3089 * are "external", instead of "internal". This means the kernel
3090	 * sends data (memory_object_data_write) to the object as pageable memory.
3091 * To prevent deadlocks, the external and internal objects must
3092 * be managed by different threads.
3093 *
3094 * Second, the default pager uses synchronous IO operations.
3095 * Spreading requests across multiple threads should
3096 * recover some of the performance loss from synchronous IO.
3097 *
3098 * We have 3+ threads.
3099 * One receives memory_object_create and
3100 * default_pager_object_create requests.
3101 * One or more manage internal objects.
3102 * One or more manage external objects.
3103 */
3104
3105void
3106default_pager_thread_privileges()
3107{
3108 /*
3109 * Set thread privileges.
3110 */
3111 wire_thread(); /* grab a kernel stack and memory allocation
3112 privileges */
3113}
3114
3115void *
3116default_pager_default_thread(void *arg)
3117{
3118 kern_return_t kr;
3119 default_pager_thread_privileges ();
3120 for (;;) {
3121 kr = mach_msg_server(default_pager_demux_default,
3122 default_pager_msg_size_default,
3123 default_pager_default_set);
3124 panic(my_name, kr);
3125 }
3126}
3127
3128
3129
3130void *
3131default_pager_thread(void *arg)
3132{
3133 mach_port_t pset;
3134 kern_return_t kr;
3135
3136 dpt = (default_pager_thread_t *) arg;
3137
3138 /*
3139 * Threads handling external objects cannot have
3140 * privileges. Otherwise a burst of data-requests for an
3141 * external object could empty the free-page queue,
3142 * because the fault code only reserves real pages for
3143 * requests sent to internal objects.
3144 */
3145
3146 if (dpt->dpt_internal) {
3147 default_pager_thread_privileges();
3148 pset = default_pager_internal_set;
3149 } else {
3150 pset = default_pager_external_set;
3151 }
3152
3153 for (;;) {
3154 kr = mach_msg_server(default_pager_demux_object,
3155 default_pager_msg_size_object,
3156 pset);
3157 panic(my_name, kr);
3158 }
3159}
3160
3161void
3162start_default_pager_thread(internal)
3163 boolean_t internal;
3164{
3165 default_pager_thread_t *ndpt;
3166 kern_return_t kr;
3167 error_t err;
3168
3169 ndpt = (default_pager_thread_t *) kalloc(sizeof *ndpt);
3170 if (ndpt == 0)
3171 panic(my_name);
3172
3173 ndpt->dpt_internal = internal;
3174
3175 kr = vm_allocate(default_pager_self, &ndpt->dpt_buffer,
3176 vm_page_size, TRUE((boolean_t) 1));
3177 if (kr != KERN_SUCCESS0)
3178 panic(my_name);
3179 wire_memory(ndpt->dpt_buffer, vm_page_size,
3180 VM_PROT_READ((vm_prot_t) 0x01)|VM_PROT_WRITE((vm_prot_t) 0x02));
3181
3182 err = pthread_create(&ndpt->dpt_thread, NULL((void*)0), default_pager_thread,
3183 ndpt);
3184 if (!err)
3185 pthread_detach (ndpt->dpt_thread);
3186 else {
3187 errno(*__errno_location ()) = err;
3188 perror ("pthread_create");
3189 }
3190}
3191
3192void
3193default_pager_initialize(host_port)
3194 mach_port_t host_port;
3195{
3196 memory_object_t DMM;
3197 kern_return_t kr;
3198
3199 /*
3200 * This task will become the default pager.
3201 */
3202 default_pager_self = mach_task_self()((__mach_task_self_ + 0));
3203
3204 /*
3205 * Initialize the "default pager" port.
3206 */
3207 kr = mach_port_allocate(default_pager_self, MACH_PORT_RIGHT_RECEIVE((mach_port_right_t) 1),
3208 &default_pager_default_port);
3209 if (kr != KERN_SUCCESS0)
3210 panic(my_name);
3211
3212 DMM = default_pager_default_port;
3213 kr = vm_set_default_memory_manager(host_port, &DMM);
3214	if ((kr != KERN_SUCCESS) || MACH_PORT_VALID(DMM))
3215 panic(my_name);
3216
3217 /*
3218 * Initialize the exception port.
3219 */
3220 kr = mach_port_allocate(default_pager_self, MACH_PORT_RIGHT_RECEIVE((mach_port_right_t) 1),
3221 &default_pager_exception_port);
3222 if (kr != KERN_SUCCESS0)
3223 panic(my_name);
3224
3225 /*
3226 * Arrange for wiring privileges.
3227 */
3228 wire_setup(host_port);
3229
3230 /*
3231 * Find out how many CPUs we have, to determine the number
3232 * of threads to create.
3233 */
3234 if (default_pager_internal_count == 0) {
3235 host_basic_info_data_t h_info;
3236 natural_t h_info_count;
3237
3238 h_info_count = HOST_BASIC_INFO_COUNT(sizeof(host_basic_info_data_t)/sizeof(integer_t));
3239 (void) host_info(host_port, HOST_BASIC_INFO1,
3240 (host_info_t)&h_info, &h_info_count);
3241
3242 /*
3243 * Random computation to get more parallelism on
3244 * multiprocessors.
3245 */
3246 default_pager_internal_count =
3247 (h_info.avail_cpus > 32 ? 32 : h_info.avail_cpus) / 4 + 3;
3248 }
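	/*
	 * Example: a uniprocessor gets 1/4 + 3 = 3 internal threads,
	 * a 4-CPU host 4/4 + 3 = 4, and the cap keeps any larger
	 * machine at no more than 32/4 + 3 = 11.
	 */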
3249}
3250
3251/*
3252 * Initialize and Run the default pager
3253 */
3254void
3255default_pager()
3256{
3257 kern_return_t kr;
3258 int i;
3259
3260 default_pager_thread_privileges();
3261
3262 /*
3263 * Wire down code, data, stack
3264 */
3265 wire_all_memory();
3266
3267
3268 /*
3269 * Initialize the list of all pagers.
3270 */
3271	pager_port_list_init();
3272
3273 kr = mach_port_allocate(default_pager_self, MACH_PORT_RIGHT_PORT_SET((mach_port_right_t) 3),
3274 &default_pager_internal_set);
3275 if (kr != KERN_SUCCESS0)
3276 panic(my_name);
3277
3278 kr = mach_port_allocate(default_pager_self, MACH_PORT_RIGHT_PORT_SET((mach_port_right_t) 3),
3279 &default_pager_external_set);
3280 if (kr != KERN_SUCCESS0)
3281 panic(my_name);
3282
3283 kr = mach_port_allocate(default_pager_self, MACH_PORT_RIGHT_PORT_SET((mach_port_right_t) 3),
3284 &default_pager_default_set);
3285 if (kr != KERN_SUCCESS0)
3286 panic(my_name);
3287
3288 kr = mach_port_move_member(default_pager_self,
3289 default_pager_default_port,
3290 default_pager_default_set);
3291 if (kr != KERN_SUCCESS0)
3292 panic(my_name);
3293
3294 kr = mach_port_move_member(default_pager_self,
3295 default_pager_exception_port,
3296 default_pager_default_set);
3297 if (kr != KERN_SUCCESS0)
3298 panic(my_name);
3299
3300 /*
3301 * Now we create the threads that will actually
3302 * manage objects.
3303 */
3304
3305 for (i = 0; i < default_pager_internal_count; i++)
3306 start_default_pager_thread(TRUE((boolean_t) 1));
3307
3308 for (i = 0; i < default_pager_external_count; i++)
3309 start_default_pager_thread(FALSE((boolean_t) 0));
3310
3311 default_pager_default_thread(0); /* Become the default_pager server */
3312#if 0
3313 cthread_fork (default_pager_default_thread, 0);
3314 /* cthread_exit (cthread_self ()); */
3315 thread_suspend (mach_thread_self ());
3316#endif
3317}
3318
3319/*
3320 * Create an external object.
3321 */
3322kern_return_t
3323S_default_pager_object_create (mach_port_t pager,
3324 mach_port_t *mem_obj,
3325 vm_size_t size)
3326{
3327 default_pager_t ds;
3328 mach_port_t port;
3329 kern_return_t result;
3330
3331 if (pager != default_pager_default_port)
3332 return KERN_INVALID_ARGUMENT4;
3333
3334 ds = pager_port_alloc(size);
3335rename_it:
3336 port = (mach_port_t) pnameof(ds)(((vm_offset_t)(ds))+1);
3337 result = mach_port_allocate_name(default_pager_self,
3338 MACH_PORT_RIGHT_RECEIVE((mach_port_right_t) 1), port);
3339 if (result != KERN_SUCCESS0) {
3340 default_pager_t ds1;
3341
3342 if (result != KERN_NAME_EXISTS13) return (result);
3343
3344 ds1 = (default_pager_t) kalloc(sizeof *ds1);
3345 *ds1 = *ds;
3346 pthread_mutex_lock(&all_pagers.lock);
3347		queue_enter(&all_pagers.leak_queue, ds, default_pager_t, links);
3348 pthread_mutex_unlock(&all_pagers.lock);
3349 ds = ds1;
3350 goto rename_it;
3351 }
3352
3353 /*
3354 * Set up associations between these ports
3355 * and this default_pager structure
3356 */
3357
3358 ds->pager = port;
3359 ds->dpager.limit = size;
3360 pager_port_list_insert(port, ds);
3361 default_pager_add(ds, FALSE((boolean_t) 0));
3362
3363 *mem_obj = port;
3364 return (KERN_SUCCESS0);
3365}
3366
3367kern_return_t
3368S_default_pager_info (mach_port_t pager,
3369 default_pager_info_t *infop)
3370{
3371 vm_size_t total, free;
3372
3373 if (pager != default_pager_default_port)
3374 return KERN_INVALID_ARGUMENT4;
3375
3376 pthread_mutex_lock(&all_partitions.lock);
3377 paging_space_info(&total, &free);
3378 pthread_mutex_unlock(&all_partitions.lock);
3379
3380 infop->dpi_total_space = ptoa(total)((total)*vm_page_size);
3381 infop->dpi_free_space = ptoa(free)((free)*vm_page_size);
3382 infop->dpi_page_size = vm_page_size;
3383 return KERN_SUCCESS0;
3384}
3385
3386kern_return_t
3387S_default_pager_objects (mach_port_t pager,
3388 default_pager_object_array_t *objectsp,
3389 natural_t *ocountp,
3390 mach_port_array_t *portsp,
3391 natural_t *pcountp)
3392{
3393 vm_offset_t oaddr; /* memory for objects */
3394 vm_size_t osize; /* current size */
3395 default_pager_object_t *objects;
3396 natural_t opotential;
3397
3398 vm_offset_t paddr; /* memory for ports */
3399 vm_size_t psize; /* current size */
3400 mach_port_t *ports;
3401 natural_t ppotential;
3402
3403 unsigned int actual;
3404 unsigned int num_pagers;
3405 kern_return_t kr;
3406 default_pager_t entry;
3407
3408 if (pager != default_pager_default_port)
3409 return KERN_INVALID_ARGUMENT4;
3410
3411 /* start with the inline memory */
3412
3413 num_pagers = 0;
3414
3415 objects = *objectsp;
3416 opotential = *ocountp;
3417
3418 ports = *portsp;
3419 ppotential = *pcountp;
3420
3421 pthread_mutex_lock(&all_pagers.lock);
3422 /*
3423 * We will send no more than this many
3424 */
3425 actual = all_pagers.count;
3426 pthread_mutex_unlock(&all_pagers.lock);
3427
3428 if (opotential < actual) {
3429 vm_offset_t newaddr;
3430 vm_size_t newsize;
3431
3432		newsize = 2 * round_page(actual * sizeof *objects);
3433
3434 kr = vm_allocate(default_pager_self, &newaddr, newsize, TRUE((boolean_t) 1));
3435 if (kr != KERN_SUCCESS0)
3436 goto nomemory;
3437
3438 oaddr = newaddr;
3439 osize = newsize;
3440 opotential = osize/sizeof *objects;
3441 objects = (default_pager_object_t *) oaddr;
3442 }
3443
3444 if (ppotential < actual) {
3445 vm_offset_t newaddr;
3446 vm_size_t newsize;
3447
3448		newsize = 2 * round_page(actual * sizeof *ports);
3449
3450 kr = vm_allocate(default_pager_self, &newaddr, newsize, TRUE((boolean_t) 1));
3451 if (kr != KERN_SUCCESS0)
3452 goto nomemory;
3453
3454 paddr = newaddr;
3455 psize = newsize;
3456 ppotential = psize/sizeof *ports;
3457 ports = (mach_port_t *) paddr;
3458 }
3459
3460 /*
3461 * Now scan the list.
3462 */
3463
3464 pthread_mutex_lock(&all_pagers.lock);
3465
3466 num_pagers = 0;
3467	queue_iterate(&all_pagers.queue, entry, default_pager_t, links) {
3468
3469 mach_port_t port;
3470 vm_size_t size;
3471
3472 if ((num_pagers >= opotential) ||
3473 (num_pagers >= ppotential)) {
3474 /*
3475 * This should be rare. In any case,
3476 * we will only miss recent objects,
3477 * because they are added at the end.
3478 */
3479 break;
3480 }
3481
3482 /*
3483 * Avoid interfering with normal operations
3484 */
3485 if (pthread_mutex_trylock(&entry->dpager.lock))
3486 goto not_this_one;
3487 size = pager_allocated(&entry->dpager);
3488 pthread_mutex_unlock(&entry->dpager.lock);
3489
3490 dstruct_lock(entry)pthread_mutex_lock(&entry->lock);
3491
3492 port = entry->pager_name;
3493 if (port == MACH_PORT_NULL((mach_port_t) 0)) {
3494 /*
3495 * The object is waiting for no-senders
3496 * or memory_object_init.
3497 */
3498 dstruct_unlock(entry)pthread_mutex_unlock(&entry->lock);
3499 goto not_this_one;
3500 }
3501
3502 /*
3503 * We need a reference for the reply message.
3504 * While we are unlocked, the bucket queue
3505 * can change and the object might be terminated.
3506 * memory_object_terminate will wait for us,
3507 * preventing deallocation of the entry.
3508 */
3509
3510 if (--entry->name_refs == 0) {
3511 dstruct_unlock(entry)pthread_mutex_unlock(&entry->lock);
3512
3513		/* keep the list locked, won't take long */
3514
3515 kr = mach_port_mod_refs(default_pager_self,
3516 port, MACH_PORT_RIGHT_SEND((mach_port_right_t) 0),
3517 default_pager_max_urefs);
3518 if (kr != KERN_SUCCESS0)
3519 panic("%sdefault_pager_objects",my_name);
3520
3521 dstruct_lock(entry)pthread_mutex_lock(&entry->lock);
3522
3523 entry->name_refs += default_pager_max_urefs;
3524 pager_port_finish_refs(entry);
3525 }
3526 dstruct_unlock(entry)pthread_mutex_unlock(&entry->lock);
3527
3528 /* the arrays are wired, so no deadlock worries */
3529
3530 objects[num_pagers].dpo_object = (vm_offset_t) entry;
3531 objects[num_pagers].dpo_size = size;
3532 ports [num_pagers++] = port;
3533 continue;
3534not_this_one:
3535 /*
3536 * Do not return garbage
3537 */
3538 objects[num_pagers].dpo_object = (vm_offset_t) 0;
3539 objects[num_pagers].dpo_size = 0;
3540 ports [num_pagers++] = MACH_PORT_NULL((mach_port_t) 0);
3541
3542 }
3543
3544 pthread_mutex_unlock(&all_pagers.lock);
3545
3546 /*
3547 * Deallocate and clear unused memory.
3548 * (Returned memory will automagically become pageable.)
3549 */
3550
3551 if (objects == *objectsp) {
3552 /*
3553 * Our returned information fit inline.
3554 * Nothing to deallocate.
3555 */
3556
3557 *ocountp = num_pagers;
3558 } else if (actual == 0) {
3559 (void) vm_deallocate(default_pager_self, oaddr, osize);
3560
3561 /* return zero items inline */
3562 *ocountp = 0;
3563 } else {
3564 vm_offset_t used;
3565
3566		used = round_page(actual * sizeof *objects);
3567
3568 if (used != osize)
3569 (void) vm_deallocate(default_pager_self,
3570 oaddr + used, osize - used);
3571
3572 *objectsp = objects;
3573 *ocountp = num_pagers;
3574 }
3575
3576 if (ports == *portsp) {
3577 /*
3578 * Our returned information fit inline.
3579 * Nothing to deallocate.
3580 */
3581
3582 *pcountp = num_pagers;
3583 } else if (actual == 0) {
3584 (void) vm_deallocate(default_pager_self, paddr, psize);
3585
3586 /* return zero items inline */
3587 *pcountp = 0;
3588 } else {
3589 vm_offset_t used;
3590
3591 used = round_page(actual * sizeof *ports);
3592
3593 if (used != psize)
3594 (void) vm_deallocate(default_pager_self,
3595 paddr + used, psize - used);
3596
3597 *portsp = ports;
3598 *pcountp = num_pagers;
3599 }
3600
3601 return KERN_SUCCESS;
3602
3603 nomemory:
3604
3605 {
3606 int i;
3607 for (i = 0; i < num_pagers; i++)
3608 (void) mach_port_deallocate(default_pager_self, ports[i]);
3609 }
3610
3611 if (objects != *objectsp)
3612 (void) vm_deallocate(default_pager_self, oaddr, osize);
3613
3614 if (ports != *portsp)
3615 (void) vm_deallocate(default_pager_self, paddr, psize);
3616
3617 return KERN_RESOURCE_SHORTAGE;
3618 }
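
The routine above fills two parallel out-arrays (objects and ports) in the usual Mach style: it starts with the caller's inline buffers, switches to vm_allocate'd wired memory when they overflow, and finally gives back the pages it did not use. A minimal sketch of that trim step, assuming a standard Mach user environment; return_trimmed and elt_size are hypothetical names:

    #include <mach.h>

    /* Hand back the unused tail of a vm_allocate'd out-array; the pages
     * actually used are returned out-of-line and become pageable. */
    kern_return_t
    return_trimmed(vm_offset_t addr, vm_size_t size,
                   natural_t count, vm_size_t elt_size)
    {
            vm_size_t used = round_page(count * elt_size);

            if (used > size)
                    return KERN_INVALID_ARGUMENT;
            if (used != size)
                    (void) vm_deallocate(mach_task_self(),
                                         addr + used, size - used);
            return KERN_SUCCESS;
    }
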
3619
3620
3621 kern_return_t
3622 S_default_pager_object_pages (mach_port_t pager,
3623 mach_port_t object,
3624 default_pager_page_array_t *pagesp,
3625 natural_t *countp)
3626{
3627 vm_offset_t addr; /* memory for page offsets */
3628 vm_size_t size; /* current memory size */
3629 default_pager_page_t *pages;
3630 natural_t potential, actual;
3631 kern_return_t kr;
3632
3633 if (pager != default_pager_default_port)
3634 return KERN_INVALID_ARGUMENT;
3635
3636 /* we start with the inline space */
3637
3638 pages = *pagesp;
3639 potential = *countp;
3640
3641 for (;;) {
3642 default_pager_t entry;
3643
3644 pthread_mutex_lock(&all_pagers.lock);
3645 queue_iterate(&all_pagers.queue, entry, default_pager_t, links) {
3646 dstruct_lock(entry);
3647 if (entry->pager_name == object) {
3648 pthread_mutex_unlock(&all_pagers.lock);
3649 goto found_object;
3650 }
3651 dstruct_unlock(entry);
3652 }
3653 pthread_mutex_unlock(&all_pagers.lock);
3654
3655 /* did not find the object */
3656
3657 if (pages != *pagesp)
3658 (void) vm_deallocate(default_pager_self, addr, size);
3659 return KERN_INVALID_ARGUMENT;
3660
3661 found_object:
3662
3663 if (pthread_mutex_trylock(&entry->dpager.lock)) {
3664 /* oh well bad luck */
3665
3666 dstruct_unlock(entry);
3667
3668 /* yield the processor */
3669 (void) thread_switch(MACH_PORT_NULL,
3670 SWITCH_OPTION_NONE, 0);
3671 continue;
3672 }
3673
3674 actual = pager_pages(&entry->dpager, pages, potential);
3675 pthread_mutex_unlock(&entry->dpager.lock);
3676 dstruct_unlock(entry);
3677
3678 if (actual <= potential)
3679 break;
3680
3681 /* allocate more memory */
3682
3683 if (pages != *pagesp)
3684 (void) vm_deallocate(default_pager_self, addr, size);
3685 size = round_page(actual * sizeof *pages);
3686 kr = vm_allocate(default_pager_self, &addr, size, TRUE);
3687 if (kr != KERN_SUCCESS)
3688 return kr;
3689 pages = (default_pager_page_t *) addr;
3690 potential = size/sizeof *pages;
3691 }
3692
3693 /*
3694 * Deallocate and clear unused memory.
3695 * (Returned memory will automagically become pageable.)
3696 */
3697
3698 if (pages == *pagesp) {
3699 /*
3700 * Our returned information fit inline.
3701 * Nothing to deallocate.
3702 */
3703
3704 *countp = actual;
3705 } else if (actual == 0) {
3706 (void) vm_deallocate(default_pager_self, addr, size);
3707
3708 /* return zero items inline */
3709 *countp = 0;
3710 } else {
3711 vm_offset_t used;
3712
3713 used = round_page(actual * sizeof *pages);
3714
3715 if (used != size)
3716 (void) vm_deallocate(default_pager_self,
3717 addr + used, size - used);
3718
3719 *pagesp = pages;
3720 *countp = actual;
3721 }
3722 return KERN_SUCCESS;
3723 }
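
The loop above is the standard grow-and-retry pattern: probe with the current capacity, and if the object needs more entries than fit, replace the buffer with a page-rounded vm_allocate'd one and try again. A self-contained sketch, where counter() is a hypothetical stand-in for pager_pages() (it fills at most `potential` entries and returns the total count it would need):

    #include <mach.h>

    typedef struct { vm_offset_t dpo_offset; } thing_t;  /* hypothetical */

    extern natural_t counter(thing_t *buf, natural_t potential);

    kern_return_t
    collect(thing_t *inline_buf, natural_t inline_count,
            thing_t **bufp, natural_t *countp)
    {
            vm_offset_t addr = 0;
            vm_size_t size = 0;
            thing_t *buf = inline_buf;
            natural_t actual, potential = inline_count;
            kern_return_t kr;

            for (;;) {
                    actual = counter(buf, potential);
                    if (actual <= potential)
                            break;          /* everything fit */

                    /* Too small: drop any previous buffer and size up. */
                    if (buf != inline_buf)
                            (void) vm_deallocate(mach_task_self(), addr, size);
                    size = round_page(actual * sizeof *buf);
                    kr = vm_allocate(mach_task_self(), &addr, size, TRUE);
                    if (kr != KERN_SUCCESS)
                            return kr;
                    buf = (thing_t *) addr;
                    potential = size / sizeof *buf;
            }
            *bufp = buf;
            *countp = actual;
            return KERN_SUCCESS;
    }
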
3724
3725
3726 kern_return_t
3727 S_default_pager_object_set_size (mach_port_t pager,
3728 mach_port_seqno_t seqno,
3729 vm_size_t limit)
3730{
3731 kern_return_t kr = KERN_SUCCESS; /* stays SUCCESS if no lock request is needed */
3732 default_pager_t ds;
3733
3734 ds = pager_port_lookup(pager);
3735 if (ds == DEFAULT_PAGER_NULL)
3736 return KERN_INVALID_ARGUMENT;
3737
3738 pager_port_lock(ds, seqno);
3739 pager_port_wait_for_readers(ds);
3740 pager_port_wait_for_writers(ds);
3741
3742 vm_size_t rounded_limit = round_page (limit);
3743 vm_size_t trunc_limit = trunc_page (limit);
3744
3745
3746 if (ds->dpager.limit < rounded_limit)
3747 {
3748 /* The limit has not been exceeded heretofore. Just change it. */
3749 ds->dpager.limit = rounded_limit;
3750
3751 /* The byte limit is used when truncating the file, which need not be
3752 rounded to a page boundary; when enlarging the file we may increase it. */
3753 ds->dpager.byte_limit = rounded_limit;
3754 kr = memory_object_lock_request(ds->pager_request, 0,
3755 rounded_limit,
3756 MEMORY_OBJECT_RETURN_NONE, FALSE,
3757 VM_PROT_NONE, MACH_PORT_NULL);
3758 if (kr != KERN_SUCCESS)
3759 panic ("memory_object_lock_request: %d", kr);
3760 }
3761 else
3762 {
3763 if (ds->dpager.limit != rounded_limit)
3764 {
3765 kr = memory_object_lock_request(ds->pager_request, rounded_limit,
3766 ds->dpager.limit - rounded_limit,
3767 MEMORY_OBJECT_RETURN_NONE, TRUE,
3768 VM_PROT_ALL, MACH_PORT_NULL);
3769 if (kr != KERN_SUCCESS)
3770 panic ("memory_object_lock_request: %d", kr);
3771
3772 ds->dpager.limit = rounded_limit;
3773 }
3774
3775 /* Deallocate the old backing store pages and shrink the page map. */
3776 if (ds->dpager.size > ds->dpager.limit / vm_page_size)
3777 pager_truncate (&ds->dpager, ds->dpager.limit / vm_page_size);
3778
3779 /* If the memory object size isn't page aligned, fill the tail
3780 of the last page with zeroes. */
3781 if ((limit != rounded_limit) && (ds->dpager.limit > limit))
3782 {
3783 /* Clean the part of the last page which isn't part of the file,
3784 for file sizes that aren't a multiple of vm_page_size. */
3785 ds->dpager.byte_limit = limit;
3786 kr = memory_object_lock_request(ds->pager_request, trunc_limit,
3787 vm_page_size,
3788 MEMORY_OBJECT_RETURN_ALL, TRUE,
3789 VM_PROT_NONE, MACH_PORT_NULL);
3790 }
3791 }
3792
3793 pager_port_unlock(ds);
3794
3795 return kr;
3796 }
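
To make the page rounding concrete, a sketch assuming vm_page_size is 4096 (illustrative numbers only):

    #include <mach.h>

    void rounding_example(void)
    {
            vm_size_t limit = 10000;                /* not page aligned */
            vm_size_t rounded = round_page(limit);  /* 12288 = 3 pages */
            vm_size_t truncd  = trunc_page(limit);  /*  8192 = 2 pages */

            /* The final lock request above covers [truncd, truncd +
             * vm_page_size), i.e. the last page, so the pager can
             * zero-fill the tail bytes 10000..12287 that lie beyond
             * the byte limit. */
            (void) rounded; (void) truncd;
    }
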
3797
3798 /*
3799 * Add/remove extra paging space
3800 */
3801
3802extern mach_port_t bootstrap_master_device_port;
3803extern mach_port_t bootstrap_master_host_port;
3804
3805 kern_return_t
3806 S_default_pager_paging_file (pager, mdport, file_name, add)
3807 mach_port_t pager;
3808 mach_port_t mdport;
3809 default_pager_filename_t file_name;
3810 boolean_t add;
3811{
3812 kern_return_t kr;
3813
3814 if (pager != default_pager_default_port)
3815 return KERN_INVALID_ARGUMENT;
3816
3817 #if 0
3818 dprintf("bmd %x md %x\n", bootstrap_master_device_port, mdport);
3819 #endif
3820 if (add) {
3821 kr = add_paging_file(bootstrap_master_device_port,
3822 file_name, 0);
3823 } else {
3824 kr = remove_paging_file(file_name);
3825 }
3826
3827 /* XXXX more code needed */
3828 if (mdport != bootstrap_master_device_port)
3829 mach_port_deallocate( mach_task_self(), mdport);
3830
3831 return kr;
3832 }
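
For context, a hypothetical client-side use of this RPC. By MIG convention the user stub for the routine served above is default_pager_paging_file(), declared in the interface's generated user header; obtaining the two ports is elided here, and defpager, master_device and add_swap_file are assumed names:

    #include <mach.h>
    #include <mach/default_pager_types.h>
    #include <stdio.h>

    extern mach_port_t defpager;        /* default pager's service port */
    extern mach_port_t master_device;   /* bootstrap master device port */

    void add_swap_file(void)
    {
            kern_return_t kr;
            default_pager_filename_t name = "/swapfile";

            /* TRUE = add the file to the paging pool; FALSE = remove. */
            kr = default_pager_paging_file(defpager, master_device,
                                           name, TRUE);
            if (kr != KERN_SUCCESS)
                    printf("adding paging file failed: %d\n", kr);
    }
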
3833
3834 kern_return_t
3835 default_pager_register_fileserver(pager, fileserver)
3836 mach_port_t pager;
3837 mach_port_t fileserver;
3838{
3839 if (pager != default_pager_default_port)
3840 return KERN_INVALID_ARGUMENT;
3841 #if notyet
3842 mach_port_deallocate(mach_task_self(), fileserver);
3843 if (0) dp_helper_paging_space(0,0,0); /* just link it */
3844 #endif
3845 return KERN_SUCCESS;
3846 }
3847
3848 /*
3849 * When things do not quite work out...
3850 */
3851 void no_paging_space(out_of_memory)
3852 boolean_t out_of_memory;
3853{
3854 static char here[] = "%s *** NOT ENOUGH PAGING SPACE ***";
3855
3856 if (out_of_memory)
3857 dprintf("*** OUT OF MEMORY *** ");
3858 panic(here, my_name);
3859 }
3860
3861 void overcommitted(got_more_space, space)
3862 boolean_t got_more_space;
3863 vm_size_t space; /* in pages */
3864{
3865 vm_size_t pages_free, pages_total;
3866
3867 static boolean_t user_warned = FALSE;
3868 static vm_size_t pages_shortage = 0;
3869
3870 paging_space_info(&pages_total, &pages_free);
3871
3872 /*
3873 * If user added more space, see if it is enough
3874 */
3875 if (got_more_space) {
3876 pages_free -= pages_shortage;
3877 if (pages_free > 0) {
3878 pages_shortage = 0;
3879 if (user_warned)
3880 dprintf("%s paging space ok now.\n", my_name);
3881 } else
3882 pages_shortage = pages_free;
3883 user_warned = FALSE;
3884 return;
3885 }
3886 /*
3887 * We ran out of gas, let user know.
3888 */
3889 pages_free -= space;
3890 pages_shortage = (pages_free > 0) ? 0 : -pages_free;
3891 if (!user_warned && pages_shortage) {
3892 user_warned = TRUE;
3893 dprintf("%s paging space over-committed.\n", my_name);
3894 }
3895 #if debug
3896 user_warned = FALSE;
3897 dprintf("%s paging space over-committed [+%d (%d) pages].\n",
3898 my_name, space, pages_shortage);
3899 #endif
3900 }
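
The shortage bookkeeping above is easiest to read in signed terms; note that pages_free and space are the unsigned vm_size_t, so "pages_free -= space" wraps around when space exceeds pages_free. A sketch of the intended computation, assuming page counts fit in a long:

    #include <mach.h>

    /* Pages still missing after charging `space` pages against `pages_free`. */
    vm_size_t
    shortage_after(vm_size_t pages_free, vm_size_t space)
    {
            long avail = (long) pages_free - (long) space;

            return (avail > 0) ? 0 : (vm_size_t) -avail;
    }
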
3901
3902 void paging_space_info(totp, freep)
3903 vm_size_t *totp, *freep;
3904{
3905 vm_size_t total, free;
3906 partition_t part;
3907 int i;
3908
3909 total = free = 0;
3910 for (i = 0; i < all_partitions.n_partitions; i++) {
3911
3912 if ((part = partition_of(i)) == 0) continue;
3913
3914 /* no need to lock: by the time this data
3915 gets back to any remote requestor it
3916 will be obsolete anyway */
3917 total += part->total_size;
3918 free += part->free;
3919 #if debug
3920 dprintf("Partition %d: x%x total, x%x free\n",
3921 i, part->total_size, part->free);
3922 #endif
3923 }
3924 *totp = total;
3925 *freep = free;
3926 }
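
A sketch of a typical caller in the same file (so the Mach headers above are already included), assuming both counters are in units of pages; report_paging_space is a hypothetical name:

    #include <stdio.h>

    void report_paging_space(void)
    {
            vm_size_t total, free;

            paging_space_info(&total, &free);
            printf("paging space: %lu of %lu pages free\n",
                   (unsigned long) free, (unsigned long) total);
    }
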
3927
3928 /*
3929 * Catch exceptions.
3930 */
3931
3932 kern_return_t
3933 catch_exception_raise(exception_port, thread, task, exception, code, subcode)
3934 mach_port_t exception_port;
3935 mach_port_t thread, task;
3936 int exception, code, subcode;
3937{
3938 ddprintf ("(default_pager)catch_exception_raise(%d,%d,%d)\n",
3939 exception, code, subcode);
3940 panic(my_name);
3941
3942 /* mach_msg_server will deallocate thread/task for us */
3943
3944 return KERN_FAILURE;
3945 }