Diffstat (limited to 'vm/vm_page.h')
-rw-r--r--  vm/vm_page.h | 285
1 file changed, 272 insertions(+), 13 deletions(-)
diff --git a/vm/vm_page.h b/vm/vm_page.h
index 23c8c47..3fe6572 100644
--- a/vm/vm_page.h
+++ b/vm/vm_page.h
@@ -25,11 +25,23 @@
#include <kern/list.h>
#include <kern/log2.h>
#include <kern/macros.h>
-//#include <kern/param.h>
-//#include <kern/stddef.h>
-//#include <kern/types.h>
+#include <mach/vm_param.h>
+#include <machine/vm_param.h>
#include <machine/pmap.h>
+#include <kern/queue.h>
+#include <kern/lock.h>
+
+#include <mach/boolean.h>
+#include <mach/vm_prot.h>
+#include <vm/vm_object.h>
+#include <vm/vm_types.h>
+
+#include <kern/sched_prim.h>	/* definitions of wait/wakeup */
+
/*
* Address/page conversion and rounding macros (not inline functions to
* be easily usable on both virtual and physical addresses, which may not
@@ -62,7 +74,7 @@
* TODO Obviously, this needs to be addressed, e.g. with a reserved pool of
* pages.
*/
-#define VM_PAGE_FREE 0 /* Page unused */
+#define VM_PAGE_UNUSED 0 /* Page unused */
#define VM_PAGE_RESERVED 1 /* Page reserved at boot time */
#define VM_PAGE_TABLE 2 /* Page is part of the page table */
#define VM_PAGE_PMAP 3 /* Page stores pmap-specific data */
@@ -74,12 +86,51 @@
* Physical page descriptor.
*/
struct vm_page {
- struct list node;
- unsigned short type;
- unsigned short seg_index;
- unsigned short order;
- phys_addr_t phys_addr;
- void *slab_priv;
+	/* This is the data used by the vm_page module. */
+	struct list node;
+	unsigned short type;
+	unsigned short seg_index;
+	unsigned short order;
+	phys_addr_t phys_addr;
+	void *slab_priv;
+
+	/* We use an empty struct as the delimiter. */
+	struct {} vm_page_header;
+#define VM_PAGE_HEADER_SIZE offsetof(struct vm_page, vm_page_header)
+
+	/* This is the data used by vm_resident and friends. */
+	queue_chain_t pageq;		/* queue info for FIFO queue */
+
+	vm_object_t object;		/* which object am I in (O,P) */
+	vm_offset_t offset;		/* offset into that object (O,P) */
+
+	unsigned int wire_count:16,	/* how many wired down maps use me?
+					   (O&P) */
+	/* boolean_t */	inactive:1,	/* page is in inactive list (P) */
+			active:1,	/* page is in active list (P) */
+			laundry:1,	/* page is being cleaned now (P) */
+			reference:1,	/* page has been used (P) */
+			external:1,	/* page considered external (P) */
+			extcounted:1,	/* page counted in ext counts (P) */
+			busy:1,		/* page is in transit (O) */
+			wanted:1,	/* someone is waiting for page (O) */
+			tabled:1,	/* page is in VP table (O) */
+			fictitious:1,	/* Physical page doesn't exist (O) */
+			private:1,	/* Page should not be returned to
+					 * the free list (O) */
+			absent:1,	/* Data has been requested, but is
+					 * not yet available (O) */
+			error:1,	/* Data manager was unable to provide
+					 * data due to error (O) */
+			dirty:1,	/* Page must be cleaned (O) */
+			precious:1,	/* Page is precious; data must be
+					 * returned even if clean (O) */
+			overwriting:1;	/* Request to unlock has been made
+					 * without having data. (O)
+					 * [See vm_object_overwrite] */
+
+	vm_prot_t page_lock;		/* Uses prohibited by data manager (O) */
+	vm_prot_t unlock_request;	/* Outstanding unlock request (O) */
};
static inline unsigned short
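The empty vm_page_header member gives VM_PAGE_HEADER_SIZE a precise meaning: every field above it belongs to the vm_page module, everything below it to Mach. As a minimal sketch (not the actual implementation), vm_page_init_mach(), declared further down, could rely on it to reset only the Mach half of a descriptor:

    /* Hypothetical sketch: zero the Mach-specific part of a descriptor,
     * leaving the vm_page module's fields (node, type, seg_index, order,
     * phys_addr, slab_priv) untouched. */
    void
    vm_page_init_mach(struct vm_page *page)
    {
            memset((char *)page + VM_PAGE_HEADER_SIZE, 0,
                   sizeof(struct vm_page) - VM_PAGE_HEADER_SIZE);
    }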
@@ -166,7 +217,7 @@ void vm_page_manage(struct vm_page *page);
/*
* Return the page descriptor for the given physical address.
*/
-struct vm_page * vm_page_lookup(phys_addr_t pa);
+struct vm_page * vm_page_lookup_pa(phys_addr_t pa);
/*
* Allocate a block of 2^order physical pages.
@@ -174,13 +225,13 @@ struct vm_page * vm_page_lookup(phys_addr_t pa);
* The selector is used to determine the segments from which allocation can
* be attempted.
*/
-struct vm_page * vm_page_alloc(unsigned int order, unsigned int selector,
+struct vm_page * vm_page_alloc_p(unsigned int order, unsigned int selector,
unsigned short type);
/*
* Release a block of 2^order physical pages.
*/
-void vm_page_free(struct vm_page *page, unsigned int order);
+void vm_page_free_p(struct vm_page *page, unsigned int order);
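For illustration, a hedged usage sketch of the renamed allocator and release functions; the selector constant below is a placeholder, since the real selector names are defined elsewhere:

    /* Hypothetical example: allocate a contiguous block of 2^2 = 4
     * physical pages for pmap data, then release it.
     * VM_PAGE_SEL_DIRECTMAP is a placeholder selector name. */
    struct vm_page *block;

    block = vm_page_alloc_p(2, VM_PAGE_SEL_DIRECTMAP, VM_PAGE_PMAP);

    if (block != NULL) {
            /* ... use physical memory starting at block->phys_addr ... */
            vm_page_free_p(block, 2);
    }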
/*
* Return the name of the given segment.
@@ -192,4 +243,212 @@ const char * vm_page_seg_name(unsigned int seg_index);
*/
void vm_page_info(void);
+/* Mach stuff follows. */
+
+/*
+ * Glue code.
+ */
+#define CPU_L1_SIZE (1 << CPU_L1_SHIFT)
+#define MAX_CPUS NCPUS
+#define __read_mostly
+#define __initdata
+#define __init
+#define cpu_id() cpu_number()
+#define thread_pin()
+#define thread_unpin()
+#define printk printf
+
+void update_vm_page_counts(void);
+
+/*
+ * For debugging, this macro can be defined to perform
+ * some useful check on a page structure.
+ */
+
+#define VM_PAGE_CHECK(mem)
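A debug build might define it to assert an invariant instead; purely illustrative (assumes kern/assert.h):

    /* Illustrative only: a page must never sit on both LRU queues. */
    #define VM_PAGE_CHECK(mem) assert(!((mem)->active && (mem)->inactive))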
+
+/*
+ * Each pageable resident page falls into one of three lists:
+ *
+ *	free
+ *		Available for allocation now.
+ *	inactive
+ *		Not referenced in any map, but still has an
+ *		object/offset-page mapping, and may be dirty.
+ *		This is the list of pages that should be
+ *		paged out next.
+ *	active
+ *		A list of pages which have been placed in
+ *		at least one physical map. This list is
+ *		ordered, in LRU-like fashion.
+ */
+
+extern
+vm_page_t vm_page_queue_free; /* memory free queue */
+extern
+vm_page_t vm_page_queue_fictitious; /* fictitious free queue */
+extern
+queue_head_t vm_page_queue_active; /* active memory queue */
+extern
+queue_head_t vm_page_queue_inactive; /* inactive memory queue */
+
+extern
+int vm_page_free_count; /* How many pages are free? */
+extern
+int vm_page_fictitious_count;	/* How many fictitious pages are free? */
+extern
+int vm_page_active_count; /* How many pages are active? */
+extern
+int vm_page_inactive_count; /* How many pages are inactive? */
+extern
+int vm_page_wire_count; /* How many pages are wired? */
+extern
+int vm_page_free_target; /* How many do we want free? */
+extern
+int vm_page_free_min; /* When to wakeup pageout */
+extern
+int vm_page_inactive_target;	/* How many do we want inactive? */
+extern
+int vm_page_free_reserved; /* How many pages reserved to do pageout */
+extern
+int vm_page_laundry_count; /* How many pages being laundered? */
+extern
+int vm_page_external_limit; /* Max number of pages for external objects */
+
+/* Only pages belonging to objects marked with the extcounted bit are
+   included in this total.  Pages that are merely scanned for possible
+   pageout, but turn out not to be dirty, are no longer counted against
+   the external page limits. */
+extern
+int vm_page_external_count; /* How many pages for external objects? */
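Taken together, the queues, counters, and page flag bits implement the LRU transitions described above. A sketch of what vm_page_activate(), declared below, typically does in Mach (the caller holds the page-queue lock; the real body may differ):

    void
    vm_page_activate(vm_page_t m)
    {
            VM_PAGE_CHECK(m);

            /* Pull the page off the inactive list if it is there. */
            if (m->inactive) {
                    queue_remove(&vm_page_queue_inactive, m, vm_page_t, pageq);
                    vm_page_inactive_count--;
                    m->inactive = FALSE;
            }

            /* Wired pages stay off the pageout queues entirely. */
            if (m->wire_count == 0) {
                    queue_enter(&vm_page_queue_active, m, vm_page_t, pageq);
                    m->active = TRUE;
                    vm_page_active_count++;
            }
    }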
+
+extern struct lock vm_page_queue_lock;	/* lock on active and inactive page queues */
+decl_simple_lock_data(extern,vm_page_queue_free_lock)
+			/* lock on free page queue */
+
+extern unsigned int vm_page_free_wanted;
+			/* how many threads are waiting for memory */
+
+extern vm_offset_t vm_page_fictitious_addr;
+			/* (fake) phys_addr of fictitious pages */
+
+extern void vm_page_bootstrap(
+ vm_offset_t *startp,
+ vm_offset_t *endp);
+extern void vm_page_module_init(void);
+
+extern void vm_page_create(
+ vm_offset_t start,
+ vm_offset_t end);
+extern vm_page_t vm_page_lookup(
+ vm_object_t object,
+ vm_offset_t offset);
+extern vm_page_t vm_page_grab_fictitious(void);
+extern void vm_page_release_fictitious(vm_page_t);
+extern boolean_t vm_page_convert(vm_page_t *, boolean_t);
+extern void vm_page_more_fictitious(void);
+extern vm_page_t vm_page_grab(boolean_t);
+extern void vm_page_release(vm_page_t, boolean_t);
+extern void vm_page_wait(void (*)(void));
+extern vm_page_t vm_page_alloc(
+ vm_object_t object,
+ vm_offset_t offset);
+extern void vm_page_init(
+ vm_page_t mem,
+ vm_offset_t phys_addr);
+extern void vm_page_init_mach(struct vm_page *);
+extern void vm_page_free(vm_page_t);
+extern void vm_page_activate(vm_page_t);
+extern void vm_page_deactivate(vm_page_t);
+extern void vm_page_rename(
+ vm_page_t mem,
+ vm_object_t new_object,
+ vm_offset_t new_offset);
+extern void vm_page_insert(
+ vm_page_t mem,
+ vm_object_t object,
+ vm_offset_t offset);
+extern void vm_page_remove(
+ vm_page_t mem);
+
+extern void vm_page_zero_fill(vm_page_t);
+extern void vm_page_copy(vm_page_t src_m, vm_page_t dest_m);
+
+extern void vm_page_wire(vm_page_t);
+extern void vm_page_unwire(vm_page_t);
+
+/*
+ * Functions implemented as macros
+ */
+
+#define PAGE_ASSERT_WAIT(m, interruptible) \
+ MACRO_BEGIN \
+ (m)->wanted = TRUE; \
+ assert_wait((event_t) (m), (interruptible)); \
+ MACRO_END
+
+#define PAGE_WAKEUP_DONE(m) \
+ MACRO_BEGIN \
+ (m)->busy = FALSE; \
+ if ((m)->wanted) { \
+ (m)->wanted = FALSE; \
+ thread_wakeup(((event_t) m)); \
+ } \
+ MACRO_END
+
+#define PAGE_WAKEUP(m) \
+ MACRO_BEGIN \
+ if ((m)->wanted) { \
+ (m)->wanted = FALSE; \
+ thread_wakeup((event_t) (m)); \
+ } \
+ MACRO_END
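These macros pair up across threads: a thread that finds a page busy parks itself with PAGE_ASSERT_WAIT() and blocks, and whoever finishes with the page calls PAGE_WAKEUP_DONE(). A sketch of the classic vm_fault-style pattern, with object locking shown only to stress that one must not sleep holding the lock:

    /* Consumer: wait until the page leaves transit. */
    while (m->busy) {
            PAGE_ASSERT_WAIT(m, FALSE);     /* set wanted, register on event */
            vm_object_unlock(object);       /* never sleep holding the lock */
            thread_block((void (*)()) 0);
            vm_object_lock(object);
    }

    /* Producer: done with the page. */
    PAGE_WAKEUP_DONE(m);                    /* clears busy, wakes any waiter */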
+
+#define VM_PAGE_FREE(p) \
+ MACRO_BEGIN \
+ vm_page_lock_queues(); \
+ vm_page_free(p); \
+ vm_page_unlock_queues(); \
+ MACRO_END
+
+/*
+ * Macro to be used in place of pmap_enter()
+ */
+
+#define PMAP_ENTER(pmap, virtual_address, page, protection, wired) \
+ MACRO_BEGIN \
+ pmap_enter( \
+ (pmap), \
+ (virtual_address), \
+ (page)->phys_addr, \
+ (protection) & ~(page)->page_lock, \
+ (wired) \
+ ); \
+ MACRO_END
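The point of the wrapper is the (protection) & ~(page)->page_lock term: access rights the data manager has locked out are stripped before the mapping reaches the pmap layer. A hypothetical call site (va and m are illustrative locals):

    /* If the data manager has locked out writes (page_lock contains
     * VM_PROT_WRITE), the page is mapped read-only despite the request. */
    PMAP_ENTER(kernel_pmap, va, m, VM_PROT_READ | VM_PROT_WRITE, FALSE);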
+
+#define VM_PAGE_WAIT(continuation) vm_page_wait(continuation)
+
+#define vm_page_lock_queues() lock_write(&vm_page_queue_lock)
+#define vm_page_unlock_queues() lock_write_done(&vm_page_queue_lock)
+#define have_vm_page_queue_lock() have_write_lock(&vm_page_queue_lock)
+
+#define VM_PAGE_QUEUES_REMOVE(mem)			\
+	MACRO_BEGIN					\
+	if ((mem)->active) {				\
+		queue_remove(&vm_page_queue_active,	\
+			(mem), vm_page_t, pageq);	\
+		(mem)->active = FALSE;			\
+		vm_page_active_count--;			\
+	}						\
+							\
+	if ((mem)->inactive) {				\
+		queue_remove(&vm_page_queue_inactive,	\
+			(mem), vm_page_t, pageq);	\
+		(mem)->inactive = FALSE;		\
+		vm_page_inactive_count--;		\
+	}						\
+	MACRO_END
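This macro is what keeps wired pages invisible to the pageout daemon. A sketch of how vm_page_wire(), declared above, typically uses it in Mach (queue lock held; the real body may differ):

    void
    vm_page_wire(vm_page_t mem)
    {
            VM_PAGE_CHECK(mem);

            if (mem->wire_count == 0) {
                    /* First wiring: remove the page from the LRU queues
                     * so the pageout daemon never considers it. */
                    VM_PAGE_QUEUES_REMOVE(mem);
                    vm_page_wire_count++;
            }
            mem->wire_count++;
    }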
+
#endif /* _VM_VM_PAGE_H */