diff options
Diffstat (limited to 'debian/patches/vm_page_cleanq.patch')
-rw-r--r-- | debian/patches/vm_page_cleanq.patch | 336 |
1 files changed, 336 insertions, 0 deletions
diff --git a/debian/patches/vm_page_cleanq.patch b/debian/patches/vm_page_cleanq.patch new file mode 100644 index 0000000..5535d6e --- /dev/null +++ b/debian/patches/vm_page_cleanq.patch @@ -0,0 +1,336 @@ +diff --git a/linux/dev/glue/block.c b/linux/dev/glue/block.c +index da4ef38..3bd2c5b 100644 +--- a/linux/dev/glue/block.c ++++ b/linux/dev/glue/block.c +@@ -1537,7 +1537,7 @@ device_read (void *d, ipc_port_t reply_port, + if (dirty) + { + PAGE_WAKEUP_DONE (m); +- m->dirty = TRUE; ++ vm_page_mark_dirty (m, TRUE); + vm_page_insert (m, object, o); + } + else +diff --git a/vm/memory_object.c b/vm/memory_object.c +index 097ed23..7a0dbb8 100644 +--- a/vm/memory_object.c ++++ b/vm/memory_object.c +@@ -209,7 +209,7 @@ retry_lookup: + */ + + data_m->busy = FALSE; +- data_m->dirty = FALSE; ++ vm_page_mark_dirty (data_m, FALSE); + pmap_clear_modify(data_m->phys_addr); + + data_m->page_lock = lock_value; +@@ -555,7 +555,7 @@ memory_object_lock_result_t memory_object_lock_page( + */ + + if (!m->dirty) +- m->dirty = pmap_is_modified(m->phys_addr); ++ vm_page_mark_dirty (m, pmap_is_modified(m->phys_addr)); + + if (m->dirty || (m->precious && + should_return == MEMORY_OBJECT_RETURN_ALL)) { +diff --git a/vm/vm_debug.c b/vm/vm_debug.c +index 227090e..822ca86 100644 +--- a/vm/vm_debug.c ++++ b/vm/vm_debug.c +@@ -352,7 +352,7 @@ mach_vm_object_pages( + if (((state & (VPI_STATE_NODATA|VPI_STATE_DIRTY)) == 0) && + pmap_is_modified(p->phys_addr)) { + state |= VPI_STATE_DIRTY; +- p->dirty = TRUE; ++ vm_page_mark_dirty (p, TRUE); + } + + vm_page_lock_queues(); +diff --git a/vm/vm_fault.c b/vm/vm_fault.c +index 686156c..a48902a 100644 +--- a/vm/vm_fault.c ++++ b/vm/vm_fault.c +@@ -978,7 +978,7 @@ vm_fault_return_t vm_fault_page( + + vm_page_lock_queues(); + pmap_page_protect(m->phys_addr, VM_PROT_NONE); +- copy_m->dirty = TRUE; ++ vm_page_mark_dirty (copy_m, TRUE); + vm_page_unlock_queues(); + + /* +@@ -1077,7 +1077,7 @@ vm_fault_return_t vm_fault_page( + */ + + if 
(vm_fault_dirty_handling && (*protection & VM_PROT_WRITE)) +- m->dirty = TRUE; ++ vm_page_mark_dirty (m, TRUE); + + return(VM_FAULT_SUCCESS); + +@@ -1957,7 +1957,7 @@ kern_return_t vm_fault_copy( + vm_page_zero_fill(dst_page); + else + vm_page_copy(src_page, dst_page); +- dst_page->dirty = TRUE; ++ vm_page_mark_dirty (dst_page, TRUE); + + /* + * Unlock everything, and return +diff --git a/vm/vm_map.c b/vm/vm_map.c +index 6b13724..c229df5 100644 +--- a/vm/vm_map.c ++++ b/vm/vm_map.c +@@ -2931,7 +2931,7 @@ insert_pages: + assert(!m->wanted); + + m->busy = FALSE; +- m->dirty = TRUE; ++ vm_page_mark_dirty (m, TRUE); + vm_page_replace(m, object, old_last_offset + offset); + if (must_wire) { + vm_page_wire(m); +diff --git a/vm/vm_object.c b/vm/vm_object.c +index a96516c..f7d8fd2 100644 +--- a/vm/vm_object.c ++++ b/vm/vm_object.c +@@ -587,7 +587,7 @@ void vm_object_terminate( + panic("vm_object_terminate.4 0x%x 0x%x", object, p); + + if (!p->dirty) +- p->dirty = pmap_is_modified(p->phys_addr); ++ vm_page_mark_dirty (p, pmap_is_modified(p->phys_addr)); + + if (p->dirty || p->precious) { + p->busy = TRUE; +@@ -1089,7 +1089,7 @@ kern_return_t vm_object_copy_slowly( + */ + + new_page->busy = FALSE; +- new_page->dirty = TRUE; ++ vm_page_mark_dirty (new_page, TRUE); + vm_object_lock(result_page->object); + PAGE_WAKEUP_DONE(result_page); + +diff --git a/vm/vm_page.h b/vm/vm_page.h +index 4fe1b41..729ad8e 100644 +--- a/vm/vm_page.h ++++ b/vm/vm_page.h +@@ -70,8 +70,10 @@ + * and sundry status bits. + * + * Fields in this structure are locked either by the lock on the +- * object that the page belongs to (O) or by the lock on the page +- * queues (P). [Some fields require that both locks be held to ++ * object that the page belongs to (O), by the lock on the page ++ * queues (P), or by vm_page_queue_clean_lock (C). ++ * ++ * [Some fields require that both locks, O and P, be held to + * change that field; holding either lock is sufficient to read.] 
+ */ + +@@ -79,6 +81,7 @@ struct vm_page { + queue_chain_t pageq; /* queue info for FIFO + * queue or free list (P) */ + queue_chain_t listq; /* all pages in same object (O) */ ++ queue_chain_t cleanq; /* all clean pages (C) */ + struct vm_page *next; /* VP bucket link (O) */ + + vm_object_t object; /* which object am I in (O,P) */ +@@ -147,8 +150,12 @@ extern + queue_head_t vm_page_queue_active; /* active memory queue */ + extern + queue_head_t vm_page_queue_inactive; /* inactive memory queue */ ++extern ++queue_head_t vm_page_queue_clean; /* clean memory queue */ + + extern ++int vm_page_clean_count; /* How many pages are clean? */ ++extern + int vm_page_free_count; /* How many pages are free? */ + extern + int vm_page_fictitious_count;/* How many fictitious pages are free? */ +@@ -184,6 +191,8 @@ decl_simple_lock_data(extern,vm_page_queue_lock)/* lock on active and inactive + page queues */ + decl_simple_lock_data(extern,vm_page_queue_free_lock) + /* lock on free page queue */ ++decl_simple_lock_data(extern,vm_page_queue_clean_lock) ++ /* lock on clean page queue */ + + extern unsigned int vm_page_free_wanted; + /* how many threads are waiting for memory */ +@@ -312,4 +321,24 @@ extern unsigned int vm_page_info( + } \ + MACRO_END + ++static inline void ++vm_page_mark_dirty (vm_page_t m, boolean_t dirty) ++{ ++ if (m->dirty == dirty && (dirty || m->cleanq.next)) ++ return; /* No action necessary. */ ++ ++ simple_lock (&vm_page_queue_clean_lock); ++ if (dirty && m->cleanq.next) { ++ queue_remove (&vm_page_queue_clean, m, vm_page_t, cleanq); ++ vm_page_clean_count -= 1; ++ m->cleanq.next = NULL; ++ } ++ if (! 
dirty) { ++ queue_enter (&vm_page_queue_clean, m, vm_page_t, cleanq); ++ vm_page_clean_count += 1; ++ } ++ simple_unlock (&vm_page_queue_clean_lock); ++ m->dirty = dirty; ++} ++ + #endif /* _VM_VM_PAGE_H_ */ +diff --git a/vm/vm_pageout.c b/vm/vm_pageout.c +index c4aba96..740a465 100644 +--- a/vm/vm_pageout.c ++++ b/vm/vm_pageout.c +@@ -293,7 +293,7 @@ vm_pageout_setup( + vm_page_insert(m, new_object, new_offset); + vm_page_unlock_queues(); + +- m->dirty = TRUE; ++ vm_page_mark_dirty (m, TRUE); + m->precious = FALSE; + m->page_lock = VM_PROT_NONE; + m->unlock_request = VM_PROT_NONE; +@@ -306,7 +306,7 @@ vm_pageout_setup( + vm_page_copy(m, new_m); + + vm_object_lock(old_object); +- m->dirty = FALSE; ++ vm_page_mark_dirty (m, FALSE); + pmap_clear_modify(m->phys_addr); + + /* +@@ -336,7 +336,7 @@ vm_pageout_setup( + * Use the new page below. + */ + m = new_m; +- m->dirty = TRUE; ++ vm_page_mark_dirty (m, TRUE); + assert(!m->precious); + PAGE_WAKEUP_DONE(m); + } +@@ -681,6 +681,28 @@ void vm_pageout_scan(void) + /*NOTREACHED*/ + } + ++ /* Try to evict a clean page first. */ ++ simple_lock (&vm_page_queue_clean_lock); ++ int tries; ++ for (tries = vm_page_clean_count; tries; tries--) ++ { ++ assert (! queue_empty (&vm_page_queue_clean)); ++ queue_remove_first (&vm_page_queue_clean, ++ m, vm_page_t, cleanq); ++ if (! m->active && m->inactive ++ && (want_pages || m->external)) ++ { ++ m->cleanq.next = NULL; ++ vm_page_clean_count -= 1; ++ simple_unlock (&vm_page_queue_clean_lock); ++ goto got_one; ++ } ++ else ++ queue_enter (&vm_page_queue_clean, ++ m, vm_page_t, cleanq); ++ } ++ simple_unlock (&vm_page_queue_clean_lock); ++ + vm_pageout_inactive++; + + /* Find a page we are interested in paging out. 
If we +@@ -697,7 +719,7 @@ void vm_pageout_scan(void) + if (!m) + goto pause; + } +- ++ got_one: + object = m->object; + + /* +@@ -780,7 +802,7 @@ void vm_pageout_scan(void) + m->busy = TRUE; + pmap_page_protect(m->phys_addr, VM_PROT_NONE); + if (!m->dirty) +- m->dirty = pmap_is_modified(m->phys_addr); ++ vm_page_mark_dirty (m, pmap_is_modified(m->phys_addr)); + + if (m->external) { + /* Figure out if we still care about this +diff --git a/vm/vm_resident.c b/vm/vm_resident.c +index b65b756..2e918da 100644 +--- a/vm/vm_resident.c ++++ b/vm/vm_resident.c +@@ -148,9 +148,12 @@ vm_offset_t vm_page_fictitious_addr = (vm_offset_t) -1; + queue_head_t vm_page_queue_active; + queue_head_t vm_page_queue_inactive; + decl_simple_lock_data(,vm_page_queue_lock) ++queue_head_t vm_page_queue_clean; ++decl_simple_lock_data(,vm_page_queue_clean_lock) + int vm_page_active_count; + int vm_page_inactive_count; + int vm_page_wire_count; ++int vm_page_clean_count; + + /* + * Several page replacement parameters are also +@@ -200,6 +203,7 @@ void vm_page_bootstrap( + */ + + m = &vm_page_template; ++ m->cleanq.next = NULL; + m->object = VM_OBJECT_NULL; /* reset later */ + m->offset = 0; /* reset later */ + m->wire_count = 0; +@@ -231,12 +235,14 @@ void vm_page_bootstrap( + */ + + simple_lock_init(&vm_page_queue_free_lock); ++ simple_lock_init(&vm_page_queue_clean_lock); + simple_lock_init(&vm_page_queue_lock); + + vm_page_queue_free = VM_PAGE_NULL; + vm_page_queue_fictitious = VM_PAGE_NULL; + queue_init(&vm_page_queue_active); + queue_init(&vm_page_queue_inactive); ++ queue_init(&vm_page_queue_clean); + + vm_page_free_wanted = 0; + +@@ -666,7 +672,6 @@ void vm_page_remove( + bucket->pages = mem->next; + } else { + vm_page_t *prev; +- + for (prev = &this->next; + (this = *prev) != mem; + prev = &this->next) +@@ -1304,6 +1309,10 @@ void vm_page_free( + if (mem->absent) + vm_object_absent_release(mem->object); + ++ ++ /* Get it off the clean list. 
*/ ++ vm_page_mark_dirty (mem, TRUE); ++ + /* + * XXX The calls to vm_page_init here are + * really overkill. +diff --git a/xen/block.c b/xen/block.c +index d98b31e..175955a 100644 +--- a/xen/block.c ++++ b/xen/block.c +@@ -539,7 +539,7 @@ device_read (void *d, ipc_port_t reply_port, + assert (m->busy); + vm_page_lock_queues (); + PAGE_WAKEUP_DONE (m); +- m->dirty = TRUE; ++ vm_page_mark_dirty (m, TRUE); + vm_page_insert (m, object, o); + vm_page_unlock_queues (); + o += PAGE_SIZE; |