-rw-r--r-- | kern/slab.c  | 54 |
-rw-r--r-- | vm/vm_page.c |  1 |
-rw-r--r-- | vm/vm_page.h | 16 |
3 files changed, 60 insertions, 11 deletions
diff --git a/kern/slab.c b/kern/slab.c
index 375ad23..cb72047 100644
--- a/kern/slab.c
+++ b/kern/slab.c
@@ -424,6 +424,15 @@ kmem_pagefree(vm_offset_t addr, vm_size_t size, int flags)
         : kmem_pagefree_virtual(addr, size);
 }
 
+static inline int kmem_slab_use_tree(int flags)
+{
+    if (flags & KMEM_CF_VERIFY)
+        return 1;
+
+    return ((flags & (KMEM_CF_SLAB_EXTERNAL | KMEM_CF_DIRECTMAP))
+            == KMEM_CF_SLAB_EXTERNAL);
+}
+
 static void kmem_slab_create_verify(struct kmem_slab *slab,
                                     struct kmem_cache *cache)
 {
@@ -470,6 +479,14 @@ static struct kmem_slab * kmem_slab_create(struct kmem_cache *cache,
             kmem_pagefree(slab_buf, cache->slab_size, cache->flags);
             return NULL;
         }
+
+        if (cache->flags & KMEM_CF_DIRECTMAP) {
+            struct vm_page *page;
+
+            page = vm_page_lookup_pa(kvtophys(slab_buf));
+            assert(page != NULL);
+            vm_page_set_priv(page, slab);
+        }
     } else {
         slab = (struct kmem_slab *)(slab_buf + cache->slab_size) - 1;
     }
@@ -537,15 +554,21 @@ static void kmem_slab_destroy(struct kmem_slab *slab, struct kmem_cache *cache)
         kmem_slab_destroy_verify(slab, cache);
 
     slab_buf = (vm_offset_t)P2ALIGN((unsigned long)slab->addr, PAGE_SIZE);
-    kmem_pagefree(slab_buf, cache->slab_size, cache->flags);
 
-    if (cache->flags & KMEM_CF_SLAB_EXTERNAL)
+    if (cache->flags & KMEM_CF_SLAB_EXTERNAL) {
+        if (cache->flags & KMEM_CF_DIRECTMAP) {
+            struct vm_page *page;
+
+            /* Not strictly needed, but let's increase safety */
+            page = vm_page_lookup_pa(kvtophys(slab_buf));
+            assert(page != NULL);
+            vm_page_set_priv(page, NULL);
+        }
+
         kmem_cache_free(&kmem_slab_cache, (vm_offset_t)slab);
-}
+    }
 
-static inline int kmem_slab_use_tree(int flags)
-{
-    return (flags & KMEM_CF_SLAB_EXTERNAL) || (flags & KMEM_CF_VERIFY);
+    kmem_pagefree(slab_buf, cache->slab_size, cache->flags);
 }
 
 static inline int kmem_slab_cmp_lookup(const void *addr,
@@ -968,12 +991,21 @@ static void kmem_cache_free_to_slab(struct kmem_cache *cache, void *buf)
     union kmem_bufctl *bufctl;
 
     if (cache->flags & KMEM_CF_SLAB_EXTERNAL) {
-        struct rbtree_node *node;
+        if (cache->flags & KMEM_CF_DIRECTMAP) {
+            struct vm_page *page;
+
+            page = vm_page_lookup_pa(kvtophys((vm_offset_t)buf));
+            assert(page != NULL);
+            slab = vm_page_get_priv(page);
+        } else {
+            struct rbtree_node *node;
+
+            node = rbtree_lookup_nearest(&cache->active_slabs, buf,
+                                         kmem_slab_cmp_lookup, RBTREE_LEFT);
+            assert(node != NULL);
+            slab = rbtree_entry(node, struct kmem_slab, tree_node);
+        }
 
-        node = rbtree_lookup_nearest(&cache->active_slabs, buf,
-                                     kmem_slab_cmp_lookup, RBTREE_LEFT);
-        assert(node != NULL);
-        slab = rbtree_entry(node, struct kmem_slab, tree_node);
         assert((unsigned long)buf < (P2ALIGN((unsigned long)slab->addr
                + cache->slab_size, PAGE_SIZE)));
     } else {
diff --git a/vm/vm_page.c b/vm/vm_page.c
index 48d7096..a868fce 100644
--- a/vm/vm_page.c
+++ b/vm/vm_page.c
@@ -165,6 +165,7 @@ vm_page_init_pa(struct vm_page *page, unsigned short seg_index, phys_addr_t pa)
     page->type = VM_PT_RESERVED;
     page->seg_index = seg_index;
     page->order = VM_PAGE_ORDER_UNLISTED;
+    page->priv = NULL;
     page->phys_addr = pa;
 }
 
diff --git a/vm/vm_page.h b/vm/vm_page.h
index 6f4f3c2..f2e20a7 100644
--- a/vm/vm_page.h
+++ b/vm/vm_page.h
@@ -82,6 +82,7 @@ struct vm_page {
     unsigned short type;
     unsigned short seg_index;
     unsigned short order;
+    void *priv;
 
     /*
      * This member is used throughout the code and may only change for
@@ -424,6 +425,21 @@ vm_page_direct_ptr(const struct vm_page *page)
 #endif
 
 /*
+ * Associate private data with a page.
+ */
+static inline void
+vm_page_set_priv(struct vm_page *page, void *priv)
+{
+    page->priv = priv;
+}
+
+static inline void *
+vm_page_get_priv(const struct vm_page *page)
+{
+    return page->priv;
+}
+
+/*
  * Load physical memory into the vm_page module at boot time.
  *
  * The avail_start and avail_end parameters are used to maintain a simple
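Note for readers of this change (not part of the patch): the new vm_page_set_priv()/vm_page_get_priv() accessors let kmem_slab_create() record the slab descriptor in the vm_page backing a direct-mapped external slab, so kmem_cache_free_to_slab() can recover the descriptor from the page containing the buffer instead of searching the red-black tree, which kmem_slab_use_tree() now reserves for verified caches and external, non-direct-mapped slabs. The following stand-alone C sketch only models that lookup scheme in user space; page_of(), physmem and the sizes are made-up stand-ins, not gnumach interfaces.

/*
 * Stand-alone model of the per-page private-pointer lookup (not gnumach
 * code): each "physical page" carries a priv pointer, the slab descriptor
 * is stored there when the slab is created, and freeing a buffer recovers
 * the descriptor from the page that contains it, with no tree lookup.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096UL
#define NR_PAGES  16

struct vm_page {
    void *priv;                 /* mirrors the new vm_page.priv field */
};

struct kmem_slab {
    void *addr;                 /* start of the slab's buffer space */
};

static struct vm_page page_table[NR_PAGES];
static char physmem[NR_PAGES * PAGE_SIZE];

/* Model of vm_page_lookup_pa(kvtophys(addr)): map an address to its page. */
static struct vm_page *page_of(void *addr)
{
    uintptr_t off = (uintptr_t)addr - (uintptr_t)physmem;

    assert(off < sizeof(physmem));
    return &page_table[off / PAGE_SIZE];
}

int main(void)
{
    /* "Create" a slab backed by page 3 and publish it in that page. */
    struct kmem_slab slab = { .addr = &physmem[3 * PAGE_SIZE] };
    page_of(slab.addr)->priv = &slab;

    /* "Free" a buffer inside that page: constant-time descriptor lookup. */
    void *buf = &physmem[3 * PAGE_SIZE + 128];
    struct kmem_slab *found = page_of(buf)->priv;

    assert(found == &slab);
    printf("slab found via page priv: %p\n", (void *)found);
    return 0;
}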