Diffstat (limited to 'kern')
-rw-r--r--    kern/slab.c    82
-rw-r--r--    kern/slab.h     7
2 files changed, 8 insertions, 81 deletions
diff --git a/kern/slab.c b/kern/slab.c
index 3db4db1..cff8096 100644
--- a/kern/slab.c
+++ b/kern/slab.c
@@ -878,7 +878,7 @@ static int kmem_cache_grow(struct kmem_cache *cache)
     simple_lock(&cache->lock);
 
     if (slab != NULL) {
-        list_insert_tail(&cache->free_slabs, &slab->list_node);
+        list_insert(&cache->free_slabs, &slab->list_node);
         cache->nr_bufs += cache->bufs_per_slab;
         cache->nr_slabs++;
         cache->nr_free_slabs++;
@@ -951,49 +951,17 @@ static void * kmem_cache_alloc_from_slab(struct kmem_cache *cache)
     slab->nr_refs++;
     cache->nr_objs++;
 
-    /*
-     * The slab has become complete.
-     */
     if (slab->nr_refs == cache->bufs_per_slab) {
+        /* The slab has become complete */
         list_remove(&slab->list_node);
 
         if (slab->nr_refs == 1)
             cache->nr_free_slabs--;
     } else if (slab->nr_refs == 1) {
-        /*
-         * The slab has become partial.
-         */
+        /* The slab has become partial */
         list_remove(&slab->list_node);
-        list_insert_tail(&cache->partial_slabs, &slab->list_node);
+        list_insert(&cache->partial_slabs, &slab->list_node);
         cache->nr_free_slabs--;
-    } else if (!list_singular(&cache->partial_slabs)) {
-        struct list *node;
-        struct kmem_slab *tmp;
-
-        /*
-         * The slab remains partial. If there are more than one partial slabs,
-         * maintain the list sorted.
-         */
-
-        assert(slab->nr_refs > 1);
-
-        for (node = list_prev(&slab->list_node);
-             !list_end(&cache->partial_slabs, node);
-             node = list_prev(node)) {
-            tmp = list_entry(node, struct kmem_slab, list_node);
-
-            if (tmp->nr_refs >= slab->nr_refs)
-                break;
-        }
-
-        /*
-         * If the direct neighbor was found, the list is already sorted.
-         * If no slab was found, the slab is inserted at the head of the list.
-         */
-        if (node != list_prev(&slab->list_node)) {
-            list_remove(&slab->list_node);
-            list_insert_after(node, &slab->list_node);
-        }
     }
 
     if ((slab->nr_refs == 1) && kmem_slab_use_tree(cache->flags))
@@ -1036,54 +1004,20 @@ static void kmem_cache_free_to_slab(struct kmem_cache *cache, void *buf)
     slab->nr_refs--;
     cache->nr_objs--;
 
-    /*
-     * The slab has become free.
-     */
     if (slab->nr_refs == 0) {
+        /* The slab has become free */
+
         if (kmem_slab_use_tree(cache->flags))
             rbtree_remove(&cache->active_slabs, &slab->tree_node);
 
-        /*
-         * The slab was partial.
-         */
         if (cache->bufs_per_slab > 1)
             list_remove(&slab->list_node);
 
-        list_insert_tail(&cache->free_slabs, &slab->list_node);
+        list_insert(&cache->free_slabs, &slab->list_node);
         cache->nr_free_slabs++;
     } else if (slab->nr_refs == (cache->bufs_per_slab - 1)) {
-        /*
-         * The slab has become partial.
-         */
+        /* The slab has become partial */
         list_insert(&cache->partial_slabs, &slab->list_node);
-    } else if (!list_singular(&cache->partial_slabs)) {
-        struct list *node;
-        struct kmem_slab *tmp;
-
-        /*
-         * The slab remains partial. If there are more than one partial slabs,
-         * maintain the list sorted.
-         */
-
-        assert(slab->nr_refs > 0);
-
-        for (node = list_next(&slab->list_node);
-             !list_end(&cache->partial_slabs, node);
-             node = list_next(node)) {
-            tmp = list_entry(node, struct kmem_slab, list_node);
-
-            if (tmp->nr_refs <= slab->nr_refs)
-                break;
-        }
-
-        /*
-         * If the direct neighbor was found, the list is already sorted.
-         * If no slab was found, the slab is inserted at the tail of the list.
-         */
-        if (node != list_next(&slab->list_node)) {
-            list_remove(&slab->list_node);
-            list_insert_before(node, &slab->list_node);
-        }
     }
 }
 
diff --git a/kern/slab.h b/kern/slab.h
index 47bef21..b842fb7 100644
--- a/kern/slab.h
+++ b/kern/slab.h
@@ -155,13 +155,6 @@ typedef void (*kmem_slab_free_fn_t)(vm_offset_t, vm_size_t);
  * Cache of objects.
  *
  * Locking order : cpu_pool -> cache. CPU pools locking is ordered by CPU ID.
- *
- * The partial slabs list is sorted by slab references. Slabs with a high
- * number of references are placed first on the list to reduce fragmentation.
- * Sorting occurs at insertion/removal of buffers in a slab. As the list
- * is maintained sorted, and the number of references only changes by one,
- * this is a very cheap operation in the average case and the worst (linear)
- * case is very unlikely.
  */
 struct kmem_cache {
 #if SLAB_USE_CPU_POOLS
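The net effect of the slab.c hunks is that free and partial slabs are now pushed onto their lists with plain head insertion (list_insert) instead of being appended (list_insert_tail) or kept sorted by reference count. The standalone sketch below illustrates the two insertion disciplines on an intrusive circular doubly-linked list. It is a minimal model written for this note: the struct list layout, the list_insert/list_insert_tail bodies, the list_entry macro and the struct slab stand-in are assumptions loosely patterned on kern/list.h and kern/slab.h, not the kernel's actual definitions.

#include <stddef.h>
#include <stdio.h>

/* Minimal intrusive circular doubly-linked list (modeled on kern/list.h). */
struct list {
    struct list *prev;
    struct list *next;
};

static void list_init(struct list *list)
{
    list->prev = list;
    list->next = list;
}

/* Head insertion: the behavior assumed for list_insert() in this change. */
static void list_insert(struct list *list, struct list *node)
{
    node->prev = list;
    node->next = list->next;
    list->next->prev = node;
    list->next = node;
}

/* Tail insertion: the list_insert_tail() calls being replaced. */
static void list_insert_tail(struct list *list, struct list *node)
{
    node->prev = list->prev;
    node->next = list;
    list->prev->next = node;
    list->prev = node;
}

/* Convert a node pointer back into its enclosing structure. */
#define list_entry(node, type, member) \
    ((type *)((char *)(node) - offsetof(type, member)))

/* Hypothetical stand-in for a slab linked on a cache's partial/free list. */
struct slab {
    struct list list_node;
    int id;
};

int main(void)
{
    struct list partial_slabs;
    struct slab a = { .id = 1 }, b = { .id = 2 }, c = { .id = 3 };

    list_init(&partial_slabs);

    /* Head insertion puts the most recently inserted slab first ... */
    list_insert(&partial_slabs, &a.list_node);
    list_insert(&partial_slabs, &b.list_node);
    /* ... while tail insertion appends, as the old code did. */
    list_insert_tail(&partial_slabs, &c.list_node);

    for (struct list *n = partial_slabs.next; n != &partial_slabs; n = n->next)
        printf("slab %d\n", list_entry(n, struct slab, list_node)->id);
    /* Prints slabs 2, 1, 3. */

    return 0;
}

With head insertion every allocate and free touches the list in O(1); the trade-off is that the placement policy described by the removed slab.h comment (most-referenced slabs first, to reduce fragmentation) is no longer maintained.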