path: root/kern/slab.c
author    Richard Braun <rbraun@sceen.net>    2013-04-21 21:17:53 +0200
committer Richard Braun <rbraun@sceen.net>    2013-04-21 21:17:53 +0200
commit    1af3af1431037f87d29434a46b91e1a059f785c2 (patch)
tree      8125c593161d879f86178b1a7cbb2dae21ecf79f /kern/slab.c
parent    24832de763ad58be6afdcff6c761b54ccee42667 (diff)
Optimize slab reaping
Instead of walking the list of free slabs while holding the cache lock,
detach the list from the cache, compute the final count values directly,
and destroy the slabs only after releasing the cache lock.

* kern/slab.c (kmem_cache_reap): Optimize.
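As a rough illustration of why this is a win, the sketch below shows the same
pattern in plain userspace C: detach the whole free list and capture the
counters while the lock is held, then destroy the detached slabs with the lock
released. The structures, the pthread mutex and the cache_reap()/free() names
are simplified stand-ins for illustration only, not the actual Mach slab
allocator code (the real change is in the diff below).

    #include <assert.h>
    #include <pthread.h>
    #include <stdlib.h>

    /* Simplified stand-ins for the real cache and slab structures. */
    struct slab {
        struct slab *next;          /* singly linked free list, for brevity */
    };

    struct cache {
        pthread_mutex_t lock;
        struct slab *free_slabs;    /* completely free slabs */
        unsigned long nr_free_slabs;
    };

    void cache_reap(struct cache *cache)
    {
        struct slab *dead_slabs, *slab;
        unsigned long nr_free_slabs;

        pthread_mutex_lock(&cache->lock);

        /* O(1) work under the lock: detach the list and record the count. */
        dead_slabs = cache->free_slabs;
        nr_free_slabs = cache->nr_free_slabs;
        cache->free_slabs = NULL;
        cache->nr_free_slabs = 0;

        pthread_mutex_unlock(&cache->lock);

        /* The per-slab work (returning memory) runs without the lock held. */
        while (dead_slabs != NULL) {
            slab = dead_slabs;
            dead_slabs = slab->next;
            free(slab);
            nr_free_slabs--;
        }

        assert(nr_free_slabs == 0);
    }

The point of the change is that the cache lock is now held only for a constant
amount of work, regardless of how many free slabs are reclaimed; the previous
loop held it for one iteration per slab.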
Diffstat (limited to 'kern/slab.c')
-rw-r--r--  kern/slab.c  |  23
1 file changed, 10 insertions(+), 13 deletions(-)
diff --git a/kern/slab.c b/kern/slab.c
index cff8096..0f0d4cb 100644
--- a/kern/slab.c
+++ b/kern/slab.c
@@ -899,31 +899,28 @@ static void kmem_cache_reap(struct kmem_cache *cache)
 {
     struct kmem_slab *slab;
     struct list dead_slabs;
+    unsigned long nr_free_slabs;
 
     if (cache->flags & KMEM_CF_NO_RECLAIM)
         return;
 
-    list_init(&dead_slabs);
-
     simple_lock(&cache->lock);
-
-    while (!list_empty(&cache->free_slabs)) {
-        slab = list_first_entry(&cache->free_slabs, struct kmem_slab,
-                                list_node);
-        list_remove(&slab->list_node);
-        list_insert(&dead_slabs, &slab->list_node);
-        cache->nr_bufs -= cache->bufs_per_slab;
-        cache->nr_slabs--;
-        cache->nr_free_slabs--;
-    }
-
+    list_set_head(&dead_slabs, &cache->free_slabs);
+    list_init(&cache->free_slabs);
+    nr_free_slabs = cache->nr_free_slabs;
+    cache->nr_bufs -= cache->bufs_per_slab * nr_free_slabs;
+    cache->nr_slabs -= nr_free_slabs;
+    cache->nr_free_slabs = 0;
     simple_unlock(&cache->lock);
 
     while (!list_empty(&dead_slabs)) {
         slab = list_first_entry(&dead_slabs, struct kmem_slab, list_node);
         list_remove(&slab->list_node);
         kmem_slab_destroy(slab, cache);
+        nr_free_slabs--;
     }
+
+    assert(nr_free_slabs == 0);
 }
 
 /*