summary | refs | log | tree | commit | diff
diff options
context:
space:
mode:
author	Richard Braun <rbraun@sceen.net>	2013-05-16 00:04:31 +0200
committer	Richard Braun <rbraun@sceen.net>	2013-05-16 00:15:59 +0200
commit	c4d56664a22f0c46ec0e81b856860964ea89f420 (patch)
tree	b531b90896a65b05ad2d35f5b88306a20e6123b2
parent	2840e10b3294607268dde98ed247c4badd6735b2 (diff)
Reduce fragmentation in the slab allocator
This reverts a change introduced when reworking slab list handling that made the allocator store slabs in LIFO order, whatever their reference count. While that is fine for free slabs, it actually increased fragmentation for partial slabs.

* kern/slab.c (kmem_cache_alloc_from_slab): Insert slabs that become partial at the end of the partial slabs list.
-rw-r--r--	kern/slab.c	| 7
1 file changed, 5 insertions(+), 2 deletions(-)
diff --git a/kern/slab.c b/kern/slab.c
index 0a4dbdf..d14d07a 100644
--- a/kern/slab.c
+++ b/kern/slab.c
@@ -955,9 +955,12 @@ static void * kmem_cache_alloc_from_slab(struct kmem_cache *cache)
if (slab->nr_refs == 1)
cache->nr_free_slabs--;
} else if (slab->nr_refs == 1) {
- /* The slab has become partial */
+ /*
+ * The slab has become partial. Insert the new slab at the end of
+ * the list to reduce fragmentation.
+ */
list_remove(&slab->list_node);
- list_insert_head(&cache->partial_slabs, &slab->list_node);
+ list_insert_tail(&cache->partial_slabs, &slab->list_node);
cache->nr_free_slabs--;
}