Diffstat (limited to 'kern/slab.c')
 kern/slab.c | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/kern/slab.c b/kern/slab.c
index c8a2b7c..0a4dbdf 100644
--- a/kern/slab.c
+++ b/kern/slab.c
@@ -878,7 +878,7 @@ static int kmem_cache_grow(struct kmem_cache *cache)
simple_lock(&cache->lock);
if (slab != NULL) {
- list_insert(&cache->free_slabs, &slab->list_node);
+ list_insert_head(&cache->free_slabs, &slab->list_node);
cache->nr_bufs += cache->bufs_per_slab;
cache->nr_slabs++;
cache->nr_free_slabs++;
@@ -957,7 +957,7 @@ static void * kmem_cache_alloc_from_slab(struct kmem_cache *cache)
} else if (slab->nr_refs == 1) {
/* The slab has become partial */
list_remove(&slab->list_node);
- list_insert(&cache->partial_slabs, &slab->list_node);
+ list_insert_head(&cache->partial_slabs, &slab->list_node);
cache->nr_free_slabs--;
}
@@ -1010,11 +1010,11 @@ static void kmem_cache_free_to_slab(struct kmem_cache *cache, void *buf)
if (cache->bufs_per_slab > 1)
list_remove(&slab->list_node);
- list_insert(&cache->free_slabs, &slab->list_node);
+ list_insert_head(&cache->free_slabs, &slab->list_node);
cache->nr_free_slabs++;
} else if (slab->nr_refs == (cache->bufs_per_slab - 1)) {
/* The slab has become partial */
- list_insert(&cache->partial_slabs, &slab->list_node);
+ list_insert_head(&cache->partial_slabs, &slab->list_node);
}
}
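
The change is mechanical: every insertion into the cache's free and partial slab lists now goes through list_insert_head(), making the front-of-list placement explicit. As a rough illustration of the intended semantics, here is a minimal, self-contained sketch of head insertion into a circular doubly linked list; the struct and helper names are simplified stand-ins, not the actual kern/list.h code.

/*
 * Sketch of head insertion into a circular doubly linked list,
 * illustrating the list_insert_head() semantics the patch switches to.
 * Simplified stand-alone example; types and names are assumptions,
 * not the real kern/list.h implementation.
 */
#include <stdio.h>

struct list {
    struct list *prev;
    struct list *next;
};

static void list_init(struct list *list)
{
    list->prev = list;
    list->next = list;
}

/* Insert node right after the list head, i.e. at the front. */
static void list_insert_head(struct list *list, struct list *node)
{
    node->prev = list;
    node->next = list->next;
    list->next->prev = node;
    list->next = node;
}

struct slab {
    struct list list_node;   /* first member, so a node pointer is a slab pointer */
    int id;
};

int main(void)
{
    struct list free_slabs;
    struct slab a = { .id = 1 }, b = { .id = 2 };

    list_init(&free_slabs);
    list_insert_head(&free_slabs, &a.list_node);
    list_insert_head(&free_slabs, &b.list_node);

    /* Walk the list: the most recently inserted slab comes first (2, then 1). */
    for (struct list *node = free_slabs.next; node != &free_slabs;
         node = node->next) {
        struct slab *slab = (struct slab *)node;
        printf("slab %d\n", slab->id);
    }

    return 0;
}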