author    Samuel Thibault <samuel.thibault@ens-lyon.org>  2013-05-01 23:36:37 +0200
committer Samuel Thibault <samuel.thibault@ens-lyon.org>  2013-05-01 23:36:37 +0200
commit    0e40fde7b8b9ff050b41ca62e86a91dec726972e (patch)
tree      6e76f00b09cf2c24d9283dcc5468120e612b74f1
parent    342cfcb319cad816da5472c8fe5ca82372965088 (diff)
parent    1af3af1431037f87d29434a46b91e1a059f785c2 (diff)
Merge branch 'master' of git.savannah.gnu.org:/srv/git/hurd/gnumach
-rw-r--r--  kern/slab.c  107
-rw-r--r--  kern/slab.h    7
2 files changed, 20 insertions(+), 94 deletions(-)
diff --git a/kern/slab.c b/kern/slab.c
index 99d5bca..0f0d4cb 100644
--- a/kern/slab.c
+++ b/kern/slab.c
@@ -878,7 +878,7 @@ static int kmem_cache_grow(struct kmem_cache *cache)
simple_lock(&cache->lock);
if (slab != NULL) {
- list_insert_tail(&cache->free_slabs, &slab->list_node);
+ list_insert(&cache->free_slabs, &slab->list_node);
cache->nr_bufs += cache->bufs_per_slab;
cache->nr_slabs++;
cache->nr_free_slabs++;
@@ -899,31 +899,28 @@ static void kmem_cache_reap(struct kmem_cache *cache)
{
struct kmem_slab *slab;
struct list dead_slabs;
+ unsigned long nr_free_slabs;
if (cache->flags & KMEM_CF_NO_RECLAIM)
return;
- list_init(&dead_slabs);
-
simple_lock(&cache->lock);
-
- while (!list_empty(&cache->free_slabs)) {
- slab = list_first_entry(&cache->free_slabs, struct kmem_slab,
- list_node);
- list_remove(&slab->list_node);
- list_insert(&dead_slabs, &slab->list_node);
- cache->nr_bufs -= cache->bufs_per_slab;
- cache->nr_slabs--;
- cache->nr_free_slabs--;
- }
-
+ list_set_head(&dead_slabs, &cache->free_slabs);
+ list_init(&cache->free_slabs);
+ nr_free_slabs = cache->nr_free_slabs;
+ cache->nr_bufs -= cache->bufs_per_slab * nr_free_slabs;
+ cache->nr_slabs -= nr_free_slabs;
+ cache->nr_free_slabs = 0;
simple_unlock(&cache->lock);
while (!list_empty(&dead_slabs)) {
slab = list_first_entry(&dead_slabs, struct kmem_slab, list_node);
list_remove(&slab->list_node);
kmem_slab_destroy(slab, cache);
+ nr_free_slabs--;
}
+
+ assert(nr_free_slabs == 0);
}
/*
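The reap hunk above is the heart of this merge: instead of unlinking free slabs one by one while holding cache->lock, the entire free list is transferred to the local dead_slabs head in constant time, and the expensive kmem_slab_destroy() calls happen only after the lock is dropped. Below is a minimal standalone sketch of the list semantics this relies on; the real primitives live in the kernel's list.h, so these bodies (including the empty-list handling in list_set_head) are assumptions that merely mirror the names used in the diff.

struct list {
    struct list *prev, *next;
};

/* Circular doubly linked list with a dedicated head node; an empty
 * list is a head pointing at itself. */
static inline void list_init(struct list *list)
{
    list->prev = list->next = list;
}

static inline int list_empty(const struct list *list)
{
    return list->next == list;
}

/* O(1) transfer: make new_head the head of every node currently chained
 * on old_head. old_head itself is left stale; the caller reinitializes
 * it, which is what kmem_cache_reap() does with
 * list_init(&cache->free_slabs). */
static inline void list_set_head(struct list *new_head,
                                 const struct list *old_head)
{
    if (list_empty(old_head)) {
        list_init(new_head);
        return;
    }

    *new_head = *old_head;
    new_head->next->prev = new_head;
    new_head->prev->next = new_head;
}

Under the lock, the function now pays only for the pointer swap and three counter updates; the trailing assert(nr_free_slabs == 0) cross-checks that the counter snapshot taken under the lock matched the number of slabs actually destroyed afterwards.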
@@ -951,49 +948,17 @@ static void * kmem_cache_alloc_from_slab(struct kmem_cache *cache)
slab->nr_refs++;
cache->nr_objs++;
- /*
- * The slab has become complete.
- */
if (slab->nr_refs == cache->bufs_per_slab) {
+ /* The slab has become complete */
list_remove(&slab->list_node);
if (slab->nr_refs == 1)
cache->nr_free_slabs--;
} else if (slab->nr_refs == 1) {
- /*
- * The slab has become partial.
- */
+ /* The slab has become partial */
list_remove(&slab->list_node);
- list_insert_tail(&cache->partial_slabs, &slab->list_node);
+ list_insert(&cache->partial_slabs, &slab->list_node);
cache->nr_free_slabs--;
- } else if (!list_singular(&cache->partial_slabs)) {
- struct list *node;
- struct kmem_slab *tmp;
-
- /*
- * The slab remains partial. If there are more than one partial slabs,
- * maintain the list sorted.
- */
-
- assert(slab->nr_refs > 1);
-
- for (node = list_prev(&slab->list_node);
- !list_end(&cache->partial_slabs, node);
- node = list_prev(node)) {
- tmp = list_entry(node, struct kmem_slab, list_node);
-
- if (tmp->nr_refs >= slab->nr_refs)
- break;
- }
-
- /*
- * If the direct neighbor was found, the list is already sorted.
- * If no slab was found, the slab is inserted at the head of the list.
- */
- if (node != list_prev(&slab->list_node)) {
- list_remove(&slab->list_node);
- list_insert_after(node, &slab->list_node);
- }
}
if ((slab->nr_refs == 1) && kmem_slab_use_tree(cache->flags))
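This hunk, and the mirror-image one in kmem_cache_free_to_slab() below, drop the code that kept cache->partial_slabs sorted by reference count in favor of plain head insertion. Per the comment removed from slab.h at the end of this diff, the sort existed to hand out buffers from the most-referenced partial slabs first and so reduce fragmentation; the merge trades that placement policy for strictly O(1) list updates on every allocation and free. The two primitives being swapped differ only in which side of the head node they link to. A sketch reusing the struct list above (the bodies are assumptions; list_insert is taken to mean head insertion, as the diff implies):

/* What the diff's list_insert is assumed to do: link right after the head. */
static inline void list_insert_head(struct list *head, struct list *node)
{
    node->prev = head;
    node->next = head->next;
    head->next->prev = node;
    head->next = node;
}

/* The list_insert_tail being removed: link right before the head. */
static inline void list_insert_tail(struct list *head, struct list *node)
{
    node->next = head;
    node->prev = head->prev;
    head->prev->next = node;
    head->prev = node;
}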
@@ -1036,54 +1001,20 @@ static void kmem_cache_free_to_slab(struct kmem_cache *cache, void *buf)
slab->nr_refs--;
cache->nr_objs--;
- /*
- * The slab has become free.
- */
if (slab->nr_refs == 0) {
+ /* The slab has become free */
+
if (kmem_slab_use_tree(cache->flags))
rbtree_remove(&cache->active_slabs, &slab->tree_node);
- /*
- * The slab was partial.
- */
if (cache->bufs_per_slab > 1)
list_remove(&slab->list_node);
- list_insert_tail(&cache->free_slabs, &slab->list_node);
+ list_insert(&cache->free_slabs, &slab->list_node);
cache->nr_free_slabs++;
} else if (slab->nr_refs == (cache->bufs_per_slab - 1)) {
- /*
- * The slab has become partial.
- */
+ /* The slab has become partial */
list_insert(&cache->partial_slabs, &slab->list_node);
- } else if (!list_singular(&cache->partial_slabs)) {
- struct list *node;
- struct kmem_slab *tmp;
-
- /*
- * The slab remains partial. If there are more than one partial slabs,
- * maintain the list sorted.
- */
-
- assert(slab->nr_refs > 0);
-
- for (node = list_next(&slab->list_node);
- !list_end(&cache->partial_slabs, node);
- node = list_next(node)) {
- tmp = list_entry(node, struct kmem_slab, list_node);
-
- if (tmp->nr_refs <= slab->nr_refs)
- break;
- }
-
- /*
- * If the direct neighbor was found, the list is already sorted.
- * If no slab was found, the slab is inserted at the tail of the list.
- */
- if (node != list_next(&slab->list_node)) {
- list_remove(&slab->list_node);
- list_insert_before(node, &slab->list_node);
- }
}
}
@@ -1305,7 +1236,9 @@ fast_free:
slab_free:
#endif /* SLAB_USE_CPU_POOLS */
+ simple_lock(&cache->lock);
kmem_cache_free_to_slab(cache, (void *)obj);
+ simple_unlock(&cache->lock);
}
void slab_collect(void)
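The final slab.c hunk moves locking to the call site: kmem_cache_free_to_slab() now runs with cache->lock taken and released by its caller on the slab_free slow path. A toy, self-contained sketch of that convention, with pthread and hypothetical toy_* names standing in for simple_lock and the kernel types; only the shape of the calling pattern is taken from the diff:

#include <pthread.h>

struct toy_cache {
    pthread_mutex_t lock;
    long nr_objs;
};

/* Runs with cache->lock held by the caller, as kmem_cache_free_to_slab()
 * does after this change. */
static void toy_free_to_slab(struct toy_cache *cache)
{
    cache->nr_objs--;   /* ...unlink the buffer, update slab state... */
}

/* The slow path takes the lock around the whole call, mirroring the
 * simple_lock()/simple_unlock() pair added above. */
static void toy_free_slow(struct toy_cache *cache)
{
    pthread_mutex_lock(&cache->lock);
    toy_free_to_slab(cache);
    pthread_mutex_unlock(&cache->lock);
}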
diff --git a/kern/slab.h b/kern/slab.h
index 47bef21..b842fb7 100644
--- a/kern/slab.h
+++ b/kern/slab.h
@@ -155,13 +155,6 @@ typedef void (*kmem_slab_free_fn_t)(vm_offset_t, vm_size_t);
* Cache of objects.
*
* Locking order : cpu_pool -> cache. CPU pools locking is ordered by CPU ID.
- *
- * The partial slabs list is sorted by slab references. Slabs with a high
- * number of references are placed first on the list to reduce fragmentation.
- * Sorting occurs at insertion/removal of buffers in a slab. As the list
- * is maintained sorted, and the number of references only changes by one,
- * this is a very cheap operation in the average case and the worst (linear)
- * case is very unlikely.
*/
struct kmem_cache {
#if SLAB_USE_CPU_POOLS