author	Richard Braun <rbraun@sceen.net>	2012-12-01 15:44:12 +0100
committer	Justus Winter <4winter@informatik.uni-hamburg.de>	2015-07-26 12:14:40 +0200
commit	8002f6bac17e68ec131d9361dded3e9ca9022047 (patch)
tree	14a974293c9637ab3324976ab8867fdc9f50459e
parent	3a4a01e93287a1d52809efdcf512562e77067147 (diff)
kern/slab: remove the KMEM_CACHE_NORECLAIM flag
Don't encourage anyone to use non-reclaimable pools of resources; it's a Bad Thing To Do.
-rw-r--r--	kern/slab.c	24
-rw-r--r--	kern/slab.h	17
-rw-r--r--	vm/vm_map.c	2
3 files changed, 16 insertions(+), 27 deletions(-)
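
With the flag gone, every cache gives its free slabs back to their source, and callers simply stop passing KMEM_CACHE_NORECLAIM. A minimal before/after sketch, assuming the kmem_cache_init() signature shown in kern/slab.h below (my_cache and struct my_obj are hypothetical names):

    /* Before this commit: slabs of this cache were never given back. */
    kmem_cache_init(&my_cache, "my_obj", sizeof(struct my_obj), 0,
                    NULL,                 /* no constructor */
                    NULL, NULL,           /* default slab backend */
                    KMEM_CACHE_NOOFFSLAB | KMEM_CACHE_NORECLAIM);

    /* After: the flag (and the KMEM_CF_NO_RECLAIM assertion paths) no
     * longer exist; every cache is reclaimable by kmem_cache_reap(). */
    kmem_cache_init(&my_cache, "my_obj", sizeof(struct my_obj), 0,
                    NULL, NULL, NULL, KMEM_CACHE_NOOFFSLAB);
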
diff --git a/kern/slab.c b/kern/slab.c
index 5140130..9ccbb43 100644
--- a/kern/slab.c
+++ b/kern/slab.c
@@ -213,17 +213,6 @@
 #endif /* _HOST_BIG_ENDIAN */
 
 /*
- * Cache flags.
- *
- * The flags don't change once set and can be tested without locking.
- */
-#define KMEM_CF_NO_CPU_POOL 0x01 /* CPU pool layer disabled */
-#define KMEM_CF_SLAB_EXTERNAL 0x02 /* Slab data is off slab */
-#define KMEM_CF_NO_RECLAIM 0x04 /* Slabs are not reclaimable */
-#define KMEM_CF_VERIFY 0x08 /* Debugging facilities enabled */
-#define KMEM_CF_DIRECT 0x10 /* No buf-to-slab tree lookup */
-
-/*
  * Options for kmem_cache_alloc_verify().
  */
 #define KMEM_AV_NOCONSTRUCT 0
@@ -445,7 +434,6 @@ static struct kmem_slab * kmem_slab_create(struct kmem_cache *cache,
         return NULL;
 
     if (cache->flags & KMEM_CF_SLAB_EXTERNAL) {
-        assert(!(cache->flags & KMEM_CF_NO_RECLAIM));
         slab = (struct kmem_slab *)kmem_cache_alloc(&kmem_slab_cache);
 
         if (slab == NULL) {
@@ -518,7 +506,6 @@ static void kmem_slab_destroy(struct kmem_slab *slab, struct kmem_cache *cache)
     assert(slab->nr_refs == 0);
     assert(slab->first_free != NULL);
-    assert(!(cache->flags & KMEM_CF_NO_RECLAIM));
 
     if (cache->flags & KMEM_CF_VERIFY)
         kmem_slab_destroy_verify(slab, cache);
 
@@ -786,12 +773,6 @@ void kmem_cache_init(struct kmem_cache *cache, const char *name,
     if (flags & KMEM_CACHE_NOCPUPOOL)
         cache->flags |= KMEM_CF_NO_CPU_POOL;
 
-    if (flags & KMEM_CACHE_NORECLAIM) {
-        assert(slab_free_fn == NULL);
-        flags |= KMEM_CACHE_NOOFFSLAB;
-        cache->flags |= KMEM_CF_NO_RECLAIM;
-    }
-
     if (flags & KMEM_CACHE_VERIFY)
         cache->flags |= KMEM_CF_VERIFY;
 
@@ -907,9 +888,6 @@ static void kmem_cache_reap(struct kmem_cache *cache)
     struct list dead_slabs;
     unsigned long nr_free_slabs;
 
-    if (cache->flags & KMEM_CF_NO_RECLAIM)
-        return;
-
     simple_lock(&cache->lock);
     list_set_head(&dead_slabs, &cache->free_slabs);
     list_init(&cache->free_slabs);
@@ -1507,8 +1485,6 @@ kern_return_t host_slab_info(host_t host, cache_info_array_t *infop,
                          ? CACHE_FLAGS_NO_CPU_POOL : 0)
                       | ((cache->flags & KMEM_CF_SLAB_EXTERNAL)
                          ? CACHE_FLAGS_SLAB_EXTERNAL : 0)
-                      | ((cache->flags & KMEM_CF_NO_RECLAIM)
-                         ? CACHE_FLAGS_NO_RECLAIM : 0)
                       | ((cache->flags & KMEM_CF_VERIFY)
                          ? CACHE_FLAGS_VERIFY : 0)
                       | ((cache->flags & KMEM_CF_DIRECT)
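
With the early return removed, kmem_cache_reap() now treats every cache the same way: it detaches the whole free-slab list while holding the cache lock, then destroys the slabs outside the critical section. A minimal sketch of that detach-then-free pattern, using pthreads and a hypothetical pool type in place of the kernel's simple_lock and list primitives:

    #include <pthread.h>
    #include <stdlib.h>

    struct node {
        struct node *next;
    };

    struct pool {
        pthread_mutex_t lock;
        struct node *free_list;     /* fully free slabs */
    };

    void pool_reap(struct pool *pool)
    {
        struct node *dead, *next;

        /* Detach the entire free list in O(1) under the lock,
         * mirroring list_set_head()/list_init() in kmem_cache_reap(). */
        pthread_mutex_lock(&pool->lock);
        dead = pool->free_list;
        pool->free_list = NULL;
        pthread_mutex_unlock(&pool->lock);

        /* Destruction may be slow (e.g. returning pages to the VM
         * system), so it happens with the lock dropped. */
        while (dead != NULL) {
            next = dead->next;
            free(dead);
            dead = next;
        }
    }
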
diff --git a/kern/slab.h b/kern/slab.h
index c7be169..bbd9d41 100644
--- a/kern/slab.h
+++ b/kern/slab.h
@@ -155,6 +155,16 @@ typedef void (*kmem_slab_free_fn_t)(vm_offset_t, vm_size_t);
 #define KMEM_CACHE_NAME_SIZE 24
 
 /*
+ * Cache flags.
+ *
+ * The flags don't change once set and can be tested without locking.
+ */
+#define KMEM_CF_NO_CPU_POOL 0x1 /* CPU pool layer disabled */
+#define KMEM_CF_SLAB_EXTERNAL 0x2 /* Slab data is off slab */
+#define KMEM_CF_VERIFY 0x4 /* Debugging facilities enabled */
+#define KMEM_CF_DIRECT 0x8 /* No buf-to-slab tree lookup */
+
+/*
  * Cache of objects.
  *
  * Locking order : cpu_pool -> cache. CPU pools locking is ordered by CPU ID.
@@ -215,12 +225,13 @@ extern vm_map_t kmem_map;
  */
 #define KMEM_CACHE_NOCPUPOOL 0x1 /* Don't use the per-cpu pools */
 #define KMEM_CACHE_NOOFFSLAB 0x2 /* Don't allocate external slab data */
-#define KMEM_CACHE_NORECLAIM 0x4 /* Never give slabs back to their source,
-                                    implies KMEM_CACHE_NOOFFSLAB */
-#define KMEM_CACHE_VERIFY 0x8 /* Use debugging facilities */
+#define KMEM_CACHE_VERIFY 0x4 /* Use debugging facilities */
 
 /*
  * Initialize a cache.
+ *
+ * If a slab allocation/free function pointer is NULL, the default backend
+ * (vm_kmem on the kernel map) is used for the allocation/free action.
  */
 void kmem_cache_init(struct kmem_cache *cache, const char *name,
                      size_t obj_size, size_t align, kmem_cache_ctor_t ctor,
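
The expanded comment above kmem_cache_init() documents the common case: passing NULL for the slab allocation and free functions makes the cache get its pages from the default vm_kmem backend on the kernel map. A minimal sketch of such a call, patterned on the vm_map_entry_cache initialization in vm/vm_map.c below (my_cache and struct my_obj are hypothetical):

    static struct kmem_cache my_cache;

    /* NULL ctor and NULL slab alloc/free functions select the
     * default vm_kmem backend; 0 means no KMEM_CACHE_* flags. */
    kmem_cache_init(&my_cache, "my_obj",
                    sizeof(struct my_obj), 0,   /* size, natural alignment */
                    NULL, NULL, NULL, 0);
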
diff --git a/vm/vm_map.c b/vm/vm_map.c
index ae3ce21..4977bff 100644
--- a/vm/vm_map.c
+++ b/vm/vm_map.c
@@ -182,10 +182,12 @@ void vm_map_init(void)
                     NULL, NULL, NULL, 0);
     kmem_cache_init(&vm_map_entry_cache, "vm_map_entry",
                     sizeof(struct vm_map_entry), 0, NULL, NULL, NULL, 0);
+#if 0
     kmem_cache_init(&vm_map_kentry_cache, "vm_map_kentry",
                     sizeof(struct vm_map_entry), 0, NULL, kentry_pagealloc,
                     NULL, KMEM_CACHE_NOCPUPOOL | KMEM_CACHE_NOOFFSLAB
                     | KMEM_CACHE_NORECLAIM);
+#endif
     kmem_cache_init(&vm_map_copy_cache, "vm_map_copy",
                     sizeof(struct vm_map_copy), 0, NULL, NULL, NULL, 0);