summaryrefslogtreecommitdiff
path: root/device
diff options
context:
space:
mode:
author Richard Braun <rbraun@sceen.net> 2014-12-10 21:52:40 +0100
committer Justus Winter <4winter@informatik.uni-hamburg.de> 2015-07-26 12:14:40 +0200
commit 130fd1a913a98e2a0a3351103651c1193b9b5a07 (patch)
tree ded2bbccdd52dfa536f4132bd3cabe4a11d8593a /device
parent 79ab3cb110fe49b5a8fb4fdbbb15285287a13cf8 (diff)
kern/slab: directmap update
The main impact of the direct physical mapping on the kmem module is the slab size computation. The page allocator requires the allocation size to be a power-of-two above the page size since it uses the buddy memory allocation algorithm. Custom slab allocation functions are no longer needed since the only user was the kentry area, which has been removed recently. The KMEM_CACHE_NOCPUPOOL flag is also no longer needed since CPU pools, which are allocated from a kmem cache, can now always be allocated out of the direct physical mapping.
Diffstat (limited to 'device')
-rw-r--r-- device/dev_lookup.c 6
-rw-r--r-- device/dev_pager.c 12
-rw-r--r-- device/ds_routines.c 12
-rw-r--r-- device/net_io.c 12
4 files changed, 28 insertions, 14 deletions
diff --git a/device/dev_lookup.c b/device/dev_lookup.c
index 297dcde..f6fc0d4 100644
--- a/device/dev_lookup.c
+++ b/device/dev_lookup.c
@@ -365,6 +365,8 @@ dev_lookup_init(void)
for (i = 0; i < NDEVHASH; i++)
queue_init(&dev_number_hash_table[i]);
- kmem_cache_init(&dev_hdr_cache, "mach_device",
- sizeof(struct mach_device), 0, NULL, NULL, NULL, 0);
+ kmem_cache_init (&dev_hdr_cache,
+ "mach_device",
+ sizeof(struct mach_device), 0,
+ NULL, 0);
}
diff --git a/device/dev_pager.c b/device/dev_pager.c
index 815473a..0ffcdf5 100644
--- a/device/dev_pager.c
+++ b/device/dev_pager.c
@@ -172,8 +172,10 @@ void dev_pager_hash_init(void)
vm_size_t size;
size = sizeof(struct dev_pager_entry);
- kmem_cache_init(&dev_pager_hash_cache, "dev_pager_entry", size, 0,
- NULL, NULL, NULL, 0);
+ kmem_cache_init (&dev_pager_hash_cache,
+ "dev_pager_entry",
+ size, 0,
+ NULL, 0);
for (i = 0; i < DEV_PAGER_HASH_COUNT; i++)
queue_init(&dev_pager_hashtable[i]);
simple_lock_init(&dev_pager_hash_lock);
@@ -704,8 +706,10 @@ void device_pager_init(void)
* Initialize cache of paging structures.
*/
size = sizeof(struct dev_pager);
- kmem_cache_init(&dev_pager_cache, "dev_pager", size, 0,
- NULL, NULL, NULL, 0);
+ kmem_cache_init (&dev_pager_cache,
+ "dev_pager",
+ size, 0,
+ NULL, 0);
/*
* Initialize the name port hashing stuff.
diff --git a/device/ds_routines.c b/device/ds_routines.c
index 33cfd89..e3502f7 100644
--- a/device/ds_routines.c
+++ b/device/ds_routines.c
@@ -1553,8 +1553,10 @@ void mach_device_init(void)
*/
device_io_map->wait_for_space = TRUE;
- kmem_cache_init(&io_inband_cache, "io_buf_ptr_inband",
- sizeof(io_buf_ptr_inband_t), 0, NULL, NULL, NULL, 0);
+ kmem_cache_init (&io_inband_cache,
+ "io_buf_ptr_inband",
+ sizeof(io_buf_ptr_inband_t), 0,
+ NULL, 0);
mach_device_trap_init();
}
@@ -1597,8 +1599,10 @@ struct kmem_cache io_trap_cache;
static void
mach_device_trap_init(void)
{
- kmem_cache_init(&io_trap_cache, "io_req", IOTRAP_REQSIZE, 0,
- NULL, NULL, NULL, 0);
+ kmem_cache_init (&io_trap_cache,
+ "io_req",
+ IOTRAP_REQSIZE, 0,
+ NULL, 0);
}
/*
diff --git a/device/net_io.c b/device/net_io.c
index 12a1e9c..67d849a 100644
--- a/device/net_io.c
+++ b/device/net_io.c
@@ -1494,12 +1494,16 @@ net_io_init(void)
vm_size_t size;
size = sizeof(struct net_rcv_port);
- kmem_cache_init(&net_rcv_cache, "net_rcv_port", size, 0,
- NULL, NULL, NULL, 0);
+ kmem_cache_init (&net_rcv_cache,
+ "net_rcv_port",
+ size, 0,
+ NULL, 0);
size = sizeof(struct net_hash_entry);
- kmem_cache_init(&net_hash_entry_cache, "net_hash_entry", size, 0,
- NULL, NULL, NULL, 0);
+ kmem_cache_init (&net_hash_entry_cache,
+ "net_hash_entry",
+ size, 0,
+ NULL, 0);
size = ikm_plus_overhead(sizeof(struct net_rcv_msg));
net_kmsg_size = round_page(size);