Diffstat (limited to 'debian/patches/0008-xxx-use-a-rbtree-for-reverse-lookups.patch')
-rw-r--r-- | debian/patches/0008-xxx-use-a-rbtree-for-reverse-lookups.patch | 188
1 file changed, 188 insertions, 0 deletions
diff --git a/debian/patches/0008-xxx-use-a-rbtree-for-reverse-lookups.patch b/debian/patches/0008-xxx-use-a-rbtree-for-reverse-lookups.patch
new file mode 100644
index 0000000..be1b5b6
--- /dev/null
+++ b/debian/patches/0008-xxx-use-a-rbtree-for-reverse-lookups.patch
@@ -0,0 +1,188 @@
+From 35c5b585b1d1304a75b84ec7101e488cec95e7d1 Mon Sep 17 00:00:00 2001
+From: Justus Winter <4winter@informatik.uni-hamburg.de>
+Date: Sat, 11 Apr 2015 03:11:33 +0200
+Subject: [PATCH gnumach 8/8] xxx use a rbtree for reverse lookups
+
+---
+ ipc/ipc_init.c  |  3 +++
+ ipc/ipc_space.c |  7 +++--
+ ipc/ipc_space.h | 82 +++++++++++++++++++++++++++++++++++++++++----------------
+ 3 files changed, 67 insertions(+), 25 deletions(-)
+
+diff --git a/ipc/ipc_init.c b/ipc/ipc_init.c
+index 2c58a6e..34b7d78 100644
+--- a/ipc/ipc_init.c
++++ b/ipc/ipc_init.c
+@@ -75,6 +75,9 @@ ipc_bootstrap(void)
+ 	kmem_cache_init(&ipc_space_cache, "ipc_space",
+ 			sizeof(struct ipc_space), 0, NULL, NULL, NULL, 0);
+ 
++	kmem_cache_init(&ipc_rmn_cache, "ipc_rmn_cache",
++			sizeof(struct ipc_rmn), 0, NULL, NULL, NULL, 0);
++
+ 	kmem_cache_init(&ipc_entry_cache, "ipc_entry",
+ 			sizeof(struct ipc_entry), 0, NULL, NULL, NULL, 0);
+ 
+diff --git a/ipc/ipc_space.c b/ipc/ipc_space.c
+index ea3cb3b..566cdba 100644
+--- a/ipc/ipc_space.c
++++ b/ipc/ipc_space.c
+@@ -54,6 +54,7 @@
+ 
+ 
+ struct kmem_cache ipc_space_cache;
++struct kmem_cache ipc_rmn_cache;
+ ipc_space_t ipc_space_kernel;
+ ipc_space_t ipc_space_reply;
+ 
+@@ -115,7 +116,7 @@ ipc_space_create(
+ 	space->is_active = TRUE;
+ 
+ 	rdxtree_init(&space->is_map);
+-	rdxtree_init(&space->is_reverse_map);
++	rbtree_init(&space->is_reverse_map);
+ 	/* The zeroth entry is reserved. */
+ 	rdxtree_insert(&space->is_map, 0, &zero_entry);
+ 	space->is_size = 1;
+@@ -204,7 +205,9 @@ ipc_space_destroy(
+ 		ie_free(entry);
+ 	}
+ 	rdxtree_remove_all(&space->is_map);
+-	rdxtree_remove_all(&space->is_reverse_map);
++	struct rbtree_node *node, *tmp;
++	rbtree_for_each_remove(&space->is_reverse_map, node, tmp)
++		ipc_rmn_free(node);
+ 
+ 	/*
+ 	 *	Because the space is now dead,
+diff --git a/ipc/ipc_space.h b/ipc/ipc_space.h
+index fdbb4f2..60d8d60 100644
+--- a/ipc/ipc_space.h
++++ b/ipc/ipc_space.h
+@@ -45,6 +45,7 @@
+ #include <machine/vm_param.h>
+ #include <kern/macro_help.h>
+ #include <kern/lock.h>
++#include <kern/rbtree.h>
+ #include <kern/rdxtree.h>
+ #include <kern/slab.h>
+ #include <ipc/ipc_entry.h>
+@@ -75,7 +76,7 @@ struct ipc_space {
+ 	boolean_t is_active;		/* is the space alive? */
+ 	struct rdxtree is_map;		/* a map of entries */
+ 	size_t is_size;			/* number of entries */
+-	struct rdxtree is_reverse_map;	/* maps objects to entries */
++	struct rbtree is_reverse_map;	/* maps objects to entries */
+ 	ipc_entry_t is_free_list;	/* a linked list of free entries */
+ 	size_t is_free_list_size;	/* number of free entries */
+ #define IS_FREE_LIST_SIZE_LIMIT 64	/* maximum number of entries
+@@ -259,13 +260,36 @@ ipc_entry_dealloc(
+ 
+ /* Reverse lookups. */
+ 
+-/* Cast a pointer to a suitable key. */
+-#define KEY(X)						\
+-  ({							\
+-    assert((((unsigned long) (X)) & 0x07) == 0);	\
+-    ((unsigned long long)				\
+-     (((unsigned long) (X) - VM_MIN_KERNEL_ADDRESS) >> 3)); \
+-  })
++struct ipc_rmn {
++  struct rbtree_node node;
++  ipc_object_t object;
++  ipc_entry_t entry;
++};
++
++static inline int
++ipc_rmn_cmp_lookup(ipc_object_t obj, struct rbtree_node *node)
++{
++  struct ipc_rmn *n =
++    structof(node, struct ipc_rmn, node);
++  return (unsigned long) obj - (unsigned long) n->object;
++}
++
++static inline int
++ipc_rmn_cmp_insert(struct rbtree_node *node0, struct rbtree_node *node1)
++{
++  struct ipc_rmn *n =
++    structof(node0, struct ipc_rmn, node);
++  struct ipc_rmn *m =
++    structof(node1, struct ipc_rmn, node);
++  return (unsigned long) n->object - (unsigned long) m->object;
++}
++
++extern struct kmem_cache ipc_rmn_cache;
++
++#define ipc_rmn_alloc()					\
++  ((struct ipc_rmn *) kmem_cache_alloc(&ipc_rmn_cache))
++#define ipc_rmn_free(rmn)				\
++  kmem_cache_free(&ipc_rmn_cache, (vm_offset_t) rmn)
+ 
+ /* Insert (OBJ, ENTRY) pair into the reverse mapping.  SPACE must
+    be write-locked.  */
+@@ -276,8 +300,16 @@ ipc_reverse_insert(ipc_space_t space,
+ {
+   assert(space != IS_NULL);
+   assert(obj != IO_NULL);
+-  return (kern_return_t) rdxtree_insert(&space->is_reverse_map,
+-					KEY(obj), entry);
++
++  struct ipc_rmn *node = ipc_rmn_alloc();
++  if (node == NULL)
++    return KERN_RESOURCE_SHORTAGE;
++
++  node->object = obj;
++  node->entry = entry;
++  rbtree_insert(&space->is_reverse_map, &node->node,
++		ipc_rmn_cmp_insert);
++  return KERN_SUCCESS;
+ }
+ 
+ /* Remove OBJ from the reverse mapping.  SPACE must be
+    write-locked.  */
+@@ -288,18 +320,18 @@ ipc_reverse_remove(ipc_space_t space,
+ {
+   assert(space != IS_NULL);
+   assert(obj != IO_NULL);
+-  return rdxtree_remove(&space->is_reverse_map, KEY(obj));
+-}
+-
+-/* Remove all entries from the reverse mapping.  SPACE must be
+-   write-locked.  */
+-static inline void
+-ipc_reverse_remove_all(ipc_space_t space)
+-{
+-  assert(space != IS_NULL);
+-  rdxtree_remove_all(&space->is_reverse_map);
+-  assert(space->is_reverse_map.height == 0);
+-  assert(space->is_reverse_map.root == NULL);
++  struct ipc_rmn *node;
++  ipc_entry_t entry;
++  node = structof(rbtree_lookup(&space->is_reverse_map, obj,
++				ipc_rmn_cmp_lookup),
++		  struct ipc_rmn, node);
++  if (node == NULL)
++    return NULL;
++
++  entry = node->entry;
++  rbtree_remove(&space->is_reverse_map, &node->node);
++  ipc_rmn_free(node);
++  return entry;
+ }
+ 
+ /* Return ENTRY related to OBJ, or NULL if no such entry is found in
+@@ -311,7 +343,11 @@ ipc_reverse_lookup(ipc_space_t space,
+ {
+   assert(space != IS_NULL);
+   assert(obj != IO_NULL);
+-  return rdxtree_lookup(&space->is_reverse_map, KEY(obj));
++  struct ipc_rmn *node;
++  node = structof(rbtree_lookup(&space->is_reverse_map, obj,
++				ipc_rmn_cmp_lookup),
++		  struct ipc_rmn, node);
++  return node? node->entry: NULL;
+ }
+ 
+ #undef KEY
+-- 
+2.1.4
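A side note on the comparators in this patch, with a standalone sketch (not part of the patch, and independent of gnumach's <kern/rbtree.h>): ipc_rmn_cmp_lookup and ipc_rmn_cmp_insert order nodes by returning the unsigned long difference of two object pointers truncated to int. That ordering is only consistent while all keys lie within 2^31 bytes of one another, which plausibly holds for kmem_cache-allocated objects in a 32-bit kernel map, but a three-way comparison carries no such assumption. The hypothetical key_cmp below shows the sign-safe form; the key values are made up for illustration.

/* Standalone illustration: a subtraction-based comparator can report
   the wrong sign, while a three-way comparison never wraps. */
#include <assert.h>
#include <stdio.h>

/* Sign-safe three-way comparison of two pointer-sized keys:
   yields -1, 0 or 1 regardless of how far apart the keys are. */
static int
key_cmp(unsigned long x, unsigned long y)
{
  return (x > y) - (x < y);
}

int
main(void)
{
  /* Hypothetical keys 2^31 apart; a is the larger one. */
  unsigned long a = 0x90000000UL;
  unsigned long b = 0x10000000UL;

  /* a - b is 0x80000000, which does not fit in an int; on typical
     two's-complement targets the conversion yields a negative value,
     claiming a < b even though a > b. */
  int by_subtraction = (int) (a - b);
  int by_three_way = key_cmp(a, b);

  printf("subtraction: %d\n", by_subtraction); /* usually negative (wrong) */
  printf("three-way:   %d\n", by_three_way);   /* 1 (correct) */

  assert(by_three_way > 0);
  return 0;
}

The patch presumably gets away with the subtraction form because gnumach's kernel objects all live in a kernel map far smaller than 2^31 bytes; the three-way form simply removes that implicit assumption.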