Diffstat (limited to 'ipc')
 ipc/ipc_entry.c |   5
 ipc/ipc_entry.h |   5
 ipc/ipc_hash.c  | 184
 ipc/ipc_right.c |   2
 ipc/ipc_space.c |   3
 ipc/ipc_space.h |  63
 6 files changed, 94 insertions, 168 deletions
diff --git a/ipc/ipc_entry.c b/ipc/ipc_entry.c
index e78f74e..5b9fd98 100644
--- a/ipc/ipc_entry.c
+++ b/ipc/ipc_entry.c
@@ -618,6 +618,7 @@ ipc_entry_grow_table(ipc_space_t space)
 		is_write_unlock(space);
 		thread_wakeup((event_t) space);
 		it_entries_free(its, table);
+		ipc_reverse_remove_all(space);
 		is_write_lock(space);
 		return KERN_SUCCESS;
 	}
@@ -641,9 +642,6 @@ ipc_entry_grow_table(ipc_space_t space)
 	memcpy(table, otable,
 	       osize * sizeof(struct ipc_entry));
 
-	for (i = 0; i < osize; i++)
-		table[i].ie_index = 0;
-
 	(void) memset((void *) (table + osize), 0,
 		      (size - osize) * sizeof(struct ipc_entry));
 
@@ -651,6 +649,7 @@ ipc_entry_grow_table(ipc_space_t space)
 	 * Put old entries into the reverse hash table.
 	 */
 
+	ipc_reverse_remove_all(space);
 	for (i = 0; i < osize; i++) {
 		ipc_entry_t entry = &table[i];
 
diff --git a/ipc/ipc_entry.h b/ipc/ipc_entry.h
index cb6d3f9..0caa70b 100644
--- a/ipc/ipc_entry.h
+++ b/ipc/ipc_entry.h
@@ -54,11 +54,6 @@
  * those entries.  The cutoff point between the table and the tree
  * is adjusted dynamically to minimize memory consumption.
  *
- * The ie_index field of entries in the table implements
- * a ordered hash table with open addressing and linear probing.
- * This hash table converts (space, object) -> name.
- * It is used independently of the other fields.
- *
 * Free (unallocated) entries in the table have null ie_object
 * fields.  The ie_bits field is zero except for IE_BITS_GEN.
 * The ie_next (ie_request) field links free entries into a free list.
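The two ipc_entry.c hunks above are two halves of one fix: the reverse map stores pointers into the space's entry table, so when ipc_entry_grow_table() reallocates that table, every stored value goes stale. The map is therefore flushed wholesale and rebuilt from the new table. A hedged sketch of the rebuild pattern follows; the identifiers are the commit's, but rebuild_reverse_map is a hypothetical name and the loop is simplified from the real function:

/* Sketch only -- assumes <ipc/ipc_space.h> and <ipc/ipc_entry.h>.  */
static void
rebuild_reverse_map(
	ipc_space_t	space,
	ipc_entry_t	table,	/* the new, larger entry table */
	ipc_entry_num_t	osize)	/* number of entries copied over */
{
	mach_port_index_t i;

	/* Every value in the map points into the old table, so
	   none of them can be kept: drop them all at once.  */
	ipc_reverse_remove_all(space);

	for (i = 0; i < osize; i++) {
		ipc_entry_t entry = &table[i];

		/* Only pure send rights live in the reverse map.  */
		if (IE_BITS_TYPE(entry->ie_bits) == MACH_PORT_TYPE_SEND) {
			entry->ie_index = i;
			(void) ipc_reverse_insert(space, entry->ie_object,
						  entry);
		}
	}
}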
diff --git a/ipc/ipc_hash.c b/ipc/ipc_hash.c
index c2c6d6e..87952a7 100644
--- a/ipc/ipc_hash.c
+++ b/ipc/ipc_hash.c
@@ -296,37 +296,19 @@ ipc_hash_global_delete(
 }
 
 /*
- *	Each space has a local reverse hash table, which holds
- *	entries from the space's table.  In fact, the hash table
- *	just uses a field (ie_index) in the table itself.
- *
- *	The local hash table is an open-addressing hash table,
- *	which means that when a collision occurs, instead of
- *	throwing the entry into a bucket, the entry is rehashed
- *	to another position in the table.  In this case the rehash
- *	is very simple: linear probing (ie, just increment the position).
- *	This simple rehash makes deletions tractable (they're still a pain),
- *	but it means that collisions tend to build up into clumps.
- *
- *	Because at least one entry in the table (index 0) is always unused,
- *	there will always be room in the reverse hash table.  If a table
- *	with n slots gets completely full, the reverse hash table will
- *	have one giant clump of n-1 slots and one free slot somewhere.
- *	Because entries are only entered into the reverse table if they
- *	are pure send rights (not receive, send-once, port-set,
- *	or dead-name rights), and free entries of course aren't entered,
- *	I expect the reverse hash table won't get unreasonably full.
- *
- *	Ordered hash tables (Amble & Knuth, Computer Journal, v. 17, no. 2,
- *	pp. 135-142.) may be desirable here.  They can dramatically help
- *	unsuccessful lookups.  But unsuccessful lookups are almost always
- *	followed by insertions, and those slow down somewhat.  They
- *	also can help deletions somewhat.  Successful lookups aren't affected.
- *	So possibly a small win; probably nothing significant.
+ *	Each space has a local reverse mapping from objects to entries
+ *	from the space's table.  This used to be a hash table.
 */
 
-#define	IH_LOCAL_HASH(obj, size)				\
-	((((mach_port_index_t) (vm_offset_t) (obj)) >> 6) & (size - 1))
+#define	IPC_LOCAL_HASH_INVARIANT(S, O, N, E)				\
+	MACRO_BEGIN							\
+	assert(IE_BITS_TYPE((E)->ie_bits) == MACH_PORT_TYPE_SEND ||	\
+	       IE_BITS_TYPE((E)->ie_bits) == MACH_PORT_TYPE_SEND_RECEIVE || \
+	       IE_BITS_TYPE((E)->ie_bits) == MACH_PORT_TYPE_NONE);	\
+	assert((E)->ie_object == (O));					\
+	assert((E)->ie_index == (N));					\
+	assert(&(S)->is_table[N] == (E));				\
+	MACRO_END
 
 /*
  *	Routine:	ipc_hash_local_lookup
@@ -345,37 +327,15 @@ ipc_hash_local_lookup(
 	mach_port_t *namep,
 	ipc_entry_t *entryp)
 {
-	ipc_entry_t table;
-	ipc_entry_num_t size;
-	mach_port_index_t hindex, index;
-
 	assert(space != IS_NULL);
 	assert(obj != IO_NULL);
 
-	table = space->is_table;
-	size = space->is_table_size;
-	hindex = IH_LOCAL_HASH(obj, size);
-
-	/*
-	 *	Ideally, table[hindex].ie_index is the name we want.
-	 *	However, must check ie_object to verify this,
-	 *	because collisions can happen.  In case of a collision,
-	 *	search farther along in the clump.
-	 */
-
-	while ((index = table[hindex].ie_index) != 0) {
-		ipc_entry_t entry = &table[index];
-
-		if (entry->ie_object == obj) {
-			*namep = MACH_PORT_MAKEB(index, entry->ie_bits);
-			*entryp = entry;
-			return TRUE;
-		}
-
-		if (++hindex == size)
-			hindex = 0;
+	*entryp = ipc_reverse_lookup(space, obj);
+	if (*entryp != IE_NULL) {
+		*namep = (*entryp)->ie_index;
+		IPC_LOCAL_HASH_INVARIANT(space, obj, *namep, *entryp);
+		return TRUE;
 	}
-
 	return FALSE;
 }
 
@@ -394,33 +354,15 @@ ipc_hash_local_insert(
 	mach_port_index_t index,
 	ipc_entry_t entry)
 {
-	ipc_entry_t table;
-	ipc_entry_num_t size;
-	mach_port_index_t hindex;
-
+	kern_return_t kr;
 	assert(index != 0);
 	assert(space != IS_NULL);
 	assert(obj != IO_NULL);
 
-	table = space->is_table;
-	size = space->is_table_size;
-	hindex = IH_LOCAL_HASH(obj, size);
-
-	assert(entry == &table[index]);
-	assert(entry->ie_object == obj);
-
-	/*
-	 *	We want to insert at hindex, but there may be collisions.
-	 *	If a collision occurs, search for the end of the clump
-	 *	and insert there.
-	 */
-
-	while (table[hindex].ie_index != 0) {
-		if (++hindex == size)
-			hindex = 0;
-	}
-
-	table[hindex].ie_index = index;
+	entry->ie_index = index;
+	IPC_LOCAL_HASH_INVARIANT(space, obj, index, entry);
+	kr = ipc_reverse_insert(space, obj, entry);
+	assert(kr == 0);
 }
 
 /*
@@ -438,90 +380,14 @@ ipc_hash_local_delete(
 	mach_port_index_t index,
 	ipc_entry_t entry)
 {
-	ipc_entry_t table;
-	ipc_entry_num_t size;
-	mach_port_index_t hindex, dindex;
-
+	ipc_entry_t removed;
 	assert(index != MACH_PORT_NULL);
 	assert(space != IS_NULL);
 	assert(obj != IO_NULL);
 
-	table = space->is_table;
-	size = space->is_table_size;
-	hindex = IH_LOCAL_HASH(obj, size);
-
-	assert(entry == &table[index]);
-	assert(entry->ie_object == obj);
-
-	/*
-	 *	First check we have the right hindex for this index.
-	 *	In case of collision, we have to search farther
-	 *	along in this clump.
-	 */
-
-	while (table[hindex].ie_index != index) {
-		if (table[hindex].ie_index == 0)
-		{
-			static int gak = 0;
-			if (gak == 0)
-			{
-				printf("gak! entry wasn't in hash table!\n");
-				gak = 1;
-			}
-			return;
-		}
-		if (++hindex == size)
-			hindex = 0;
-	}
-
-	/*
-	 *	Now we want to set table[hindex].ie_index = 0.
-	 *	But if we aren't the last index in a clump,
-	 *	this might cause problems for lookups of objects
-	 *	farther along in the clump that are displaced
-	 *	due to collisions.  Searches for them would fail
-	 *	at hindex instead of succeeding.
-	 *
-	 *	So we must check the clump after hindex for objects
-	 *	that are so displaced, and move one up to the new hole.
-	 *
-	 *	hindex - index of new hole in the clump
-	 *	dindex - index we are checking for a displaced object
-	 *
-	 *	When we move a displaced object up into the hole,
-	 *	it creates a new hole, and we have to repeat the process
-	 *	until we get to the end of the clump.
-	 */
-
-	for (dindex = hindex; index != 0; hindex = dindex) {
-		for (;;) {
-			mach_port_index_t tindex;
-			ipc_object_t tobj;
-
-			if (++dindex == size)
-				dindex = 0;
-			assert(dindex != hindex);
-
-			/* are we at the end of the clump? */
-
-			index = table[dindex].ie_index;
-			if (index == 0)
-				break;
-
-			/* is this a displaced object? */
-
-			tobj = table[index].ie_object;
-			assert(tobj != IO_NULL);
-			tindex = IH_LOCAL_HASH(tobj, size);
-
-			if ((dindex < hindex) ?
-			    ((dindex < tindex) && (tindex <= hindex)) :
-			    ((dindex < tindex) || (tindex <= hindex)))
-				break;
-		}
-
-		table[hindex].ie_index = index;
-	}
+	IPC_LOCAL_HASH_INVARIANT(space, obj, index, entry);
+	removed = ipc_reverse_remove(space, obj);
+	assert(removed == entry);
 }
 
 /*
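With the probing gone, each of the three local-hash routines in ipc_hash.c reduces to a single radix-tree call plus the shared IPC_LOCAL_HASH_INVARIANT check; ie_index, formerly a probe-chain slot, now simply caches the entry's own table index so a successful lookup can hand it back as the name. A hedged, test-style sketch of the round trip these routines maintain — reverse_map_round_trip is a hypothetical helper, and SPACE is assumed write-locked:

/* Sketch only -- assumes <ipc/ipc_hash.h> and a write-locked SPACE.  */
static void
reverse_map_round_trip(
	ipc_space_t	space,
	ipc_object_t	obj,
	mach_port_index_t index,
	ipc_entry_t	entry)	/* == &space->is_table[index] */
{
	mach_port_t name;
	ipc_entry_t found;

	/* Insert stores object -> entry and caches the index.  */
	ipc_hash_local_insert(space, obj, index, entry);

	/* Lookup returns the same entry and the cached index.  */
	assert(ipc_hash_local_lookup(space, obj, &name, &found));
	assert(found == entry && name == entry->ie_index);

	/* Delete must hand back exactly the entry inserted.  */
	ipc_hash_local_delete(space, obj, index, entry);
	assert(ipc_reverse_lookup(space, obj) == IE_NULL);
}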
diff --git a/ipc/ipc_right.c b/ipc/ipc_right.c
index 503eb1f..35061c9 100644
--- a/ipc/ipc_right.c
+++ b/ipc/ipc_right.c
@@ -423,7 +423,7 @@ ipc_right_check(
 *	Purpose:
 *		Cleans up an entry in a dead space.
 *		The entry isn't deallocated or removed
- *		from reverse hash tables.
+ *		from the reverse mappings.
 *	Conditions:
 *		The space is dead and unlocked.
 */
diff --git a/ipc/ipc_space.c b/ipc/ipc_space.c
index ab55e83..dcc96b3 100644
--- a/ipc/ipc_space.c
+++ b/ipc/ipc_space.c
@@ -148,6 +148,7 @@ ipc_space_create(
 	space->is_tree_total = 0;
 	space->is_tree_small = 0;
 	space->is_tree_hash = 0;
+	rdxtree_init(&space->is_reverse_map);
 
 	*spacep = space;
 	return KERN_SUCCESS;
@@ -271,6 +272,8 @@ ipc_space_destroy(
 	}
 	ipc_splay_traverse_finish(&space->is_tree);
 
+	rdxtree_remove_all(&space->is_reverse_map);
+
 	/*
 	 *	Because the space is now dead,
 	 *	we must release the "active" reference for it.
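The ipc_space.c side is symmetric: the map is initialized empty when the space is created and dropped wholesale when the space dies, since ipc_space_destroy() is already tearing down every entry the map could point to. A minimal sketch of that pairing — space_lifecycle_sketch is a hypothetical name, and the comment between the two calls stands in for the space's whole active life:

/* Sketch only -- assumes <kern/rdxtree.h> and <ipc/ipc_space.h>.  */
static void
space_lifecycle_sketch(ipc_space_t space)
{
	/* ipc_space_create(): the reverse map starts empty.  */
	rdxtree_init(&space->is_reverse_map);

	/* ... inserts, lookups and removes happen under the
	   space lock for as long as the space is active ...  */

	/* ipc_space_destroy(): no per-entry removal is needed;
	   the whole tree is released at once.  */
	rdxtree_remove_all(&space->is_reverse_map);
}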
diff --git a/ipc/ipc_space.h b/ipc/ipc_space.h
index 3bd2f4d..f41c64c 100644
--- a/ipc/ipc_space.h
+++ b/ipc/ipc_space.h
@@ -42,8 +42,10 @@
 #include <mach/boolean.h>
 #include <mach/kern_return.h>
 #include <mach/mach_types.h>
+#include <machine/vm_param.h>
 #include <kern/macros.h>
 #include <kern/lock.h>
+#include <kern/rdxtree.h>
 #include <kern/slab.h>
 #include <ipc/ipc_splay.h>
 #include <ipc/ipc_types.h>
@@ -79,6 +81,8 @@ struct ipc_space {
 	ipc_entry_num_t is_tree_total;	/* number of entries in the tree */
 	ipc_entry_num_t is_tree_small;	/* # of small entries in the tree */
 	ipc_entry_num_t is_tree_hash;	/* # of hashed entries in the tree */
+	struct rdxtree is_reverse_map;	/* maps objects to entries */
+
 };
 
 #define	IS_NULL			((ipc_space_t) 0)
@@ -135,4 +139,63 @@ kern_return_t ipc_space_create(ipc_table_size_t, ipc_space_t *);
 kern_return_t ipc_space_create_special(struct ipc_space **);
 void ipc_space_destroy(struct ipc_space *);
 
+/* Reverse lookups.  */
+
+/* Cast a pointer to a suitable key.  */
+#define KEY(X)							\
+	({							\
+		assert((((unsigned long) (X)) & 0x07) == 0);	\
+		((unsigned long long)				\
+		 (((unsigned long) (X) - VM_MIN_KERNEL_ADDRESS) >> 3)); \
+	})
+
+/* Insert (OBJ, ENTRY) pair into the reverse mapping.  SPACE must
+   be write-locked.  */
+static inline kern_return_t
+ipc_reverse_insert(ipc_space_t space,
+		   ipc_object_t obj,
+		   ipc_entry_t entry)
+{
+	assert(space != IS_NULL);
+	assert(obj != IO_NULL);
+	return (kern_return_t) rdxtree_insert(&space->is_reverse_map,
+					      KEY(obj), entry);
+}
+
+/* Remove OBJ from the reverse mapping.  SPACE must be
+   write-locked.  */
+static inline ipc_entry_t
+ipc_reverse_remove(ipc_space_t space,
+		   ipc_object_t obj)
+{
+	assert(space != IS_NULL);
+	assert(obj != IO_NULL);
+	return rdxtree_remove(&space->is_reverse_map, KEY(obj));
+}
+
+/* Remove all entries from the reverse mapping.  SPACE must be
+   write-locked.  */
+static inline void
+ipc_reverse_remove_all(ipc_space_t space)
+{
+	assert(space != IS_NULL);
+	rdxtree_remove_all(&space->is_reverse_map);
+	assert(space->is_reverse_map.height == 0);
+	assert(space->is_reverse_map.root == NULL);
+}
+
+/* Return ENTRY related to OBJ, or NULL if no such entry is found in
+   the reverse mapping.  SPACE must be read-locked or
+   write-locked.  */
+static inline ipc_entry_t
+ipc_reverse_lookup(ipc_space_t space,
+		   ipc_object_t obj)
+{
+	assert(space != IS_NULL);
+	assert(obj != IO_NULL);
+	return rdxtree_lookup(&space->is_reverse_map, KEY(obj));
+}
+
+#undef KEY
+
 #endif	/* _IPC_IPC_SPACE_H_ */
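The KEY() macro above is what keeps the radix tree shallow: IPC objects are at least 8-byte aligned, so the low three address bits carry no information, and subtracting VM_MIN_KERNEL_ADDRESS strips the constant high bits, leaving a small, dense key. A self-contained illustration of the arithmetic — the object address and the i386-style VM_MIN_KERNEL_ADDRESS value are assumptions for the example only:

/* Userspace illustration of the key compaction; not kernel code.
   Requires GCC/Clang for the statement-expression macro.  */
#include <assert.h>

#define VM_MIN_KERNEL_ADDRESS	0xc0000000UL	/* assumed, as on i386 */

#define KEY(X)							\
	({							\
		assert((((unsigned long) (X)) & 0x07) == 0);	\
		((unsigned long long)				\
		 (((unsigned long) (X) - VM_MIN_KERNEL_ADDRESS) >> 3)); \
	})

int
main(void)
{
	/* A made-up, 8-byte-aligned kernel object address.  */
	unsigned long obj = 0xc1234568UL;

	/* (0xc1234568 - 0xc0000000) >> 3 == 0x1234568 >> 3 == 0x2468ad:
	   the key fits in 22 bits, so the tree needs only a few
	   levels instead of spanning the whole address space.  */
	assert(KEY(obj) == 0x2468adULL);
	return 0;
}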