path: root/debian/patches/0006-xxx-drop-cleanup-unused-code.patch
Diffstat (limited to 'debian/patches/0006-xxx-drop-cleanup-unused-code.patch')
-rw-r--r-- debian/patches/0006-xxx-drop-cleanup-unused-code.patch | 2982
1 file changed, 2982 insertions(+), 0 deletions(-)
diff --git a/debian/patches/0006-xxx-drop-cleanup-unused-code.patch b/debian/patches/0006-xxx-drop-cleanup-unused-code.patch
new file mode 100644
index 0000000..415335a
--- /dev/null
+++ b/debian/patches/0006-xxx-drop-cleanup-unused-code.patch
@@ -0,0 +1,2982 @@
+From fd1420b2bdb1524199454f28ff2585afeaf52ebd Mon Sep 17 00:00:00 2001
+From: Justus Winter <4winter@informatik.uni-hamburg.de>
+Date: Wed, 25 Mar 2015 23:42:29 +0100
+Subject: [PATCH gnumach 6/8] xxx: drop/cleanup unused code
+
+Conflicts:
+ ipc/ipc_entry.c
+ ipc/ipc_entry.h
+---
+ Makefrag.am | 4 -
+ ddb/db_print.c | 20 +-
+ include/mach_debug/ipc_info.h | 3 -
+ include/mach_debug/mach_debug_types.defs | 2 +-
+ ipc/ipc_entry.c | 349 +-----------
+ ipc/ipc_entry.h | 38 +-
+ ipc/ipc_hash.c | 475 ----------------
+ ipc/ipc_hash.h | 96 ----
+ ipc/ipc_init.c | 5 -
+ ipc/ipc_kmsg.c | 8 +-
+ ipc/ipc_object.c | 1 -
+ ipc/ipc_right.c | 39 +-
+ ipc/ipc_space.c | 70 +--
+ ipc/ipc_space.h | 10 +-
+ ipc/ipc_splay.c | 920 -------------------------------
+ ipc/ipc_splay.h | 114 ----
+ ipc/mach_debug.c | 203 +------
+ ipc/mach_port.c | 60 +-
+ 18 files changed, 63 insertions(+), 2354 deletions(-)
+ delete mode 100644 ipc/ipc_hash.c
+ delete mode 100644 ipc/ipc_hash.h
+ delete mode 100644 ipc/ipc_splay.c
+ delete mode 100644 ipc/ipc_splay.h
+
+diff --git a/Makefrag.am b/Makefrag.am
+index 15f422f..1b09fc3 100644
+--- a/Makefrag.am
++++ b/Makefrag.am
+@@ -80,8 +80,6 @@ endif
+ libkernel_a_SOURCES += \
+ ipc/ipc_entry.c \
+ ipc/ipc_entry.h \
+- ipc/ipc_hash.c \
+- ipc/ipc_hash.h \
+ ipc/ipc_init.c \
+ ipc/ipc_init.h \
+ ipc/ipc_kmsg.c \
+@@ -105,8 +103,6 @@ libkernel_a_SOURCES += \
+ ipc/ipc_right.h \
+ ipc/ipc_space.c \
+ ipc/ipc_space.h \
+- ipc/ipc_splay.c \
+- ipc/ipc_splay.h \
+ ipc/ipc_table.c \
+ ipc/ipc_table.h \
+ ipc/ipc_target.c \
+diff --git a/ddb/db_print.c b/ddb/db_print.c
+index 24a3e33..fb4efaa 100644
+--- a/ddb/db_print.c
++++ b/ddb/db_print.c
+@@ -439,17 +439,11 @@ db_port_iterate(thread, func)
+ void (*func)();
+ {
+ ipc_entry_t entry;
+- int index;
+ int n = 0;
+- int size;
+- ipc_space_t space;
+-
+- space = thread->task->itk_space;
+- entry = space->is_table;
+- size = space->is_table_size;
+- for (index = 0; index < size; index++, entry++) {
++ struct rdxtree_iter iter;
++ rdxtree_for_each(&thread->task->itk_space->is_map, &iter, entry) {
+ if (entry->ie_bits & MACH_PORT_TYPE_PORT_RIGHTS)
+- (*func)(index, (ipc_port_t) entry->ie_object,
++ (*func)(entry->ie_name, (ipc_port_t) entry->ie_object,
+ entry->ie_bits, n++);
+ }
+ return(n);
+@@ -460,16 +454,14 @@ db_lookup_port(
+ thread_t thread,
+ int id)
+ {
+- ipc_space_t space;
+ ipc_entry_t entry;
+
+ if (thread == THREAD_NULL)
+ return(0);
+- space = thread->task->itk_space;
+- if (id < 0 || id >= space->is_table_size)
++ if (id < 0)
+ return(0);
+- entry = &space->is_table[id];
+- if (entry->ie_bits & MACH_PORT_TYPE_PORT_RIGHTS)
++ entry = ipc_entry_lookup(thread->task->itk_space, (mach_port_t) id);
++ if (entry && entry->ie_bits & MACH_PORT_TYPE_PORT_RIGHTS)
+ return((ipc_port_t)entry->ie_object);
+ return(0);
+ }
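The two hunks above show the new lookup model: the fixed-size is_table walk is replaced by a radix-tree traversal of the space's is_map, and each entry now carries its own name in ie_name, so no index arithmetic is needed. Below is a minimal sketch of that iteration pattern. It uses only calls and fields that appear in this patch (rdxtree_for_each, ie_bits, MACH_PORT_TYPE_PORT_RIGHTS); the helper name count_port_rights is made up for illustration, and the code is kernel-internal, so it will not build outside the gnumach tree.

#include <mach/port.h>
#include <kern/rdxtree.h>
#include <ipc/ipc_entry.h>
#include <ipc/ipc_space.h>

/* Count the port rights in a space by walking its radix tree,
 * mirroring the loop that db_port_iterate now uses.
 * (Illustrative only; assumes the space is suitably locked.) */
static int
count_port_rights(ipc_space_t space)
{
        ipc_entry_t entry;
        struct rdxtree_iter iter;
        int n = 0;

        /* Every allocated entry records its own name in ie_name,
         * so the traversal no longer depends on table indices. */
        rdxtree_for_each(&space->is_map, &iter, entry) {
                if (entry->ie_bits & MACH_PORT_TYPE_PORT_RIGHTS)
                        n++;
        }
        return n;
}

The same rdxtree_for_each pattern reappears later in this patch in ipc_space_destroy.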
+diff --git a/include/mach_debug/ipc_info.h b/include/mach_debug/ipc_info.h
+index ef0b0c6..8c5bc5a 100644
+--- a/include/mach_debug/ipc_info.h
++++ b/include/mach_debug/ipc_info.h
+@@ -56,14 +56,11 @@ typedef struct ipc_info_space {
+
+ typedef struct ipc_info_name {
+ mach_port_t iin_name; /* port name, including gen number */
+-/*boolean_t*/integer_t iin_collision; /* collision at this entry? */
+-/*boolean_t*/integer_t iin_compat; /* is this a compat-mode entry? */
+ /*boolean_t*/integer_t iin_marequest; /* extant msg-accepted request? */
+ mach_port_type_t iin_type; /* straight port type */
+ mach_port_urefs_t iin_urefs; /* user-references */
+ vm_offset_t iin_object; /* object pointer */
+ natural_t iin_next; /* marequest/next in free list */
+- natural_t iin_hash; /* hash index */
+ } ipc_info_name_t;
+
+ typedef ipc_info_name_t *ipc_info_name_array_t;
+diff --git a/include/mach_debug/mach_debug_types.defs b/include/mach_debug/mach_debug_types.defs
+index d24b6f9..e8399d6 100644
+--- a/include/mach_debug/mach_debug_types.defs
++++ b/include/mach_debug/mach_debug_types.defs
+@@ -40,7 +40,7 @@ type hash_info_bucket_array_t = array[] of hash_info_bucket_t;
+
+ type ipc_info_space_t = struct[6] of natural_t;
+
+-type ipc_info_name_t = struct[9] of natural_t;
++type ipc_info_name_t = struct[6] of natural_t;
+ type ipc_info_name_array_t = array[] of ipc_info_name_t;
+
+ type ipc_info_tree_name_t = struct[11] of natural_t;
+diff --git a/ipc/ipc_entry.c b/ipc/ipc_entry.c
+index 58d453b..12c95fd 100644
+--- a/ipc/ipc_entry.c
++++ b/ipc/ipc_entry.c
+@@ -46,49 +46,11 @@
+ #include <ipc/ipc_types.h>
+ #include <ipc/ipc_entry.h>
+ #include <ipc/ipc_space.h>
+-#include <ipc/ipc_splay.h>
+-#include <ipc/ipc_hash.h>
+ #include <ipc/ipc_table.h>
+ #include <ipc/ipc_object.h>
+
+ struct kmem_cache ipc_entry_cache;
+
+-struct kmem_cache ipc_tree_entry_cache;
+-
+-/*
+- * Routine: ipc_entry_tree_collision
+- * Purpose:
+- * Checks if "name" collides with an allocated name
+- * in the space's tree. That is, returns TRUE
+- * if the splay tree contains a name with the same
+- * index as "name".
+- * Conditions:
+- * The space is locked (read or write) and active.
+- */
+-
+-boolean_t
+-ipc_entry_tree_collision(
+- ipc_space_t space,
+- mach_port_t name)
+-{
+- assert(!"reached");
+- mach_port_index_t index;
+- mach_port_t lower, upper;
+-
+- assert(space->is_active);
+-
+- /*
+- * Check if we collide with the next smaller name
+- * or the next larger name.
+- */
+-
+- ipc_splay_tree_bounds(&space->is_tree, name, &lower, &upper);
+-
+- index = MACH_PORT_INDEX(name);
+- return (((lower != ~0) && (MACH_PORT_INDEX(lower) == index)) ||
+- ((upper != 0) && (MACH_PORT_INDEX(upper) == index)));
+-}
+-
+ /*
+ * Routine: ipc_entry_lookup
+ * Purpose:
+@@ -259,9 +221,6 @@ ipc_entry_alloc_name(
+ ipc_entry_t *entryp)
+ {
+ kern_return_t kr;
+- mach_port_index_t index = MACH_PORT_INDEX(name);
+- mach_port_gen_t gen = MACH_PORT_GEN(name);
+-
+ ipc_entry_t entry, e, *prevp;
+ void **slot;
+ assert(MACH_PORT_VALID(name));
+@@ -322,7 +281,7 @@ ipc_entry_alloc_name(
+ *prevp = entry->ie_next_free;
+ space->is_free_list_size -= 1;
+
+- entry->ie_bits = gen;
++ entry->ie_bits = 0;
+ assert(entry->ie_object == IO_NULL);
+ assert(entry->ie_name == name);
+ entry->ie_request = 0;
+@@ -364,312 +323,6 @@ ipc_entry_dealloc(
+ space->is_size -= 1;
+ }
+
+-/*
+- * Routine: ipc_entry_grow_table
+- * Purpose:
+- * Grows the table in a space.
+- * Conditions:
+- * The space must be write-locked and active before.
+- * If successful, it is also returned locked.
+- * Allocates memory.
+- * Returns:
+- * KERN_SUCCESS Grew the table.
+- * KERN_SUCCESS Somebody else grew the table.
+- * KERN_SUCCESS The space died.
+- * KERN_NO_SPACE Table has maximum size already.
+- * KERN_RESOURCE_SHORTAGE Couldn't allocate a new table.
+- */
+-
+-kern_return_t
+-ipc_entry_grow_table(ipc_space_t space)
+-{
+- assert(!"reached");
+- ipc_entry_num_t osize, size, nsize;
+-
+- do {
+- ipc_entry_t otable, table;
+- ipc_table_size_t oits, its, nits;
+- mach_port_index_t i, free_index;
+-
+- assert(space->is_active);
+-
+- if (space->is_growing) {
+- /*
+- * Somebody else is growing the table.
+- * We just wait for them to finish.
+- */
+-
+- assert_wait((event_t) space, FALSE);
+- is_write_unlock(space);
+- thread_block((void (*)()) 0);
+- is_write_lock(space);
+- return KERN_SUCCESS;
+- }
+-
+- otable = space->is_table;
+- its = space->is_table_next;
+- size = its->its_size;
+- oits = its - 1;
+- osize = oits->its_size;
+- nits = its + 1;
+- nsize = nits->its_size;
+-
+- if (osize == size) {
+- is_write_unlock(space);
+- printf_once("no more room for ipc_entry_grow_table in space %p\n", space);
+- return KERN_NO_SPACE;
+- }
+-
+- assert((osize < size) && (size <= nsize));
+-
+- /*
+- * OK, we'll attempt to grow the table.
+- * The realloc requires that the old table
+- * remain in existence.
+- */
+-
+- space->is_growing = TRUE;
+- is_write_unlock(space);
+- if (it_entries_reallocable(oits))
+- table = it_entries_realloc(oits, otable, its);
+- else
+- table = it_entries_alloc(its);
+- is_write_lock(space);
+- space->is_growing = FALSE;
+-
+- /*
+- * We need to do a wakeup on the space,
+- * to rouse waiting threads. We defer
+- * this until the space is unlocked,
+- * because we don't want them to spin.
+- */
+-
+- if (table == IE_NULL) {
+- is_write_unlock(space);
+- thread_wakeup((event_t) space);
+- return KERN_RESOURCE_SHORTAGE;
+- }
+-
+- if (!space->is_active) {
+- /*
+- * The space died while it was unlocked.
+- */
+-
+- is_write_unlock(space);
+- thread_wakeup((event_t) space);
+- it_entries_free(its, table);
+- ipc_reverse_remove_all(space);
+- is_write_lock(space);
+- return KERN_SUCCESS;
+- }
+-
+- assert(space->is_table == otable);
+- assert(space->is_table_next == its);
+- assert(space->is_table_size == osize);
+-
+- space->is_table = table;
+- space->is_table_size = size;
+- space->is_table_next = nits;
+-
+- /*
+- * If we did a realloc, it remapped the data.
+- * Otherwise we copy by hand first. Then we have
+- * to clear the index fields in the old part and
+- * zero the new part.
+- */
+-
+- if (!it_entries_reallocable(oits))
+- memcpy(table, otable,
+- osize * sizeof(struct ipc_entry));
+-
+- (void) memset((void *) (table + osize), 0,
+- (size - osize) * sizeof(struct ipc_entry));
+-
+- /*
+- * Put old entries into the reverse hash table.
+- */
+-
+- ipc_reverse_remove_all(space);
+- for (i = 0; i < osize; i++) {
+- ipc_entry_t entry = &table[i];
+-
+- if (IE_BITS_TYPE(entry->ie_bits) ==
+- MACH_PORT_TYPE_SEND)
+- ipc_hash_local_insert(space, entry->ie_object,
+- i, entry);
+- }
+-
+- /*
+- * If there are entries in the splay tree,
+- * then we have work to do:
+- * 1) transfer entries to the table
+- * 2) update is_tree_small
+- */
+-
+- if (space->is_tree_total > 0) {
+- mach_port_index_t index;
+- boolean_t delete;
+- struct ipc_splay_tree ignore;
+- struct ipc_splay_tree move;
+- struct ipc_splay_tree small;
+- ipc_entry_num_t nosmall;
+- ipc_tree_entry_t tentry;
+-
+- /*
+- * The splay tree divides into four regions,
+- * based on the index of the entries:
+- * 1) 0 <= index < osize
+- * 2) osize <= index < size
+- * 3) size <= index < nsize
+- * 4) nsize <= index
+- *
+- * Entries in the first part are ignored.
+- * Entries in the second part, that don't
+- * collide, are moved into the table.
+- * Entries in the third part, that don't
+- * collide, are counted for is_tree_small.
+- * Entries in the fourth part are ignored.
+- */
+-
+- ipc_splay_tree_split(&space->is_tree,
+- MACH_PORT_MAKE(nsize, 0),
+- &small);
+- ipc_splay_tree_split(&small,
+- MACH_PORT_MAKE(size, 0),
+- &move);
+- ipc_splay_tree_split(&move,
+- MACH_PORT_MAKE(osize, 0),
+- &ignore);
+-
+- /* move entries into the table */
+-
+- for (tentry = ipc_splay_traverse_start(&move);
+- tentry != ITE_NULL;
+- tentry = ipc_splay_traverse_next(&move, delete)) {
+- mach_port_t name;
+- mach_port_gen_t gen;
+- mach_port_type_t type;
+- ipc_entry_bits_t bits;
+- ipc_object_t obj;
+- ipc_entry_t entry;
+-
+- name = tentry->ite_name;
+- gen = MACH_PORT_GEN(name);
+- index = MACH_PORT_INDEX(name);
+-
+- assert(tentry->ite_space == space);
+- assert((osize <= index) && (index < size));
+-
+- entry = &table[index];
+-
+- /* collision with previously moved entry? */
+-
+- bits = entry->ie_bits;
+- if (bits != 0) {
+- assert(IE_BITS_TYPE(bits));
+- assert(IE_BITS_GEN(bits) != gen);
+-
+- entry->ie_bits =
+- bits | IE_BITS_COLLISION;
+- delete = FALSE;
+- continue;
+- }
+-
+- bits = tentry->ite_bits;
+- type = IE_BITS_TYPE(bits);
+- assert(type != MACH_PORT_TYPE_NONE);
+-
+- entry->ie_bits = bits | gen;
+- entry->ie_object = obj = tentry->ite_object;
+- entry->ie_request = tentry->ite_request;
+-
+- if (type == MACH_PORT_TYPE_SEND) {
+- ipc_hash_global_delete(space, obj,
+- name, tentry);
+- ipc_hash_local_insert(space, obj,
+- index, entry);
+- }
+-
+- space->is_tree_total--;
+- delete = TRUE;
+- }
+- ipc_splay_traverse_finish(&move);
+-
+- /* count entries for is_tree_small */
+-
+- nosmall = 0; index = 0;
+- for (tentry = ipc_splay_traverse_start(&small);
+- tentry != ITE_NULL;
+- tentry = ipc_splay_traverse_next(&small, FALSE)) {
+- mach_port_index_t nindex;
+-
+- nindex = MACH_PORT_INDEX(tentry->ite_name);
+-
+- if (nindex != index) {
+- nosmall++;
+- index = nindex;
+- }
+- }
+- ipc_splay_traverse_finish(&small);
+-
+- assert(nosmall <= (nsize - size));
+- assert(nosmall <= space->is_tree_total);
+- space->is_tree_small = nosmall;
+-
+- /* put the splay tree back together */
+-
+- ipc_splay_tree_join(&space->is_tree, &small);
+- ipc_splay_tree_join(&space->is_tree, &move);
+- ipc_splay_tree_join(&space->is_tree, &ignore);
+- }
+-
+- /*
+- * Add entries in the new part which still aren't used
+- * to the free list. Add them in reverse order,
+- * and set the generation number to -1, so that
+- * early allocations produce "natural" names.
+- */
+-
+- free_index = table[0].ie_next;
+- for (i = size-1; i >= osize; --i) {
+- ipc_entry_t entry = &table[i];
+-
+- if (entry->ie_bits == 0) {
+- entry->ie_bits = IE_BITS_GEN_MASK;
+- entry->ie_next = free_index;
+- free_index = i;
+- }
+- }
+- table[0].ie_next = free_index;
+-
+- /*
+- * Now we need to free the old table.
+- * If the space dies or grows while unlocked,
+- * then we can quit here.
+- */
+-
+- is_write_unlock(space);
+- thread_wakeup((event_t) space);
+- it_entries_free(oits, otable);
+- is_write_lock(space);
+- if (!space->is_active || (space->is_table_next != nits))
+- return KERN_SUCCESS;
+-
+- /*
+- * We might have moved enough entries from
+- * the splay tree into the table that
+- * the table can be profitably grown again.
+- *
+- * Note that if size == nsize, then
+- * space->is_tree_small == 0.
+- */
+- } while ((space->is_tree_small > 0) &&
+- (((nsize - size) * sizeof(struct ipc_entry)) <
+- (space->is_tree_small * sizeof(struct ipc_tree_entry))));
+-
+- return KERN_SUCCESS;
+-}
+-
+
+ #if MACH_KDB
+ #include <ddb/db_output.h>
+diff --git a/ipc/ipc_entry.h b/ipc/ipc_entry.h
+index 09e1223..451dc53 100644
+--- a/ipc/ipc_entry.h
++++ b/ipc/ipc_entry.h
+@@ -63,26 +63,19 @@ typedef unsigned int ipc_entry_bits_t;
+ typedef ipc_table_elems_t ipc_entry_num_t; /* number of entries */
+
+ typedef struct ipc_entry {
++ mach_port_t ie_name;
+ ipc_entry_bits_t ie_bits;
+ struct ipc_object *ie_object;
+ union {
+ struct ipc_entry *next_free;
+- mach_port_index_t next;
+ /*XXX ipc_port_request_index_t request;*/
+ unsigned int request;
+ } index;
+- union {
+- mach_port_index_t table;
+- struct ipc_tree_entry *tree;
+- } hash;
+ } *ipc_entry_t;
+
+ #define IE_NULL ((ipc_entry_t) 0)
+
+ #define ie_request index.request
+-#define ie_next index.next
+-#define ie_index hash.table
+-#define ie_name hash.table
+ #define ie_next_free index.next_free
+
+ #define IE_BITS_UREFS_MASK 0x0000ffff /* 16 bits of user-reference */
+@@ -93,12 +86,10 @@ typedef struct ipc_entry {
+
+ #define IE_BITS_MAREQUEST 0x00200000 /* 1 bit for msg-accepted */
+
+-#define IE_BITS_COMPAT 0x00400000 /* 1 bit for compatibility */
+-
+-#define IE_BITS_COLLISION 0x00800000 /* 1 bit for collisions */
+-#define IE_BITS_RIGHT_MASK 0x007fffff /* relevant to the right */
++#define IE_BITS_RIGHT_MASK 0x003fffff /* relevant to the right */
+
+ #if PORT_GENERATIONS
++#error "not supported"
+ #define IE_BITS_GEN_MASK 0xff000000U /* 8 bits for generation */
+ #define IE_BITS_GEN(bits) ((bits) & IE_BITS_GEN_MASK)
+ #define IE_BITS_GEN_ONE 0x01000000 /* low bit of generation */
+@@ -109,26 +100,6 @@ typedef struct ipc_entry {
+ #endif
+
+
+-typedef struct ipc_tree_entry {
+- struct ipc_entry ite_entry;
+- mach_port_t ite_name;
+- struct ipc_space *ite_space;
+- struct ipc_tree_entry *ite_lchild;
+- struct ipc_tree_entry *ite_rchild;
+-} *ipc_tree_entry_t;
+-
+-#define ITE_NULL ((ipc_tree_entry_t) 0)
+-
+-#define ite_bits ite_entry.ie_bits
+-#define ite_object ite_entry.ie_object
+-#define ite_request ite_entry.ie_request
+-#define ite_next ite_entry.hash.tree
+-
+-extern struct kmem_cache ipc_tree_entry_cache;
+-
+-#define ite_alloc() ((ipc_tree_entry_t) kmem_cache_alloc(&ipc_tree_entry_cache))
+-#define ite_free(ite) kmem_cache_free(&ipc_tree_entry_cache, (vm_offset_t) (ite))
+-
+ extern struct kmem_cache ipc_entry_cache;
+ #define ie_alloc() ((ipc_entry_t) kmem_cache_alloc(&ipc_entry_cache))
+ #define ie_free(e) kmem_cache_free(&ipc_entry_cache, (vm_offset_t) (e))
+@@ -148,9 +119,6 @@ ipc_entry_alloc_name(ipc_space_t space, mach_port_t name, ipc_entry_t *entryp);
+ extern void
+ ipc_entry_dealloc(ipc_space_t space, mach_port_t name, ipc_entry_t entry);
+
+-extern kern_return_t
+-ipc_entry_grow_table(ipc_space_t space);
+-
+ ipc_entry_t
+ db_ipc_object_by_name(
+ task_t task,
+diff --git a/ipc/ipc_hash.c b/ipc/ipc_hash.c
+deleted file mode 100644
+index 682b854..0000000
+--- a/ipc/ipc_hash.c
++++ /dev/null
+@@ -1,475 +0,0 @@
+-/*
+- * Mach Operating System
+- * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+- * All Rights Reserved.
+- *
+- * Permission to use, copy, modify and distribute this software and its
+- * documentation is hereby granted, provided that both the copyright
+- * notice and this permission notice appear in all copies of the
+- * software, derivative works or modified versions, and any portions
+- * thereof, and that both notices appear in supporting documentation.
+- *
+- * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+- * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+- * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+- *
+- * Carnegie Mellon requests users of this software to return to
+- *
+- * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+- * School of Computer Science
+- * Carnegie Mellon University
+- * Pittsburgh PA 15213-3890
+- *
+- * any improvements or extensions that they make and grant Carnegie Mellon
+- * the rights to redistribute these changes.
+- */
+-/*
+- * File: ipc/ipc_hash.c
+- * Author: Rich Draves
+- * Date: 1989
+- *
+- * Entry hash table operations.
+- */
+-
+-#include <kern/printf.h>
+-#include <mach/boolean.h>
+-#include <mach/port.h>
+-#include <kern/lock.h>
+-#include <kern/kalloc.h>
+-#include <ipc/port.h>
+-#include <ipc/ipc_space.h>
+-#include <ipc/ipc_object.h>
+-#include <ipc/ipc_entry.h>
+-#include <ipc/ipc_hash.h>
+-#include <ipc/ipc_init.h>
+-#include <ipc/ipc_types.h>
+-
+-#if MACH_IPC_DEBUG
+-#include <mach/kern_return.h>
+-#include <mach_debug/hash_info.h>
+-#include <vm/vm_map.h>
+-#include <vm/vm_kern.h>
+-#include <vm/vm_user.h>
+-#endif
+-
+-
+-
+-/*
+- * Routine: ipc_hash_lookup
+- * Purpose:
+- * Converts (space, obj) -> (name, entry).
+- * Returns TRUE if an entry was found.
+- * Conditions:
+- * The space must be locked (read or write) throughout.
+- */
+-
+-boolean_t
+-ipc_hash_lookup(
+- ipc_space_t space,
+- ipc_object_t obj,
+- mach_port_t *namep,
+- ipc_entry_t *entryp)
+-{
+- return ipc_hash_local_lookup(space, obj, namep, entryp);
+-}
+-
+-/*
+- * Routine: ipc_hash_insert
+- * Purpose:
+- * Inserts an entry into the appropriate reverse hash table,
+- * so that ipc_hash_lookup will find it.
+- * Conditions:
+- * The space must be write-locked.
+- */
+-
+-void
+-ipc_hash_insert(
+- ipc_space_t space,
+- ipc_object_t obj,
+- mach_port_t name,
+- ipc_entry_t entry)
+-{
+- mach_port_index_t index;
+-
+- index = MACH_PORT_INDEX(name);
+- ipc_hash_local_insert(space, obj, index, entry);
+-}
+-
+-/*
+- * Routine: ipc_hash_delete
+- * Purpose:
+- * Deletes an entry from the appropriate reverse hash table.
+- * Conditions:
+- * The space must be write-locked.
+- */
+-
+-void
+-ipc_hash_delete(
+- ipc_space_t space,
+- ipc_object_t obj,
+- mach_port_t name,
+- ipc_entry_t entry)
+-{
+- mach_port_index_t index;
+-
+- index = MACH_PORT_INDEX(name);
+- ipc_hash_local_delete(space, obj, index, entry);
+-}
+-
+-/*
+- * The global reverse hash table holds splay tree entries.
+- * It is a simple open-chaining hash table with singly-linked buckets.
+- * Each bucket is locked separately, with an exclusive lock.
+- * Within each bucket, move-to-front is used.
+- */
+-
+-ipc_hash_index_t ipc_hash_global_size;
+-ipc_hash_index_t ipc_hash_global_mask;
+-
+-#define IH_GLOBAL_HASH(space, obj) \
+- (((((ipc_hash_index_t) ((vm_offset_t)space)) >> 4) + \
+- (((ipc_hash_index_t) ((vm_offset_t)obj)) >> 6)) & \
+- ipc_hash_global_mask)
+-
+-typedef struct ipc_hash_global_bucket {
+- decl_simple_lock_data(, ihgb_lock_data)
+- ipc_tree_entry_t ihgb_head;
+-} *ipc_hash_global_bucket_t;
+-
+-#define IHGB_NULL ((ipc_hash_global_bucket_t) 0)
+-
+-#define ihgb_lock_init(ihgb) simple_lock_init(&(ihgb)->ihgb_lock_data)
+-#define ihgb_lock(ihgb) simple_lock(&(ihgb)->ihgb_lock_data)
+-#define ihgb_unlock(ihgb) simple_unlock(&(ihgb)->ihgb_lock_data)
+-
+-ipc_hash_global_bucket_t ipc_hash_global_table;
+-
+-/*
+- * Routine: ipc_hash_global_lookup
+- * Purpose:
+- * Converts (space, obj) -> (name, entry).
+- * Looks in the global table, for splay tree entries.
+- * Returns TRUE if an entry was found.
+- * Conditions:
+- * The space must be locked (read or write) throughout.
+- */
+-
+-boolean_t
+-ipc_hash_global_lookup(
+- ipc_space_t space,
+- ipc_object_t obj,
+- mach_port_t *namep,
+- ipc_tree_entry_t *entryp)
+-{
+- assert(!"reached");
+- ipc_hash_global_bucket_t bucket;
+- ipc_tree_entry_t this, *last;
+-
+- assert(space != IS_NULL);
+- assert(obj != IO_NULL);
+-
+- bucket = &ipc_hash_global_table[IH_GLOBAL_HASH(space, obj)];
+- ihgb_lock(bucket);
+-
+- if ((this = bucket->ihgb_head) != ITE_NULL) {
+- if ((this->ite_object == obj) &&
+- (this->ite_space == space)) {
+- /* found it at front; no need to move */
+-
+- *namep = this->ite_name;
+- *entryp = this;
+- } else for (last = &this->ite_next;
+- (this = *last) != ITE_NULL;
+- last = &this->ite_next) {
+- if ((this->ite_object == obj) &&
+- (this->ite_space == space)) {
+- /* found it; move to front */
+-
+- *last = this->ite_next;
+- this->ite_next = bucket->ihgb_head;
+- bucket->ihgb_head = this;
+-
+- *namep = this->ite_name;
+- *entryp = this;
+- break;
+- }
+- }
+- }
+-
+- ihgb_unlock(bucket);
+- return this != ITE_NULL;
+-}
+-
+-/*
+- * Routine: ipc_hash_global_insert
+- * Purpose:
+- * Inserts an entry into the global reverse hash table.
+- * Conditions:
+- * The space must be write-locked.
+- */
+-
+-void
+-ipc_hash_global_insert(
+- ipc_space_t space,
+- ipc_object_t obj,
+- mach_port_t name,
+- ipc_tree_entry_t entry)
+-{
+- assert(!"reached");
+- ipc_hash_global_bucket_t bucket;
+-
+-
+- assert(entry->ite_name == name);
+- assert(space != IS_NULL);
+- assert(entry->ite_space == space);
+- assert(obj != IO_NULL);
+- assert(entry->ite_object == obj);
+-
+- space->is_tree_hash++;
+- assert(space->is_tree_hash <= space->is_tree_total);
+-
+- bucket = &ipc_hash_global_table[IH_GLOBAL_HASH(space, obj)];
+- ihgb_lock(bucket);
+-
+- /* insert at front of bucket */
+-
+- entry->ite_next = bucket->ihgb_head;
+- bucket->ihgb_head = entry;
+-
+- ihgb_unlock(bucket);
+-}
+-
+-/*
+- * Routine: ipc_hash_global_delete
+- * Purpose:
+- * Deletes an entry from the global reverse hash table.
+- * Conditions:
+- * The space must be write-locked.
+- */
+-
+-void
+-ipc_hash_global_delete(
+- ipc_space_t space,
+- ipc_object_t obj,
+- mach_port_t name,
+- ipc_tree_entry_t entry)
+-{
+- assert(!"reached");
+- ipc_hash_global_bucket_t bucket;
+- ipc_tree_entry_t this, *last;
+-
+- assert(entry->ite_name == name);
+- assert(space != IS_NULL);
+- assert(entry->ite_space == space);
+- assert(obj != IO_NULL);
+- assert(entry->ite_object == obj);
+-
+- assert(space->is_tree_hash > 0);
+- space->is_tree_hash--;
+-
+- bucket = &ipc_hash_global_table[IH_GLOBAL_HASH(space, obj)];
+- ihgb_lock(bucket);
+-
+- for (last = &bucket->ihgb_head;
+- (this = *last) != ITE_NULL;
+- last = &this->ite_next) {
+- if (this == entry) {
+- /* found it; remove from bucket */
+-
+- *last = this->ite_next;
+- break;
+- }
+- }
+- assert(this != ITE_NULL);
+-
+- ihgb_unlock(bucket);
+-}
+-
+-/*
+- * Each space has a local reverse mapping from objects to entries
+- * from the space's table. This used to be a hash table.
+- */
+-
+-#define IPC_LOCAL_HASH_INVARIANT(S, O, N, E) \
+- MACRO_BEGIN \
+- assert(IE_BITS_TYPE((E)->ie_bits) == MACH_PORT_TYPE_SEND || \
+- IE_BITS_TYPE((E)->ie_bits) == MACH_PORT_TYPE_SEND_RECEIVE || \
+- IE_BITS_TYPE((E)->ie_bits) == MACH_PORT_TYPE_NONE); \
+- assert((E)->ie_object == (O)); \
+- assert((E)->ie_index == (N)); \
+- MACRO_END
+-
+-/*
+- * Routine: ipc_hash_local_lookup
+- * Purpose:
+- * Converts (space, obj) -> (name, entry).
+- * Looks in the space's local table, for table entries.
+- * Returns TRUE if an entry was found.
+- * Conditions:
+- * The space must be locked (read or write) throughout.
+- */
+-
+-boolean_t
+-ipc_hash_local_lookup(
+- ipc_space_t space,
+- ipc_object_t obj,
+- mach_port_t *namep,
+- ipc_entry_t *entryp)
+-{
+- assert(space != IS_NULL);
+- assert(obj != IO_NULL);
+-
+- *entryp = ipc_reverse_lookup(space, obj);
+- if (*entryp != IE_NULL) {
+- *namep = (*entryp)->ie_index;
+- IPC_LOCAL_HASH_INVARIANT(space, obj, *namep, *entryp);
+- return TRUE;
+- }
+- return FALSE;
+-}
+-
+-/*
+- * Routine: ipc_hash_local_insert
+- * Purpose:
+- * Inserts an entry into the space's reverse hash table.
+- * Conditions:
+- * The space must be write-locked.
+- */
+-
+-void
+-ipc_hash_local_insert(
+- ipc_space_t space,
+- ipc_object_t obj,
+- mach_port_index_t index,
+- ipc_entry_t entry)
+-{
+- kern_return_t kr;
+- assert(index != 0);
+- assert(space != IS_NULL);
+- assert(obj != IO_NULL);
+-
+- entry->ie_index = index;
+- IPC_LOCAL_HASH_INVARIANT(space, obj, index, entry);
+- kr = ipc_reverse_insert(space, obj, entry);
+- assert(kr == 0);
+-}
+-
+-/*
+- * Routine: ipc_hash_local_delete
+- * Purpose:
+- * Deletes an entry from the space's reverse hash table.
+- * Conditions:
+- * The space must be write-locked.
+- */
+-
+-void
+-ipc_hash_local_delete(
+- ipc_space_t space,
+- ipc_object_t obj,
+- mach_port_index_t index,
+- ipc_entry_t entry)
+-{
+- ipc_entry_t removed;
+- assert(index != MACH_PORT_NULL);
+- assert(space != IS_NULL);
+- assert(obj != IO_NULL);
+-
+- IPC_LOCAL_HASH_INVARIANT(space, obj, index, entry);
+- removed = ipc_reverse_remove(space, obj);
+- assert(removed == entry);
+-}
+-
+-/*
+- * Routine: ipc_hash_init
+- * Purpose:
+- * Initialize the reverse hash table implementation.
+- */
+-
+-void
+-ipc_hash_init(void)
+-{
+- ipc_hash_index_t i;
+-
+- /* initialize ipc_hash_global_size */
+-
+- ipc_hash_global_size = IPC_HASH_GLOBAL_SIZE;
+-
+- /* make sure it is a power of two */
+-
+- ipc_hash_global_mask = ipc_hash_global_size - 1;
+- if ((ipc_hash_global_size & ipc_hash_global_mask) != 0) {
+- natural_t bit;
+-
+- /* round up to closest power of two */
+-
+- for (bit = 1;; bit <<= 1) {
+- ipc_hash_global_mask |= bit;
+- ipc_hash_global_size = ipc_hash_global_mask + 1;
+-
+- if ((ipc_hash_global_size & ipc_hash_global_mask) == 0)
+- break;
+- }
+- }
+-
+- /* allocate ipc_hash_global_table */
+-
+- ipc_hash_global_table = (ipc_hash_global_bucket_t)
+- kalloc((vm_size_t) (ipc_hash_global_size *
+- sizeof(struct ipc_hash_global_bucket)));
+- assert(ipc_hash_global_table != IHGB_NULL);
+-
+- /* and initialize it */
+-
+- for (i = 0; i < ipc_hash_global_size; i++) {
+- ipc_hash_global_bucket_t bucket;
+-
+- bucket = &ipc_hash_global_table[i];
+- ihgb_lock_init(bucket);
+- bucket->ihgb_head = ITE_NULL;
+- }
+-}
+-
+-#if MACH_IPC_DEBUG
+-
+-/*
+- * Routine: ipc_hash_info
+- * Purpose:
+- * Return information about the global reverse hash table.
+- * Fills the buffer with as much information as possible
+- * and returns the desired size of the buffer.
+- * Conditions:
+- * Nothing locked. The caller should provide
+- * possibly-pageable memory.
+- */
+-
+-
+-ipc_hash_index_t
+-ipc_hash_info(
+- hash_info_bucket_t *info,
+- mach_msg_type_number_t count)
+-{
+- ipc_hash_index_t i;
+-
+- if (ipc_hash_global_size < count)
+- count = ipc_hash_global_size;
+-
+- for (i = 0; i < count; i++) {
+- ipc_hash_global_bucket_t bucket = &ipc_hash_global_table[i];
+- unsigned int bucket_count = 0;
+- ipc_tree_entry_t entry;
+-
+- ihgb_lock(bucket);
+- for (entry = bucket->ihgb_head;
+- entry != ITE_NULL;
+- entry = entry->ite_next)
+- bucket_count++;
+- ihgb_unlock(bucket);
+-
+- /* don't touch pageable memory while holding locks */
+- info[i].hib_count = bucket_count;
+- }
+-
+- return ipc_hash_global_size;
+-}
+-
+-#endif /* MACH_IPC_DEBUG */
+diff --git a/ipc/ipc_hash.h b/ipc/ipc_hash.h
+deleted file mode 100644
+index 929ba77..0000000
+--- a/ipc/ipc_hash.h
++++ /dev/null
+@@ -1,96 +0,0 @@
+-/*
+- * Mach Operating System
+- * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+- * All Rights Reserved.
+- *
+- * Permission to use, copy, modify and distribute this software and its
+- * documentation is hereby granted, provided that both the copyright
+- * notice and this permission notice appear in all copies of the
+- * software, derivative works or modified versions, and any portions
+- * thereof, and that both notices appear in supporting documentation.
+- *
+- * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+- * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+- * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+- *
+- * Carnegie Mellon requests users of this software to return to
+- *
+- * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+- * School of Computer Science
+- * Carnegie Mellon University
+- * Pittsburgh PA 15213-3890
+- *
+- * any improvements or extensions that they make and grant Carnegie Mellon
+- * the rights to redistribute these changes.
+- */
+-/*
+- * File: ipc/ipc_hash.h
+- * Author: Rich Draves
+- * Date: 1989
+- *
+- * Declarations of entry hash table operations.
+- */
+-
+-#ifndef _IPC_IPC_HASH_H_
+-#define _IPC_IPC_HASH_H_
+-
+-#include <mach/boolean.h>
+-#include <mach/kern_return.h>
+-
+-typedef natural_t ipc_hash_index_t;
+-
+-extern void
+-ipc_hash_init(void);
+-
+-#if MACH_IPC_DEBUG
+-
+-extern ipc_hash_index_t
+-ipc_hash_info(hash_info_bucket_t *, mach_msg_type_number_t);
+-
+-#endif /* MACH_IPC_DEBUG */
+-
+-extern boolean_t
+-ipc_hash_lookup(ipc_space_t space, ipc_object_t obj,
+- mach_port_t *namep, ipc_entry_t *entryp);
+-
+-extern void
+-ipc_hash_insert(ipc_space_t space, ipc_object_t obj,
+- mach_port_t name, ipc_entry_t entry);
+-
+-extern void
+-ipc_hash_delete(ipc_space_t space, ipc_object_t obj,
+- mach_port_t name, ipc_entry_t entry);
+-
+-/*
+- * For use by functions that know what they're doing:
+- * the global primitives, for splay tree entries,
+- * and the local primitives, for table entries.
+- */
+-
+-#define IPC_HASH_GLOBAL_SIZE 256
+-
+-extern boolean_t
+-ipc_hash_global_lookup(ipc_space_t space, ipc_object_t obj,
+- mach_port_t *namep, ipc_tree_entry_t *entryp);
+-
+-extern void
+-ipc_hash_global_insert(ipc_space_t space, ipc_object_t obj,
+- mach_port_t name, ipc_tree_entry_t entry);
+-
+-extern void
+-ipc_hash_global_delete(ipc_space_t space, ipc_object_t obj,
+- mach_port_t name, ipc_tree_entry_t entry);
+-
+-extern boolean_t
+-ipc_hash_local_lookup(ipc_space_t space, ipc_object_t obj,
+- mach_port_t *namep, ipc_entry_t *entryp);
+-
+-extern void
+-ipc_hash_local_insert(ipc_space_t space, ipc_object_t obj,
+- mach_port_index_t index, ipc_entry_t entry);
+-
+-extern void
+-ipc_hash_local_delete(ipc_space_t space, ipc_object_t obj,
+- mach_port_index_t index, ipc_entry_t entry);
+-
+-#endif /* _IPC_IPC_HASH_H_ */
+diff --git a/ipc/ipc_init.c b/ipc/ipc_init.c
+index 096e0fb..2c58a6e 100644
+--- a/ipc/ipc_init.c
++++ b/ipc/ipc_init.c
+@@ -47,7 +47,6 @@
+ #include <ipc/ipc_marequest.h>
+ #include <ipc/ipc_notify.h>
+ #include <ipc/ipc_kmsg.h>
+-#include <ipc/ipc_hash.h>
+ #include <ipc/ipc_init.h>
+
+
+@@ -76,9 +75,6 @@ ipc_bootstrap(void)
+ kmem_cache_init(&ipc_space_cache, "ipc_space",
+ sizeof(struct ipc_space), 0, NULL, NULL, NULL, 0);
+
+- kmem_cache_init(&ipc_tree_entry_cache, "ipc_tree_entry",
+- sizeof(struct ipc_tree_entry), 0, NULL, NULL, NULL, 0);
+-
+ kmem_cache_init(&ipc_entry_cache, "ipc_entry",
+ sizeof(struct ipc_entry), 0, NULL, NULL, NULL, 0);
+
+@@ -100,7 +96,6 @@ ipc_bootstrap(void)
+
+ ipc_table_init();
+ ipc_notify_init();
+- ipc_hash_init();
+ ipc_marequest_init();
+ }
+
+diff --git a/ipc/ipc_kmsg.c b/ipc/ipc_kmsg.c
+index c484a5d..1034c6a 100644
+--- a/ipc/ipc_kmsg.c
++++ b/ipc/ipc_kmsg.c
+@@ -50,7 +50,6 @@
+ #include <vm/vm_user.h>
+ #include <ipc/port.h>
+ #include <ipc/ipc_entry.h>
+-#include <ipc/ipc_hash.h>
+ #include <ipc/ipc_kmsg.h>
+ #include <ipc/ipc_thread.h>
+ #include <ipc/ipc_marequest.h>
+@@ -1772,7 +1771,7 @@ ipc_kmsg_copyout_header(
+ break;
+
+ is_write_lock(space);
+- if (!space->is_active || space->is_table->ie_next == 0) {
++ if (!space->is_active || space->is_free_list == NULL) {
+ is_write_unlock(space);
+ break;
+ }
+@@ -2251,12 +2250,13 @@ ipc_kmsg_copyout_object(
+
+ ip_lock(port);
+ if (!ip_active(port) ||
+- !ipc_hash_local_lookup(space, (ipc_object_t) port,
+- namep, &entry)) {
++ (entry = ipc_reverse_lookup(space,
++ (ipc_object_t) port)) == NULL) {
+ ip_unlock(port);
+ is_write_unlock(space);
+ goto slow_copyout;
+ }
++ *namep = entry->ie_name;
+
+ /*
+ * Copyout the send right, incrementing urefs
+diff --git a/ipc/ipc_object.c b/ipc/ipc_object.c
+index ec40062..2d84cf5 100644
+--- a/ipc/ipc_object.c
++++ b/ipc/ipc_object.c
+@@ -41,7 +41,6 @@
+ #include <ipc/ipc_space.h>
+ #include <ipc/ipc_entry.h>
+ #include <ipc/ipc_object.h>
+-#include <ipc/ipc_hash.h>
+ #include <ipc/ipc_right.h>
+ #include <ipc/ipc_notify.h>
+ #include <ipc/ipc_pset.h>
+diff --git a/ipc/ipc_right.c b/ipc/ipc_right.c
+index 503eb1f..27a8e6d 100644
+--- a/ipc/ipc_right.c
++++ b/ipc/ipc_right.c
+@@ -43,7 +43,6 @@
+ #include <ipc/ipc_entry.h>
+ #include <ipc/ipc_space.h>
+ #include <ipc/ipc_object.h>
+-#include <ipc/ipc_hash.h>
+ #include <ipc/ipc_port.h>
+ #include <ipc/ipc_pset.h>
+ #include <ipc/ipc_marequest.h>
+@@ -142,7 +141,8 @@ ipc_right_reverse(
+ return TRUE;
+ }
+
+- if (ipc_hash_lookup(space, (ipc_object_t) port, namep, entryp)) {
++ if ((*entryp = ipc_reverse_lookup(space, (ipc_object_t) port))) {
++ *namep = (*entryp)->ie_name;
+ assert((entry = *entryp) != IE_NULL);
+ assert(IE_BITS_TYPE(entry->ie_bits) == MACH_PORT_TYPE_SEND);
+ assert(port == (ipc_port_t) entry->ie_object);
+@@ -392,7 +392,7 @@ ipc_right_check(
+ ipc_marequest_cancel(space, name);
+ }
+
+- ipc_hash_delete(space, (ipc_object_t) port, name, entry);
++ ipc_reverse_remove(space, (ipc_object_t)port);
+ } else {
+ assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_SEND_ONCE);
+ assert(IE_BITS_UREFS(bits) == 1);
+@@ -609,8 +609,7 @@ ipc_right_destroy(
+ }
+
+ if (type == MACH_PORT_TYPE_SEND)
+- ipc_hash_delete(space, (ipc_object_t) port,
+- name, entry);
++ ipc_reverse_remove(space, (ipc_object_t)port);
+
+ ip_lock(port);
+
+@@ -789,8 +788,7 @@ ipc_right_dealloc(
+ dnrequest = ipc_right_dncancel_macro(space, port,
+ name, entry);
+
+- ipc_hash_delete(space, (ipc_object_t) port,
+- name, entry);
++ ipc_reverse_remove(space, (ipc_object_t)port);
+
+ if (bits & IE_BITS_MAREQUEST)
+ ipc_marequest_cancel(space, name);
+@@ -1134,8 +1132,7 @@ ipc_right_delta(
+ dnrequest = ipc_right_dncancel_macro(
+ space, port, name, entry);
+
+- ipc_hash_delete(space, (ipc_object_t) port,
+- name, entry);
++ ipc_reverse_remove(space, (ipc_object_t)port);
+
+ if (bits & IE_BITS_MAREQUEST)
+ ipc_marequest_cancel(space, name);
+@@ -1410,8 +1407,8 @@ ipc_right_copyin(
+ assert(IE_BITS_UREFS(bits) > 0);
+ assert(port->ip_srights > 0);
+
+- ipc_hash_insert(space, (ipc_object_t) port,
+- name, entry);
++ entry->ie_name = name;
++ ipc_reverse_insert(space, (ipc_object_t)port, entry);
+
+ ip_reference(port);
+ } else {
+@@ -1534,8 +1531,7 @@ ipc_right_copyin(
+ dnrequest = ipc_right_dncancel_macro(
+ space, port, name, entry);
+
+- ipc_hash_delete(space, (ipc_object_t) port,
+- name, entry);
++ ipc_reverse_remove(space, (ipc_object_t)port);
+
+ if (bits & IE_BITS_MAREQUEST)
+ ipc_marequest_cancel(space, name);
+@@ -1796,8 +1792,7 @@ ipc_right_copyin_two(
+ dnrequest = ipc_right_dncancel_macro(space, port,
+ name, entry);
+
+- ipc_hash_delete(space, (ipc_object_t) port,
+- name, entry);
++ ipc_reverse_remove(space, (ipc_object_t)port);
+
+ if (bits & IE_BITS_MAREQUEST)
+ ipc_marequest_cancel(space, name);
+@@ -1921,8 +1916,8 @@ ipc_right_copyout(
+
+ /* entry is locked holding ref, so can use port */
+
+- ipc_hash_insert(space, (ipc_object_t) port,
+- name, entry);
++ entry->ie_name = name;
++ ipc_reverse_insert(space, (ipc_object_t)port, entry);
+ }
+
+ entry->ie_bits = (bits | MACH_PORT_TYPE_SEND) + 1;
+@@ -1956,8 +1951,7 @@ ipc_right_copyout(
+
+ /* entry is locked holding ref, so can use port */
+
+- ipc_hash_delete(space, (ipc_object_t) port,
+- name, entry);
++ ipc_reverse_remove(space, (ipc_object_t)port);
+ } else {
+ assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_NONE);
+ assert(IE_BITS_UREFS(bits) == 0);
+@@ -2083,7 +2077,7 @@ ipc_right_rename(
+ ipc_marequest_rename(space, oname, nname);
+ }
+
+- /* initialize nentry before letting ipc_hash_insert see it */
++ /* initialize nentry before letting ipc_reverse_insert see it */
+
+ assert((nentry->ie_bits & IE_BITS_RIGHT_MASK) == 0);
+ nentry->ie_bits |= bits & IE_BITS_RIGHT_MASK;
+@@ -2097,8 +2091,9 @@ ipc_right_rename(
+ port = (ipc_port_t) object;
+ assert(port != IP_NULL);
+
+- ipc_hash_delete(space, (ipc_object_t) port, oname, oentry);
+- ipc_hash_insert(space, (ipc_object_t) port, nname, nentry);
++ ipc_reverse_remove(space, (ipc_object_t)port);
++ nentry->ie_name = nname;
++ ipc_reverse_insert(space, (ipc_object_t)port, nentry);
+ break;
+ }
+
+diff --git a/ipc/ipc_space.c b/ipc/ipc_space.c
+index 5f939bb..ea3cb3b 100644
+--- a/ipc/ipc_space.c
++++ b/ipc/ipc_space.c
+@@ -46,8 +46,6 @@
+ #include <kern/slab.h>
+ #include <ipc/port.h>
+ #include <ipc/ipc_entry.h>
+-#include <ipc/ipc_splay.h>
+-#include <ipc/ipc_hash.h>
+ #include <ipc/ipc_table.h>
+ #include <ipc/ipc_port.h>
+ #include <ipc/ipc_space.h>
+@@ -105,52 +103,17 @@ ipc_space_create(
+ ipc_space_t *spacep)
+ {
+ ipc_space_t space;
+- ipc_entry_t table;
+- ipc_entry_num_t new_size;
+- mach_port_index_t index;
+
+ space = is_alloc();
+ if (space == IS_NULL)
+ return KERN_RESOURCE_SHORTAGE;
+
+- table = it_entries_alloc(initial);
+- if (table == IE_NULL) {
+- is_free(space);
+- return KERN_RESOURCE_SHORTAGE;
+- }
+-
+- new_size = initial->its_size;
+- memset((void *) table, 0, new_size * sizeof(struct ipc_entry));
+-
+- /*
+- * Initialize the free list in the table.
+- * Add the entries in reverse order, and
+- * set the generation number to -1, so that
+- * initial allocations produce "natural" names.
+- */
+-
+- for (index = 0; index < new_size; index++) {
+- ipc_entry_t entry = &table[index];
+-
+- entry->ie_bits = IE_BITS_GEN_MASK;
+- entry->ie_next = index+1;
+- }
+- table[new_size-1].ie_next = 0;
+-
+ is_ref_lock_init(space);
+ space->is_references = 2;
+
+ is_lock_init(space);
+ space->is_active = TRUE;
+- space->is_growing = FALSE;
+- space->is_table = table;
+- space->is_table_size = new_size;
+- space->is_table_next = initial+1;
+-
+- ipc_splay_tree_init(&space->is_tree);
+- space->is_tree_total = 0;
+- space->is_tree_small = 0;
+- space->is_tree_hash = 0;
++
+ rdxtree_init(&space->is_map);
+ rdxtree_init(&space->is_reverse_map);
+ /* The zeroth entry is reserved. */
+@@ -211,10 +174,6 @@ void
+ ipc_space_destroy(
+ ipc_space_t space)
+ {
+- ipc_tree_entry_t tentry;
+- ipc_entry_t table;
+- ipc_entry_num_t size;
+- mach_port_index_t index;
+ boolean_t active;
+
+ assert(space != IS_NULL);
+@@ -237,36 +196,13 @@ ipc_space_destroy(
+
+ if (type != MACH_PORT_TYPE_NONE) {
+ mach_port_t name =
+- MACH_PORT_MAKEB(index, entry->ie_bits);
++ MACH_PORT_MAKEB(entry->ie_name, entry->ie_bits);
+
+ ipc_right_clean(space, name, entry);
+ }
+- }
+-
+- it_entries_free(space->is_table_next-1, table);
+-
+- for (tentry = ipc_splay_traverse_start(&space->is_tree);
+- tentry != ITE_NULL;
+- tentry = ipc_splay_traverse_next(&space->is_tree, TRUE)) {
+- mach_port_type_t type = IE_BITS_TYPE(tentry->ite_bits);
+- mach_port_t name = tentry->ite_name;
+-
+- assert(type != MACH_PORT_TYPE_NONE);
+-
+- /* use object before ipc_right_clean releases ref */
+-
+- if (type == MACH_PORT_TYPE_SEND)
+- ipc_hash_global_delete(space, tentry->ite_object,
+- name, tentry);
+
+- ipc_right_clean(space, name, &tentry->ite_entry);
+- }
+- ipc_splay_traverse_finish(&space->is_tree);
+-
+- ipc_entry_t entry;
+- struct rdxtree_iter iter;
+- rdxtree_for_each(&space->is_map, &iter, entry)
+ ie_free(entry);
++ }
+ rdxtree_remove_all(&space->is_map);
+ rdxtree_remove_all(&space->is_reverse_map);
+
+diff --git a/ipc/ipc_space.h b/ipc/ipc_space.h
+index 17d5b75..c90a2a3 100644
+--- a/ipc/ipc_space.h
++++ b/ipc/ipc_space.h
+@@ -47,7 +47,7 @@
+ #include <kern/lock.h>
+ #include <kern/rdxtree.h>
+ #include <kern/slab.h>
+-#include <ipc/ipc_splay.h>
++#include <ipc/ipc_entry.h>
+ #include <ipc/ipc_types.h>
+
+ /*
+@@ -73,14 +73,6 @@ struct ipc_space {
+
+ decl_simple_lock_data(,is_lock_data)
+ boolean_t is_active; /* is the space alive? */
+- boolean_t is_growing; /* is the space growing? */
+- ipc_entry_t is_table; /* an array of entries */
+- ipc_entry_num_t is_table_size; /* current size of table */
+- struct ipc_table_size *is_table_next; /* info for larger table */
+- struct ipc_splay_tree is_tree; /* a splay tree of entries */
+- ipc_entry_num_t is_tree_total; /* number of entries in the tree */
+- ipc_entry_num_t is_tree_small; /* # of small entries in the tree */
+- ipc_entry_num_t is_tree_hash; /* # of hashed entries in the tree */
+ struct rdxtree is_map; /* a map of entries */
+ size_t is_size; /* number of entries */
+ struct rdxtree is_reverse_map; /* maps objects to entries */
+diff --git a/ipc/ipc_splay.c b/ipc/ipc_splay.c
+deleted file mode 100644
+index 6fb5bcb..0000000
+--- a/ipc/ipc_splay.c
++++ /dev/null
+@@ -1,920 +0,0 @@
+-/*
+- * Mach Operating System
+- * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+- * All Rights Reserved.
+- *
+- * Permission to use, copy, modify and distribute this software and its
+- * documentation is hereby granted, provided that both the copyright
+- * notice and this permission notice appear in all copies of the
+- * software, derivative works or modified versions, and any portions
+- * thereof, and that both notices appear in supporting documentation.
+- *
+- * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+- * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+- * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+- *
+- * Carnegie Mellon requests users of this software to return to
+- *
+- * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+- * School of Computer Science
+- * Carnegie Mellon University
+- * Pittsburgh PA 15213-3890
+- *
+- * any improvements or extensions that they make and grant Carnegie Mellon
+- * the rights to redistribute these changes.
+- */
+-/*
+- */
+-/*
+- * File: ipc/ipc_splay.c
+- * Author: Rich Draves
+- * Date: 1989
+- *
+- * Primitive splay tree operations.
+- */
+-
+-#include <mach/port.h>
+-#include <kern/assert.h>
+-#include <kern/macro_help.h>
+-#include <ipc/ipc_entry.h>
+-#include <ipc/ipc_splay.h>
+-
+-/*
+- * Splay trees are self-adjusting binary search trees.
+- * They have the following attractive properties:
+- * 1) Space efficient; only two pointers per entry.
+- * 2) Robust performance; amortized O(log n) per operation.
+- * 3) Recursion not needed.
+- * This makes them a good fall-back data structure for those
+- * entries that don't fit into the lookup table.
+- *
+- * The paper by Sleator and Tarjan, JACM v. 32, no. 3, pp. 652-686,
+- * describes the splaying operation. ipc_splay_prim_lookup
+- * and ipc_splay_prim_assemble implement the top-down splay
+- * described on p. 669.
+- *
+- * The tree is stored in an unassembled form. If ist_root is null,
+- * then the tree has no entries. Otherwise, ist_name records
+- * the value used for the last lookup. ist_root points to the
+- * middle tree obtained from the top-down splay. ist_ltree and
+- * ist_rtree point to left and right subtrees, whose entries
+- * are all smaller (larger) than those in the middle tree.
+- * ist_ltreep and ist_rtreep are pointers to fields in the
+- * left and right subtrees. ist_ltreep points to the rchild field
+- * of the largest entry in ltree, and ist_rtreep points to the
+- * lchild field of the smallest entry in rtree. The pointed-to
+- * fields aren't initialized. If the left (right) subtree is null,
+- * then ist_ltreep (ist_rtreep) points to the ist_ltree (ist_rtree)
+- * field in the splay structure itself.
+- *
+- * The primary advantage of the unassembled form is that repeated
+- * unsuccessful lookups are efficient. In particular, an unsuccessful
+- * lookup followed by an insert only requires one splaying operation.
+- *
+- * The traversal algorithm works via pointer inversion.
+- * When descending down the tree, child pointers are reversed
+- * to point back to the parent entry. When ascending,
+- * the pointers are restored to their original value.
+- *
+- * The biggest potential problem with the splay tree implementation
+- * is that the operations, even lookup, require an exclusive lock.
+- * If IPC spaces are protected with exclusive locks, then
+- * the splay tree doesn't require its own lock, and ist_lock/ist_unlock
+- * needn't do anything. If IPC spaces are protected with read/write
+- * locks then ist_lock/ist_unlock should provide exclusive access.
+- *
+- * If it becomes important to let lookups run in parallel,
+- * or if the restructuring makes lookups too expensive, then
+- * there is hope. Use a read/write lock on the splay tree.
+- * Keep track of the number of entries in the tree. When doing
+- * a lookup, first try a non-restructuring lookup with a read lock held,
+- * with a bound (based on log of size of the tree) on the number of
+- * entries to traverse. If the lookup runs up against the bound,
+- * then take a write lock and do a reorganizing lookup.
+- * This way, if lookups only access roughly balanced parts
+- * of the tree, then lookups run in parallel and do no restructuring.
+- *
+- * The traversal algorithm currently requires an exclusive lock.
+- * If that is a problem, the tree could be changed from an lchild/rchild
+- * representation to a leftmost child/right sibling representation.
+- * In conjunction with non-restructing lookups, this would let
+- * lookups and traversals all run in parallel. But this representation
+- * is more complicated and would slow down the operations.
+- */
+-
+-/*
+- * Boundary values to hand to ipc_splay_prim_lookup:
+- */
+-
+-#define MACH_PORT_SMALLEST ((mach_port_t) 0)
+-#define MACH_PORT_LARGEST ((mach_port_t) ~0)
+-
+-/*
+- * Routine: ipc_splay_prim_lookup
+- * Purpose:
+- * Searches for the node labeled name in the splay tree.
+- * Returns three nodes (treep, ltreep, rtreep) and
+- * two pointers to nodes (ltreepp, rtreepp).
+- *
+- * ipc_splay_prim_lookup splits the supplied tree into
+- * three subtrees, left, middle, and right, returned
+- * in ltreep, treep, and rtreep.
+- *
+- * If name is present in the tree, then it is at
+- * the root of the middle tree. Otherwise, the root
+- * of the middle tree is the last node traversed.
+- *
+- * ipc_splay_prim_lookup returns a pointer into
+- * the left subtree, to the rchild field of its
+- * largest node, in ltreepp. It returns a pointer
+- * into the right subtree, to the lchild field of its
+- * smallest node, in rtreepp.
+- */
+-
+-static void
+-ipc_splay_prim_lookup(
+- mach_port_t name,
+- ipc_tree_entry_t tree,
+- ipc_tree_entry_t *treep,
+- ipc_tree_entry_t *ltreep,
+- ipc_tree_entry_t **ltreepp,
+- ipc_tree_entry_t *rtreep,
+- ipc_tree_entry_t **rtreepp)
+-{
+- mach_port_t tname; /* temp name */
+- ipc_tree_entry_t lchild, rchild; /* temp child pointers */
+-
+- assert(tree != ITE_NULL);
+-
+-#define link_left \
+-MACRO_BEGIN \
+- *ltreep = tree; \
+- ltreep = &tree->ite_rchild; \
+- tree = *ltreep; \
+-MACRO_END
+-
+-#define link_right \
+-MACRO_BEGIN \
+- *rtreep = tree; \
+- rtreep = &tree->ite_lchild; \
+- tree = *rtreep; \
+-MACRO_END
+-
+-#define rotate_left \
+-MACRO_BEGIN \
+- ipc_tree_entry_t temp = tree; \
+- \
+- tree = temp->ite_rchild; \
+- temp->ite_rchild = tree->ite_lchild; \
+- tree->ite_lchild = temp; \
+-MACRO_END
+-
+-#define rotate_right \
+-MACRO_BEGIN \
+- ipc_tree_entry_t temp = tree; \
+- \
+- tree = temp->ite_lchild; \
+- temp->ite_lchild = tree->ite_rchild; \
+- tree->ite_rchild = temp; \
+-MACRO_END
+-
+- while (name != (tname = tree->ite_name)) {
+- if (name < tname) {
+- /* descend to left */
+-
+- lchild = tree->ite_lchild;
+- if (lchild == ITE_NULL)
+- break;
+- tname = lchild->ite_name;
+-
+- if ((name < tname) &&
+- (lchild->ite_lchild != ITE_NULL))
+- rotate_right;
+- link_right;
+- if ((name > tname) &&
+- (lchild->ite_rchild != ITE_NULL))
+- link_left;
+- } else {
+- /* descend to right */
+-
+- rchild = tree->ite_rchild;
+- if (rchild == ITE_NULL)
+- break;
+- tname = rchild->ite_name;
+-
+- if ((name > tname) &&
+- (rchild->ite_rchild != ITE_NULL))
+- rotate_left;
+- link_left;
+- if ((name < tname) &&
+- (rchild->ite_lchild != ITE_NULL))
+- link_right;
+- }
+-
+- assert(tree != ITE_NULL);
+- }
+-
+- *treep = tree;
+- *ltreepp = ltreep;
+- *rtreepp = rtreep;
+-
+-#undef link_left
+-#undef link_right
+-#undef rotate_left
+-#undef rotate_right
+-}
+-
+-/*
+- * Routine: ipc_splay_prim_assemble
+- * Purpose:
+- * Assembles the results of ipc_splay_prim_lookup
+- * into a splay tree with the found node at the root.
+- *
+- * ltree and rtree are by-reference so storing
+- * through ltreep and rtreep can change them.
+- */
+-
+-static void
+-ipc_splay_prim_assemble(
+- ipc_tree_entry_t tree,
+- ipc_tree_entry_t *ltree,
+- ipc_tree_entry_t *ltreep,
+- ipc_tree_entry_t *rtree,
+- ipc_tree_entry_t *rtreep)
+-{
+- assert(tree != ITE_NULL);
+-
+- *ltreep = tree->ite_lchild;
+- *rtreep = tree->ite_rchild;
+-
+- tree->ite_lchild = *ltree;
+- tree->ite_rchild = *rtree;
+-}
+-
+-/*
+- * Routine: ipc_splay_tree_init
+- * Purpose:
+- * Initialize a raw splay tree for use.
+- */
+-
+-void
+-ipc_splay_tree_init(
+- ipc_splay_tree_t splay)
+-{
+- splay->ist_root = ITE_NULL;
+-}
+-
+-/*
+- * Routine: ipc_splay_tree_pick
+- * Purpose:
+- * Picks and returns a random entry in a splay tree.
+- * Returns FALSE if the splay tree is empty.
+- */
+-
+-boolean_t
+-ipc_splay_tree_pick(
+- ipc_splay_tree_t splay,
+- mach_port_t *namep,
+- ipc_tree_entry_t *entryp)
+-{
+- ipc_tree_entry_t root;
+-
+- ist_lock(splay);
+-
+- root = splay->ist_root;
+- if (root != ITE_NULL) {
+- *namep = root->ite_name;
+- *entryp = root;
+- }
+-
+- ist_unlock(splay);
+-
+- return root != ITE_NULL;
+-}
+-
+-/*
+- * Routine: ipc_splay_tree_lookup
+- * Purpose:
+- * Finds an entry in a splay tree.
+- * Returns ITE_NULL if not found.
+- */
+-
+-ipc_tree_entry_t
+-ipc_splay_tree_lookup(
+- ipc_splay_tree_t splay,
+- mach_port_t name)
+-{
+- ipc_tree_entry_t root;
+-
+- ist_lock(splay);
+-
+- root = splay->ist_root;
+- if (root != ITE_NULL) {
+- if (splay->ist_name != name) {
+- ipc_splay_prim_assemble(root,
+- &splay->ist_ltree, splay->ist_ltreep,
+- &splay->ist_rtree, splay->ist_rtreep);
+- ipc_splay_prim_lookup(name, root, &root,
+- &splay->ist_ltree, &splay->ist_ltreep,
+- &splay->ist_rtree, &splay->ist_rtreep);
+- splay->ist_name = name;
+- splay->ist_root = root;
+- }
+-
+- if (name != root->ite_name)
+- root = ITE_NULL;
+- }
+-
+- ist_unlock(splay);
+-
+- return root;
+-}
+-
+-/*
+- * Routine: ipc_splay_tree_insert
+- * Purpose:
+- * Inserts a new entry into a splay tree.
+- * The caller supplies a new entry.
+- * The name can't already be present in the tree.
+- */
+-
+-void
+-ipc_splay_tree_insert(
+- ipc_splay_tree_t splay,
+- mach_port_t name,
+- ipc_tree_entry_t entry)
+-{
+- ipc_tree_entry_t root;
+-
+- assert(entry != ITE_NULL);
+-
+- ist_lock(splay);
+-
+- root = splay->ist_root;
+- if (root == ITE_NULL) {
+- entry->ite_lchild = ITE_NULL;
+- entry->ite_rchild = ITE_NULL;
+- } else {
+- if (splay->ist_name != name) {
+- ipc_splay_prim_assemble(root,
+- &splay->ist_ltree, splay->ist_ltreep,
+- &splay->ist_rtree, splay->ist_rtreep);
+- ipc_splay_prim_lookup(name, root, &root,
+- &splay->ist_ltree, &splay->ist_ltreep,
+- &splay->ist_rtree, &splay->ist_rtreep);
+- }
+-
+- assert(root->ite_name != name);
+-
+- if (name < root->ite_name) {
+- assert(root->ite_lchild == ITE_NULL);
+-
+- *splay->ist_ltreep = ITE_NULL;
+- *splay->ist_rtreep = root;
+- } else {
+- assert(root->ite_rchild == ITE_NULL);
+-
+- *splay->ist_ltreep = root;
+- *splay->ist_rtreep = ITE_NULL;
+- }
+-
+- entry->ite_lchild = splay->ist_ltree;
+- entry->ite_rchild = splay->ist_rtree;
+- }
+-
+- entry->ite_name = name;
+- splay->ist_root = entry;
+- splay->ist_name = name;
+- splay->ist_ltreep = &splay->ist_ltree;
+- splay->ist_rtreep = &splay->ist_rtree;
+-
+- ist_unlock(splay);
+-}
+-
+-/*
+- * Routine: ipc_splay_tree_delete
+- * Purpose:
+- * Deletes an entry from a splay tree.
+- * The name must be present in the tree.
+- * Frees the entry.
+- *
+- * The "entry" argument isn't currently used.
+- * Other implementations might want it, though.
+- */
+-
+-void
+-ipc_splay_tree_delete(
+- ipc_splay_tree_t splay,
+- mach_port_t name,
+- ipc_tree_entry_t entry)
+-{
+- ipc_tree_entry_t root, saved;
+-
+- ist_lock(splay);
+-
+- root = splay->ist_root;
+- assert(root != ITE_NULL);
+-
+- if (splay->ist_name != name) {
+- ipc_splay_prim_assemble(root,
+- &splay->ist_ltree, splay->ist_ltreep,
+- &splay->ist_rtree, splay->ist_rtreep);
+- ipc_splay_prim_lookup(name, root, &root,
+- &splay->ist_ltree, &splay->ist_ltreep,
+- &splay->ist_rtree, &splay->ist_rtreep);
+- }
+-
+- assert(root->ite_name == name);
+- assert(root == entry);
+-
+- *splay->ist_ltreep = root->ite_lchild;
+- *splay->ist_rtreep = root->ite_rchild;
+- ite_free(root);
+-
+- root = splay->ist_ltree;
+- saved = splay->ist_rtree;
+-
+- if (root == ITE_NULL)
+- root = saved;
+- else if (saved != ITE_NULL) {
+- /*
+- * Find the largest node in the left subtree, and splay it
+- * to the root. Then add the saved right subtree.
+- */
+-
+- ipc_splay_prim_lookup(MACH_PORT_LARGEST, root, &root,
+- &splay->ist_ltree, &splay->ist_ltreep,
+- &splay->ist_rtree, &splay->ist_rtreep);
+- ipc_splay_prim_assemble(root,
+- &splay->ist_ltree, splay->ist_ltreep,
+- &splay->ist_rtree, splay->ist_rtreep);
+-
+- assert(root->ite_rchild == ITE_NULL);
+- root->ite_rchild = saved;
+- }
+-
+- splay->ist_root = root;
+- if (root != ITE_NULL) {
+- splay->ist_name = root->ite_name;
+- splay->ist_ltreep = &splay->ist_ltree;
+- splay->ist_rtreep = &splay->ist_rtree;
+- }
+-
+- ist_unlock(splay);
+-}
+-
+-/*
+- * Routine: ipc_splay_tree_split
+- * Purpose:
+- * Split a splay tree. Puts all entries smaller than "name"
+- * into a new tree, "small".
+- *
+- * Doesn't do locking on "small", because nobody else
+- * should be fiddling with the uninitialized tree.
+- */
+-
+-void
+-ipc_splay_tree_split(
+- ipc_splay_tree_t splay,
+- mach_port_t name,
+- ipc_splay_tree_t small)
+-{
+- ipc_tree_entry_t root;
+-
+- ipc_splay_tree_init(small);
+-
+- ist_lock(splay);
+-
+- root = splay->ist_root;
+- if (root != ITE_NULL) {
+- /* lookup name, to get it (or last traversed) to the top */
+-
+- if (splay->ist_name != name) {
+- ipc_splay_prim_assemble(root,
+- &splay->ist_ltree, splay->ist_ltreep,
+- &splay->ist_rtree, splay->ist_rtreep);
+- ipc_splay_prim_lookup(name, root, &root,
+- &splay->ist_ltree, &splay->ist_ltreep,
+- &splay->ist_rtree, &splay->ist_rtreep);
+- }
+-
+- if (root->ite_name < name) {
+- /* root goes into small */
+-
+- *splay->ist_ltreep = root->ite_lchild;
+- *splay->ist_rtreep = ITE_NULL;
+- root->ite_lchild = splay->ist_ltree;
+- assert(root->ite_rchild == ITE_NULL);
+-
+- small->ist_root = root;
+- small->ist_name = root->ite_name;
+- small->ist_ltreep = &small->ist_ltree;
+- small->ist_rtreep = &small->ist_rtree;
+-
+- /* rtree goes into splay */
+-
+- root = splay->ist_rtree;
+- splay->ist_root = root;
+- if (root != ITE_NULL) {
+- splay->ist_name = root->ite_name;
+- splay->ist_ltreep = &splay->ist_ltree;
+- splay->ist_rtreep = &splay->ist_rtree;
+- }
+- } else {
+- /* root stays in splay */
+-
+- *splay->ist_ltreep = root->ite_lchild;
+- root->ite_lchild = ITE_NULL;
+-
+- splay->ist_root = root;
+- splay->ist_name = name;
+- splay->ist_ltreep = &splay->ist_ltree;
+-
+- /* ltree goes into small */
+-
+- root = splay->ist_ltree;
+- small->ist_root = root;
+- if (root != ITE_NULL) {
+- small->ist_name = root->ite_name;
+- small->ist_ltreep = &small->ist_ltree;
+- small->ist_rtreep = &small->ist_rtree;
+- }
+- }
+- }
+-
+- ist_unlock(splay);
+-}
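Once the lookup has re-rooted the tree at name (or at the last node on its search path), ipc_splay_tree_split amounts to cutting a single child link. A sketch under those assumptions, reusing the hypothetical struct node from the previous sketch and assuming a splay(root, key) helper with exactly that re-rooting postcondition; neither is part of the patch.

    /* Assumed helper (not shown): re-roots the tree at "key" or, if it is
       absent, at the closest key visited while searching for it. */
    static struct node *splay(struct node *root, unsigned long key);

    /*
     * Split "root" around "key": entries with keys strictly smaller than
     * "key" end up in *smallp, the rest is returned.
     */
    static struct node *
    split(struct node *root, unsigned long key, struct node **smallp)
    {
            if (root == NULL) {
                    *smallp = NULL;
                    return NULL;
            }

            root = splay(root, key);

            if (root->key < key) {
                    /* root is the predecessor: it and its left subtree go small */
                    *smallp = root;
                    root = root->rchild;
                    (*smallp)->rchild = NULL;
            } else {
                    /* root stays on the large side; its left subtree goes small */
                    *smallp = root->lchild;
                    root->lchild = NULL;
            }
            return root;
    }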
+-
+-/*
+- * Routine: ipc_splay_tree_join
+- * Purpose:
+- * Joins two splay trees. Merges the entries in "small",
+- * which must all be smaller than the entries in "splay",
+- * into "splay".
+- */
+-
+-void
+-ipc_splay_tree_join(
+- ipc_splay_tree_t splay,
+- ipc_splay_tree_t small)
+-{
+- ipc_tree_entry_t sroot;
+-
+- /* pull entries out of small */
+-
+- ist_lock(small);
+-
+- sroot = small->ist_root;
+- if (sroot != ITE_NULL) {
+- ipc_splay_prim_assemble(sroot,
+- &small->ist_ltree, small->ist_ltreep,
+- &small->ist_rtree, small->ist_rtreep);
+- small->ist_root = ITE_NULL;
+- }
+-
+- ist_unlock(small);
+-
+- /* put entries, if any, into splay */
+-
+- if (sroot != ITE_NULL) {
+- ipc_tree_entry_t root;
+-
+- ist_lock(splay);
+-
+- root = splay->ist_root;
+- if (root == ITE_NULL) {
+- root = sroot;
+- } else {
+- /* get smallest entry in splay tree to top */
+-
+- if (splay->ist_name != MACH_PORT_SMALLEST) {
+- ipc_splay_prim_assemble(root,
+- &splay->ist_ltree, splay->ist_ltreep,
+- &splay->ist_rtree, splay->ist_rtreep);
+- ipc_splay_prim_lookup(MACH_PORT_SMALLEST,
+- root, &root,
+- &splay->ist_ltree, &splay->ist_ltreep,
+- &splay->ist_rtree, &splay->ist_rtreep);
+- }
+-
+- ipc_splay_prim_assemble(root,
+- &splay->ist_ltree, splay->ist_ltreep,
+- &splay->ist_rtree, splay->ist_rtreep);
+-
+- assert(root->ite_lchild == ITE_NULL);
+- assert(sroot->ite_name < root->ite_name);
+- root->ite_lchild = sroot;
+- }
+-
+- splay->ist_root = root;
+- splay->ist_name = root->ite_name;
+- splay->ist_ltreep = &splay->ist_ltree;
+- splay->ist_rtreep = &splay->ist_rtree;
+-
+- ist_unlock(splay);
+- }
+-}
+-
+-/*
+- * Routine: ipc_splay_tree_bounds
+- * Purpose:
+- * Given a name, returns the largest value present
+- * in the tree that is smaller than or equal to the name,
+- * or ~0 if no such value exists. Similarly, returns
+- * the smallest value present that is greater than or
+- * equal to the name, or 0 if no such value exists.
+- *
+- * Hence, if
+- * lower = upper, then lower = name = upper
+- * and name is present in the tree
+- * lower = ~0 and upper = 0,
+- * then the tree is empty
+- * lower = ~0 and upper > 0, then name < upper
+- * and upper is smallest value in tree
+- * lower < ~0 and upper = 0, then lower < name
+- * and lower is largest value in tree
+- * lower < ~0 and upper > 0, then lower < name < upper
+- * and they are tight bounds on name
+- *
+- * (Note MACH_PORT_SMALLEST = 0 and MACH_PORT_LARGEST = ~0.)
+- */
+-
+-void
+-ipc_splay_tree_bounds(
+- ipc_splay_tree_t splay,
+- mach_port_t name,
+- mach_port_t *lowerp,
+- mach_port_t *upperp)
+-{
+- ipc_tree_entry_t root;
+-
+- ist_lock(splay);
+-
+- root = splay->ist_root;
+- if (root == ITE_NULL) {
+- *lowerp = MACH_PORT_LARGEST;
+- *upperp = MACH_PORT_SMALLEST;
+- } else {
+- mach_port_t rname;
+-
+- if (splay->ist_name != name) {
+- ipc_splay_prim_assemble(root,
+- &splay->ist_ltree, splay->ist_ltreep,
+- &splay->ist_rtree, splay->ist_rtreep);
+- ipc_splay_prim_lookup(name, root, &root,
+- &splay->ist_ltree, &splay->ist_ltreep,
+- &splay->ist_rtree, &splay->ist_rtreep);
+- splay->ist_name = name;
+- splay->ist_root = root;
+- }
+-
+- rname = root->ite_name;
+-
+- /*
+- * OK, it's a hack. We convert the ltreep and rtreep
+- * pointers back into real entry pointers,
+- * so we can pick the names out of the entries.
+- */
+-
+- if (rname <= name)
+- *lowerp = rname;
+- else if (splay->ist_ltreep == &splay->ist_ltree)
+- *lowerp = MACH_PORT_LARGEST;
+- else {
+- ipc_tree_entry_t entry;
+-
+- entry = (ipc_tree_entry_t)
+- ((char *)splay->ist_ltreep -
+- ((char *)&root->ite_rchild -
+- (char *)root));
+- *lowerp = entry->ite_name;
+- }
+-
+- if (rname >= name)
+- *upperp = rname;
+- else if (splay->ist_rtreep == &splay->ist_rtree)
+- *upperp = MACH_PORT_SMALLEST;
+- else {
+- ipc_tree_entry_t entry;
+-
+- entry = (ipc_tree_entry_t)
+- ((char *)splay->ist_rtreep -
+- ((char *)&root->ite_lchild -
+- (char *)root));
+- *upperp = entry->ite_name;
+- }
+- }
+-
+- ist_unlock(splay);
+-}
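The case table in the ipc_splay_tree_bounds comment is the usual floor/ceiling computation on a search tree. Here is a sketch of that computation on the hypothetical struct node from the sketches above, using the same ~0 and 0 sentinels as MACH_PORT_LARGEST and MACH_PORT_SMALLEST; it illustrates the contract only, not the splaying the kernel version performs as a side effect.

    /*
     * *lowerp: largest key <= name, or ~0 when no such key exists.
     * *upperp: smallest key >= name, or 0 when no such key exists.
     */
    static void
    bounds(const struct node *root, unsigned long name,
           unsigned long *lowerp, unsigned long *upperp)
    {
            *lowerp = ~0UL;                 /* "no lower bound" sentinel */
            *upperp = 0;                    /* "no upper bound" sentinel */

            while (root != NULL) {
                    if (root->key == name) {
                            *lowerp = *upperp = name;
                            return;
                    }
                    if (root->key < name) {
                            *lowerp = root->key;    /* best floor so far */
                            root = root->rchild;
                    } else {
                            *upperp = root->key;    /* best ceiling so far */
                            root = root->lchild;
                    }
            }
    }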
+-
+-/*
+- * Routine: ipc_splay_traverse_start
+- * Routine: ipc_splay_traverse_next
+- * Routine: ipc_splay_traverse_finish
+- * Purpose:
+- * Perform a symmetric order traversal of a splay tree.
+- * Usage:
+- * for (entry = ipc_splay_traverse_start(splay);
+- * entry != ITE_NULL;
+- * entry = ipc_splay_traverse_next(splay, delete)) {
+- * do something with entry
+- * }
+- * ipc_splay_traverse_finish(splay);
+- *
+- * If "delete" is TRUE, then the current entry
+- * is removed from the tree and deallocated.
+- *
+- * During the traversal, the splay tree is locked.
+- */
+-
+-ipc_tree_entry_t
+-ipc_splay_traverse_start(
+- ipc_splay_tree_t splay)
+-{
+- ipc_tree_entry_t current, parent;
+-
+- ist_lock(splay);
+-
+- current = splay->ist_root;
+- if (current != ITE_NULL) {
+- ipc_splay_prim_assemble(current,
+- &splay->ist_ltree, splay->ist_ltreep,
+- &splay->ist_rtree, splay->ist_rtreep);
+-
+- parent = ITE_NULL;
+-
+- while (current->ite_lchild != ITE_NULL) {
+- ipc_tree_entry_t next;
+-
+- next = current->ite_lchild;
+- current->ite_lchild = parent;
+- parent = current;
+- current = next;
+- }
+-
+- splay->ist_ltree = current;
+- splay->ist_rtree = parent;
+- }
+-
+- return current;
+-}
+-
+-ipc_tree_entry_t
+-ipc_splay_traverse_next(
+- ipc_splay_tree_t splay,
+- boolean_t delete)
+-{
+- ipc_tree_entry_t current, parent;
+-
+- /* pick up where traverse_entry left off */
+-
+- current = splay->ist_ltree;
+- parent = splay->ist_rtree;
+- assert(current != ITE_NULL);
+-
+- if (!delete)
+- goto traverse_right;
+-
+- /* we must delete current and patch the tree */
+-
+- if (current->ite_lchild == ITE_NULL) {
+- if (current->ite_rchild == ITE_NULL) {
+- /* like traverse_back, but with deletion */
+-
+- if (parent == ITE_NULL) {
+- ite_free(current);
+-
+- splay->ist_root = ITE_NULL;
+- return ITE_NULL;
+- }
+-
+- if (current->ite_name < parent->ite_name) {
+- ite_free(current);
+-
+- current = parent;
+- parent = current->ite_lchild;
+- current->ite_lchild = ITE_NULL;
+- goto traverse_entry;
+- } else {
+- ite_free(current);
+-
+- current = parent;
+- parent = current->ite_rchild;
+- current->ite_rchild = ITE_NULL;
+- goto traverse_back;
+- }
+- } else {
+- ipc_tree_entry_t prev;
+-
+- prev = current;
+- current = current->ite_rchild;
+- ite_free(prev);
+- goto traverse_left;
+- }
+- } else {
+- if (current->ite_rchild == ITE_NULL) {
+- ipc_tree_entry_t prev;
+-
+- prev = current;
+- current = current->ite_lchild;
+- ite_free(prev);
+- goto traverse_back;
+- } else {
+- ipc_tree_entry_t prev;
+- ipc_tree_entry_t ltree, rtree;
+- ipc_tree_entry_t *ltreep, *rtreep;
+-
+- /* replace current with largest of left children */
+-
+- prev = current;
+- ipc_splay_prim_lookup(MACH_PORT_LARGEST,
+- current->ite_lchild, &current,
+- &ltree, &ltreep, &rtree, &rtreep);
+- ipc_splay_prim_assemble(current,
+- &ltree, ltreep, &rtree, rtreep);
+-
+- assert(current->ite_rchild == ITE_NULL);
+- current->ite_rchild = prev->ite_rchild;
+- ite_free(prev);
+- goto traverse_right;
+- }
+- }
+- /*NOTREACHED*/
+-
+- /*
+- * A state machine: for each entry, we
+- * 1) traverse left subtree
+- * 2) traverse the entry
+- * 3) traverse right subtree
+- * 4) traverse back to parent
+- */
+-
+- traverse_left:
+- if (current->ite_lchild != ITE_NULL) {
+- ipc_tree_entry_t next;
+-
+- next = current->ite_lchild;
+- current->ite_lchild = parent;
+- parent = current;
+- current = next;
+- goto traverse_left;
+- }
+-
+- traverse_entry:
+- splay->ist_ltree = current;
+- splay->ist_rtree = parent;
+- return current;
+-
+- traverse_right:
+- if (current->ite_rchild != ITE_NULL) {
+- ipc_tree_entry_t next;
+-
+- next = current->ite_rchild;
+- current->ite_rchild = parent;
+- parent = current;
+- current = next;
+- goto traverse_left;
+- }
+-
+- traverse_back:
+- if (parent == ITE_NULL) {
+- splay->ist_root = current;
+- return ITE_NULL;
+- }
+-
+- if (current->ite_name < parent->ite_name) {
+- ipc_tree_entry_t prev;
+-
+- prev = current;
+- current = parent;
+- parent = current->ite_lchild;
+- current->ite_lchild = prev;
+- goto traverse_entry;
+- } else {
+- ipc_tree_entry_t prev;
+-
+- prev = current;
+- current = parent;
+- parent = current->ite_rchild;
+- current->ite_rchild = prev;
+- goto traverse_back;
+- }
+-}
+-
+-void
+-ipc_splay_traverse_finish(
+- ipc_splay_tree_t splay)
+-{
+- ipc_tree_entry_t root;
+-
+- root = splay->ist_root;
+- if (root != ITE_NULL) {
+- splay->ist_name = root->ite_name;
+- splay->ist_ltreep = &splay->ist_ltree;
+- splay->ist_rtreep = &splay->ist_rtree;
+- }
+-
+- ist_unlock(splay);
+-}
+-
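The traversal routines deleted above deliver entries in increasing name order without a stack, by temporarily reversing child links on the way down. Their effect, though not their mechanism, is the plain in-order walk sketched here on the hypothetical struct node from the earlier sketches; the kernel avoids this recursive form because kernel stacks are small and because the traversal may delete entries mid-walk.

    /* Recursive equivalent of ipc_splay_traverse_start/_next/_finish
       (illustration only). */
    static void
    inorder(struct node *n, void (*visit)(struct node *))
    {
            if (n == NULL)
                    return;
            inorder(n->lchild, visit);
            visit(n);
            inorder(n->rchild, visit);
    }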
+diff --git a/ipc/ipc_splay.h b/ipc/ipc_splay.h
+deleted file mode 100644
+index d3316ef..0000000
+--- a/ipc/ipc_splay.h
++++ /dev/null
+@@ -1,114 +0,0 @@
+-/*
+- * Mach Operating System
+- * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+- * All Rights Reserved.
+- *
+- * Permission to use, copy, modify and distribute this software and its
+- * documentation is hereby granted, provided that both the copyright
+- * notice and this permission notice appear in all copies of the
+- * software, derivative works or modified versions, and any portions
+- * thereof, and that both notices appear in supporting documentation.
+- *
+- * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+- * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+- * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+- *
+- * Carnegie Mellon requests users of this software to return to
+- *
+- * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+- * School of Computer Science
+- * Carnegie Mellon University
+- * Pittsburgh PA 15213-3890
+- *
+- * any improvements or extensions that they make and grant Carnegie Mellon
+- * the rights to redistribute these changes.
+- */
+-/*
+- */
+-/*
+- * File: ipc/ipc_splay.h
+- * Author: Rich Draves
+- * Date: 1989
+- *
+- * Declarations of primitive splay tree operations.
+- */
+-
+-#ifndef _IPC_IPC_SPLAY_H_
+-#define _IPC_IPC_SPLAY_H_
+-
+-#include <mach/port.h>
+-#include <kern/assert.h>
+-#include <kern/macro_help.h>
+-#include <ipc/ipc_entry.h>
+-
+-typedef struct ipc_splay_tree {
+- mach_port_t ist_name; /* name used in last lookup */
+- ipc_tree_entry_t ist_root; /* root of middle tree */
+- ipc_tree_entry_t ist_ltree; /* root of left tree */
+- ipc_tree_entry_t *ist_ltreep; /* pointer into left tree */
+- ipc_tree_entry_t ist_rtree; /* root of right tree */
+- ipc_tree_entry_t *ist_rtreep; /* pointer into right tree */
+-} *ipc_splay_tree_t;
+-
+-#define ist_lock(splay) /* no locking */
+-#define ist_unlock(splay) /* no locking */
+-
+-/* Initialize a raw splay tree */
+-extern void ipc_splay_tree_init(
+- ipc_splay_tree_t splay);
+-
+-/* Pick a random entry in a splay tree */
+-extern boolean_t ipc_splay_tree_pick(
+- ipc_splay_tree_t splay,
+- mach_port_t *namep,
+- ipc_tree_entry_t *entryp);
+-
+-/* Find an entry in a splay tree */
+-extern ipc_tree_entry_t ipc_splay_tree_lookup(
+- ipc_splay_tree_t splay,
+- mach_port_t name);
+-
+-/* Insert a new entry into a splay tree */
+-extern void ipc_splay_tree_insert(
+- ipc_splay_tree_t splay,
+- mach_port_t name,
+- ipc_tree_entry_t entry);
+-
+-/* Delete an entry from a splay tree */
+-extern void ipc_splay_tree_delete(
+- ipc_splay_tree_t splay,
+- mach_port_t name,
+- ipc_tree_entry_t entry);
+-
+-/* Split a splay tree */
+-extern void ipc_splay_tree_split(
+- ipc_splay_tree_t splay,
+- mach_port_t name,
+- ipc_splay_tree_t entry);
+-
+-/* Join two splay trees */
+-extern void ipc_splay_tree_join(
+- ipc_splay_tree_t splay,
+- ipc_splay_tree_t small);
+-
+-/* Do a bounded splay tree lookup */
+-extern void ipc_splay_tree_bounds(
+- ipc_splay_tree_t splay,
+- mach_port_t name,
+- mach_port_t *lowerp,
+- mach_port_t *upperp);
+-
+-/* Initialize a symmetric order traversal of a splay tree */
+-extern ipc_tree_entry_t ipc_splay_traverse_start(
+- ipc_splay_tree_t splay);
+-
+-/* Return the next entry in a symmetric order traversal of a splay tree */
+-extern ipc_tree_entry_t ipc_splay_traverse_next(
+- ipc_splay_tree_t splay,
+- boolean_t delete);
+-
+-/* Terminate a symmetric order traversal of a splay tree */
+-extern void ipc_splay_traverse_finish(
+- ipc_splay_tree_t splay);
+-
+-#endif /* _IPC_IPC_SPLAY_H_ */
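With ipc_splay.c and ipc_splay.h gone, single-name lookups in the patched tree go through the space's radix tree instead. The sketch below shows that replacement pattern; it is not part of the patch, and it assumes ipc_entry_lookup() resolves the name through is_map internally, as the surrounding gnumach code does. A real caller would also check space->is_active, as mach_port_space_info does.

    /* Sketch only; assumes <ipc/ipc_space.h> and <ipc/ipc_entry.h>. */
    static mach_port_urefs_t
    name_urefs(ipc_space_t space, mach_port_t name)
    {
            ipc_entry_t entry;
            mach_port_urefs_t urefs = 0;

            is_read_lock(space);
            entry = ipc_entry_lookup(space, name);  /* radix-tree lookup */
            if (entry != IE_NULL)
                    urefs = IE_BITS_UREFS(entry->ie_bits);
            is_read_unlock(space);

            return urefs;
    }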
+diff --git a/ipc/mach_debug.c b/ipc/mach_debug.c
+index eb52e1c..9d01d84 100644
+--- a/ipc/mach_debug.c
++++ b/ipc/mach_debug.c
+@@ -46,7 +46,6 @@
+ #include <vm/vm_kern.h>
+ #include <ipc/ipc_space.h>
+ #include <ipc/ipc_port.h>
+-#include <ipc/ipc_hash.h>
+ #include <ipc/ipc_marequest.h>
+ #include <ipc/ipc_table.h>
+ #include <ipc/ipc_right.h>
+@@ -111,64 +110,7 @@ host_ipc_hash_info(
+ hash_info_bucket_array_t *infop,
+ mach_msg_type_number_t *countp)
+ {
+- vm_offset_t addr;
+- vm_size_t size = 0; /* Suppress gcc warning */
+- hash_info_bucket_t *info;
+- unsigned int potential, actual;
+- kern_return_t kr;
+-
+- if (host == HOST_NULL)
+- return KERN_INVALID_HOST;
+-
+- /* start with in-line data */
+-
+- info = *infop;
+- potential = *countp;
+-
+- for (;;) {
+- actual = ipc_hash_info(info, potential);
+- if (actual <= potential)
+- break;
+-
+- /* allocate more memory */
+-
+- if (info != *infop)
+- kmem_free(ipc_kernel_map, addr, size);
+-
+- size = round_page(actual * sizeof *info);
+- kr = kmem_alloc_pageable(ipc_kernel_map, &addr, size);
+- if (kr != KERN_SUCCESS)
+- return KERN_RESOURCE_SHORTAGE;
+-
+- info = (hash_info_bucket_t *) addr;
+- potential = size/sizeof *info;
+- }
+-
+- if (info == *infop) {
+- /* data fit in-line; nothing to deallocate */
+-
+- *countp = actual;
+- } else if (actual == 0) {
+- kmem_free(ipc_kernel_map, addr, size);
+-
+- *countp = 0;
+- } else {
+- vm_map_copy_t copy;
+- vm_size_t used;
+-
+- used = round_page(actual * sizeof *info);
+-
+- if (used != size)
+- kmem_free(ipc_kernel_map, addr + used, size - used);
+-
+- kr = vm_map_copyin(ipc_kernel_map, addr, used,
+- TRUE, &copy);
+- assert(kr == KERN_SUCCESS);
+-
+- *infop = (hash_info_bucket_t *) copy;
+- *countp = actual;
+- }
+-
++ *countp = 0;
+ return KERN_SUCCESS;
+ }
+
+@@ -278,13 +220,6 @@ mach_port_space_info(
+ unsigned int table_potential, table_actual;
+ vm_offset_t table_addr;
+ vm_size_t table_size = 0; /* Suppress gcc warning */
+- ipc_info_tree_name_t *tree_info;
+- unsigned int tree_potential, tree_actual;
+- vm_offset_t tree_addr;
+- vm_size_t tree_size = 0; /* Suppress gcc warning */
+- ipc_tree_entry_t tentry;
+- ipc_entry_t table;
+- ipc_entry_num_t tsize;
+ mach_port_index_t index;
+ kern_return_t kr;
+
+@@ -295,8 +230,6 @@ mach_port_space_info(
+
+ table_info = *tablep;
+ table_potential = *tableCntp;
+- tree_info = *treep;
+- tree_potential = *treeCntp;
+
+ for (;;) {
+ is_read_lock(space);
+@@ -305,17 +238,12 @@ mach_port_space_info(
+ if (table_info != *tablep)
+ kmem_free(ipc_kernel_map,
+ table_addr, table_size);
+- if (tree_info != *treep)
+- kmem_free(ipc_kernel_map,
+- tree_addr, tree_size);
+ return KERN_INVALID_TASK;
+ }
+
+- table_actual = space->is_table_size;
+- tree_actual = space->is_tree_total;
++ table_actual = space->is_size;
+
+- if ((table_actual <= table_potential) &&
+- (tree_actual <= tree_potential))
++ if (table_actual <= table_potential)
+ break;
+
+ is_read_unlock(space);
+@@ -329,99 +257,37 @@ mach_port_space_info(
+ sizeof *table_info);
+ kr = kmem_alloc(ipc_kernel_map,
+ &table_addr, table_size);
+- if (kr != KERN_SUCCESS) {
+- if (tree_info != *treep)
+- kmem_free(ipc_kernel_map,
+- tree_addr, tree_size);
+-
++ if (kr != KERN_SUCCESS)
+ return KERN_RESOURCE_SHORTAGE;
+- }
+
+ table_info = (ipc_info_name_t *) table_addr;
+ table_potential = table_size/sizeof *table_info;
+ }
+-
+- if (tree_actual > tree_potential) {
+- if (tree_info != *treep)
+- kmem_free(ipc_kernel_map,
+- tree_addr, tree_size);
+-
+- tree_size = round_page(tree_actual *
+- sizeof *tree_info);
+- kr = kmem_alloc(ipc_kernel_map,
+- &tree_addr, tree_size);
+- if (kr != KERN_SUCCESS) {
+- if (table_info != *tablep)
+- kmem_free(ipc_kernel_map,
+- table_addr, table_size);
+-
+- return KERN_RESOURCE_SHORTAGE;
+- }
+-
+- tree_info = (ipc_info_tree_name_t *) tree_addr;
+- tree_potential = tree_size/sizeof *tree_info;
+- }
+ }
+ /* space is read-locked and active; we have enough wired memory */
+
+ infop->iis_genno_mask = MACH_PORT_NGEN(MACH_PORT_DEAD);
+- infop->iis_table_size = space->is_table_size;
+- infop->iis_table_next = space->is_table_next->its_size;
+- infop->iis_tree_size = space->is_tree_total;
+- infop->iis_tree_small = space->is_tree_small;
+- infop->iis_tree_hash = space->is_tree_hash;
++ infop->iis_table_size = 0;
++ infop->iis_table_next = 0;
++ infop->iis_tree_size = 0;
++ infop->iis_tree_small = 0;
++ infop->iis_tree_hash = 0;
+
+- table = space->is_table;
+- tsize = space->is_table_size;
+-
+- for (index = 0; index < tsize; index++) {
++ ipc_entry_t entry;
++ struct rdxtree_iter iter;
++ index = 0;
++ rdxtree_for_each(&space->is_map, &iter, entry) {
+ ipc_info_name_t *iin = &table_info[index];
+- ipc_entry_t entry = &table[index];
+ ipc_entry_bits_t bits = entry->ie_bits;
+
+- iin->iin_name = MACH_PORT_MAKEB(index, bits);
+- iin->iin_collision = (bits & IE_BITS_COLLISION) ? TRUE : FALSE;
+- iin->iin_compat = FALSE;
++ iin->iin_name = MACH_PORT_MAKEB(entry->ie_name, bits);
+ iin->iin_marequest = (bits & IE_BITS_MAREQUEST) ? TRUE : FALSE;
+ iin->iin_type = IE_BITS_TYPE(bits);
+ iin->iin_urefs = IE_BITS_UREFS(bits);
+ iin->iin_object = (vm_offset_t) entry->ie_object;
+- iin->iin_next = entry->ie_next;
+- iin->iin_hash = entry->ie_index;
++ iin->iin_next = entry->ie_request;
++ index += 1;
+ }
+-
+- for (tentry = ipc_splay_traverse_start(&space->is_tree), index = 0;
+- tentry != ITE_NULL;
+- tentry = ipc_splay_traverse_next(&space->is_tree, FALSE)) {
+- ipc_info_tree_name_t *iitn = &tree_info[index++];
+- ipc_info_name_t *iin = &iitn->iitn_name;
+- ipc_entry_t entry = &tentry->ite_entry;
+- ipc_entry_bits_t bits = entry->ie_bits;
+-
+- assert(IE_BITS_TYPE(bits) != MACH_PORT_TYPE_NONE);
+-
+- iin->iin_name = tentry->ite_name;
+- iin->iin_collision = (bits & IE_BITS_COLLISION) ? TRUE : FALSE;
+- iin->iin_compat = FALSE;
+- iin->iin_marequest = (bits & IE_BITS_MAREQUEST) ? TRUE : FALSE;
+- iin->iin_type = IE_BITS_TYPE(bits);
+- iin->iin_urefs = IE_BITS_UREFS(bits);
+- iin->iin_object = (vm_offset_t) entry->ie_object;
+- iin->iin_next = entry->ie_next;
+- iin->iin_hash = entry->ie_index;
+-
+- if (tentry->ite_lchild == ITE_NULL)
+- iitn->iitn_lchild = MACH_PORT_NULL;
+- else
+- iitn->iitn_lchild = tentry->ite_lchild->ite_name;
+-
+- if (tentry->ite_rchild == ITE_NULL)
+- iitn->iitn_rchild = MACH_PORT_NULL;
+- else
+- iitn->iitn_rchild = tentry->ite_rchild->ite_name;
+-
+- }
+- ipc_splay_traverse_finish(&space->is_tree);
+ is_read_unlock(space);
+
+ if (table_info == *tablep) {
+@@ -459,41 +325,8 @@ mach_port_space_info(
+ *tableCntp = table_actual;
+ }
+
+- if (tree_info == *treep) {
+- /* data fit in-line; nothing to deallocate */
+-
+- *treeCntp = tree_actual;
+- } else if (tree_actual == 0) {
+- kmem_free(ipc_kernel_map, tree_addr, tree_size);
+-
+- *treeCntp = 0;
+- } else {
+- vm_size_t size_used, rsize_used;
+- vm_map_copy_t copy;
+-
+- /* kmem_alloc doesn't zero memory */
+-
+- size_used = tree_actual * sizeof *tree_info;
+- rsize_used = round_page(size_used);
+-
+- if (rsize_used != tree_size)
+- kmem_free(ipc_kernel_map,
+- tree_addr + rsize_used,
+- tree_size - rsize_used);
+-
+- if (size_used != rsize_used)
+- memset((void *) (tree_addr + size_used), 0,
+- rsize_used - size_used);
+-
+- kr = vm_map_copyin(ipc_kernel_map, tree_addr, rsize_used,
+- TRUE, &copy);
+-
+- assert(kr == KERN_SUCCESS);
+-
+- *treep = (ipc_info_tree_name_t *) copy;
+- *treeCntp = tree_actual;
+- }
+-
++ /* The splay tree is gone. */
++ *treeCntp = 0;
+ return KERN_SUCCESS;
+ }
+
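The rdxtree_for_each() loops introduced above replace the old fixed-size table scans, and the same pattern works for any per-space statistic. Below is a minimal sketch under the patch's own assumptions (is_map is the space's radix tree, and the space is read-locked and active); the counting function itself is hypothetical, not something the patch adds.

    /* Sketch only: count the receive rights in a space with the iteration
       pattern the patch introduces. */
    static unsigned int
    count_receive_rights(ipc_space_t space)
    {
            ipc_entry_t entry;
            struct rdxtree_iter iter;
            unsigned int count = 0;

            is_read_lock(space);
            rdxtree_for_each(&space->is_map, &iter, entry) {
                    if (entry->ie_bits & MACH_PORT_TYPE_RECEIVE)
                            count++;
            }
            is_read_unlock(space);

            return count;
    }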
+diff --git a/ipc/mach_port.c b/ipc/mach_port.c
+index 4e89527..93a1248 100644
+--- a/ipc/mach_port.c
++++ b/ipc/mach_port.c
+@@ -150,10 +150,6 @@ mach_port_names(
+ mach_port_type_t **typesp,
+ mach_msg_type_number_t *typesCnt)
+ {
+- ipc_tree_entry_t tentry;
+- ipc_entry_t table;
+- ipc_entry_num_t tsize;
+- mach_port_index_t index;
+ ipc_entry_num_t actual; /* this many names */
+ ipc_port_timestamp_t timestamp; /* logical time of this operation */
+ mach_port_t *names;
+@@ -190,7 +186,7 @@ mach_port_names(
+
+ /* upper bound on number of names in the space */
+
+- bound = space->is_table_size + space->is_tree_total;
++ bound = space->is_size;
+ size_needed = round_page(bound * sizeof(mach_port_t));
+
+ if (size_needed <= size)
+@@ -235,33 +231,16 @@ mach_port_names(
+
+ timestamp = ipc_port_timestamp();
+
+- table = space->is_table;
+- tsize = space->is_table_size;
+-
+- for (index = 0; index < tsize; index++) {
+- ipc_entry_t entry = &table[index];
++ ipc_entry_t entry;
++ struct rdxtree_iter iter;
++ rdxtree_for_each(&space->is_map, &iter, entry) {
+ ipc_entry_bits_t bits = entry->ie_bits;
+
+ if (IE_BITS_TYPE(bits) != MACH_PORT_TYPE_NONE) {
+- mach_port_t name = MACH_PORT_MAKEB(index, bits);
+-
+- mach_port_names_helper(timestamp, entry, name,
++ mach_port_names_helper(timestamp, entry, entry->ie_name,
+ names, types, &actual);
+ }
+ }
+-
+- for (tentry = ipc_splay_traverse_start(&space->is_tree);
+- tentry != ITE_NULL;
+- tentry = ipc_splay_traverse_next(&space->is_tree, FALSE)) {
+- ipc_entry_t entry = &tentry->ite_entry;
+- mach_port_t name = tentry->ite_name;
+-
+- assert(IE_BITS_TYPE(tentry->ite_bits) != MACH_PORT_TYPE_NONE);
+-
+- mach_port_names_helper(timestamp, entry, name,
+- names, types, &actual);
+- }
+- ipc_splay_traverse_finish(&space->is_tree);
+ is_read_unlock(space);
+
+ if (actual == 0) {
+@@ -946,10 +925,7 @@ mach_port_get_set_status(
+ size = PAGE_SIZE; /* initial guess */
+
+ for (;;) {
+- ipc_tree_entry_t tentry;
+- ipc_entry_t entry, table;
+- ipc_entry_num_t tsize;
+- mach_port_index_t index;
++ ipc_entry_t entry;
+ mach_port_t *names;
+ ipc_pset_t pset;
+
+@@ -986,11 +962,9 @@ mach_port_get_set_status(
+ maxnames = size / sizeof(mach_port_t);
+ actual = 0;
+
+- table = space->is_table;
+- tsize = space->is_table_size;
+-
+- for (index = 0; index < tsize; index++) {
+- ipc_entry_t ientry = &table[index];
++ ipc_entry_t ientry;
++ struct rdxtree_iter iter;
++ rdxtree_for_each(&space->is_map, &iter, ientry) {
+ ipc_entry_bits_t bits = ientry->ie_bits;
+
+ if (bits & MACH_PORT_TYPE_RECEIVE) {
+@@ -1002,22 +976,6 @@ mach_port_get_set_status(
+ }
+ }
+
+- for (tentry = ipc_splay_traverse_start(&space->is_tree);
+- tentry != ITE_NULL;
+- tentry = ipc_splay_traverse_next(&space->is_tree,FALSE)) {
+- ipc_entry_bits_t bits = tentry->ite_bits;
+-
+- assert(IE_BITS_TYPE(bits) != MACH_PORT_TYPE_NONE);
+-
+- if (bits & MACH_PORT_TYPE_RECEIVE) {
+- ipc_port_t port =
+- (ipc_port_t) tentry->ite_object;
+-
+- mach_port_gst_helper(pset, port, maxnames,
+- names, &actual);
+- }
+- }
+- ipc_splay_traverse_finish(&space->is_tree);
+ is_read_unlock(space);
+
+ if (actual <= maxnames)
+--
+2.1.4
+