author    Justus Winter <4winter@informatik.uni-hamburg.de>  2015-05-19 16:57:33 +0200
committer Justus Winter <4winter@informatik.uni-hamburg.de>  2015-05-19 16:57:33 +0200
commit    617744c5e30834b4ae39e790b95f9e72e03983ac
tree      46579e521b515ca617af9ac52f8fb0ee1fc4fab0
parent    835adac266451562a4b630155be3a16dc77bdbe3
add patch series
Diffstat (limited to 'debian')
-rw-r--r--  debian/patches/0001-kern-import-macros.h-from-x15.patch                     |  622
-rw-r--r--  debian/patches/0002-kern-add-radix-tree-library.patch                       | 1197
-rw-r--r--  debian/patches/0003-ipc-undo-manual-inlining-of-ipc_entry_X-functions.patch |  535
-rw-r--r--  debian/patches/0004-ipc-replace-reverse-hash-table-with-a-radix-tree.patch  |  440
-rw-r--r--  debian/patches/0005-ipc-replace-the-IPC-table-with-a-radix-tree.patch       | 3803
-rw-r--r--  debian/patches/0006-ipc-inline-key-ipc-entry-lookup-functions.patch         |  316
-rw-r--r--  debian/patches/series                                                       |    6
7 files changed, 6919 insertions, 0 deletions
diff --git a/debian/patches/0001-kern-import-macros.h-from-x15.patch b/debian/patches/0001-kern-import-macros.h-from-x15.patch
new file mode 100644
index 0000000..2373e20
--- /dev/null
+++ b/debian/patches/0001-kern-import-macros.h-from-x15.patch
@@ -0,0 +1,622 @@
+From b3c5e41bc05bc622c637d1da75a3c63091e4e789 Mon Sep 17 00:00:00 2001
+From: Justus Winter <4winter@informatik.uni-hamburg.de>
+Date: Tue, 31 Mar 2015 12:57:05 +0200
+Subject: [PATCH gnumach 1/6] kern: import `macros.h' from x15
+
+Import the macro definitions from the x15 kernel project, and replace
+all similar definitions littered all over the place with them.
+
+Importing this file will make importing code from the x15 kernel
+easier. We are already using the red-black tree implementation and
+the slab allocator from it, and we will import even more code in the
+near future.
+
+* kern/list.h: Do not define `structof', include `macros.h' instead.
+* kern/rbtree.h: Likewise.
+* kern/slab.c: Do not define `ARRAY_SIZE', include `macros.h' instead.
+* i386/grub/misc.h: Likewise.
+* i386/i386/xen.h: Do not define `barrier', include `macros.h' instead.
+* kern/macro_help.h: Delete file. Replaced by `macros.h'.
+* kern/macros.h: New file.
+* Makefrag.am (libkernel_a_SOURCES): Add new file, remove old file.
+* device/dev_master.h: Adapt accordingly.
+* device/io_req.h: Likewise.
+* device/net_io.h: Likewise.
+* i386/intel/read_fault.c: Likewise.
+* ipc/ipc_kmsg.h: Likewise.
+* ipc/ipc_mqueue.h: Likewise.
+* ipc/ipc_object.h: Likewise.
+* ipc/ipc_port.h: Likewise.
+* ipc/ipc_space.h: Likewise.
+* ipc/ipc_splay.c: Likewise.
+* ipc/ipc_splay.h: Likewise.
+* kern/assert.h: Likewise.
+* kern/ast.h: Likewise.
+* kern/pc_sample.h: Likewise.
+* kern/refcount.h: Likewise.
+* kern/sched.h: Likewise.
+* kern/sched_prim.c: Likewise.
+* kern/timer.c: Likewise.
+* kern/timer.h: Likewise.
+* vm/vm_fault.c: Likewise.
+* vm/vm_map.h: Likewise.
+* vm/vm_object.h: Likewise.
+* vm/vm_page.h: Likewise.
+---
+ Makefrag.am | 2 +-
+ device/dev_master.h | 2 +-
+ device/io_req.h | 2 +-
+ device/net_io.h | 2 +-
+ i386/grub/misc.h | 2 +-
+ i386/i386/xen.h | 2 +-
+ i386/intel/read_fault.c | 2 +-
+ ipc/ipc_kmsg.h | 2 +-
+ ipc/ipc_mqueue.h | 2 +-
+ ipc/ipc_object.h | 2 +-
+ ipc/ipc_port.h | 2 +-
+ ipc/ipc_space.h | 2 +-
+ ipc/ipc_splay.c | 2 +-
+ ipc/ipc_splay.h | 2 +-
+ kern/assert.h | 2 +-
+ kern/ast.h | 2 +-
+ kern/list.h | 4 +--
+ kern/macro_help.h | 50 ----------------------------------
+ kern/macros.h | 72 +++++++++++++++++++++++++++++++++++++++++++++++++
+ kern/pc_sample.h | 2 +-
+ kern/rbtree.h | 5 +---
+ kern/refcount.h | 2 +-
+ kern/sched.h | 2 +-
+ kern/sched_prim.c | 2 +-
+ kern/slab.c | 2 +-
+ kern/timer.c | 2 +-
+ kern/timer.h | 2 +-
+ vm/vm_fault.c | 2 +-
+ vm/vm_map.h | 2 +-
+ vm/vm_object.h | 2 +-
+ vm/vm_page.h | 2 +-
+ 31 files changed, 101 insertions(+), 84 deletions(-)
+ delete mode 100644 kern/macro_help.h
+ create mode 100644 kern/macros.h
+
+diff --git a/Makefrag.am b/Makefrag.am
+index 9166143..9222ad2 100644
+--- a/Makefrag.am
++++ b/Makefrag.am
+@@ -171,7 +171,7 @@ libkernel_a_SOURCES += \
+ kern/mach_factor.h \
+ kern/machine.c \
+ kern/machine.h \
+- kern/macro_help.h \
++ kern/macros.h \
+ kern/pc_sample.c \
+ kern/pc_sample.h \
+ kern/printf.c \
+diff --git a/device/dev_master.h b/device/dev_master.h
+index 6ad1152..70d4c63 100644
+--- a/device/dev_master.h
++++ b/device/dev_master.h
+@@ -37,7 +37,7 @@
+
+ #if NCPUS > 1
+
+-#include <kern/macro_help.h>
++#include <kern/macros.h>
+ #include <kern/cpu_number.h>
+ #include <kern/sched_prim.h>
+ #include <kern/thread.h>
+diff --git a/device/io_req.h b/device/io_req.h
+index 65e23e6..1ad4680 100644
+--- a/device/io_req.h
++++ b/device/io_req.h
+@@ -42,7 +42,7 @@
+ #include <device/device_types.h>
+ #include <device/dev_hdr.h>
+
+-#include <kern/macro_help.h>
++#include <kern/macros.h>
+
+ /*
+ * IO request element, queued on device for delayed replies.
+diff --git a/device/net_io.h b/device/net_io.h
+index f6de854..d4e24d4 100644
+--- a/device/net_io.h
++++ b/device/net_io.h
+@@ -38,7 +38,7 @@
+ #include <mach/machine/vm_types.h>
+ #include <ipc/ipc_kmsg.h>
+
+-#include <kern/macro_help.h>
++#include <kern/macros.h>
+ #include <kern/lock.h>
+ #include <kern/kalloc.h>
+
+diff --git a/i386/grub/misc.h b/i386/grub/misc.h
+index c6cd456..b71140a 100644
+--- a/i386/grub/misc.h
++++ b/i386/grub/misc.h
+@@ -21,6 +21,7 @@
+ #define GRUB_MISC_HEADER 1
+
+ #include <stdarg.h>
++#include <kern/macros.h>
+ #include <grub/types.h>
+ #include <grub/symbol.h>
+ #include <grub/err.h>
+@@ -32,7 +33,6 @@
+ #define ALIGN_UP_OVERHEAD(addr, align) ((-(addr)) & ((typeof (addr)) (align) - 1))
+ #define ALIGN_DOWN(addr, align) \
+ ((addr) & ~((typeof (addr)) align - 1))
+-#define ARRAY_SIZE(array) (sizeof (array) / sizeof (array[0]))
+ #define COMPILE_TIME_ASSERT(cond) switch (0) { case 1: case !(cond): ; }
+
+ #define grub_dprintf(condition, ...) grub_real_dprintf(GRUB_FILE, __LINE__, condition, __VA_ARGS__)
+diff --git a/i386/i386/xen.h b/i386/i386/xen.h
+index 638d671..c681187 100644
+--- a/i386/i386/xen.h
++++ b/i386/i386/xen.h
+@@ -21,6 +21,7 @@
+
+ #ifdef MACH_XEN
+ #ifndef __ASSEMBLER__
++#include <kern/macros.h>
+ #include <kern/printf.h>
+ #include <mach/machine/vm_types.h>
+ #include <mach/vm_param.h>
+@@ -32,7 +33,6 @@
+ #include <xen/public/xen.h>
+
+ /* TODO: this should be moved in appropriate non-Xen place. */
+-#define barrier() __asm__ __volatile__ ("": : :"memory")
+ #define mb() __asm__ __volatile__("lock; addl $0,0(%%esp)":::"memory")
+ #define rmb() mb()
+ #define wmb() mb()
+diff --git a/i386/intel/read_fault.c b/i386/intel/read_fault.c
+index 29f4439..4b1edce 100644
+--- a/i386/intel/read_fault.c
++++ b/i386/intel/read_fault.c
+@@ -31,7 +31,7 @@
+ #include <vm/vm_page.h>
+ #include <vm/pmap.h>
+
+-#include <kern/macro_help.h>
++#include <kern/macros.h>
+
+ /*
+ * Expansion of vm_fault for read fault in kernel mode.
+diff --git a/ipc/ipc_kmsg.h b/ipc/ipc_kmsg.h
+index 620785b..393c039 100644
+--- a/ipc/ipc_kmsg.h
++++ b/ipc/ipc_kmsg.h
+@@ -38,7 +38,7 @@
+ #include <mach/message.h>
+ #include <kern/assert.h>
+ #include <kern/cpu_number.h>
+-#include <kern/macro_help.h>
++#include <kern/macros.h>
+ #include <kern/kalloc.h>
+ #include <ipc/ipc_marequest.h>
+ #include <ipc/ipc_object.h>
+diff --git a/ipc/ipc_mqueue.h b/ipc/ipc_mqueue.h
+index f8a2f1e..2af5e02 100644
+--- a/ipc/ipc_mqueue.h
++++ b/ipc/ipc_mqueue.h
+@@ -37,7 +37,7 @@
+ #include <mach/message.h>
+ #include <kern/assert.h>
+ #include <kern/lock.h>
+-#include <kern/macro_help.h>
++#include <kern/macros.h>
+ #include <ipc/ipc_kmsg_queue.h>
+ #include <ipc/ipc_kmsg.h>
+ #include <ipc/ipc_thread.h>
+diff --git a/ipc/ipc_object.h b/ipc/ipc_object.h
+index b83bb5a..be5bea7 100644
+--- a/ipc/ipc_object.h
++++ b/ipc/ipc_object.h
+@@ -38,7 +38,7 @@
+ #include <mach/message.h>
+ #include <ipc/ipc_types.h>
+ #include <kern/lock.h>
+-#include <kern/macro_help.h>
++#include <kern/macros.h>
+ #include <kern/slab.h>
+
+ typedef unsigned int ipc_object_refs_t;
+diff --git a/ipc/ipc_port.h b/ipc/ipc_port.h
+index 6914c71..ade6967 100644
+--- a/ipc/ipc_port.h
++++ b/ipc/ipc_port.h
+@@ -43,7 +43,7 @@
+ #include <mach/kern_return.h>
+ #include <mach/port.h>
+ #include <kern/lock.h>
+-#include <kern/macro_help.h>
++#include <kern/macros.h>
+ #include <kern/ipc_kobject.h>
+ #include <ipc/ipc_mqueue.h>
+ #include <ipc/ipc_table.h>
+diff --git a/ipc/ipc_space.h b/ipc/ipc_space.h
+index c4683d2..3bd2f4d 100644
+--- a/ipc/ipc_space.h
++++ b/ipc/ipc_space.h
+@@ -42,7 +42,7 @@
+ #include <mach/boolean.h>
+ #include <mach/kern_return.h>
+ #include <mach/mach_types.h>
+-#include <kern/macro_help.h>
++#include <kern/macros.h>
+ #include <kern/lock.h>
+ #include <kern/slab.h>
+ #include <ipc/ipc_splay.h>
+diff --git a/ipc/ipc_splay.c b/ipc/ipc_splay.c
+index 6fb5bcb..062a69f 100644
+--- a/ipc/ipc_splay.c
++++ b/ipc/ipc_splay.c
+@@ -35,7 +35,7 @@
+
+ #include <mach/port.h>
+ #include <kern/assert.h>
+-#include <kern/macro_help.h>
++#include <kern/macros.h>
+ #include <ipc/ipc_entry.h>
+ #include <ipc/ipc_splay.h>
+
+diff --git a/ipc/ipc_splay.h b/ipc/ipc_splay.h
+index d3316ef..42e5a80 100644
+--- a/ipc/ipc_splay.h
++++ b/ipc/ipc_splay.h
+@@ -38,7 +38,7 @@
+
+ #include <mach/port.h>
+ #include <kern/assert.h>
+-#include <kern/macro_help.h>
++#include <kern/macros.h>
+ #include <ipc/ipc_entry.h>
+
+ typedef struct ipc_splay_tree {
+diff --git a/kern/assert.h b/kern/assert.h
+index bd2a8be..7b66d1b 100644
+--- a/kern/assert.h
++++ b/kern/assert.h
+@@ -29,7 +29,7 @@
+
+ /* assert.h 4.2 85/01/21 */
+
+-#include <kern/macro_help.h>
++#include <kern/macros.h>
+
+ #ifndef NDEBUG
+ #define MACH_ASSERT 1
+diff --git a/kern/ast.h b/kern/ast.h
+index 4c28b1e..7d472be 100644
+--- a/kern/ast.h
++++ b/kern/ast.h
+@@ -41,7 +41,7 @@
+ */
+
+ #include "cpu_number.h"
+-#include <kern/macro_help.h>
++#include <kern/macros.h>
+ #include <machine/ast.h>
+
+ /*
+diff --git a/kern/list.h b/kern/list.h
+index ad782a8..be92762 100644
+--- a/kern/list.h
++++ b/kern/list.h
+@@ -31,9 +31,7 @@
+
+ #include <stddef.h>
+ #include <sys/types.h>
+-
+-#define structof(ptr, type, member) \
+- ((type *)((char *)ptr - offsetof(type, member)))
++#include <kern/macros.h>
+
+ /*
+ * Structure used as both head and node.
+diff --git a/kern/macro_help.h b/kern/macro_help.h
+deleted file mode 100644
+index 7ce171f..0000000
+--- a/kern/macro_help.h
++++ /dev/null
+@@ -1,50 +0,0 @@
+-/*
+- * Mach Operating System
+- * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
+- * All Rights Reserved.
+- *
+- * Permission to use, copy, modify and distribute this software and its
+- * documentation is hereby granted, provided that both the copyright
+- * notice and this permission notice appear in all copies of the
+- * software, derivative works or modified versions, and any portions
+- * thereof, and that both notices appear in supporting documentation.
+- *
+- * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+- * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+- * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+- *
+- * Carnegie Mellon requests users of this software to return to
+- *
+- * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+- * School of Computer Science
+- * Carnegie Mellon University
+- * Pittsburgh PA 15213-3890
+- *
+- * any improvements or extensions that they make and grant Carnegie Mellon
+- * the rights to redistribute these changes.
+- */
+-/*
+- * File: kern/macro_help.h
+- *
+- * Provide help in making lint-free macro routines
+- *
+- */
+-
+-#ifndef _KERN_MACRO_HELP_H_
+-#define _KERN_MACRO_HELP_H_
+-
+-#if !defined(MACRO_BEGIN)
+-
+-#include <mach/boolean.h>
+-
+-#define NEVER FALSE
+-#define ALWAYS TRUE
+-
+-#define MACRO_BEGIN ({
+-#define MACRO_END })
+-
+-#define MACRO_RETURN if (ALWAYS) return
+-
+-#endif /* !MACRO_BEGIN */
+-
+-#endif /* _KERN_MACRO_HELP_H_ */
+diff --git a/kern/macros.h b/kern/macros.h
+new file mode 100644
+index 0000000..fb8dc5e
+--- /dev/null
++++ b/kern/macros.h
+@@ -0,0 +1,72 @@
++/*
++ * Copyright (c) 2009, 2010, 2013 Richard Braun.
++ *
++ * This program is free software: you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation, either version 3 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program. If not, see <http://www.gnu.org/licenses/>.
++ *
++ *
++ * Helper macros.
++ */
++
++#ifndef _KERN_MACROS_H
++#define _KERN_MACROS_H
++
++#define MACRO_BEGIN ({
++#define MACRO_END })
++#define MACRO_RETURN if (1) return
++
++#define __QUOTE(x) #x
++#define QUOTE(x) __QUOTE(x)
++
++#ifdef __ASSEMBLER__
++#define DECL_CONST(x, s) x
++#else /* __ASSEMBLER__ */
++#define __DECL_CONST(x, s) x##s
++#define DECL_CONST(x, s) __DECL_CONST(x, s)
++#endif /* __ASSEMBLER__ */
++
++#define STRLEN(x) (sizeof(x) - 1)
++#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
++
++#define MIN(a, b) ((a) < (b) ? (a) : (b))
++#define MAX(a, b) ((a) > (b) ? (a) : (b))
++
++#define DIV_CEIL(n, d) (((n) + (d) - 1) / (d))
++
++#define P2ALIGNED(x, a) (((x) & ((a) - 1)) == 0)
++#define ISP2(x) P2ALIGNED(x, x)
++#define P2ALIGN(x, a) ((x) & -(a))
++#define P2ROUND(x, a) (-(-(x) & -(a)))
++#define P2END(x, a) (-(~(x) & -(a)))
++
++#define structof(ptr, type, member) \
++ ((type *)((char *)(ptr) - offsetof(type, member)))
++
++#define alignof(x) __alignof__(x)
++
++#define likely(expr) __builtin_expect(!!(expr), 1)
++#define unlikely(expr) __builtin_expect(!!(expr), 0)
++
++#define barrier() asm volatile("" : : : "memory")
++
++#define __noreturn __attribute__((noreturn))
++#define __aligned(x) __attribute__((aligned(x)))
++#define __always_inline inline __attribute__((always_inline))
++#define __section(x) __attribute__((section(x)))
++#define __packed __attribute__((packed))
++#define __alias(x) __attribute__((alias(x)))
++
++#define __format_printf(fmt, args) \
++ __attribute__((format(printf, fmt, args)))
++
++#endif /* _KERN_MACROS_H */
+diff --git a/kern/pc_sample.h b/kern/pc_sample.h
+index 3c64068..4832cb9 100644
+--- a/kern/pc_sample.h
++++ b/kern/pc_sample.h
+@@ -49,7 +49,7 @@
+ #include <mach/pc_sample.h>
+ #include <mach/machine/vm_types.h>
+ #include <kern/kern_types.h>
+-#include <kern/macro_help.h>
++#include <kern/macros.h>
+
+ /*
+ * Control structure for sampling, included in
+diff --git a/kern/rbtree.h b/kern/rbtree.h
+index 189a7fd..16ef273 100644
+--- a/kern/rbtree.h
++++ b/kern/rbtree.h
+@@ -31,12 +31,9 @@
+
+ #include <stddef.h>
+ #include <kern/assert.h>
+-#include <kern/macro_help.h>
++#include <kern/macros.h>
+ #include <sys/types.h>
+
+-#define structof(ptr, type, member) \
+- ((type *)((char *)ptr - offsetof(type, member)))
+-
+ /*
+ * Indexes of the left and right nodes in the children array of a node.
+ */
+diff --git a/kern/refcount.h b/kern/refcount.h
+index 74204d6..f32feb8 100644
+--- a/kern/refcount.h
++++ b/kern/refcount.h
+@@ -27,7 +27,7 @@
+ #ifndef _KERN_REFCOUNT_H_
+ #define _KERN_REFCOUNT_H_
+
+-#include <kern/macro_help.h>
++#include <kern/macros.h>
+
+ /* Unless the above include file specified otherwise,
+ use the system-independent (unoptimized) atomic reference counter. */
+diff --git a/kern/sched.h b/kern/sched.h
+index ea601c5..f82f9f5 100644
+--- a/kern/sched.h
++++ b/kern/sched.h
+@@ -38,7 +38,7 @@
+ #include <kern/queue.h>
+ #include <kern/lock.h>
+ #include <kern/kern_types.h>
+-#include <kern/macro_help.h>
++#include <kern/macros.h>
+
+ #if MACH_FIXPRI
+ #include <mach/policy.h>
+diff --git a/kern/sched_prim.c b/kern/sched_prim.c
+index d7792ae..e8f260e 100644
+--- a/kern/sched_prim.c
++++ b/kern/sched_prim.c
+@@ -44,7 +44,7 @@
+ #include <kern/lock.h>
+ #include <kern/mach_clock.h>
+ #include <kern/mach_factor.h>
+-#include <kern/macro_help.h>
++#include <kern/macros.h>
+ #include <kern/processor.h>
+ #include <kern/queue.h>
+ #include <kern/sched.h>
+diff --git a/kern/slab.c b/kern/slab.c
+index 19ebfed..60378b5 100644
+--- a/kern/slab.c
++++ b/kern/slab.c
+@@ -79,6 +79,7 @@
+ #include <string.h>
+ #include <kern/assert.h>
+ #include <kern/mach_clock.h>
++#include <kern/macros.h>
+ #include <kern/printf.h>
+ #include <kern/slab.h>
+ #include <kern/kalloc.h>
+@@ -96,7 +97,6 @@
+ /*
+ * Utility macros.
+ */
+-#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
+ #define P2ALIGNED(x, a) (((x) & ((a) - 1)) == 0)
+ #define ISP2(x) P2ALIGNED(x, x)
+ #define P2ALIGN(x, a) ((x) & -(a))
+diff --git a/kern/timer.c b/kern/timer.c
+index 6d6517e..79ada27 100644
+--- a/kern/timer.c
++++ b/kern/timer.c
+@@ -33,7 +33,7 @@
+ #include <kern/cpu_number.h>
+
+ #include <kern/assert.h>
+-#include <kern/macro_help.h>
++#include <kern/macros.h>
+
+
+
+diff --git a/kern/timer.h b/kern/timer.h
+index 57f017a..2f473cf 100644
+--- a/kern/timer.h
++++ b/kern/timer.h
+@@ -27,7 +27,7 @@
+ #ifndef _KERN_TIMER_H_
+ #define _KERN_TIMER_H_
+
+-#include <kern/macro_help.h>
++#include <kern/macros.h>
+
+ #if STAT_TIME
+ /*
+diff --git a/vm/vm_fault.c b/vm/vm_fault.c
+index 686156c..0fa4d6a 100644
+--- a/vm/vm_fault.c
++++ b/vm/vm_fault.c
+@@ -51,7 +51,7 @@
+ #include <mach/memory_object.h>
+ #include <vm/memory_object_user.user.h>
+ /* For memory_object_data_{request,unlock} */
+-#include <kern/macro_help.h>
++#include <kern/macros.h>
+ #include <kern/slab.h>
+
+ #if MACH_PCSAMPLE
+diff --git a/vm/vm_map.h b/vm/vm_map.h
+index b8103eb..fc7730a 100644
+--- a/vm/vm_map.h
++++ b/vm/vm_map.h
+@@ -52,7 +52,7 @@
+ #include <vm/vm_types.h>
+ #include <kern/lock.h>
+ #include <kern/rbtree.h>
+-#include <kern/macro_help.h>
++#include <kern/macros.h>
+
+ /* TODO: make it dynamic */
+ #define KENTRY_DATA_SIZE (256*PAGE_SIZE)
+diff --git a/vm/vm_object.h b/vm/vm_object.h
+index 5c42f56..3bfc67a 100644
+--- a/vm/vm_object.h
++++ b/vm/vm_object.h
+@@ -45,7 +45,7 @@
+ #include <kern/lock.h>
+ #include <kern/assert.h>
+ #include <kern/debug.h>
+-#include <kern/macro_help.h>
++#include <kern/macros.h>
+ #include <vm/pmap.h>
+ #include <ipc/ipc_types.h>
+
+diff --git a/vm/vm_page.h b/vm/vm_page.h
+index 4fe1b41..e6a8c49 100644
+--- a/vm/vm_page.h
++++ b/vm/vm_page.h
+@@ -42,7 +42,7 @@
+ #include <kern/queue.h>
+ #include <kern/lock.h>
+
+-#include <kern/macro_help.h>
++#include <kern/macros.h>
+ #include <kern/sched_prim.h> /* definitions of wait/wakeup */
+
+ #if MACH_VM_DEBUG
+--
+2.1.4
+
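An aside on patch 0001: the imported helpers are easiest to see in action outside the kernel. The sketch below is an illustration only, not part of the patch; it redefines three of the kern/macros.h helpers locally (structof, ARRAY_SIZE, MACRO_BEGIN/MACRO_END) so it compiles as a standalone GNU C program (gcc or clang, since MACRO_BEGIN relies on the statement-expression extension).

#include <stddef.h>
#include <stdio.h>

/* Redefined locally for this sketch; in the kernel these come from
 * kern/macros.h (patch 0001). */
#define MACRO_BEGIN ({
#define MACRO_END   })
#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
/* Recover a pointer to the enclosing structure from a pointer to one
 * of its members (the container_of idiom). */
#define structof(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

struct node { int key; struct node *next; };
struct item { char name[8]; struct node link; };

int main(void)
{
    struct item it = { "sample", { 42, NULL } };
    struct node *n = &it.link;

    /* Walk back from the embedded node to its owning item. */
    struct item *owner = structof(n, struct item, link);
    printf("owner->name = %s\n", owner->name);

    int table[] = { 1, 2, 3, 4 };
    printf("ARRAY_SIZE(table) = %zu\n", ARRAY_SIZE(table));

    /* MACRO_BEGIN/MACRO_END form a GCC statement expression, so a
     * multi-statement macro body behaves like a single expression. */
    int v = MACRO_BEGIN int t = n->key; t * 2; MACRO_END;
    printf("v = %d\n", v);
    return 0;
}

structof is the same idiom Linux calls container_of; centralizing it is what lets kern/list.h and kern/rbtree.h drop their duplicate definitions and simply include macros.h.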
diff --git a/debian/patches/0002-kern-add-radix-tree-library.patch b/debian/patches/0002-kern-add-radix-tree-library.patch
new file mode 100644
index 0000000..15cc404
--- /dev/null
+++ b/debian/patches/0002-kern-add-radix-tree-library.patch
@@ -0,0 +1,1197 @@
+From cc7e02923b9688cb1e8fff58774e5f55a98c45e2 Mon Sep 17 00:00:00 2001
+From: Justus Winter <4winter@informatik.uni-hamburg.de>
+Date: Mon, 16 Mar 2015 11:37:06 +0100
+Subject: [PATCH gnumach 2/6] kern: add radix tree library
+
+Import a radix tree library from Richard Braun's librbraun.
+
+* Makefile.am (clib_routines): Steal `__ffsdi2'.
+* Makefrag.am (libkernel_a_SOURCES): Add new files.
+* kern/rdxtree.c: New file.
+* kern/rdxtree.h: Likewise.
+* kern/rdxtree_i.h: Likewise.
+* kern/startup.c (setup_main): Initialize radix tree library.
+---
+ Makefile.am | 1 +
+ Makefrag.am | 3 +
+ kern/rdxtree.c | 830 +++++++++++++++++++++++++++++++++++++++++++++++++++++++
+ kern/rdxtree.h | 209 ++++++++++++++
+ kern/rdxtree_i.h | 66 +++++
+ kern/startup.c | 2 +
+ 6 files changed, 1111 insertions(+)
+ create mode 100644 kern/rdxtree.c
+ create mode 100644 kern/rdxtree.h
+ create mode 100644 kern/rdxtree_i.h
+
+diff --git a/Makefile.am b/Makefile.am
+index 37dee76..eb940cb 100644
+--- a/Makefile.am
++++ b/Makefile.am
+@@ -165,6 +165,7 @@ clib_routines := memcmp memcpy memmove \
+ htonl htons ntohl ntohs \
+ udivdi3 __udivdi3 \
+ __rel_iplt_start __rel_iplt_end \
++ __ffsdi2 \
+ _START _start etext _edata end _end # actually ld magic, not libc.
+ gnumach-undef: gnumach.$(OBJEXT)
+ $(NM_V) $(NM) -u $< | sed 's/ *U *//' | sort -u > $@
+diff --git a/Makefrag.am b/Makefrag.am
+index 9222ad2..2eb835e 100644
+--- a/Makefrag.am
++++ b/Makefrag.am
+@@ -186,6 +186,9 @@ libkernel_a_SOURCES += \
+ kern/rbtree.c \
+ kern/rbtree.h \
+ kern/rbtree_i.h \
++ kern/rdxtree.c \
++ kern/rdxtree.h \
++ kern/rdxtree_i.h \
+ kern/refcount.h \
+ kern/slab.c \
+ kern/slab.h \
+diff --git a/kern/rdxtree.c b/kern/rdxtree.c
+new file mode 100644
+index 0000000..78868b1
+--- /dev/null
++++ b/kern/rdxtree.c
+@@ -0,0 +1,830 @@
++/*
++ * Copyright (c) 2011-2015 Richard Braun.
++ * All rights reserved.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions
++ * are met:
++ * 1. Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * 2. Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
++ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
++ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
++ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
++ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
++ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ *
++ *
++ * Upstream site with license notes:
++ * http://git.sceen.net/rbraun/librbraun.git/
++ */
++
++#include <kern/assert.h>
++#include <kern/slab.h>
++#include <mach/kern_return.h>
++#include <stddef.h>
++#include <string.h>
++
++#include "macros.h"
++#include "rdxtree.h"
++#include "rdxtree_i.h"
++
++/* XXX */
++#define CHAR_BIT 8U
++#define ERR_SUCCESS KERN_SUCCESS
++#define ERR_BUSY KERN_INVALID_ARGUMENT
++#define ERR_NOMEM KERN_RESOURCE_SHORTAGE
++
++/*
++ * Mask applied on an entry to obtain its address.
++ */
++#define RDXTREE_ENTRY_ADDR_MASK (~0x3UL)
++
++/*
++ * Global properties used to shape radix trees.
++ */
++#define RDXTREE_RADIX 6
++#define RDXTREE_RADIX_SIZE (1UL << RDXTREE_RADIX)
++#define RDXTREE_RADIX_MASK (RDXTREE_RADIX_SIZE - 1)
++
++#if RDXTREE_RADIX < 6
++typedef unsigned long rdxtree_bm_t;
++#define rdxtree_ffs(x) __builtin_ffsl(x)
++#elif RDXTREE_RADIX == 6 /* RDXTREE_RADIX < 6 */
++typedef unsigned long long rdxtree_bm_t;
++#define rdxtree_ffs(x) __builtin_ffsll(x)
++#else /* RDXTREE_RADIX < 6 */
++#error "radix too high"
++#endif /* RDXTREE_RADIX < 6 */
++
++/*
++ * Allocation bitmap size in bits.
++ */
++#define RDXTREE_BM_SIZE (sizeof(rdxtree_bm_t) * CHAR_BIT)
++
++/*
++ * Empty/full allocation bitmap words.
++ */
++#define RDXTREE_BM_EMPTY ((rdxtree_bm_t)0)
++#define RDXTREE_BM_FULL \
++ ((~(rdxtree_bm_t)0) >> (RDXTREE_BM_SIZE - RDXTREE_RADIX_SIZE))
++
++/*
++ * These macros can be replaced by actual functions in an environment
++ * that provides lockless synchronization such as RCU.
++ */
++#define llsync_assign_ptr(ptr, value) ((ptr) = (value))
++#define llsync_read_ptr(ptr) (ptr)
++
++/*
++ * Radix tree node.
++ *
++ * The height of a tree is the number of nodes to traverse until stored
++ * pointers are reached. A height of 0 means the entries of a node (or the
++ * tree root) directly point to stored pointers.
++ *
++ * The index is valid if and only if the parent isn't NULL.
++ *
++ * Concerning the allocation bitmap, a bit is set when the node it denotes,
++ * or one of its children, can be used to allocate an entry. Conversely, a bit
++ * is clear when the matching node and all of its children have no free entry.
++ *
++ * In order to support safe lockless lookups, in particular during a resize,
++ * each node includes the height of its subtree, which is invariant during
++ * the entire node lifetime. Since the tree height does vary, it can't be
++ * used to determine whether the tree root is a node or a stored pointer.
++ * This implementation assumes that all nodes and stored pointers are at least
++ * 4-byte aligned, and uses the least significant bit of entries to indicate
++ * the pointer type. This bit is set for internal nodes, and clear for stored
++ * pointers so that they can be accessed from slots without conversion.
++ */
++struct rdxtree_node {
++ struct rdxtree_node *parent;
++ unsigned int index;
++ unsigned int height;
++ unsigned int nr_entries;
++ rdxtree_bm_t alloc_bm;
++ void *entries[RDXTREE_RADIX_SIZE];
++};
++
++/*
++ * We allocate nodes using the slab allocator.
++ */
++static struct kmem_cache rdxtree_node_cache;
++
++void
++rdxtree_cache_init(void)
++{
++ kmem_cache_init(&rdxtree_node_cache, "rdxtree_node",
++ sizeof(struct rdxtree_node), 0, NULL, NULL, NULL, 0);
++}
++
++#ifdef RDXTREE_ENABLE_NODE_CREATION_FAILURES
++unsigned int rdxtree_fail_node_creation_threshold;
++unsigned int rdxtree_nr_node_creations;
++#endif /* RDXTREE_ENABLE_NODE_CREATION_FAILURES */
++
++static inline int
++rdxtree_check_alignment(const void *ptr)
++{
++ return ((unsigned long)ptr & ~RDXTREE_ENTRY_ADDR_MASK) == 0;
++}
++
++static inline void *
++rdxtree_entry_addr(void *entry)
++{
++ return (void *)((unsigned long)entry & RDXTREE_ENTRY_ADDR_MASK);
++}
++
++static inline int
++rdxtree_entry_is_node(const void *entry)
++{
++ return ((unsigned long)entry & 1) != 0;
++}
++
++static inline void *
++rdxtree_node_to_entry(struct rdxtree_node *node)
++{
++ return (void *)((unsigned long)node | 1);
++}
++
++static int
++rdxtree_node_create(struct rdxtree_node **nodep, unsigned int height)
++{
++ struct rdxtree_node *node;
++
++#ifdef RDXTREE_ENABLE_NODE_CREATION_FAILURES
++ if (rdxtree_fail_node_creation_threshold != 0) {
++ rdxtree_nr_node_creations++;
++
++ if (rdxtree_nr_node_creations == rdxtree_fail_node_creation_threshold)
++ return ERR_NOMEM;
++ }
++#endif /* RDXTREE_ENABLE_NODE_CREATION_FAILURES */
++
++ node = (struct rdxtree_node *) kmem_cache_alloc(&rdxtree_node_cache);
++
++ if (node == NULL)
++ return ERR_NOMEM;
++
++ assert(rdxtree_check_alignment(node));
++ node->parent = NULL;
++ node->height = height;
++ node->nr_entries = 0;
++ node->alloc_bm = RDXTREE_BM_FULL;
++ memset(node->entries, 0, sizeof(node->entries));
++ *nodep = node;
++ return 0;
++}
++
++static void
++rdxtree_node_schedule_destruction(struct rdxtree_node *node)
++{
++ /*
++ * This function is intended to use the appropriate interface to defer
++ * destruction until all read-side references are dropped in an
++ * environment that provides lockless synchronization.
++ *
++ * Otherwise, it simply "schedules" destruction immediately.
++ */
++ kmem_cache_free(&rdxtree_node_cache, (vm_offset_t) node);
++}
++
++static inline void
++rdxtree_node_link(struct rdxtree_node *node, struct rdxtree_node *parent,
++ unsigned int index)
++{
++ node->parent = parent;
++ node->index = index;
++}
++
++static inline void
++rdxtree_node_unlink(struct rdxtree_node *node)
++{
++ assert(node->parent != NULL);
++ node->parent = NULL;
++}
++
++static inline int
++rdxtree_node_full(struct rdxtree_node *node)
++{
++ return (node->nr_entries == ARRAY_SIZE(node->entries));
++}
++
++static inline int
++rdxtree_node_empty(struct rdxtree_node *node)
++{
++ return (node->nr_entries == 0);
++}
++
++static inline void
++rdxtree_node_insert(struct rdxtree_node *node, unsigned int index,
++ void *entry)
++{
++ assert(index < ARRAY_SIZE(node->entries));
++ assert(node->entries[index] == NULL);
++
++ node->nr_entries++;
++ llsync_assign_ptr(node->entries[index], entry);
++}
++
++static inline void
++rdxtree_node_insert_node(struct rdxtree_node *node, unsigned int index,
++ struct rdxtree_node *child)
++{
++ rdxtree_node_insert(node, index, rdxtree_node_to_entry(child));
++}
++
++static inline void
++rdxtree_node_remove(struct rdxtree_node *node, unsigned int index)
++{
++ assert(index < ARRAY_SIZE(node->entries));
++ assert(node->entries[index] != NULL);
++
++ node->nr_entries--;
++ llsync_assign_ptr(node->entries[index], NULL);
++}
++
++static inline void *
++rdxtree_node_find(struct rdxtree_node *node, unsigned int *indexp)
++{
++ unsigned int index;
++ void *ptr;
++
++ index = *indexp;
++
++ while (index < ARRAY_SIZE(node->entries)) {
++ ptr = rdxtree_entry_addr(llsync_read_ptr(node->entries[index]));
++
++ if (ptr != NULL) {
++ *indexp = index;
++ return ptr;
++ }
++
++ index++;
++ }
++
++ return NULL;
++}
++
++static inline void
++rdxtree_node_bm_set(struct rdxtree_node *node, unsigned int index)
++{
++ node->alloc_bm |= (rdxtree_bm_t)1 << index;
++}
++
++static inline void
++rdxtree_node_bm_clear(struct rdxtree_node *node, unsigned int index)
++{
++ node->alloc_bm &= ~((rdxtree_bm_t)1 << index);
++}
++
++static inline int
++rdxtree_node_bm_is_set(struct rdxtree_node *node, unsigned int index)
++{
++ return (node->alloc_bm & ((rdxtree_bm_t)1 << index));
++}
++
++static inline int
++rdxtree_node_bm_empty(struct rdxtree_node *node)
++{
++ return (node->alloc_bm == RDXTREE_BM_EMPTY);
++}
++
++static inline unsigned int
++rdxtree_node_bm_first(struct rdxtree_node *node)
++{
++ return rdxtree_ffs(node->alloc_bm) - 1;
++}
++
++static inline rdxtree_key_t
++rdxtree_max_key(unsigned int height)
++{
++ size_t shift;
++
++ shift = RDXTREE_RADIX * height;
++
++ if (likely(shift < (sizeof(rdxtree_key_t) * CHAR_BIT)))
++ return ((rdxtree_key_t)1 << shift) - 1;
++ else
++ return ~((rdxtree_key_t)0);
++}
++
++static void
++rdxtree_shrink(struct rdxtree *tree)
++{
++ struct rdxtree_node *node;
++ void *entry;
++
++ while (tree->height > 0) {
++ node = rdxtree_entry_addr(tree->root);
++
++ if (node->nr_entries != 1)
++ break;
++
++ entry = node->entries[0];
++
++ if (entry == NULL)
++ break;
++
++ tree->height--;
++
++ if (tree->height > 0)
++ rdxtree_node_unlink(rdxtree_entry_addr(entry));
++
++ llsync_assign_ptr(tree->root, entry);
++ rdxtree_node_schedule_destruction(node);
++ }
++}
++
++static int
++rdxtree_grow(struct rdxtree *tree, rdxtree_key_t key)
++{
++ struct rdxtree_node *root, *node;
++ unsigned int new_height;
++ int error;
++
++ new_height = tree->height + 1;
++
++ while (key > rdxtree_max_key(new_height))
++ new_height++;
++
++ if (tree->root == NULL) {
++ tree->height = new_height;
++ return ERR_SUCCESS;
++ }
++
++ root = rdxtree_entry_addr(tree->root);
++
++ do {
++ error = rdxtree_node_create(&node, tree->height);
++
++ if (error) {
++ rdxtree_shrink(tree);
++ return error;
++ }
++
++ if (tree->height == 0)
++ rdxtree_node_bm_clear(node, 0);
++ else {
++ rdxtree_node_link(root, node, 0);
++
++ if (rdxtree_node_bm_empty(root))
++ rdxtree_node_bm_clear(node, 0);
++ }
++
++ rdxtree_node_insert(node, 0, tree->root);
++ tree->height++;
++ llsync_assign_ptr(tree->root, rdxtree_node_to_entry(node));
++ root = node;
++ } while (new_height > tree->height);
++
++ return ERR_SUCCESS;
++}
++
++static void
++rdxtree_cleanup(struct rdxtree *tree, struct rdxtree_node *node)
++{
++ struct rdxtree_node *prev;
++
++ for (;;) {
++ if (likely(!rdxtree_node_empty(node))) {
++ if (unlikely(node->parent == NULL))
++ rdxtree_shrink(tree);
++
++ break;
++ }
++
++ if (node->parent == NULL) {
++ tree->height = 0;
++ llsync_assign_ptr(tree->root, NULL);
++ rdxtree_node_schedule_destruction(node);
++ break;
++ }
++
++ prev = node;
++ node = node->parent;
++ rdxtree_node_unlink(prev);
++ rdxtree_node_remove(node, prev->index);
++ rdxtree_node_schedule_destruction(prev);
++ }
++}
++
++static void
++rdxtree_insert_bm_clear(struct rdxtree_node *node, unsigned int index)
++{
++ for (;;) {
++ rdxtree_node_bm_clear(node, index);
++
++ if (!rdxtree_node_full(node) || (node->parent == NULL))
++ break;
++
++ index = node->index;
++ node = node->parent;
++ }
++}
++
++int
++rdxtree_insert_common(struct rdxtree *tree, rdxtree_key_t key,
++ void *ptr, void ***slotp)
++{
++ struct rdxtree_node *node, *prev;
++ unsigned int height, shift, index = index;
++ int error;
++
++ assert(ptr != NULL);
++ assert(rdxtree_check_alignment(ptr));
++
++ if (unlikely(key > rdxtree_max_key(tree->height))) {
++ error = rdxtree_grow(tree, key);
++
++ if (error)
++ return error;
++ }
++
++ height = tree->height;
++
++ if (unlikely(height == 0)) {
++ if (tree->root != NULL)
++ return ERR_BUSY;
++
++ llsync_assign_ptr(tree->root, ptr);
++
++ if (slotp != NULL)
++ *slotp = &tree->root;
++
++ return ERR_SUCCESS;
++ }
++
++ node = rdxtree_entry_addr(tree->root);
++ shift = (height - 1) * RDXTREE_RADIX;
++ prev = NULL;
++
++ do {
++ if (node == NULL) {
++ error = rdxtree_node_create(&node, height - 1);
++
++ if (error) {
++ if (prev == NULL)
++ tree->height = 0;
++ else
++ rdxtree_cleanup(tree, prev);
++
++ return error;
++ }
++
++ if (prev == NULL)
++ llsync_assign_ptr(tree->root, rdxtree_node_to_entry(node));
++ else {
++ rdxtree_node_link(node, prev, index);
++ rdxtree_node_insert_node(prev, index, node);
++ }
++ }
++
++ prev = node;
++ index = (unsigned int)(key >> shift) & RDXTREE_RADIX_MASK;
++ node = rdxtree_entry_addr(prev->entries[index]);
++ shift -= RDXTREE_RADIX;
++ height--;
++ } while (height > 0);
++
++ if (unlikely(node != NULL))
++ return ERR_BUSY;
++
++ rdxtree_node_insert(prev, index, ptr);
++ rdxtree_insert_bm_clear(prev, index);
++
++ if (slotp != NULL)
++ *slotp = &prev->entries[index];
++
++ return ERR_SUCCESS;
++}
++
++int
++rdxtree_insert_alloc_common(struct rdxtree *tree, void *ptr,
++ rdxtree_key_t *keyp, void ***slotp)
++{
++ struct rdxtree_node *node, *prev;
++ unsigned int height, shift, index = index;
++ rdxtree_key_t key;
++ int error;
++
++ assert(ptr != NULL);
++ assert(rdxtree_check_alignment(ptr));
++
++ height = tree->height;
++
++ if (unlikely(height == 0)) {
++ if (tree->root == NULL) {
++ llsync_assign_ptr(tree->root, ptr);
++ *keyp = 0;
++
++ if (slotp != NULL)
++ *slotp = &tree->root;
++
++ return ERR_SUCCESS;
++ }
++
++ goto grow;
++ }
++
++ node = rdxtree_entry_addr(tree->root);
++ key = 0;
++ shift = (height - 1) * RDXTREE_RADIX;
++ prev = NULL;
++
++ do {
++ if (node == NULL) {
++ error = rdxtree_node_create(&node, height - 1);
++
++ if (error) {
++ rdxtree_cleanup(tree, prev);
++ return error;
++ }
++
++ rdxtree_node_link(node, prev, index);
++ rdxtree_node_insert_node(prev, index, node);
++ }
++
++ prev = node;
++ index = rdxtree_node_bm_first(node);
++
++ if (index == (unsigned int)-1)
++ goto grow;
++
++ key |= (rdxtree_key_t)index << shift;
++ node = rdxtree_entry_addr(node->entries[index]);
++ shift -= RDXTREE_RADIX;
++ height--;
++ } while (height > 0);
++
++ rdxtree_node_insert(prev, index, ptr);
++ rdxtree_insert_bm_clear(prev, index);
++
++ if (slotp != NULL)
++ *slotp = &prev->entries[index];
++
++ goto out;
++
++grow:
++ key = rdxtree_max_key(height) + 1;
++ error = rdxtree_insert_common(tree, key, ptr, slotp);
++
++ if (error)
++ return error;
++
++out:
++ *keyp = key;
++ return ERR_SUCCESS;
++}
++
++static void
++rdxtree_remove_bm_set(struct rdxtree_node *node, unsigned int index)
++{
++ do {
++ rdxtree_node_bm_set(node, index);
++
++ if (node->parent == NULL)
++ break;
++
++ index = node->index;
++ node = node->parent;
++ } while (!rdxtree_node_bm_is_set(node, index));
++}
++
++void *
++rdxtree_remove(struct rdxtree *tree, rdxtree_key_t key)
++{
++ struct rdxtree_node *node, *prev;
++ unsigned int height, shift, index;
++
++ height = tree->height;
++
++ if (unlikely(key > rdxtree_max_key(height)))
++ return NULL;
++
++ node = rdxtree_entry_addr(tree->root);
++
++ if (unlikely(height == 0)) {
++ llsync_assign_ptr(tree->root, NULL);
++ return node;
++ }
++
++ shift = (height - 1) * RDXTREE_RADIX;
++
++ do {
++ if (node == NULL)
++ return NULL;
++
++ prev = node;
++ index = (unsigned int)(key >> shift) & RDXTREE_RADIX_MASK;
++ node = rdxtree_entry_addr(node->entries[index]);
++ shift -= RDXTREE_RADIX;
++ height--;
++ } while (height > 0);
++
++ if (node == NULL)
++ return NULL;
++
++ rdxtree_node_remove(prev, index);
++ rdxtree_remove_bm_set(prev, index);
++ rdxtree_cleanup(tree, prev);
++ return node;
++}
++
++void *
++rdxtree_lookup_common(const struct rdxtree *tree, rdxtree_key_t key,
++ int get_slot)
++{
++ struct rdxtree_node *node, *prev;
++ unsigned int height, shift, index;
++ void *entry;
++
++ entry = llsync_read_ptr(tree->root);
++
++ if (entry == NULL) {
++ node = NULL;
++ height = 0;
++ } else {
++ node = rdxtree_entry_addr(entry);
++ height = rdxtree_entry_is_node(entry) ? node->height + 1 : 0;
++ }
++
++ if (key > rdxtree_max_key(height))
++ return NULL;
++
++ if (height == 0) {
++ if (node == NULL)
++ return NULL;
++
++ return get_slot ? (void *)&tree->root : node;
++ }
++
++ shift = (height - 1) * RDXTREE_RADIX;
++
++ do {
++ if (node == NULL)
++ return NULL;
++
++ prev = node;
++ index = (unsigned int)(key >> shift) & RDXTREE_RADIX_MASK;
++ entry = llsync_read_ptr(node->entries[index]);
++ node = rdxtree_entry_addr(entry);
++ shift -= RDXTREE_RADIX;
++ height--;
++ } while (height > 0);
++
++ if (node == NULL)
++ return NULL;
++
++ return get_slot ? (void *)&prev->entries[index] : node;
++}
++
++void *
++rdxtree_replace_slot(void **slot, void *ptr)
++{
++ void *old;
++
++ assert(ptr != NULL);
++ assert(rdxtree_check_alignment(ptr));
++
++ old = *slot;
++ assert(old != NULL);
++ assert(rdxtree_check_alignment(old));
++ llsync_assign_ptr(*slot, ptr);
++ return old;
++}
++
++static void *
++rdxtree_walk_next(struct rdxtree *tree, struct rdxtree_iter *iter)
++{
++ struct rdxtree_node *root, *node, *prev;
++ unsigned int height, shift, index, orig_index;
++ rdxtree_key_t key;
++ void *entry;
++
++ entry = llsync_read_ptr(tree->root);
++
++ if (entry == NULL)
++ return NULL;
++
++ if (!rdxtree_entry_is_node(entry)) {
++ if (iter->key != (rdxtree_key_t)-1)
++ return NULL;
++ else {
++ iter->key = 0;
++ return rdxtree_entry_addr(entry);
++ }
++ }
++
++ key = iter->key + 1;
++
++ if ((key == 0) && (iter->node != NULL))
++ return NULL;
++
++ root = rdxtree_entry_addr(entry);
++
++restart:
++ node = root;
++ height = root->height + 1;
++
++ if (key > rdxtree_max_key(height))
++ return NULL;
++
++ shift = (height - 1) * RDXTREE_RADIX;
++
++ do {
++ prev = node;
++ index = (key >> shift) & RDXTREE_RADIX_MASK;
++ orig_index = index;
++ node = rdxtree_node_find(node, &index);
++
++ if (node == NULL) {
++ shift += RDXTREE_RADIX;
++ key = ((key >> shift) + 1) << shift;
++
++ if (key == 0)
++ return NULL;
++
++ goto restart;
++ }
++
++ if (orig_index != index)
++ key = ((key >> shift) + (index - orig_index)) << shift;
++
++ shift -= RDXTREE_RADIX;
++ height--;
++ } while (height > 0);
++
++ iter->node = prev;
++ iter->key = key;
++ return node;
++}
++
++void *
++rdxtree_walk(struct rdxtree *tree, struct rdxtree_iter *iter)
++{
++ unsigned int index, orig_index;
++ void *ptr;
++
++ if (iter->node == NULL)
++ return rdxtree_walk_next(tree, iter);
++
++ index = (iter->key + 1) & RDXTREE_RADIX_MASK;
++
++ if (index != 0) {
++ orig_index = index;
++ ptr = rdxtree_node_find(iter->node, &index);
++
++ if (ptr != NULL) {
++ iter->key += (index - orig_index) + 1;
++ return ptr;
++ }
++ }
++
++ return rdxtree_walk_next(tree, iter);
++}
++
++void
++rdxtree_remove_all(struct rdxtree *tree)
++{
++ struct rdxtree_node *node, *parent;
++ struct rdxtree_iter iter;
++
++ if (tree->height == 0) {
++ if (tree->root != NULL)
++ llsync_assign_ptr(tree->root, NULL);
++
++ return;
++ }
++
++ for (;;) {
++ rdxtree_iter_init(&iter);
++ rdxtree_walk_next(tree, &iter);
++
++ if (iter.node == NULL)
++ break;
++
++ node = iter.node;
++ parent = node->parent;
++
++ if (parent == NULL)
++ rdxtree_init(tree);
++ else {
++ rdxtree_node_remove(parent, node->index);
++ rdxtree_remove_bm_set(parent, node->index);
++ rdxtree_cleanup(tree, parent);
++ node->parent = NULL;
++ }
++
++ rdxtree_node_schedule_destruction(node);
++ }
++}
+diff --git a/kern/rdxtree.h b/kern/rdxtree.h
+new file mode 100644
+index 0000000..1f8456e
+--- /dev/null
++++ b/kern/rdxtree.h
+@@ -0,0 +1,209 @@
++/*
++ * Copyright (c) 2011-2015 Richard Braun.
++ * All rights reserved.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions
++ * are met:
++ * 1. Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer.
++ * 2. Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
++ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
++ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
++ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
++ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
++ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ *
++ *
++ * Radix tree.
++ *
++ * In addition to the standard insertion operation, this implementation
++ * can allocate keys for the caller at insertion time.
++ *
++ * Upstream site with license notes:
++ * http://git.sceen.net/rbraun/librbraun.git/
++ */
++
++#ifndef _RDXTREE_H
++#define _RDXTREE_H
++
++#include <stddef.h>
++#include <sys/types.h>
++
++/*
++ * Initialize the node cache.
++ */
++void rdxtree_cache_init(void);
++
++/*
++ * This macro selects between 32-bit and 64-bit (the default) keys.
++ */
++#if 0
++#define RDXTREE_KEY_32
++#endif
++
++#ifdef RDXTREE_KEY_32
++typedef uint32_t rdxtree_key_t;
++#else /* RDXTREE_KEY_32 */
++typedef uint64_t rdxtree_key_t;
++#endif /* RDXTREE_KEY_32 */
++
++/*
++ * Radix tree.
++ */
++struct rdxtree;
++
++/*
++ * Radix tree iterator.
++ */
++struct rdxtree_iter;
++
++/*
++ * Static tree initializer.
++ */
++#define RDXTREE_INITIALIZER { 0, NULL }
++
++#include "rdxtree_i.h"
++
++/*
++ * Initialize a tree.
++ */
++static inline void
++rdxtree_init(struct rdxtree *tree)
++{
++ tree->height = 0;
++ tree->root = NULL;
++}
++
++/*
++ * Insert a pointer in a tree.
++ *
++ * The ptr parameter must not be NULL.
++ */
++static inline int
++rdxtree_insert(struct rdxtree *tree, rdxtree_key_t key, void *ptr)
++{
++ return rdxtree_insert_common(tree, key, ptr, NULL);
++}
++
++/*
++ * Insert a pointer in a tree and obtain its slot.
++ *
++ * The ptr and slotp parameters must not be NULL. If successful, the slot of
++ * the newly inserted pointer is stored at the address pointed to by the slotp
++ * parameter.
++ */
++static inline int
++rdxtree_insert_slot(struct rdxtree *tree, rdxtree_key_t key,
++ void *ptr, void ***slotp)
++{
++ return rdxtree_insert_common(tree, key, ptr, slotp);
++}
++
++/*
++ * Insert a pointer in a tree, for which a new key is allocated.
++ *
++ * The ptr and keyp parameters must not be NULL. The newly allocated key is
++ * stored at the address pointed to by the keyp parameter.
++ */
++static inline int
++rdxtree_insert_alloc(struct rdxtree *tree, void *ptr, rdxtree_key_t *keyp)
++{
++ return rdxtree_insert_alloc_common(tree, ptr, keyp, NULL);
++}
++
++/*
++ * Insert a pointer in a tree, for which a new key is allocated, and obtain
++ * its slot.
++ *
++ * The ptr, keyp and slotp parameters must not be NULL. The newly allocated
++ * key is stored at the address pointed to by the keyp parameter while the
++ * slot of the inserted pointer is stored at the address pointed to by the
++ * slotp parameter.
++ */
++static inline int
++rdxtree_insert_alloc_slot(struct rdxtree *tree, void *ptr,
++ rdxtree_key_t *keyp, void ***slotp)
++{
++ return rdxtree_insert_alloc_common(tree, ptr, keyp, slotp);
++}
++
++/*
++ * Remove a pointer from a tree.
++ *
++ * The matching pointer is returned if successful, NULL otherwise.
++ */
++void * rdxtree_remove(struct rdxtree *tree, rdxtree_key_t key);
++
++/*
++ * Look up a pointer in a tree.
++ *
++ * The matching pointer is returned if successful, NULL otherwise.
++ */
++static inline void *
++rdxtree_lookup(const struct rdxtree *tree, rdxtree_key_t key)
++{
++ return rdxtree_lookup_common(tree, key, 0);
++}
++
++/*
++ * Look up a slot in a tree.
++ *
++ * A slot is a pointer to a stored pointer in a tree. It can be used as
++ * a placeholder for fast replacements to avoid multiple lookups on the same
++ * key.
++ *
++ * A slot for the matching pointer is returned if successful, NULL otherwise.
++ *
++ * See rdxtree_replace_slot().
++ */
++static inline void **
++rdxtree_lookup_slot(const struct rdxtree *tree, rdxtree_key_t key)
++{
++ return rdxtree_lookup_common(tree, key, 1);
++}
++
++/*
++ * Replace a pointer in a tree.
++ *
++ * The ptr parameter must not be NULL. The previous pointer is returned.
++ *
++ * See rdxtree_lookup_slot().
++ */
++void * rdxtree_replace_slot(void **slot, void *ptr);
++
++/*
++ * Forge a loop to process all pointers of a tree.
++ */
++#define rdxtree_for_each(tree, iter, ptr) \
++for (rdxtree_iter_init(iter), ptr = rdxtree_walk(tree, iter); \
++ ptr != NULL; \
++ ptr = rdxtree_walk(tree, iter))
++
++/*
++ * Return the key of the current pointer from an iterator.
++ */
++static inline rdxtree_key_t
++rdxtree_iter_key(const struct rdxtree_iter *iter)
++{
++ return iter->key;
++}
++
++/*
++ * Remove all pointers from a tree.
++ *
++ * The common way to destroy a tree and its pointers is to loop over all
++ * the pointers using rdxtree_for_each(), freeing them, then call this
++ * function.
++ */
++void rdxtree_remove_all(struct rdxtree *tree);
++
++#endif /* _RDXTREE_H */
+diff --git a/kern/rdxtree_i.h b/kern/rdxtree_i.h
+new file mode 100644
+index 0000000..1bd1f64
+--- /dev/null
++++ b/kern/rdxtree_i.h
+@@ -0,0 +1,66 @@
++/*
++ * Copyright (c) 2013-2015 Richard Braun.
++ *
++ * This program is free software: you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation, either version 3 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program. If not, see <http://www.gnu.org/licenses/>.
++ *
++ *
++ * Upstream site with license notes:
++ * http://git.sceen.net/rbraun/librbraun.git/
++ */
++
++#ifndef _RDXTREE_I_H
++#define _RDXTREE_I_H
++
++/*
++ * Radix tree.
++ */
++struct rdxtree {
++ unsigned int height;
++ void *root;
++};
++
++/*
++ * Radix tree iterator.
++ *
++ * The node member refers to the node containing the current pointer, if any.
++ * The key member refers to the current pointer, and is valid if and only if
++ * rdxtree_walk() has been called at least once on the iterator.
++ */
++struct rdxtree_iter {
++ void *node;
++ rdxtree_key_t key;
++};
++
++/*
++ * Initialize an iterator.
++ */
++static inline void
++rdxtree_iter_init(struct rdxtree_iter *iter)
++{
++ iter->node = NULL;
++ iter->key = (rdxtree_key_t)-1;
++}
++
++int rdxtree_insert_common(struct rdxtree *tree, rdxtree_key_t key,
++ void *ptr, void ***slotp);
++
++int rdxtree_insert_alloc_common(struct rdxtree *tree, void *ptr,
++ rdxtree_key_t *keyp, void ***slotp);
++
++void * rdxtree_lookup_common(const struct rdxtree *tree, rdxtree_key_t key,
++ int get_slot);
++
++void * rdxtree_walk(struct rdxtree *tree, struct rdxtree_iter *iter);
++
++#endif /* _RDXTREE_I_H */
+diff --git a/kern/startup.c b/kern/startup.c
+index 71cd04d..f9f0c34 100644
+--- a/kern/startup.c
++++ b/kern/startup.c
+@@ -41,6 +41,7 @@
+ #include <kern/mach_clock.h>
+ #include <kern/printf.h>
+ #include <kern/processor.h>
++#include <kern/rdxtree.h>
+ #include <kern/sched_prim.h>
+ #include <kern/task.h>
+ #include <kern/thread.h>
+@@ -112,6 +113,7 @@ void setup_main(void)
+
+ sched_init();
+ vm_mem_bootstrap();
++ rdxtree_cache_init();
+ ipc_bootstrap();
+ vm_mem_init();
+ ipc_init();
+--
+2.1.4
+
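A note on patch 0002 for readers unfamiliar with radix trees: each level of the tree consumes RDXTREE_RADIX (6) bits of the key, most significant chunk first, so every node has 64 slots and the height grows only when larger keys are inserted. The standalone sketch below is an illustration only, not kernel code; it reuses the patch's constants to show how a key decomposes into per-level slot indices, mirroring the loop in rdxtree_lookup_common().

#include <stdint.h>
#include <stdio.h>

/* Constants copied from kern/rdxtree.c (patch 0002). */
#define RDXTREE_RADIX       6
#define RDXTREE_RADIX_SIZE  (1UL << RDXTREE_RADIX)   /* 64 slots per node */
#define RDXTREE_RADIX_MASK  (RDXTREE_RADIX_SIZE - 1)

typedef uint64_t rdxtree_key_t;

/* Smallest height whose key space covers the given key; compare
 * rdxtree_max_key() in the patch. */
static unsigned int
height_for_key(rdxtree_key_t key)
{
    unsigned int height = 0;
    rdxtree_key_t max = 0;

    while (key > max) {
        height++;
        max = (height * RDXTREE_RADIX >= 64)
            ? ~(rdxtree_key_t)0
            : ((rdxtree_key_t)1 << (height * RDXTREE_RADIX)) - 1;
    }
    return height;
}

int main(void)
{
    rdxtree_key_t key = 0x2a5f;   /* arbitrary example key */
    unsigned int height = height_for_key(key);

    printf("key %#llx needs height %u\n", (unsigned long long)key, height);

    /* Same decomposition as the lookup/insert loops in rdxtree.c:
     * index = (key >> shift) & RDXTREE_RADIX_MASK, top level first. */
    for (unsigned int level = height; level > 0; level--) {
        unsigned int shift = (level - 1) * RDXTREE_RADIX;
        unsigned int index = (key >> shift) & RDXTREE_RADIX_MASK;
        printf("  level %u -> slot %2u\n", level, index);
    }
    return 0;
}

With 6 bits per level, a height of 3 already addresses 2^18 keys, so the tree stays shallow for the key ranges the later IPC patches need.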
diff --git a/debian/patches/0003-ipc-undo-manual-inlining-of-ipc_entry_X-functions.patch b/debian/patches/0003-ipc-undo-manual-inlining-of-ipc_entry_X-functions.patch
new file mode 100644
index 0000000..df13e0a
--- /dev/null
+++ b/debian/patches/0003-ipc-undo-manual-inlining-of-ipc_entry_X-functions.patch
@@ -0,0 +1,535 @@
+From 95cc13087c944e7fd95bd39374bdd5d7477dff76 Mon Sep 17 00:00:00 2001
+From: Justus Winter <4winter@informatik.uni-hamburg.de>
+Date: Mon, 16 Mar 2015 12:52:24 +0100
+Subject: [PATCH gnumach 3/6] ipc: undo manual inlining of `ipc_entry_X'
+ functions
+
+Today we can rely on the compiler to inline functions. Undoing this
+manual optimization is a first step to replace the IPC tables.
+
+* ipc/mach_msg.c (mach_msg_trap): Undo the manual inlining of
+`ipc_entry_lookup', `ipc_entry_dealloc', and `ipc_entry_get'.
+* ipc/ipc_kmsg.c (ipc_kmsg_copyin_header, ipc_kmsg_copyout_header): Likewise.
+* kern/exception.c (exception_raise): Likewise.
+* kern/ipc_mig.c (fast_send_right_lookup): Likewise.
+---
+ ipc/ipc_kmsg.c | 107 +++++++++++-------------------------------
+ ipc/mach_msg.c | 139 ++++++++-----------------------------------------------
+ kern/exception.c | 18 ++-----
+ kern/ipc_mig.c | 14 +++---
+ 4 files changed, 57 insertions(+), 221 deletions(-)
+
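The pattern behind patch 0003 in miniature: each hand-expanded lookup becomes a call to one canonical function, relying on the compiler to inline small static functions at the call site. The sketch below is an illustration only; the structures and the index arithmetic are simplified stand-ins, not the real Mach types or ipc_entry_lookup().

#include <stddef.h>
#include <stdio.h>

/* Simplified stand-ins; not the real Mach API. */
struct entry { unsigned int bits; void *object; };
struct space { struct entry *table; unsigned int size; };

/* One canonical lookup.  At -O2 the compiler expands this at each call
 * site, so the hand-inlined copies the patch removes buy nothing. */
static inline struct entry *
entry_lookup(struct space *s, unsigned int name)
{
    unsigned int index = name & 0xffff;   /* simplified index split */

    if (index >= s->size)
        return NULL;
    return &s->table[index];
}

int main(void)
{
    struct entry table[4] = { [2] = { 0x1, "a send right" } };
    struct space s = { table, 4 };
    struct entry *e = entry_lookup(&s, 2);

    printf("%s\n", (e != NULL && e->object != NULL)
           ? (const char *)e->object : "no entry");
    return 0;
}

Keeping a single copy of the bounds and type checks is presumably the point: patches 0004 to 0006 can then change the lookup's internals (hash table, then radix tree) without revisiting every call site.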
+diff --git a/ipc/ipc_kmsg.c b/ipc/ipc_kmsg.c
+index c0f07dd..cae700f 100644
+--- a/ipc/ipc_kmsg.c
++++ b/ipc/ipc_kmsg.c
+@@ -698,24 +698,14 @@ ipc_kmsg_copyin_header(
+ if (!space->is_active)
+ goto abort_async;
+
+- /* optimized ipc_entry_lookup */
+-
+- {
+- mach_port_index_t index = MACH_PORT_INDEX(dest_name);
+- mach_port_gen_t gen = MACH_PORT_GEN(dest_name);
+-
+- if (index >= space->is_table_size)
++ entry = ipc_entry_lookup (space, dest_name);
++ if (entry == IE_NULL)
+ goto abort_async;
+-
+- entry = &space->is_table[index];
+ bits = entry->ie_bits;
+
+- /* check generation number and type bit */
+-
+- if ((bits & (IE_BITS_GEN_MASK|MACH_PORT_TYPE_SEND)) !=
+- (gen | MACH_PORT_TYPE_SEND))
++ /* check type bits */
++ if (IE_BITS_TYPE (bits) != MACH_PORT_TYPE_SEND)
+ goto abort_async;
+- }
+
+ /* optimized ipc_right_copyin */
+
+@@ -750,8 +740,6 @@ ipc_kmsg_copyin_header(
+
+ case MACH_MSGH_BITS(MACH_MSG_TYPE_COPY_SEND,
+ MACH_MSG_TYPE_MAKE_SEND_ONCE): {
+- ipc_entry_num_t size;
+- ipc_entry_t table;
+ ipc_entry_t entry;
+ ipc_entry_bits_t bits;
+ ipc_port_t dest_port, reply_port;
+@@ -762,51 +750,28 @@ ipc_kmsg_copyin_header(
+ if (!space->is_active)
+ goto abort_request;
+
+- size = space->is_table_size;
+- table = space->is_table;
+-
+- /* optimized ipc_entry_lookup of dest_name */
+-
+- {
+- mach_port_index_t index = MACH_PORT_INDEX(dest_name);
+- mach_port_gen_t gen = MACH_PORT_GEN(dest_name);
+-
+- if (index >= size)
++ entry = ipc_entry_lookup (space, dest_name);
++ if (entry == IE_NULL)
+ goto abort_request;
+-
+- entry = &table[index];
+ bits = entry->ie_bits;
+
+- /* check generation number and type bit */
+-
+- if ((bits & (IE_BITS_GEN_MASK|MACH_PORT_TYPE_SEND)) !=
+- (gen | MACH_PORT_TYPE_SEND))
++ /* check type bits */
++ if (IE_BITS_TYPE (bits) != MACH_PORT_TYPE_SEND)
+ goto abort_request;
+- }
+
+ assert(IE_BITS_UREFS(bits) > 0);
+
+ dest_port = (ipc_port_t) entry->ie_object;
+ assert(dest_port != IP_NULL);
+
+- /* optimized ipc_entry_lookup of reply_name */
+-
+- {
+- mach_port_index_t index = MACH_PORT_INDEX(reply_name);
+- mach_port_gen_t gen = MACH_PORT_GEN(reply_name);
+-
+- if (index >= size)
++ entry = ipc_entry_lookup (space, reply_name);
++ if (entry == IE_NULL)
+ goto abort_request;
+-
+- entry = &table[index];
+ bits = entry->ie_bits;
+
+- /* check generation number and type bit */
+-
+- if ((bits & (IE_BITS_GEN_MASK|MACH_PORT_TYPE_RECEIVE)) !=
+- (gen | MACH_PORT_TYPE_RECEIVE))
++ /* check type bits */
++ if (IE_BITS_TYPE (bits) != MACH_PORT_TYPE_RECEIVE)
+ goto abort_request;
+- }
+
+ reply_port = (ipc_port_t) entry->ie_object;
+ assert(reply_port != IP_NULL);
+@@ -853,9 +818,6 @@ ipc_kmsg_copyin_header(
+ }
+
+ case MACH_MSGH_BITS(MACH_MSG_TYPE_MOVE_SEND_ONCE, 0): {
+- mach_port_index_t index;
+- mach_port_gen_t gen;
+- ipc_entry_t table;
+ ipc_entry_t entry;
+ ipc_entry_bits_t bits;
+ ipc_port_t dest_port;
+@@ -869,24 +831,13 @@ ipc_kmsg_copyin_header(
+ if (!space->is_active)
+ goto abort_reply;
+
+- /* optimized ipc_entry_lookup */
+-
+- table = space->is_table;
+-
+- index = MACH_PORT_INDEX(dest_name);
+- gen = MACH_PORT_GEN(dest_name);
+-
+- if (index >= space->is_table_size)
++ entry = ipc_entry_lookup (space, dest_name);
++ if (entry == IE_NULL)
+ goto abort_reply;
+-
+- entry = &table[index];
+ bits = entry->ie_bits;
+
+- /* check generation number, collision bit, and type bit */
+-
+- if ((bits & (IE_BITS_GEN_MASK|IE_BITS_COLLISION|
+- MACH_PORT_TYPE_SEND_ONCE)) !=
+- (gen | MACH_PORT_TYPE_SEND_ONCE))
++ /* check type bits */
++ if (IE_BITS_TYPE (bits) != MACH_PORT_TYPE_SEND_ONCE)
+ goto abort_reply;
+
+ /* optimized ipc_right_copyin */
+@@ -910,12 +861,8 @@ ipc_kmsg_copyin_header(
+ assert(dest_port->ip_sorights > 0);
+ ip_unlock(dest_port);
+
+- /* optimized ipc_entry_dealloc */
+-
+- entry->ie_next = table->ie_next;
+- table->ie_next = index;
+- entry->ie_bits = gen;
+ entry->ie_object = IO_NULL;
++ ipc_entry_dealloc (space, dest_name, entry);
+ is_write_unlock(space);
+
+ msg->msgh_bits = (MACH_MSGH_BITS_OTHER(mbits) |
+@@ -1815,8 +1762,6 @@ ipc_kmsg_copyout_header(
+
+ case MACH_MSGH_BITS(MACH_MSG_TYPE_PORT_SEND,
+ MACH_MSG_TYPE_PORT_SEND_ONCE): {
+- ipc_entry_t table;
+- mach_port_index_t index;
+ ipc_entry_t entry;
+ ipc_port_t reply = (ipc_port_t) msg->msgh_local_port;
+ mach_port_t dest_name, reply_name;
+@@ -1829,8 +1774,7 @@ ipc_kmsg_copyout_header(
+ break;
+
+ is_write_lock(space);
+- if (!space->is_active ||
+- ((index = (table = space->is_table)->ie_next) == 0)) {
++ if (!space->is_active || space->is_table->ie_next == 0) {
+ is_write_unlock(space);
+ break;
+ }
+@@ -1860,11 +1804,14 @@ ipc_kmsg_copyout_header(
+ assert(reply->ip_sorights > 0);
+ ip_unlock(reply);
+
+- /* optimized ipc_entry_get */
+-
+- entry = &table[index];
+- table->ie_next = entry->ie_next;
+- entry->ie_request = 0;
++ kern_return_t kr;
++ kr = ipc_entry_get (space, &reply_name, &entry);
++ if (kr) {
++ ip_unlock(reply);
++ ip_unlock(dest);
++ is_write_unlock(space);
++ break;
++ }
+
+ {
+ mach_port_gen_t gen;
+@@ -1872,8 +1819,6 @@ ipc_kmsg_copyout_header(
+ assert((entry->ie_bits &~ IE_BITS_GEN_MASK) == 0);
+ gen = entry->ie_bits + IE_BITS_GEN_ONE;
+
+- reply_name = MACH_PORT_MAKE(index, gen);
+-
+ /* optimized ipc_right_copyout */
+
+ entry->ie_bits = gen | (MACH_PORT_TYPE_SEND_ONCE | 1);
+diff --git a/ipc/mach_msg.c b/ipc/mach_msg.c
+index aecfcd4..fe0c43e 100644
+--- a/ipc/mach_msg.c
++++ b/ipc/mach_msg.c
+@@ -482,16 +482,7 @@ mach_msg_trap(
+ switch (kmsg->ikm_header.msgh_bits) {
+ case MACH_MSGH_BITS(MACH_MSG_TYPE_COPY_SEND,
+ MACH_MSG_TYPE_MAKE_SEND_ONCE): {
+- ipc_entry_t table;
+- ipc_entry_num_t size;
+ ipc_port_t reply_port;
+-
+- /* sending a request message */
+-
+- {
+- mach_port_index_t index;
+- mach_port_gen_t gen;
+-
+ {
+ mach_port_t reply_name =
+ kmsg->ikm_header.msgh_local_port;
+@@ -499,68 +490,30 @@ mach_msg_trap(
+ if (reply_name != rcv_name)
+ goto slow_copyin;
+
+- /* optimized ipc_entry_lookup of reply_name */
+-
+- index = MACH_PORT_INDEX(reply_name);
+- gen = MACH_PORT_GEN(reply_name);
+- }
+-
+ is_read_lock(space);
+ assert(space->is_active);
+
+- size = space->is_table_size;
+- table = space->is_table;
+-
+- if (index >= size)
+- goto abort_request_copyin;
+-
+- {
+ ipc_entry_t entry;
+- ipc_entry_bits_t bits;
+-
+- entry = &table[index];
+- bits = entry->ie_bits;
+-
+- /* check generation number and type bit */
+-
+- if ((bits & (IE_BITS_GEN_MASK|
+- MACH_PORT_TYPE_RECEIVE)) !=
+- (gen | MACH_PORT_TYPE_RECEIVE))
++ entry = ipc_entry_lookup (space, reply_name);
++ if (entry == IE_NULL)
+ goto abort_request_copyin;
+-
+ reply_port = (ipc_port_t) entry->ie_object;
+ assert(reply_port != IP_NULL);
+ }
+- }
+-
+- /* optimized ipc_entry_lookup of dest_name */
+-
+- {
+- mach_port_index_t index;
+- mach_port_gen_t gen;
+
+ {
+ mach_port_t dest_name =
+ kmsg->ikm_header.msgh_remote_port;
+
+- index = MACH_PORT_INDEX(dest_name);
+- gen = MACH_PORT_GEN(dest_name);
+- }
+-
+- if (index >= size)
+- goto abort_request_copyin;
+-
+- {
+ ipc_entry_t entry;
+ ipc_entry_bits_t bits;
+-
+- entry = &table[index];
++ entry = ipc_entry_lookup (space, dest_name);
++ if (entry == IE_NULL)
++ goto abort_request_copyin;
+ bits = entry->ie_bits;
+
+- /* check generation number and type bit */
+-
+- if ((bits & (IE_BITS_GEN_MASK|MACH_PORT_TYPE_SEND)) !=
+- (gen | MACH_PORT_TYPE_SEND))
++ /* check type bits */
++ if (IE_BITS_TYPE (bits) != MACH_PORT_TYPE_SEND)
+ goto abort_request_copyin;
+
+ assert(IE_BITS_UREFS(bits) > 0);
+@@ -568,7 +521,6 @@ mach_msg_trap(
+ dest_port = (ipc_port_t) entry->ie_object;
+ assert(dest_port != IP_NULL);
+ }
+- }
+
+ /*
+ * To do an atomic copyin, need simultaneous
+@@ -649,9 +601,6 @@ mach_msg_trap(
+ }
+
+ case MACH_MSGH_BITS(MACH_MSG_TYPE_MOVE_SEND_ONCE, 0): {
+- ipc_entry_num_t size;
+- ipc_entry_t table;
+-
+ /* sending a reply message */
+
+ {
+@@ -665,35 +614,18 @@ mach_msg_trap(
+ is_write_lock(space);
+ assert(space->is_active);
+
+- /* optimized ipc_entry_lookup */
+-
+- size = space->is_table_size;
+- table = space->is_table;
+-
+ {
+ ipc_entry_t entry;
+- mach_port_gen_t gen;
+- mach_port_index_t index;
+-
+- {
+ mach_port_t dest_name =
+ kmsg->ikm_header.msgh_remote_port;
+
+- index = MACH_PORT_INDEX(dest_name);
+- gen = MACH_PORT_GEN(dest_name);
+- }
+-
+- if (index >= size)
++ entry = ipc_entry_lookup (space, dest_name);
++ if (entry == IE_NULL)
+ goto abort_reply_dest_copyin;
+
+- entry = &table[index];
+-
+- /* check generation, collision bit, and type bit */
+-
+- if ((entry->ie_bits & (IE_BITS_GEN_MASK|
+- IE_BITS_COLLISION|
+- MACH_PORT_TYPE_SEND_ONCE)) !=
+- (gen | MACH_PORT_TYPE_SEND_ONCE))
++ /* check type bits */
++ if (IE_BITS_TYPE (entry->ie_bits) !=
++ MACH_PORT_TYPE_SEND_ONCE)
+ goto abort_reply_dest_copyin;
+
+ /* optimized ipc_right_copyin */
+@@ -716,13 +648,8 @@ mach_msg_trap(
+ }
+
+ assert(dest_port->ip_sorights > 0);
+-
+- /* optimized ipc_entry_dealloc */
+-
+- entry->ie_next = table->ie_next;
+- table->ie_next = index;
+- entry->ie_bits = gen;
+ entry->ie_object = IO_NULL;
++ ipc_entry_dealloc (space, dest_name, entry);
+ }
+
+ kmsg->ikm_header.msgh_bits =
+@@ -735,31 +662,16 @@ mach_msg_trap(
+
+ assert(dest_port->ip_receiver != ipc_space_kernel);
+
+- /* optimized ipc_entry_lookup/ipc_mqueue_copyin */
++ /* optimized ipc_mqueue_copyin */
+
+ {
+ ipc_entry_t entry;
+ ipc_entry_bits_t bits;
+-
+- {
+- mach_port_index_t index;
+- mach_port_gen_t gen;
+-
+- index = MACH_PORT_INDEX(rcv_name);
+- gen = MACH_PORT_GEN(rcv_name);
+-
+- if (index >= size)
++ entry = ipc_entry_lookup (space, rcv_name);
++ if (entry == IE_NULL)
+ goto abort_reply_rcv_copyin;
+-
+- entry = &table[index];
+ bits = entry->ie_bits;
+
+- /* check generation number */
+-
+- if ((bits & IE_BITS_GEN_MASK) != gen)
+- goto abort_reply_rcv_copyin;
+- }
+-
+ /* check type bits; looking for receive or set */
+
+ if (bits & MACH_PORT_TYPE_PORT_SET) {
+@@ -1073,21 +985,12 @@ mach_msg_trap(
+ ip_unlock(reply_port);
+
+ {
+- ipc_entry_t table;
+ ipc_entry_t entry;
+- mach_port_index_t index;
+-
+- /* optimized ipc_entry_get */
+-
+- table = space->is_table;
+- index = table->ie_next;
+-
+- if (index == 0)
++ kern_return_t kr;
++ kr = ipc_entry_get (space, &reply_name, &entry);
++ if (kr)
+ goto abort_request_copyout;
+-
+- entry = &table[index];
+- table->ie_next = entry->ie_next;
+- entry->ie_request = 0;
++ assert (entry != NULL);
+
+ {
+ mach_port_gen_t gen;
+@@ -1095,8 +998,6 @@ mach_msg_trap(
+ assert((entry->ie_bits &~ IE_BITS_GEN_MASK) == 0);
+ gen = entry->ie_bits + IE_BITS_GEN_ONE;
+
+- reply_name = MACH_PORT_MAKE(index, gen);
+-
+ /* optimized ipc_right_copyout */
+
+ entry->ie_bits = gen | (MACH_PORT_TYPE_SEND_ONCE | 1);
+diff --git a/kern/exception.c b/kern/exception.c
+index 7954fba..6e84c0a 100644
+--- a/kern/exception.c
++++ b/kern/exception.c
+@@ -603,30 +603,18 @@ exception_raise(
+ ip_unlock(reply_port);
+
+ {
+- ipc_entry_t table;
++ kern_return_t kr;
+ ipc_entry_t entry;
+- mach_port_index_t index;
+-
+- /* optimized ipc_entry_get */
+-
+- table = space->is_table;
+- index = table->ie_next;
+
+- if (index == 0)
++ kr = ipc_entry_get (space, &exc->Head.msgh_remote_port, &entry);
++ if (kr)
+ goto abort_copyout;
+-
+- entry = &table[index];
+- table->ie_next = entry->ie_next;
+- entry->ie_request = 0;
+-
+ {
+ mach_port_gen_t gen;
+
+ assert((entry->ie_bits &~ IE_BITS_GEN_MASK) == 0);
+ gen = entry->ie_bits + IE_BITS_GEN_ONE;
+
+- exc->Head.msgh_remote_port = MACH_PORT_MAKE(index, gen);
+-
+ /* optimized ipc_right_copyout */
+
+ entry->ie_bits = gen | (MACH_PORT_TYPE_SEND_ONCE | 1);
+diff --git a/kern/ipc_mig.c b/kern/ipc_mig.c
+index cc61ec7..22dac42 100644
+--- a/kern/ipc_mig.c
++++ b/kern/ipc_mig.c
+@@ -310,16 +310,18 @@ mig_strncpy(dest, src, len)
+ MACRO_BEGIN \
+ ipc_space_t space = current_space(); \
+ ipc_entry_t entry; \
+- mach_port_index_t index = MACH_PORT_INDEX(name); \
+ \
+ is_read_lock(space); \
+ assert(space->is_active); \
+ \
+- if ((index >= space->is_table_size) || \
+- (((entry = &space->is_table[index])->ie_bits & \
+- (IE_BITS_GEN_MASK|MACH_PORT_TYPE_SEND)) != \
+- (MACH_PORT_GEN(name) | MACH_PORT_TYPE_SEND))) { \
+- is_read_unlock(space); \
++ entry = ipc_entry_lookup (space, name); \
++ if (entry == IE_NULL) { \
++ is_read_unlock (space); \
++ abort; \
++ } \
++ \
++ if (IE_BITS_TYPE (entry->ie_bits) != MACH_PORT_TYPE_SEND) { \
++ is_read_unlock (space); \
+ abort; \
+ } \
+ \
+--
+2.1.4
+
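+The hunks above all apply the same transformation: hand-inlined "optimized
+ipc_entry_lookup" and "optimized ipc_entry_dealloc" fast paths give way to
+calls to the real functions. A minimal sketch of the resulting shape,
+assembled from names appearing in this patch rather than quoted verbatim:
+
+	/* Consume a send-once right named dest_name in space; the
+	   index and generation checks now live in ipc_entry_lookup. */
+	ipc_entry_t entry;
+
+	entry = ipc_entry_lookup (space, dest_name);
+	if (entry == IE_NULL)
+		goto abort_reply;
+
+	if (IE_BITS_TYPE (entry->ie_bits) != MACH_PORT_TYPE_SEND_ONCE)
+		goto abort_reply;
+
+	/* ... transfer the right ... */
+
+	entry->ie_object = IO_NULL;
+	ipc_entry_dealloc (space, dest_name, entry);
+	is_write_unlock (space);
+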
diff --git a/debian/patches/0004-ipc-replace-reverse-hash-table-with-a-radix-tree.patch b/debian/patches/0004-ipc-replace-reverse-hash-table-with-a-radix-tree.patch
new file mode 100644
index 0000000..3da8481
--- /dev/null
+++ b/debian/patches/0004-ipc-replace-reverse-hash-table-with-a-radix-tree.patch
@@ -0,0 +1,440 @@
+From 2c12d9faa2623fa0b14dc5efb1fc1f1dd22e557b Mon Sep 17 00:00:00 2001
+From: Justus Winter <4winter@informatik.uni-hamburg.de>
+Date: Wed, 18 Mar 2015 12:25:26 +0100
+Subject: [PATCH gnumach 4/6] ipc: replace reverse hash table with a radix tree
+
+Currently, there is a hash table mapping (space, object) tuples to
+`ipc_entry' objects. This hash table is intertwined with the IPC
+tables: there is one hash table per IPC space, but it only covers the
+entries in the IPC table. This hash table is called `local' in the
+source. IPC entries that spill over into the splay tree are instead
+mapped by a global hash table.
+
+Replace the local (i.e. per IPC space) reverse hash table with a radix
+tree.
+
+* ipc/ipc_entry.c (ipc_entry_grow_table): Adjust accordingly.
+* ipc/ipc_entry.h (struct ipc_entry): Adjust comment.
+* ipc/ipc_hash.c: Adjust comment explaining the local lookup table.
+(IPC_LOCAL_HASH_INVARIANT): New macro.
+(ipc_hash_local_lookup): Use the new `ipc_reverse_lookup' function.
+(ipc_hash_local_insert): Use the new `ipc_reverse_insert' function.
+(ipc_hash_local_delete): Use the new `ipc_reverse_remove' function.
+* ipc/ipc_space.c (ipc_space_create): Initialize radix tree.
+(ipc_space_destroy): Free radix tree.
+* ipc/ipc_space.h (struct ipc_space): Add radix tree.
+(ipc_reverse_insert): New function.
+(ipc_reverse_remove): Likewise.
+(ipc_reverse_remove_all): Likewise.
+(ipc_reverse_lookup): Likewise.
+* ipc/ipc_right.c (ipc_right_clean): Update comment.
+---
+ ipc/ipc_entry.c | 5 +-
+ ipc/ipc_entry.h | 5 --
+ ipc/ipc_hash.c | 184 ++++++++------------------------------------------------
+ ipc/ipc_right.c | 2 +-
+ ipc/ipc_space.c | 3 +
+ ipc/ipc_space.h | 63 +++++++++++++++++++
+ 6 files changed, 94 insertions(+), 168 deletions(-)
+
+diff --git a/ipc/ipc_entry.c b/ipc/ipc_entry.c
+index e78f74e..5b9fd98 100644
+--- a/ipc/ipc_entry.c
++++ b/ipc/ipc_entry.c
+@@ -618,6 +618,7 @@ ipc_entry_grow_table(ipc_space_t space)
+ is_write_unlock(space);
+ thread_wakeup((event_t) space);
+ it_entries_free(its, table);
++ ipc_reverse_remove_all(space);
+ is_write_lock(space);
+ return KERN_SUCCESS;
+ }
+@@ -641,9 +642,6 @@ ipc_entry_grow_table(ipc_space_t space)
+ memcpy(table, otable,
+ osize * sizeof(struct ipc_entry));
+
+- for (i = 0; i < osize; i++)
+- table[i].ie_index = 0;
+-
+ (void) memset((void *) (table + osize), 0,
+ (size - osize) * sizeof(struct ipc_entry));
+
+@@ -651,6 +649,7 @@ ipc_entry_grow_table(ipc_space_t space)
+ * Put old entries into the reverse hash table.
+ */
+
++ ipc_reverse_remove_all(space);
+ for (i = 0; i < osize; i++) {
+ ipc_entry_t entry = &table[i];
+
+diff --git a/ipc/ipc_entry.h b/ipc/ipc_entry.h
+index cb6d3f9..0caa70b 100644
+--- a/ipc/ipc_entry.h
++++ b/ipc/ipc_entry.h
+@@ -54,11 +54,6 @@
+ * those entries. The cutoff point between the table and the tree
+ * is adjusted dynamically to minimize memory consumption.
+ *
+- * The ie_index field of entries in the table implements
+- * a ordered hash table with open addressing and linear probing.
+- * This hash table converts (space, object) -> name.
+- * It is used independently of the other fields.
+- *
+ * Free (unallocated) entries in the table have null ie_object
+ * fields. The ie_bits field is zero except for IE_BITS_GEN.
+ * The ie_next (ie_request) field links free entries into a free list.
+diff --git a/ipc/ipc_hash.c b/ipc/ipc_hash.c
+index c2c6d6e..87952a7 100644
+--- a/ipc/ipc_hash.c
++++ b/ipc/ipc_hash.c
+@@ -296,37 +296,19 @@ ipc_hash_global_delete(
+ }
+
+ /*
+- * Each space has a local reverse hash table, which holds
+- * entries from the space's table. In fact, the hash table
+- * just uses a field (ie_index) in the table itself.
+- *
+- * The local hash table is an open-addressing hash table,
+- * which means that when a collision occurs, instead of
+- * throwing the entry into a bucket, the entry is rehashed
+- * to another position in the table. In this case the rehash
+- * is very simple: linear probing (ie, just increment the position).
+- * This simple rehash makes deletions tractable (they're still a pain),
+- * but it means that collisions tend to build up into clumps.
+- *
+- * Because at least one entry in the table (index 0) is always unused,
+- * there will always be room in the reverse hash table. If a table
+- * with n slots gets completely full, the reverse hash table will
+- * have one giant clump of n-1 slots and one free slot somewhere.
+- * Because entries are only entered into the reverse table if they
+- * are pure send rights (not receive, send-once, port-set,
+- * or dead-name rights), and free entries of course aren't entered,
+- * I expect the reverse hash table won't get unreasonably full.
+- *
+- * Ordered hash tables (Amble & Knuth, Computer Journal, v. 17, no. 2,
+- * pp. 135-142.) may be desirable here. They can dramatically help
+- * unsuccessful lookups. But unsuccessful lookups are almost always
+- * followed by insertions, and those slow down somewhat. They
+- * also can help deletions somewhat. Successful lookups aren't affected.
+- * So possibly a small win; probably nothing significant.
++ * Each space has a local reverse mapping that maps objects to the
++ * entries in the space's table. This used to be a hash table.
+ */
+
+-#define IH_LOCAL_HASH(obj, size) \
+- ((((mach_port_index_t) (vm_offset_t) (obj)) >> 6) & (size - 1))
++#define IPC_LOCAL_HASH_INVARIANT(S, O, N, E) \
++ MACRO_BEGIN \
++ assert(IE_BITS_TYPE((E)->ie_bits) == MACH_PORT_TYPE_SEND || \
++ IE_BITS_TYPE((E)->ie_bits) == MACH_PORT_TYPE_SEND_RECEIVE || \
++ IE_BITS_TYPE((E)->ie_bits) == MACH_PORT_TYPE_NONE); \
++ assert((E)->ie_object == (O)); \
++ assert((E)->ie_index == (N)); \
++ assert(&(S)->is_table[N] == (E)); \
++ MACRO_END
+
+ /*
+ * Routine: ipc_hash_local_lookup
+@@ -345,37 +327,15 @@ ipc_hash_local_lookup(
+ mach_port_t *namep,
+ ipc_entry_t *entryp)
+ {
+- ipc_entry_t table;
+- ipc_entry_num_t size;
+- mach_port_index_t hindex, index;
+-
+ assert(space != IS_NULL);
+ assert(obj != IO_NULL);
+
+- table = space->is_table;
+- size = space->is_table_size;
+- hindex = IH_LOCAL_HASH(obj, size);
+-
+- /*
+- * Ideally, table[hindex].ie_index is the name we want.
+- * However, must check ie_object to verify this,
+- * because collisions can happen. In case of a collision,
+- * search farther along in the clump.
+- */
+-
+- while ((index = table[hindex].ie_index) != 0) {
+- ipc_entry_t entry = &table[index];
+-
+- if (entry->ie_object == obj) {
+- *namep = MACH_PORT_MAKEB(index, entry->ie_bits);
+- *entryp = entry;
+- return TRUE;
+- }
+-
+- if (++hindex == size)
+- hindex = 0;
++ *entryp = ipc_reverse_lookup(space, obj);
++ if (*entryp != IE_NULL) {
++ *namep = (*entryp)->ie_index;
++ IPC_LOCAL_HASH_INVARIANT(space, obj, *namep, *entryp);
++ return TRUE;
+ }
+-
+ return FALSE;
+ }
+
+@@ -394,33 +354,15 @@ ipc_hash_local_insert(
+ mach_port_index_t index,
+ ipc_entry_t entry)
+ {
+- ipc_entry_t table;
+- ipc_entry_num_t size;
+- mach_port_index_t hindex;
+-
++ kern_return_t kr;
+ assert(index != 0);
+ assert(space != IS_NULL);
+ assert(obj != IO_NULL);
+
+- table = space->is_table;
+- size = space->is_table_size;
+- hindex = IH_LOCAL_HASH(obj, size);
+-
+- assert(entry == &table[index]);
+- assert(entry->ie_object == obj);
+-
+- /*
+- * We want to insert at hindex, but there may be collisions.
+- * If a collision occurs, search for the end of the clump
+- * and insert there.
+- */
+-
+- while (table[hindex].ie_index != 0) {
+- if (++hindex == size)
+- hindex = 0;
+- }
+-
+- table[hindex].ie_index = index;
++ entry->ie_index = index;
++ IPC_LOCAL_HASH_INVARIANT(space, obj, index, entry);
++ kr = ipc_reverse_insert(space, obj, entry);
++ assert(kr == 0);
+ }
+
+ /*
+@@ -438,90 +380,14 @@ ipc_hash_local_delete(
+ mach_port_index_t index,
+ ipc_entry_t entry)
+ {
+- ipc_entry_t table;
+- ipc_entry_num_t size;
+- mach_port_index_t hindex, dindex;
+-
++ ipc_entry_t removed;
+ assert(index != MACH_PORT_NULL);
+ assert(space != IS_NULL);
+ assert(obj != IO_NULL);
+
+- table = space->is_table;
+- size = space->is_table_size;
+- hindex = IH_LOCAL_HASH(obj, size);
+-
+- assert(entry == &table[index]);
+- assert(entry->ie_object == obj);
+-
+- /*
+- * First check we have the right hindex for this index.
+- * In case of collision, we have to search farther
+- * along in this clump.
+- */
+-
+- while (table[hindex].ie_index != index) {
+- if (table[hindex].ie_index == 0)
+- {
+- static int gak = 0;
+- if (gak == 0)
+- {
+- printf("gak! entry wasn't in hash table!\n");
+- gak = 1;
+- }
+- return;
+- }
+- if (++hindex == size)
+- hindex = 0;
+- }
+-
+- /*
+- * Now we want to set table[hindex].ie_index = 0.
+- * But if we aren't the last index in a clump,
+- * this might cause problems for lookups of objects
+- * farther along in the clump that are displaced
+- * due to collisions. Searches for them would fail
+- * at hindex instead of succeeding.
+- *
+- * So we must check the clump after hindex for objects
+- * that are so displaced, and move one up to the new hole.
+- *
+- * hindex - index of new hole in the clump
+- * dindex - index we are checking for a displaced object
+- *
+- * When we move a displaced object up into the hole,
+- * it creates a new hole, and we have to repeat the process
+- * until we get to the end of the clump.
+- */
+-
+- for (dindex = hindex; index != 0; hindex = dindex) {
+- for (;;) {
+- mach_port_index_t tindex;
+- ipc_object_t tobj;
+-
+- if (++dindex == size)
+- dindex = 0;
+- assert(dindex != hindex);
+-
+- /* are we at the end of the clump? */
+-
+- index = table[dindex].ie_index;
+- if (index == 0)
+- break;
+-
+- /* is this a displaced object? */
+-
+- tobj = table[index].ie_object;
+- assert(tobj != IO_NULL);
+- tindex = IH_LOCAL_HASH(tobj, size);
+-
+- if ((dindex < hindex) ?
+- ((dindex < tindex) && (tindex <= hindex)) :
+- ((dindex < tindex) || (tindex <= hindex)))
+- break;
+- }
+-
+- table[hindex].ie_index = index;
+- }
++ IPC_LOCAL_HASH_INVARIANT(space, obj, index, entry);
++ removed = ipc_reverse_remove(space, obj);
++ assert(removed == entry);
+ }
+
+ /*
+diff --git a/ipc/ipc_right.c b/ipc/ipc_right.c
+index 503eb1f..35061c9 100644
+--- a/ipc/ipc_right.c
++++ b/ipc/ipc_right.c
+@@ -423,7 +423,7 @@ ipc_right_check(
+ * Purpose:
+ * Cleans up an entry in a dead space.
+ * The entry isn't deallocated or removed
+- * from reverse hash tables.
++ * from the reverse mappings.
+ * Conditions:
+ * The space is dead and unlocked.
+ */
+diff --git a/ipc/ipc_space.c b/ipc/ipc_space.c
+index ab55e83..dcc96b3 100644
+--- a/ipc/ipc_space.c
++++ b/ipc/ipc_space.c
+@@ -148,6 +148,7 @@ ipc_space_create(
+ space->is_tree_total = 0;
+ space->is_tree_small = 0;
+ space->is_tree_hash = 0;
++ rdxtree_init(&space->is_reverse_map);
+
+ *spacep = space;
+ return KERN_SUCCESS;
+@@ -271,6 +272,8 @@ ipc_space_destroy(
+ }
+ ipc_splay_traverse_finish(&space->is_tree);
+
++ rdxtree_remove_all(&space->is_reverse_map);
++
+ /*
+ * Because the space is now dead,
+ * we must release the "active" reference for it.
+diff --git a/ipc/ipc_space.h b/ipc/ipc_space.h
+index 3bd2f4d..f41c64c 100644
+--- a/ipc/ipc_space.h
++++ b/ipc/ipc_space.h
+@@ -42,8 +42,10 @@
+ #include <mach/boolean.h>
+ #include <mach/kern_return.h>
+ #include <mach/mach_types.h>
++#include <machine/vm_param.h>
+ #include <kern/macros.h>
+ #include <kern/lock.h>
++#include <kern/rdxtree.h>
+ #include <kern/slab.h>
+ #include <ipc/ipc_splay.h>
+ #include <ipc/ipc_types.h>
+@@ -79,6 +81,8 @@ struct ipc_space {
+ ipc_entry_num_t is_tree_total; /* number of entries in the tree */
+ ipc_entry_num_t is_tree_small; /* # of small entries in the tree */
+ ipc_entry_num_t is_tree_hash; /* # of hashed entries in the tree */
++ struct rdxtree is_reverse_map; /* maps objects to entries */
++
+ };
+
+ #define IS_NULL ((ipc_space_t) 0)
+@@ -135,4 +139,63 @@ kern_return_t ipc_space_create(ipc_table_size_t, ipc_space_t *);
+ kern_return_t ipc_space_create_special(struct ipc_space **);
+ void ipc_space_destroy(struct ipc_space *);
+
++/* Reverse lookups. */
++
++/* Cast a pointer to a suitable key. */
++#define KEY(X) \
++ ({ \
++ assert((((unsigned long) (X)) & 0x07) == 0); \
++ ((unsigned long long) \
++ (((unsigned long) (X) - VM_MIN_KERNEL_ADDRESS) >> 3)); \
++ })
++
++/* Insert (OBJ, ENTRY) pair into the reverse mapping. SPACE must
++ be write-locked. */
++static inline kern_return_t
++ipc_reverse_insert(ipc_space_t space,
++ ipc_object_t obj,
++ ipc_entry_t entry)
++{
++ assert(space != IS_NULL);
++ assert(obj != IO_NULL);
++ return (kern_return_t) rdxtree_insert(&space->is_reverse_map,
++ KEY(obj), entry);
++}
++
++/* Remove OBJ from the reverse mapping. SPACE must be
++ write-locked. */
++static inline ipc_entry_t
++ipc_reverse_remove(ipc_space_t space,
++ ipc_object_t obj)
++{
++ assert(space != IS_NULL);
++ assert(obj != IO_NULL);
++ return rdxtree_remove(&space->is_reverse_map, KEY(obj));
++}
++
++/* Remove all entries from the reverse mapping. SPACE must be
++ write-locked. */
++static inline void
++ipc_reverse_remove_all(ipc_space_t space)
++{
++ assert(space != IS_NULL);
++ rdxtree_remove_all(&space->is_reverse_map);
++ assert(space->is_reverse_map.height == 0);
++ assert(space->is_reverse_map.root == NULL);
++}
++
++/* Return ENTRY related to OBJ, or NULL if no such entry is found in
++ the reverse mapping. SPACE must be read-locked or
++ write-locked. */
++static inline ipc_entry_t
++ipc_reverse_lookup(ipc_space_t space,
++ ipc_object_t obj)
++{
++ assert(space != IS_NULL);
++ assert(obj != IO_NULL);
++ return rdxtree_lookup(&space->is_reverse_map, KEY(obj));
++}
++
++#undef KEY
++
+ #endif /* _IPC_IPC_SPACE_H_ */
+--
+2.1.4
+
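+Taken together, the helpers above give the reverse map a small,
+self-contained interface. A minimal usage sketch, with the space
+write-locked as the comments require (illustrative only; obj and entry
+stand for any object/entry pair):
+
+	/* Record obj -> entry so (space, object) lookups succeed. */
+	kern_return_t kr = ipc_reverse_insert (space, obj, entry);
+	assert (kr == 0);
+
+	/* Find the entry holding a right for obj; ie_index holds the name. */
+	ipc_entry_t e = ipc_reverse_lookup (space, obj);
+	assert (e == IE_NULL || e->ie_object == obj);
+
+	/* Drop the mapping when the right goes away. */
+	ipc_entry_t removed = ipc_reverse_remove (space, obj);
+	assert (removed == entry);
+
+The KEY macro is what keeps this efficient: ipc_object pointers are
+8-byte-aligned kernel addresses, so subtracting VM_MIN_KERNEL_ADDRESS and
+shifting right by three yields small, dense keys, the case a radix tree
+stores most compactly.
+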
diff --git a/debian/patches/0005-ipc-replace-the-IPC-table-with-a-radix-tree.patch b/debian/patches/0005-ipc-replace-the-IPC-table-with-a-radix-tree.patch
new file mode 100644
index 0000000..9073ded
--- /dev/null
+++ b/debian/patches/0005-ipc-replace-the-IPC-table-with-a-radix-tree.patch
@@ -0,0 +1,3803 @@
+From 37b32140d68d55b0ff761b722339092d0fab5074 Mon Sep 17 00:00:00 2001
+From: Justus Winter <4winter@informatik.uni-hamburg.de>
+Date: Fri, 20 Mar 2015 00:21:14 +0100
+Subject: [PATCH gnumach 5/6] ipc: replace the IPC table with a radix tree
+
+Currently, port names are mapped to an IPC object (e.g. a port)
+using a table. This, however, requires large chunks of contiguous
+memory, and leads to scalability problems as virtual kernel memory is
+a scarce resource. To avoid excessive overhead, non-contiguous port
+names are spilled into a splay tree.
+
+Replace the IPC table with a radix tree. As the radix tree is able to
+store non-contiguous names with reasonable overhead, we can drop the
+splay tree as well.
+
+* ipc/ipc_entry.c (ipc_entry_tree_collision): Remove function.
+(ipc_entry_cache): New variable.
+(ipc_entry_lookup): Replace with a radix tree lookup.
+(ipc_entry_get): The free list handling has changed slightly. Adapt
+accordingly.
+(ipc_entry_free_name): New function.
+(ipc_entry_alloc): Adapt accordingly.
+(ipc_entry_alloc_name): Likewise.
+(ipc_entry_dealloc): Likewise.
+(ipc_entry_grow_table): Remove function.
+* ipc/ipc_entry.h (struct ipc_entry): Update comment, add field for
+name and free list, remove unused fields.
+(ipc_entry_cache, ie_alloc, ie_free): New declarations.
+(struct ipc_tree_entry): Remove. Also remove any related declarations.
+(ipc_entry_grow_table): Remove declaration.
+* ipc/ipc_init.c (ipc_bootstrap): Adapt initialization.
+* ipc/ipc_kmsg.c (ipc_kmsg_copyout_header): Use `ipc_entry_alloc'
+instead of re-coding it. Adapt the free list handling.
+(ipc_kmsg_copyout_object): Adapt the free list handling, store the name.
+* ipc/ipc_object.c (ipc_object_copyout): Likewise.
+(ipc_object_copyout_multiname): Likewise.
+* ipc/ipc_space.c (ipc_space_create): Initialize radix tree and free list.
+Drop table and splay tree initialization.
+(ipc_space_destroy): Free ipc entries and radix tree, remove table and
+splay tree cleanup.
+* ipc/ipc_space.h (struct ipc_space): Add radix tree, free list, and size.
+Remove all fields related to the table and splay tree.
+* ddb/db_print.c (db_port_iterate): Adapt iteration.
+(db_lookup_port): Adapt lookup.
+* include/mach_debug/ipc_info.h: Remove unused parts of the debug interface.
+* include/mach_debug/mach_debug.defs: Likewise.
+* include/mach_debug/mach_debug_types.defs: Likewise.
+* ipc/mach_debug.c: Likewise.
+* ipc/ipc_right.c (ipc_right_reverse): Adapt lookup, store name.
+(ipc_right_check): Adapt removal.
+(ipc_right_destroy): Likewise.
+(ipc_right_dealloc): Likewise.
+(ipc_right_delta): Likewise.
+(ipc_right_copyin): Adapt insertion and removal.
+(ipc_right_copyin_two): Adapt removal.
+(ipc_right_copyout): Adapt insertion and removal.
+(ipc_right_rename): Likewise; also update comment.
+* ipc/mach_port.c (mach_port_names): Adapt iteration.
+(mach_port_get_set_status): Likewise.
+* ipc/port.h: Update comment.
+* ipc/ipc_hash.c: Delete file.
+* ipc/ipc_hash.h: Likewise.
+* ipc/ipc_splay.c: Likewise.
+* ipc/ipc_splay.h: Likewise.
+* Makefrag.am (libkernel_a_SOURCES): Remove these files.
+---
+ Makefrag.am | 4 -
+ ddb/db_print.c | 20 +-
+ include/mach_debug/ipc_info.h | 23 -
+ include/mach_debug/mach_debug.defs | 21 +-
+ include/mach_debug/mach_debug_types.defs | 7 +-
+ ipc/ipc_entry.c | 720 ++++--------------------
+ ipc/ipc_entry.h | 55 +-
+ ipc/ipc_hash.c | 486 ----------------
+ ipc/ipc_hash.h | 96 ----
+ ipc/ipc_init.c | 6 +-
+ ipc/ipc_kmsg.c | 32 +-
+ ipc/ipc_object.c | 23 +-
+ ipc/ipc_right.c | 39 +-
+ ipc/ipc_space.c | 104 +---
+ ipc/ipc_space.h | 18 +-
+ ipc/ipc_splay.c | 920 -------------------------------
+ ipc/ipc_splay.h | 114 ----
+ ipc/mach_debug.c | 325 -----------
+ ipc/mach_port.c | 60 +-
+ ipc/port.h | 5 +-
+ 20 files changed, 198 insertions(+), 2880 deletions(-)
+ delete mode 100644 ipc/ipc_hash.c
+ delete mode 100644 ipc/ipc_hash.h
+ delete mode 100644 ipc/ipc_splay.c
+ delete mode 100644 ipc/ipc_splay.h
+
+diff --git a/Makefrag.am b/Makefrag.am
+index 2eb835e..023a4d1 100644
+--- a/Makefrag.am
++++ b/Makefrag.am
+@@ -80,8 +80,6 @@ endif
+ libkernel_a_SOURCES += \
+ ipc/ipc_entry.c \
+ ipc/ipc_entry.h \
+- ipc/ipc_hash.c \
+- ipc/ipc_hash.h \
+ ipc/ipc_init.c \
+ ipc/ipc_init.h \
+ ipc/ipc_kmsg.c \
+@@ -105,8 +103,6 @@ libkernel_a_SOURCES += \
+ ipc/ipc_right.h \
+ ipc/ipc_space.c \
+ ipc/ipc_space.h \
+- ipc/ipc_splay.c \
+- ipc/ipc_splay.h \
+ ipc/ipc_table.c \
+ ipc/ipc_table.h \
+ ipc/ipc_target.c \
+diff --git a/ddb/db_print.c b/ddb/db_print.c
+index 24a3e33..fb4efaa 100644
+--- a/ddb/db_print.c
++++ b/ddb/db_print.c
+@@ -439,17 +439,11 @@ db_port_iterate(thread, func)
+ void (*func)();
+ {
+ ipc_entry_t entry;
+- int index;
+ int n = 0;
+- int size;
+- ipc_space_t space;
+-
+- space = thread->task->itk_space;
+- entry = space->is_table;
+- size = space->is_table_size;
+- for (index = 0; index < size; index++, entry++) {
++ struct rdxtree_iter iter;
++ rdxtree_for_each(&thread->task->itk_space->is_map, &iter, entry) {
+ if (entry->ie_bits & MACH_PORT_TYPE_PORT_RIGHTS)
+- (*func)(index, (ipc_port_t) entry->ie_object,
++ (*func)(entry->ie_name, (ipc_port_t) entry->ie_object,
+ entry->ie_bits, n++);
+ }
+ return(n);
+@@ -460,16 +454,14 @@ db_lookup_port(
+ thread_t thread,
+ int id)
+ {
+- ipc_space_t space;
+ ipc_entry_t entry;
+
+ if (thread == THREAD_NULL)
+ return(0);
+- space = thread->task->itk_space;
+- if (id < 0 || id >= space->is_table_size)
++ if (id < 0)
+ return(0);
+- entry = &space->is_table[id];
+- if (entry->ie_bits & MACH_PORT_TYPE_PORT_RIGHTS)
++ entry = ipc_entry_lookup(thread->task->itk_space, (mach_port_t) id);
++ if (entry && entry->ie_bits & MACH_PORT_TYPE_PORT_RIGHTS)
+ return((ipc_port_t)entry->ie_object);
+ return(0);
+ }
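+
+The db_port_iterate hunk shows the new iteration idiom: instead of walking
+table indices from 0 to is_table_size, consumers traverse the radix tree
+directly. A minimal sketch, assuming a locked space pointer; handle_port
+is a hypothetical callback, not part of the patch:
+
+	ipc_entry_t entry;
+	struct rdxtree_iter iter;
+
+	rdxtree_for_each (&space->is_map, &iter, entry) {
+		/* entry->ie_name replaces the old table index. */
+		if (entry->ie_bits & MACH_PORT_TYPE_PORT_RIGHTS)
+			handle_port (entry->ie_name,
+				     (ipc_port_t) entry->ie_object);
+	}
+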
+diff --git a/include/mach_debug/ipc_info.h b/include/mach_debug/ipc_info.h
+index ef0b0c6..a47ae7b 100644
+--- a/include/mach_debug/ipc_info.h
++++ b/include/mach_debug/ipc_info.h
+@@ -43,40 +43,17 @@
+ * in mach_debug_types.defs when adding/removing fields.
+ */
+
+-
+-typedef struct ipc_info_space {
+- natural_t iis_genno_mask; /* generation number mask */
+- natural_t iis_table_size; /* size of table */
+- natural_t iis_table_next; /* next possible size of table */
+- natural_t iis_tree_size; /* size of tree */
+- natural_t iis_tree_small; /* # of small entries in tree */
+- natural_t iis_tree_hash; /* # of hashed entries in tree */
+-} ipc_info_space_t;
+-
+-
+ typedef struct ipc_info_name {
+ mach_port_t iin_name; /* port name, including gen number */
+-/*boolean_t*/integer_t iin_collision; /* collision at this entry? */
+-/*boolean_t*/integer_t iin_compat; /* is this a compat-mode entry? */
+ /*boolean_t*/integer_t iin_marequest; /* extant msg-accepted request? */
+ mach_port_type_t iin_type; /* straight port type */
+ mach_port_urefs_t iin_urefs; /* user-references */
+ vm_offset_t iin_object; /* object pointer */
+ natural_t iin_next; /* marequest/next in free list */
+- natural_t iin_hash; /* hash index */
+ } ipc_info_name_t;
+
+ typedef ipc_info_name_t *ipc_info_name_array_t;
+
+-
+-typedef struct ipc_info_tree_name {
+- ipc_info_name_t iitn_name;
+- mach_port_t iitn_lchild; /* name of left child */
+- mach_port_t iitn_rchild; /* name of right child */
+-} ipc_info_tree_name_t;
+-
+-typedef ipc_info_tree_name_t *ipc_info_tree_name_array_t;
+-
+ /*
+ * Type definitions for mach_port_kernel_object.
+ * By remarkable coincidence, these closely resemble
+diff --git a/include/mach_debug/mach_debug.defs b/include/mach_debug/mach_debug.defs
+index fb6e3a9..c8e8b1b 100644
+--- a/include/mach_debug/mach_debug.defs
++++ b/include/mach_debug/mach_debug.defs
+@@ -57,14 +57,7 @@ routine mach_port_get_srights(
+ name : mach_port_name_t;
+ out srights : mach_port_rights_t);
+
+-/*
+- * Returns information about the global reverse hash table.
+- */
+-
+-routine host_ipc_hash_info(
+- host : host_t;
+- out info : hash_info_bucket_array_t,
+- CountInOut, Dealloc);
++skip; /* host_ipc_hash_info */
+
+ /*
+ * Returns information about the marequest hash table.
+@@ -76,17 +69,7 @@ routine host_ipc_marequest_info(
+ out info : hash_info_bucket_array_t,
+ CountInOut, Dealloc);
+
+-/*
+- * Returns information about an IPC space.
+- */
+-
+-routine mach_port_space_info(
+- task : ipc_space_t;
+- out info : ipc_info_space_t;
+- out table_info : ipc_info_name_array_t,
+- CountInOut, Dealloc;
+- out tree_info : ipc_info_tree_name_array_t,
+- CountInOut, Dealloc);
++skip; /* mach_port_space_info */
+
+ /*
+ * Returns information about the dead-name requests
+diff --git a/include/mach_debug/mach_debug_types.defs b/include/mach_debug/mach_debug_types.defs
+index d24b6f9..8df2f34 100644
+--- a/include/mach_debug/mach_debug_types.defs
++++ b/include/mach_debug/mach_debug_types.defs
+@@ -38,14 +38,9 @@ type cache_info_array_t = array[] of cache_info_t;
+ type hash_info_bucket_t = struct[1] of natural_t;
+ type hash_info_bucket_array_t = array[] of hash_info_bucket_t;
+
+-type ipc_info_space_t = struct[6] of natural_t;
+-
+-type ipc_info_name_t = struct[9] of natural_t;
++type ipc_info_name_t = struct[6] of natural_t;
+ type ipc_info_name_array_t = array[] of ipc_info_name_t;
+
+-type ipc_info_tree_name_t = struct[11] of natural_t;
+-type ipc_info_tree_name_array_t = array[] of ipc_info_tree_name_t;
+-
+ type vm_region_info_t = struct[11] of natural_t;
+ type vm_region_info_array_t = array[] of vm_region_info_t;
+
+diff --git a/ipc/ipc_entry.c b/ipc/ipc_entry.c
+index 5b9fd98..2d6e665 100644
+--- a/ipc/ipc_entry.c
++++ b/ipc/ipc_entry.c
+@@ -46,45 +46,10 @@
+ #include <ipc/ipc_types.h>
+ #include <ipc/ipc_entry.h>
+ #include <ipc/ipc_space.h>
+-#include <ipc/ipc_splay.h>
+-#include <ipc/ipc_hash.h>
+ #include <ipc/ipc_table.h>
+ #include <ipc/ipc_object.h>
+
+-struct kmem_cache ipc_tree_entry_cache;
+-
+-/*
+- * Routine: ipc_entry_tree_collision
+- * Purpose:
+- * Checks if "name" collides with an allocated name
+- * in the space's tree. That is, returns TRUE
+- * if the splay tree contains a name with the same
+- * index as "name".
+- * Conditions:
+- * The space is locked (read or write) and active.
+- */
+-
+-boolean_t
+-ipc_entry_tree_collision(
+- ipc_space_t space,
+- mach_port_t name)
+-{
+- mach_port_index_t index;
+- mach_port_t lower, upper;
+-
+- assert(space->is_active);
+-
+- /*
+- * Check if we collide with the next smaller name
+- * or the next larger name.
+- */
+-
+- ipc_splay_tree_bounds(&space->is_tree, name, &lower, &upper);
+-
+- index = MACH_PORT_INDEX(name);
+- return (((lower != ~0) && (MACH_PORT_INDEX(lower) == index)) ||
+- ((upper != 0) && (MACH_PORT_INDEX(upper) == index)));
+-}
++struct kmem_cache ipc_entry_cache;
+
+ /*
+ * Routine: ipc_entry_lookup
+@@ -100,29 +65,13 @@ ipc_entry_lookup(
+ ipc_space_t space,
+ mach_port_t name)
+ {
+- mach_port_index_t index;
+ ipc_entry_t entry;
+
+ assert(space->is_active);
+-
+- index = MACH_PORT_INDEX(name);
+- if (index < space->is_table_size) {
+- entry = &space->is_table[index];
+- if (IE_BITS_GEN(entry->ie_bits) != MACH_PORT_GEN(name))
+- if (entry->ie_bits & IE_BITS_COLLISION) {
+- assert(space->is_tree_total > 0);
+- goto tree_lookup;
+- } else
+- entry = IE_NULL;
+- else if (IE_BITS_TYPE(entry->ie_bits) == MACH_PORT_TYPE_NONE)
+- entry = IE_NULL;
+- } else if (space->is_tree_total == 0)
+- entry = IE_NULL;
+- else
+- tree_lookup:
+- entry = (ipc_entry_t)
+- ipc_splay_tree_lookup(&space->is_tree, name);
+-
++ entry = rdxtree_lookup(&space->is_map, (rdxtree_key_t) name);
++ if (entry != IE_NULL
++ && IE_BITS_TYPE(entry->ie_bits) == MACH_PORT_TYPE_NONE)
++ entry = NULL;
+ assert((entry == IE_NULL) || IE_BITS_TYPE(entry->ie_bits));
+ return entry;
+ }
+@@ -145,21 +94,18 @@ ipc_entry_get(
+ mach_port_t *namep,
+ ipc_entry_t *entryp)
+ {
+- ipc_entry_t table;
+- mach_port_index_t first_free;
+ mach_port_t new_name;
+ ipc_entry_t free_entry;
+
+ assert(space->is_active);
+
+- table = space->is_table;
+- first_free = table->ie_next;
+-
+- if (first_free == 0)
++ /* Get entry from the free list. */
++ free_entry = space->is_free_list;
++ if (free_entry == IE_NULL)
+ return KERN_NO_SPACE;
+
+- free_entry = &table[first_free];
+- table->ie_next = free_entry->ie_next;
++ space->is_free_list = free_entry->ie_next_free;
++ space->is_free_list_size -= 1;
+
+ /*
+ * Initialize the new entry. We need only
+@@ -173,7 +119,7 @@ ipc_entry_get(
+ gen = free_entry->ie_bits + IE_BITS_GEN_ONE;
+ free_entry->ie_bits = gen;
+ free_entry->ie_request = 0;
+- new_name = MACH_PORT_MAKE(first_free, gen);
++ new_name = MACH_PORT_MAKE(free_entry->ie_name, gen);
+ }
+
+ /*
+@@ -186,6 +132,7 @@ ipc_entry_get(
+ assert(MACH_PORT_VALID(new_name));
+ assert(free_entry->ie_object == IO_NULL);
+
++ space->is_size += 1;
+ *namep = new_name;
+ *entryp = free_entry;
+ return KERN_SUCCESS;
+@@ -212,23 +159,44 @@ ipc_entry_alloc(
+ ipc_entry_t *entryp)
+ {
+ kern_return_t kr;
++ ipc_entry_t entry;
++ rdxtree_key_t key;
+
+ is_write_lock(space);
+
+- for (;;) {
+- if (!space->is_active) {
+- is_write_unlock(space);
+- return KERN_INVALID_TASK;
+- }
++ if (!space->is_active) {
++ is_write_unlock(space);
++ return KERN_INVALID_TASK;
++ }
+
+- kr = ipc_entry_get(space, namep, entryp);
+- if (kr == KERN_SUCCESS)
+- return kr;
++ kr = ipc_entry_get(space, namep, entryp);
++ if (kr == KERN_SUCCESS)
++ /* Success. Space is write-locked. */
++ return kr;
+
+- kr = ipc_entry_grow_table(space);
+- if (kr != KERN_SUCCESS)
+- return kr; /* space is unlocked */
++ entry = ie_alloc();
++ if (entry == IE_NULL) {
++ is_write_unlock(space);
++ return KERN_RESOURCE_SHORTAGE;
++ }
++
++ kr = rdxtree_insert_alloc(&space->is_map, entry, &key);
++ if (kr) {
++ is_write_unlock(space);
++ ie_free(entry);
++ return kr;
+ }
++ space->is_size += 1;
++
++ entry->ie_bits = 0;
++ entry->ie_object = IO_NULL;
++ entry->ie_request = 0;
++ entry->ie_name = (mach_port_t) key;
++
++ *entryp = entry;
++ *namep = (mach_port_t) key;
++ /* Success. Space is write-locked. */
++ return KERN_SUCCESS;
+ }
+
+ /*
+@@ -252,166 +220,77 @@ ipc_entry_alloc_name(
+ mach_port_t name,
+ ipc_entry_t *entryp)
+ {
+- mach_port_index_t index = MACH_PORT_INDEX(name);
+- mach_port_gen_t gen = MACH_PORT_GEN(name);
+- ipc_tree_entry_t tree_entry = ITE_NULL;
+-
++ kern_return_t kr;
++ ipc_entry_t entry, e, *prevp;
++ void **slot;
+ assert(MACH_PORT_VALID(name));
+
+-
+ is_write_lock(space);
+
+- for (;;) {
+- ipc_entry_t entry;
+- ipc_tree_entry_t tentry;
+- ipc_table_size_t its;
++ if (!space->is_active) {
++ is_write_unlock(space);
++ return KERN_INVALID_TASK;
++ }
++
++ slot = rdxtree_lookup_slot(&space->is_map, (rdxtree_key_t) name);
++ if (slot != NULL)
++ entry = *(ipc_entry_t *) slot;
+
+- if (!space->is_active) {
++ if (slot == NULL || entry == IE_NULL) {
++ entry = ie_alloc();
++ if (entry == IE_NULL) {
+ is_write_unlock(space);
+- if (tree_entry) ite_free(tree_entry);
+- return KERN_INVALID_TASK;
+- }
+-
+- /*
+- * If we are under the table cutoff,
+- * there are three cases:
+- * 1) The entry is inuse, for the same name
+- * 2) The entry is inuse, for a different name
+- * 3) The entry is free
+- */
+-
+- if ((0 < index) && (index < space->is_table_size)) {
+- ipc_entry_t table = space->is_table;
+-
+- entry = &table[index];
+-
+- if (IE_BITS_TYPE(entry->ie_bits)) {
+- if (IE_BITS_GEN(entry->ie_bits) == gen) {
+- *entryp = entry;
+- if (tree_entry) ite_free(tree_entry);
+- return KERN_SUCCESS;
+- }
+- } else {
+- mach_port_index_t free_index, next_index;
+-
+- /*
+- * Rip the entry out of the free list.
+- */
+-
+- for (free_index = 0;
+- (next_index = table[free_index].ie_next)
+- != index;
+- free_index = next_index)
+- continue;
+-
+- table[free_index].ie_next =
+- table[next_index].ie_next;
+-
+- entry->ie_bits = gen;
+- assert(entry->ie_object == IO_NULL);
+- entry->ie_request = 0;
+-
+- *entryp = entry;
+- if (tree_entry) ite_free(tree_entry);
+- return KERN_SUCCESS;
+- }
++ return KERN_RESOURCE_SHORTAGE;
+ }
+
+- /*
+- * Before trying to allocate any memory,
+- * check if the entry already exists in the tree.
+- * This avoids spurious resource errors.
+- * The splay tree makes a subsequent lookup/insert
+- * of the same name cheap, so this costs little.
+- */
+-
+- if ((space->is_tree_total > 0) &&
+- ((tentry = ipc_splay_tree_lookup(&space->is_tree, name))
+- != ITE_NULL)) {
+- assert(tentry->ite_space == space);
+- assert(IE_BITS_TYPE(tentry->ite_bits));
+-
+- *entryp = &tentry->ite_entry;
+- if (tree_entry) ite_free(tree_entry);
+- return KERN_SUCCESS;
+- }
++ entry->ie_bits = 0;
++ entry->ie_object = IO_NULL;
++ entry->ie_request = 0;
++ entry->ie_name = name;
+
+- its = space->is_table_next;
+-
+- /*
+- * Check if the table should be grown.
+- *
+- * Note that if space->is_table_size == its->its_size,
+- * then we won't ever try to grow the table.
+- *
+- * Note that we are optimistically assuming that name
+- * doesn't collide with any existing names. (So if
+- * it were entered into the tree, is_tree_small would
+- * be incremented.) This is OK, because even in that
+- * case, we don't lose memory by growing the table.
+- */
+-
+- if ((space->is_table_size <= index) &&
+- (index < its->its_size) &&
+- (((its->its_size - space->is_table_size) *
+- sizeof(struct ipc_entry)) <
+- ((space->is_tree_small + 1) *
+- sizeof(struct ipc_tree_entry)))) {
+- kern_return_t kr;
+-
+- /*
+- * Can save space by growing the table.
+- * Because the space will be unlocked,
+- * we must restart.
+- */
+-
+- kr = ipc_entry_grow_table(space);
+- assert(kr != KERN_NO_SPACE);
++ if (slot != NULL)
++ rdxtree_replace_slot(slot, entry);
++ else {
++ kr = rdxtree_insert(&space->is_map,
++ (rdxtree_key_t) name, entry);
+ if (kr != KERN_SUCCESS) {
+- /* space is unlocked */
+- if (tree_entry) ite_free(tree_entry);
++ is_write_unlock(space);
++ ie_free(entry);
+ return kr;
+ }
+-
+- continue;
+- }
+-
+- /*
+- * If a splay-tree entry was allocated previously,
+- * go ahead and insert it into the tree.
+- */
+-
+- if (tree_entry != ITE_NULL) {
+- space->is_tree_total++;
+-
+- if (index < space->is_table_size)
+- space->is_table[index].ie_bits |=
+- IE_BITS_COLLISION;
+- else if ((index < its->its_size) &&
+- !ipc_entry_tree_collision(space, name))
+- space->is_tree_small++;
+-
+- ipc_splay_tree_insert(&space->is_tree,
+- name, tree_entry);
+-
+- tree_entry->ite_bits = 0;
+- tree_entry->ite_object = IO_NULL;
+- tree_entry->ite_request = 0;
+- tree_entry->ite_space = space;
+- *entryp = &tree_entry->ite_entry;
+- return KERN_SUCCESS;
+ }
++ space->is_size += 1;
+
+- /*
+- * Allocate a tree entry and try again.
+- */
++ *entryp = entry;
++ /* Success. Space is write-locked. */
++ return KERN_SUCCESS;
++ }
+
+- is_write_unlock(space);
+- tree_entry = ite_alloc();
+- if (tree_entry == ITE_NULL)
+- return KERN_RESOURCE_SHORTAGE;
+- is_write_lock(space);
++ if (IE_BITS_TYPE(entry->ie_bits)) {
++ /* Used entry. */
++ *entryp = entry;
++ /* Success. Space is write-locked. */
++ return KERN_SUCCESS;
+ }
++
++ /* Free entry. Rip the entry out of the free list. */
++ for (prevp = &space->is_free_list, e = space->is_free_list;
++ e != entry;
++ ({ prevp = &e->ie_next_free; e = e->ie_next_free; }))
++ continue;
++
++ *prevp = entry->ie_next_free;
++ space->is_free_list_size -= 1;
++
++ entry->ie_bits = 0;
++ assert(entry->ie_object == IO_NULL);
++ assert(entry->ie_name == name);
++ entry->ie_request = 0;
++
++ space->is_size += 1;
++ *entryp = entry;
++ /* Success. Space is write-locked. */
++ return KERN_SUCCESS;
+ }
+
+ /*
+@@ -429,405 +308,20 @@ ipc_entry_dealloc(
+ mach_port_t name,
+ ipc_entry_t entry)
+ {
+- ipc_entry_t table;
+- ipc_entry_num_t size;
+- mach_port_index_t index;
+-
+ assert(space->is_active);
+ assert(entry->ie_object == IO_NULL);
+ assert(entry->ie_request == 0);
+
+- index = MACH_PORT_INDEX(name);
+- table = space->is_table;
+- size = space->is_table_size;
+-
+- if ((index < size) && (entry == &table[index])) {
+- assert(IE_BITS_GEN(entry->ie_bits) == MACH_PORT_GEN(name));
+-
+- if (entry->ie_bits & IE_BITS_COLLISION) {
+- struct ipc_splay_tree small, collisions;
+- ipc_tree_entry_t tentry;
+- mach_port_t tname;
+- boolean_t pick;
+- ipc_entry_bits_t bits;
+- ipc_object_t obj;
+-
+- /* must move an entry from tree to table */
+-
+- ipc_splay_tree_split(&space->is_tree,
+- MACH_PORT_MAKE(index+1, 0),
+- &collisions);
+- ipc_splay_tree_split(&collisions,
+- MACH_PORT_MAKE(index, 0),
+- &small);
+-
+- pick = ipc_splay_tree_pick(&collisions,
+- &tname, &tentry);
+- assert(pick);
+- assert(MACH_PORT_INDEX(tname) == index);
+-
+- bits = tentry->ite_bits;
+- entry->ie_bits = bits | MACH_PORT_GEN(tname);
+- entry->ie_object = obj = tentry->ite_object;
+- entry->ie_request = tentry->ite_request;
+- assert(tentry->ite_space == space);
+-
+- if (IE_BITS_TYPE(bits) == MACH_PORT_TYPE_SEND) {
+- ipc_hash_global_delete(space, obj,
+- tname, tentry);
+- ipc_hash_local_insert(space, obj,
+- index, entry);
+- }
+-
+- ipc_splay_tree_delete(&collisions, tname, tentry);
+-
+- assert(space->is_tree_total > 0);
+- space->is_tree_total--;
+-
+- /* check if collision bit should still be on */
+-
+- pick = ipc_splay_tree_pick(&collisions,
+- &tname, &tentry);
+- if (pick) {
+- entry->ie_bits |= IE_BITS_COLLISION;
+- ipc_splay_tree_join(&space->is_tree,
+- &collisions);
+- }
+-
+- ipc_splay_tree_join(&space->is_tree, &small);
+- } else {
+- entry->ie_bits &= IE_BITS_GEN_MASK;
+- entry->ie_next = table->ie_next;
+- table->ie_next = index;
+- }
++ if (space->is_free_list_size < IS_FREE_LIST_SIZE_LIMIT) {
++ space->is_free_list_size += 1;
++ entry->ie_bits &= IE_BITS_GEN_MASK;
++ entry->ie_next_free = space->is_free_list;
++ space->is_free_list = entry;
+ } else {
+- ipc_tree_entry_t tentry = (ipc_tree_entry_t) entry;
+-
+- assert(tentry->ite_space == space);
+-
+- ipc_splay_tree_delete(&space->is_tree, name, tentry);
+-
+- assert(space->is_tree_total > 0);
+- space->is_tree_total--;
+-
+- if (index < size) {
+- ipc_entry_t ientry = &table[index];
+-
+- assert(ientry->ie_bits & IE_BITS_COLLISION);
+-
+- if (!ipc_entry_tree_collision(space, name))
+- ientry->ie_bits &= ~IE_BITS_COLLISION;
+- } else if ((index < space->is_table_next->its_size) &&
+- !ipc_entry_tree_collision(space, name)) {
+- assert(space->is_tree_small > 0);
+- space->is_tree_small--;
+- }
++ rdxtree_remove(&space->is_map, (rdxtree_key_t) name);
++ ie_free(entry);
+ }
+-}
+-
+-/*
+- * Routine: ipc_entry_grow_table
+- * Purpose:
+- * Grows the table in a space.
+- * Conditions:
+- * The space must be write-locked and active before.
+- * If successful, it is also returned locked.
+- * Allocates memory.
+- * Returns:
+- * KERN_SUCCESS Grew the table.
+- * KERN_SUCCESS Somebody else grew the table.
+- * KERN_SUCCESS The space died.
+- * KERN_NO_SPACE Table has maximum size already.
+- * KERN_RESOURCE_SHORTAGE Couldn't allocate a new table.
+- */
+-
+-kern_return_t
+-ipc_entry_grow_table(ipc_space_t space)
+-{
+- ipc_entry_num_t osize, size, nsize;
+-
+- do {
+- ipc_entry_t otable, table;
+- ipc_table_size_t oits, its, nits;
+- mach_port_index_t i, free_index;
+-
+- assert(space->is_active);
+-
+- if (space->is_growing) {
+- /*
+- * Somebody else is growing the table.
+- * We just wait for them to finish.
+- */
+-
+- assert_wait((event_t) space, FALSE);
+- is_write_unlock(space);
+- thread_block((void (*)()) 0);
+- is_write_lock(space);
+- return KERN_SUCCESS;
+- }
+-
+- otable = space->is_table;
+- its = space->is_table_next;
+- size = its->its_size;
+- oits = its - 1;
+- osize = oits->its_size;
+- nits = its + 1;
+- nsize = nits->its_size;
+-
+- if (osize == size) {
+- is_write_unlock(space);
+- printf_once("no more room for ipc_entry_grow_table in space %p\n", space);
+- return KERN_NO_SPACE;
+- }
+-
+- assert((osize < size) && (size <= nsize));
+-
+- /*
+- * OK, we'll attempt to grow the table.
+- * The realloc requires that the old table
+- * remain in existence.
+- */
+-
+- space->is_growing = TRUE;
+- is_write_unlock(space);
+- if (it_entries_reallocable(oits))
+- table = it_entries_realloc(oits, otable, its);
+- else
+- table = it_entries_alloc(its);
+- is_write_lock(space);
+- space->is_growing = FALSE;
+-
+- /*
+- * We need to do a wakeup on the space,
+- * to rouse waiting threads. We defer
+- * this until the space is unlocked,
+- * because we don't want them to spin.
+- */
+-
+- if (table == IE_NULL) {
+- is_write_unlock(space);
+- thread_wakeup((event_t) space);
+- return KERN_RESOURCE_SHORTAGE;
+- }
+-
+- if (!space->is_active) {
+- /*
+- * The space died while it was unlocked.
+- */
+-
+- is_write_unlock(space);
+- thread_wakeup((event_t) space);
+- it_entries_free(its, table);
+- ipc_reverse_remove_all(space);
+- is_write_lock(space);
+- return KERN_SUCCESS;
+- }
+-
+- assert(space->is_table == otable);
+- assert(space->is_table_next == its);
+- assert(space->is_table_size == osize);
+-
+- space->is_table = table;
+- space->is_table_size = size;
+- space->is_table_next = nits;
+-
+- /*
+- * If we did a realloc, it remapped the data.
+- * Otherwise we copy by hand first. Then we have
+- * to clear the index fields in the old part and
+- * zero the new part.
+- */
+-
+- if (!it_entries_reallocable(oits))
+- memcpy(table, otable,
+- osize * sizeof(struct ipc_entry));
+-
+- (void) memset((void *) (table + osize), 0,
+- (size - osize) * sizeof(struct ipc_entry));
+-
+- /*
+- * Put old entries into the reverse hash table.
+- */
+-
+- ipc_reverse_remove_all(space);
+- for (i = 0; i < osize; i++) {
+- ipc_entry_t entry = &table[i];
+-
+- if (IE_BITS_TYPE(entry->ie_bits) ==
+- MACH_PORT_TYPE_SEND)
+- ipc_hash_local_insert(space, entry->ie_object,
+- i, entry);
+- }
+-
+- /*
+- * If there are entries in the splay tree,
+- * then we have work to do:
+- * 1) transfer entries to the table
+- * 2) update is_tree_small
+- */
+-
+- if (space->is_tree_total > 0) {
+- mach_port_index_t index;
+- boolean_t delete;
+- struct ipc_splay_tree ignore;
+- struct ipc_splay_tree move;
+- struct ipc_splay_tree small;
+- ipc_entry_num_t nosmall;
+- ipc_tree_entry_t tentry;
+-
+- /*
+- * The splay tree divides into four regions,
+- * based on the index of the entries:
+- * 1) 0 <= index < osize
+- * 2) osize <= index < size
+- * 3) size <= index < nsize
+- * 4) nsize <= index
+- *
+- * Entries in the first part are ignored.
+- * Entries in the second part, that don't
+- * collide, are moved into the table.
+- * Entries in the third part, that don't
+- * collide, are counted for is_tree_small.
+- * Entries in the fourth part are ignored.
+- */
+-
+- ipc_splay_tree_split(&space->is_tree,
+- MACH_PORT_MAKE(nsize, 0),
+- &small);
+- ipc_splay_tree_split(&small,
+- MACH_PORT_MAKE(size, 0),
+- &move);
+- ipc_splay_tree_split(&move,
+- MACH_PORT_MAKE(osize, 0),
+- &ignore);
+-
+- /* move entries into the table */
+-
+- for (tentry = ipc_splay_traverse_start(&move);
+- tentry != ITE_NULL;
+- tentry = ipc_splay_traverse_next(&move, delete)) {
+- mach_port_t name;
+- mach_port_gen_t gen;
+- mach_port_type_t type;
+- ipc_entry_bits_t bits;
+- ipc_object_t obj;
+- ipc_entry_t entry;
+-
+- name = tentry->ite_name;
+- gen = MACH_PORT_GEN(name);
+- index = MACH_PORT_INDEX(name);
+-
+- assert(tentry->ite_space == space);
+- assert((osize <= index) && (index < size));
+-
+- entry = &table[index];
+-
+- /* collision with previously moved entry? */
+-
+- bits = entry->ie_bits;
+- if (bits != 0) {
+- assert(IE_BITS_TYPE(bits));
+- assert(IE_BITS_GEN(bits) != gen);
+-
+- entry->ie_bits =
+- bits | IE_BITS_COLLISION;
+- delete = FALSE;
+- continue;
+- }
+-
+- bits = tentry->ite_bits;
+- type = IE_BITS_TYPE(bits);
+- assert(type != MACH_PORT_TYPE_NONE);
+-
+- entry->ie_bits = bits | gen;
+- entry->ie_object = obj = tentry->ite_object;
+- entry->ie_request = tentry->ite_request;
+-
+- if (type == MACH_PORT_TYPE_SEND) {
+- ipc_hash_global_delete(space, obj,
+- name, tentry);
+- ipc_hash_local_insert(space, obj,
+- index, entry);
+- }
+-
+- space->is_tree_total--;
+- delete = TRUE;
+- }
+- ipc_splay_traverse_finish(&move);
+-
+- /* count entries for is_tree_small */
+-
+- nosmall = 0; index = 0;
+- for (tentry = ipc_splay_traverse_start(&small);
+- tentry != ITE_NULL;
+- tentry = ipc_splay_traverse_next(&small, FALSE)) {
+- mach_port_index_t nindex;
+-
+- nindex = MACH_PORT_INDEX(tentry->ite_name);
+-
+- if (nindex != index) {
+- nosmall++;
+- index = nindex;
+- }
+- }
+- ipc_splay_traverse_finish(&small);
+-
+- assert(nosmall <= (nsize - size));
+- assert(nosmall <= space->is_tree_total);
+- space->is_tree_small = nosmall;
+-
+- /* put the splay tree back together */
+-
+- ipc_splay_tree_join(&space->is_tree, &small);
+- ipc_splay_tree_join(&space->is_tree, &move);
+- ipc_splay_tree_join(&space->is_tree, &ignore);
+- }
+-
+- /*
+- * Add entries in the new part which still aren't used
+- * to the free list. Add them in reverse order,
+- * and set the generation number to -1, so that
+- * early allocations produce "natural" names.
+- */
+-
+- free_index = table[0].ie_next;
+- for (i = size-1; i >= osize; --i) {
+- ipc_entry_t entry = &table[i];
+-
+- if (entry->ie_bits == 0) {
+- entry->ie_bits = IE_BITS_GEN_MASK;
+- entry->ie_next = free_index;
+- free_index = i;
+- }
+- }
+- table[0].ie_next = free_index;
+-
+- /*
+- * Now we need to free the old table.
+- * If the space dies or grows while unlocked,
+- * then we can quit here.
+- */
+-
+- is_write_unlock(space);
+- thread_wakeup((event_t) space);
+- it_entries_free(oits, otable);
+- is_write_lock(space);
+- if (!space->is_active || (space->is_table_next != nits))
+- return KERN_SUCCESS;
+-
+- /*
+- * We might have moved enough entries from
+- * the splay tree into the table that
+- * the table can be profitably grown again.
+- *
+- * Note that if size == nsize, then
+- * space->is_tree_small == 0.
+- */
+- } while ((space->is_tree_small > 0) &&
+- (((nsize - size) * sizeof(struct ipc_entry)) <
+- (space->is_tree_small * sizeof(struct ipc_tree_entry))));
+-
+- return KERN_SUCCESS;
++ space->is_size -= 1;
+ }
+
+
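+Note how the new ipc_entry_dealloc and ipc_entry_get cooperate: deallocated
+entries are parked on a per-space free list, bounded by
+IS_FREE_LIST_SIZE_LIMIT, so that their slot in the radix tree, and hence
+their name, can be reused cheaply. A condensed sketch of the two halves,
+drawn from the code above (the generation arithmetic degenerates to a
+no-op when port generations are compiled out):
+
+	/* Dealloc: keep only the generation bits, park the entry. */
+	entry->ie_bits &= IE_BITS_GEN_MASK;
+	entry->ie_next_free = space->is_free_list;
+	space->is_free_list = entry;
+
+	/* Next get: advance the generation and rebuild the name. */
+	gen = free_entry->ie_bits + IE_BITS_GEN_ONE;
+	free_entry->ie_bits = gen;
+	new_name = MACH_PORT_MAKE (free_entry->ie_name, gen);
+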
+diff --git a/ipc/ipc_entry.h b/ipc/ipc_entry.h
+index 0caa70b..5c1f8fd 100644
+--- a/ipc/ipc_entry.h
++++ b/ipc/ipc_entry.h
+@@ -48,42 +48,27 @@
+
+ /*
+ * Spaces hold capabilities for ipc_object_t's (ports and port sets).
+- * Each ipc_entry_t records a capability. Most capabilities have
+- * small names, and the entries are elements of a table.
+- * Capabilities can have large names, and a splay tree holds
+- * those entries. The cutoff point between the table and the tree
+- * is adjusted dynamically to minimize memory consumption.
+- *
+- * Free (unallocated) entries in the table have null ie_object
+- * fields. The ie_bits field is zero except for IE_BITS_GEN.
+- * The ie_next (ie_request) field links free entries into a free list.
+- *
+- * The first entry in the table (index 0) is always free.
+- * It is used as the head of the free list.
++ * Each ipc_entry_t records a capability.
+ */
+
+ typedef unsigned int ipc_entry_bits_t;
+ typedef ipc_table_elems_t ipc_entry_num_t; /* number of entries */
+
+ typedef struct ipc_entry {
++ mach_port_t ie_name;
+ ipc_entry_bits_t ie_bits;
+ struct ipc_object *ie_object;
+ union {
+- mach_port_index_t next;
++ struct ipc_entry *next_free;
+ /*XXX ipc_port_request_index_t request;*/
+ unsigned int request;
+ } index;
+- union {
+- mach_port_index_t table;
+- struct ipc_tree_entry *tree;
+- } hash;
+ } *ipc_entry_t;
+
+ #define IE_NULL ((ipc_entry_t) 0)
+
+ #define ie_request index.request
+-#define ie_next index.next
+-#define ie_index hash.table
++#define ie_next_free index.next_free
+
+ #define IE_BITS_UREFS_MASK 0x0000ffff /* 16 bits of user-reference */
+ #define IE_BITS_UREFS(bits) ((bits) & IE_BITS_UREFS_MASK)
+@@ -93,12 +78,10 @@ typedef struct ipc_entry {
+
+ #define IE_BITS_MAREQUEST 0x00200000 /* 1 bit for msg-accepted */
+
+-#define IE_BITS_COMPAT 0x00400000 /* 1 bit for compatibility */
+-
+-#define IE_BITS_COLLISION 0x00800000 /* 1 bit for collisions */
+-#define IE_BITS_RIGHT_MASK 0x007fffff /* relevant to the right */
++#define IE_BITS_RIGHT_MASK 0x003fffff /* relevant to the right */
+
+ #if PORT_GENERATIONS
++#error "not supported"
+ #define IE_BITS_GEN_MASK 0xff000000U /* 8 bits for generation */
+ #define IE_BITS_GEN(bits) ((bits) & IE_BITS_GEN_MASK)
+ #define IE_BITS_GEN_ONE 0x01000000 /* low bit of generation */
+@@ -109,26 +92,9 @@ typedef struct ipc_entry {
+ #endif
+
+
+-typedef struct ipc_tree_entry {
+- struct ipc_entry ite_entry;
+- mach_port_t ite_name;
+- struct ipc_space *ite_space;
+- struct ipc_tree_entry *ite_lchild;
+- struct ipc_tree_entry *ite_rchild;
+-} *ipc_tree_entry_t;
+-
+-#define ITE_NULL ((ipc_tree_entry_t) 0)
+-
+-#define ite_bits ite_entry.ie_bits
+-#define ite_object ite_entry.ie_object
+-#define ite_request ite_entry.ie_request
+-#define ite_next ite_entry.hash.tree
+-
+-extern struct kmem_cache ipc_tree_entry_cache;
+-
+-#define ite_alloc() ((ipc_tree_entry_t) kmem_cache_alloc(&ipc_tree_entry_cache))
+-#define ite_free(ite) kmem_cache_free(&ipc_tree_entry_cache, (vm_offset_t) (ite))
+-
++extern struct kmem_cache ipc_entry_cache;
++#define ie_alloc() ((ipc_entry_t) kmem_cache_alloc(&ipc_entry_cache))
++#define ie_free(e) kmem_cache_free(&ipc_entry_cache, (vm_offset_t) (e))
+
+ extern ipc_entry_t
+ ipc_entry_lookup(ipc_space_t space, mach_port_t name);
+@@ -145,9 +111,6 @@ ipc_entry_alloc_name(ipc_space_t space, mach_port_t name, ipc_entry_t *entryp);
+ extern void
+ ipc_entry_dealloc(ipc_space_t space, mach_port_t name, ipc_entry_t entry);
+
+-extern kern_return_t
+-ipc_entry_grow_table(ipc_space_t space);
+-
+ ipc_entry_t
+ db_ipc_object_by_name(
+ task_t task,
+diff --git a/ipc/ipc_hash.c b/ipc/ipc_hash.c
+deleted file mode 100644
+index 87952a7..0000000
+--- a/ipc/ipc_hash.c
++++ /dev/null
+@@ -1,486 +0,0 @@
+-/*
+- * Mach Operating System
+- * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+- * All Rights Reserved.
+- *
+- * Permission to use, copy, modify and distribute this software and its
+- * documentation is hereby granted, provided that both the copyright
+- * notice and this permission notice appear in all copies of the
+- * software, derivative works or modified versions, and any portions
+- * thereof, and that both notices appear in supporting documentation.
+- *
+- * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+- * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+- * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+- *
+- * Carnegie Mellon requests users of this software to return to
+- *
+- * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+- * School of Computer Science
+- * Carnegie Mellon University
+- * Pittsburgh PA 15213-3890
+- *
+- * any improvements or extensions that they make and grant Carnegie Mellon
+- * the rights to redistribute these changes.
+- */
+-/*
+- * File: ipc/ipc_hash.c
+- * Author: Rich Draves
+- * Date: 1989
+- *
+- * Entry hash table operations.
+- */
+-
+-#include <kern/printf.h>
+-#include <mach/boolean.h>
+-#include <mach/port.h>
+-#include <kern/lock.h>
+-#include <kern/kalloc.h>
+-#include <ipc/port.h>
+-#include <ipc/ipc_space.h>
+-#include <ipc/ipc_object.h>
+-#include <ipc/ipc_entry.h>
+-#include <ipc/ipc_hash.h>
+-#include <ipc/ipc_init.h>
+-#include <ipc/ipc_types.h>
+-
+-#if MACH_IPC_DEBUG
+-#include <mach/kern_return.h>
+-#include <mach_debug/hash_info.h>
+-#include <vm/vm_map.h>
+-#include <vm/vm_kern.h>
+-#include <vm/vm_user.h>
+-#endif
+-
+-
+-
+-/*
+- * Routine: ipc_hash_lookup
+- * Purpose:
+- * Converts (space, obj) -> (name, entry).
+- * Returns TRUE if an entry was found.
+- * Conditions:
+- * The space must be locked (read or write) throughout.
+- */
+-
+-boolean_t
+-ipc_hash_lookup(
+- ipc_space_t space,
+- ipc_object_t obj,
+- mach_port_t *namep,
+- ipc_entry_t *entryp)
+-{
+- return (ipc_hash_local_lookup(space, obj, namep, entryp) ||
+- ((space->is_tree_hash > 0) &&
+- ipc_hash_global_lookup(space, obj, namep,
+- (ipc_tree_entry_t *) entryp)));
+-}
+-
+-/*
+- * Routine: ipc_hash_insert
+- * Purpose:
+- * Inserts an entry into the appropriate reverse hash table,
+- * so that ipc_hash_lookup will find it.
+- * Conditions:
+- * The space must be write-locked.
+- */
+-
+-void
+-ipc_hash_insert(
+- ipc_space_t space,
+- ipc_object_t obj,
+- mach_port_t name,
+- ipc_entry_t entry)
+-{
+- mach_port_index_t index;
+-
+- index = MACH_PORT_INDEX(name);
+- if ((index < space->is_table_size) &&
+- (entry == &space->is_table[index]))
+- ipc_hash_local_insert(space, obj, index, entry);
+- else
+- ipc_hash_global_insert(space, obj, name,
+- (ipc_tree_entry_t) entry);
+-}
+-
+-/*
+- * Routine: ipc_hash_delete
+- * Purpose:
+- * Deletes an entry from the appropriate reverse hash table.
+- * Conditions:
+- * The space must be write-locked.
+- */
+-
+-void
+-ipc_hash_delete(
+- ipc_space_t space,
+- ipc_object_t obj,
+- mach_port_t name,
+- ipc_entry_t entry)
+-{
+- mach_port_index_t index;
+-
+- index = MACH_PORT_INDEX(name);
+- if ((index < space->is_table_size) &&
+- (entry == &space->is_table[index]))
+- ipc_hash_local_delete(space, obj, index, entry);
+- else
+- ipc_hash_global_delete(space, obj, name,
+- (ipc_tree_entry_t) entry);
+-}
+-
+-/*
+- * The global reverse hash table holds splay tree entries.
+- * It is a simple open-chaining hash table with singly-linked buckets.
+- * Each bucket is locked separately, with an exclusive lock.
+- * Within each bucket, move-to-front is used.
+- */
+-
+-ipc_hash_index_t ipc_hash_global_size;
+-ipc_hash_index_t ipc_hash_global_mask;
+-
+-#define IH_GLOBAL_HASH(space, obj) \
+- (((((ipc_hash_index_t) ((vm_offset_t)space)) >> 4) + \
+- (((ipc_hash_index_t) ((vm_offset_t)obj)) >> 6)) & \
+- ipc_hash_global_mask)
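A worked example of this hash, using illustrative addresses rather than
values from the source: with ipc_hash_global_mask == 255,
space == 0xc1234560 and obj == 0xc2345680,

	(0xc1234560 >> 4) + (0xc2345680 >> 6)
	    == 0x0c123456 + 0x0308d15a
	    == 0x0f1b05b0

so the low eight bits select bucket 0xb0.  The shifts discard the
alignment zeroes of the kalloc'd structures before mixing the two
pointers.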
+-
+-typedef struct ipc_hash_global_bucket {
+- decl_simple_lock_data(, ihgb_lock_data)
+- ipc_tree_entry_t ihgb_head;
+-} *ipc_hash_global_bucket_t;
+-
+-#define IHGB_NULL ((ipc_hash_global_bucket_t) 0)
+-
+-#define ihgb_lock_init(ihgb) simple_lock_init(&(ihgb)->ihgb_lock_data)
+-#define ihgb_lock(ihgb) simple_lock(&(ihgb)->ihgb_lock_data)
+-#define ihgb_unlock(ihgb) simple_unlock(&(ihgb)->ihgb_lock_data)
+-
+-ipc_hash_global_bucket_t ipc_hash_global_table;
+-
+-/*
+- * Routine: ipc_hash_global_lookup
+- * Purpose:
+- * Converts (space, obj) -> (name, entry).
+- * Looks in the global table, for splay tree entries.
+- * Returns TRUE if an entry was found.
+- * Conditions:
+- * The space must be locked (read or write) throughout.
+- */
+-
+-boolean_t
+-ipc_hash_global_lookup(
+- ipc_space_t space,
+- ipc_object_t obj,
+- mach_port_t *namep,
+- ipc_tree_entry_t *entryp)
+-{
+- ipc_hash_global_bucket_t bucket;
+- ipc_tree_entry_t this, *last;
+-
+- assert(space != IS_NULL);
+- assert(obj != IO_NULL);
+-
+- bucket = &ipc_hash_global_table[IH_GLOBAL_HASH(space, obj)];
+- ihgb_lock(bucket);
+-
+- if ((this = bucket->ihgb_head) != ITE_NULL) {
+- if ((this->ite_object == obj) &&
+- (this->ite_space == space)) {
+- /* found it at front; no need to move */
+-
+- *namep = this->ite_name;
+- *entryp = this;
+- } else for (last = &this->ite_next;
+- (this = *last) != ITE_NULL;
+- last = &this->ite_next) {
+- if ((this->ite_object == obj) &&
+- (this->ite_space == space)) {
+- /* found it; move to front */
+-
+- *last = this->ite_next;
+- this->ite_next = bucket->ihgb_head;
+- bucket->ihgb_head = this;
+-
+- *namep = this->ite_name;
+- *entryp = this;
+- break;
+- }
+- }
+- }
+-
+- ihgb_unlock(bucket);
+- return this != ITE_NULL;
+-}
+-
+-/*
+- * Routine: ipc_hash_global_insert
+- * Purpose:
+- * Inserts an entry into the global reverse hash table.
+- * Conditions:
+- * The space must be write-locked.
+- */
+-
+-void
+-ipc_hash_global_insert(
+- ipc_space_t space,
+- ipc_object_t obj,
+- mach_port_t name,
+- ipc_tree_entry_t entry)
+-{
+- ipc_hash_global_bucket_t bucket;
+-
+-
+- assert(entry->ite_name == name);
+- assert(space != IS_NULL);
+- assert(entry->ite_space == space);
+- assert(obj != IO_NULL);
+- assert(entry->ite_object == obj);
+-
+- space->is_tree_hash++;
+- assert(space->is_tree_hash <= space->is_tree_total);
+-
+- bucket = &ipc_hash_global_table[IH_GLOBAL_HASH(space, obj)];
+- ihgb_lock(bucket);
+-
+- /* insert at front of bucket */
+-
+- entry->ite_next = bucket->ihgb_head;
+- bucket->ihgb_head = entry;
+-
+- ihgb_unlock(bucket);
+-}
+-
+-/*
+- * Routine: ipc_hash_global_delete
+- * Purpose:
+- * Deletes an entry from the global reverse hash table.
+- * Conditions:
+- * The space must be write-locked.
+- */
+-
+-void
+-ipc_hash_global_delete(
+- ipc_space_t space,
+- ipc_object_t obj,
+- mach_port_t name,
+- ipc_tree_entry_t entry)
+-{
+- ipc_hash_global_bucket_t bucket;
+- ipc_tree_entry_t this, *last;
+-
+- assert(entry->ite_name == name);
+- assert(space != IS_NULL);
+- assert(entry->ite_space == space);
+- assert(obj != IO_NULL);
+- assert(entry->ite_object == obj);
+-
+- assert(space->is_tree_hash > 0);
+- space->is_tree_hash--;
+-
+- bucket = &ipc_hash_global_table[IH_GLOBAL_HASH(space, obj)];
+- ihgb_lock(bucket);
+-
+- for (last = &bucket->ihgb_head;
+- (this = *last) != ITE_NULL;
+- last = &this->ite_next) {
+- if (this == entry) {
+- /* found it; remove from bucket */
+-
+- *last = this->ite_next;
+- break;
+- }
+- }
+- assert(this != ITE_NULL);
+-
+- ihgb_unlock(bucket);
+-}
+-
+-/*
+- * Each space has a local reverse mapping from objects to entries
+- * from the space's table. This used to be a hash table.
+- */
+-
+-#define IPC_LOCAL_HASH_INVARIANT(S, O, N, E) \
+- MACRO_BEGIN \
+- assert(IE_BITS_TYPE((E)->ie_bits) == MACH_PORT_TYPE_SEND || \
+- IE_BITS_TYPE((E)->ie_bits) == MACH_PORT_TYPE_SEND_RECEIVE || \
+- IE_BITS_TYPE((E)->ie_bits) == MACH_PORT_TYPE_NONE); \
+- assert((E)->ie_object == (O)); \
+- assert((E)->ie_index == (N)); \
+- assert(&(S)->is_table[N] == (E)); \
+- MACRO_END
+-
+-/*
+- * Routine: ipc_hash_local_lookup
+- * Purpose:
+- * Converts (space, obj) -> (name, entry).
+- * Looks in the space's local table, for table entries.
+- * Returns TRUE if an entry was found.
+- * Conditions:
+- * The space must be locked (read or write) throughout.
+- */
+-
+-boolean_t
+-ipc_hash_local_lookup(
+- ipc_space_t space,
+- ipc_object_t obj,
+- mach_port_t *namep,
+- ipc_entry_t *entryp)
+-{
+- assert(space != IS_NULL);
+- assert(obj != IO_NULL);
+-
+- *entryp = ipc_reverse_lookup(space, obj);
+- if (*entryp != IE_NULL) {
+- *namep = (*entryp)->ie_index;
+- IPC_LOCAL_HASH_INVARIANT(space, obj, *namep, *entryp);
+- return TRUE;
+- }
+- return FALSE;
+-}
+-
+-/*
+- * Routine: ipc_hash_local_insert
+- * Purpose:
+- * Inserts an entry into the space's reverse hash table.
+- * Conditions:
+- * The space must be write-locked.
+- */
+-
+-void
+-ipc_hash_local_insert(
+- ipc_space_t space,
+- ipc_object_t obj,
+- mach_port_index_t index,
+- ipc_entry_t entry)
+-{
+- kern_return_t kr;
+- assert(index != 0);
+- assert(space != IS_NULL);
+- assert(obj != IO_NULL);
+-
+- entry->ie_index = index;
+- IPC_LOCAL_HASH_INVARIANT(space, obj, index, entry);
+- kr = ipc_reverse_insert(space, obj, entry);
+- assert(kr == 0);
+-}
+-
+-/*
+- * Routine: ipc_hash_local_delete
+- * Purpose:
+- * Deletes an entry from the space's reverse hash table.
+- * Conditions:
+- * The space must be write-locked.
+- */
+-
+-void
+-ipc_hash_local_delete(
+- ipc_space_t space,
+- ipc_object_t obj,
+- mach_port_index_t index,
+- ipc_entry_t entry)
+-{
+- ipc_entry_t removed;
+- assert(index != MACH_PORT_NULL);
+- assert(space != IS_NULL);
+- assert(obj != IO_NULL);
+-
+- IPC_LOCAL_HASH_INVARIANT(space, obj, index, entry);
+- removed = ipc_reverse_remove(space, obj);
+- assert(removed == entry);
+-}
+-
+-/*
+- * Routine: ipc_hash_init
+- * Purpose:
+- * Initialize the reverse hash table implementation.
+- */
+-
+-void
+-ipc_hash_init(void)
+-{
+- ipc_hash_index_t i;
+-
+- /* initialize ipc_hash_global_size */
+-
+- ipc_hash_global_size = IPC_HASH_GLOBAL_SIZE;
+-
+- /* make sure it is a power of two */
+-
+- ipc_hash_global_mask = ipc_hash_global_size - 1;
+- if ((ipc_hash_global_size & ipc_hash_global_mask) != 0) {
+- natural_t bit;
+-
+- /* round up to closest power of two */
+-
+- for (bit = 1;; bit <<= 1) {
+- ipc_hash_global_mask |= bit;
+- ipc_hash_global_size = ipc_hash_global_mask + 1;
+-
+- if ((ipc_hash_global_size & ipc_hash_global_mask) == 0)
+- break;
+- }
+- }
+-
+- /* allocate ipc_hash_global_table */
+-
+- ipc_hash_global_table = (ipc_hash_global_bucket_t)
+- kalloc((vm_size_t) (ipc_hash_global_size *
+- sizeof(struct ipc_hash_global_bucket)));
+- assert(ipc_hash_global_table != IHGB_NULL);
+-
+- /* and initialize it */
+-
+- for (i = 0; i < ipc_hash_global_size; i++) {
+- ipc_hash_global_bucket_t bucket;
+-
+- bucket = &ipc_hash_global_table[i];
+- ihgb_lock_init(bucket);
+- bucket->ihgb_head = ITE_NULL;
+- }
+-}
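The rounding loop above can be read as a stand-alone helper.  A sketch
for illustration only (round_up_pow2 is hypothetical, not a function in
this tree): starting from mask = size - 1, bits are ORed in from the
bottom until mask + 1 has a single bit set, so e.g. a size of 300 comes
out as 512, while 256 is returned unchanged.

	static natural_t
	round_up_pow2(natural_t size)
	{
		natural_t mask = size - 1;
		natural_t bit;

		if ((size & mask) == 0)	/* already a power of two */
			return size;

		for (bit = 1;; bit <<= 1) {
			mask |= bit;
			size = mask + 1;
			if ((size & mask) == 0)
				return size;
		}
	}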
+-
+-#if MACH_IPC_DEBUG
+-
+-/*
+- * Routine: ipc_hash_info
+- * Purpose:
+- * Return information about the global reverse hash table.
+- * Fills the buffer with as much information as possible
+- * and returns the desired size of the buffer.
+- * Conditions:
+- * Nothing locked. The caller should provide
+- * possibly-pageable memory.
+- */
+-
+-
+-ipc_hash_index_t
+-ipc_hash_info(
+- hash_info_bucket_t *info,
+- mach_msg_type_number_t count)
+-{
+- ipc_hash_index_t i;
+-
+- if (ipc_hash_global_size < count)
+- count = ipc_hash_global_size;
+-
+- for (i = 0; i < count; i++) {
+- ipc_hash_global_bucket_t bucket = &ipc_hash_global_table[i];
+- unsigned int bucket_count = 0;
+- ipc_tree_entry_t entry;
+-
+- ihgb_lock(bucket);
+- for (entry = bucket->ihgb_head;
+- entry != ITE_NULL;
+- entry = entry->ite_next)
+- bucket_count++;
+- ihgb_unlock(bucket);
+-
+- /* don't touch pageable memory while holding locks */
+- info[i].hib_count = bucket_count;
+- }
+-
+- return ipc_hash_global_size;
+-}
+-
+-#endif /* MACH_IPC_DEBUG */
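Everything this file provided is superseded by the per-space radix tree
is_reverse_map, driven through ipc_reverse_insert, ipc_reverse_lookup
and ipc_reverse_remove in the hunks below.  A minimal sketch of how
those primitives could sit on the rdxtree API; the key derivation and
the exact signatures are assumptions here, and the series' real
definitions live in its ipc_entry changes:

	/* Assumed key derivation: shift out the allocation
	   alignment bits of the object's address.  */
	#define KEY(obj)	((rdxtree_key_t) (vm_offset_t) (obj) >> 2)

	static inline kern_return_t
	ipc_reverse_insert(ipc_space_t space, ipc_object_t obj,
			   ipc_entry_t entry)
	{
		return (kern_return_t)
			rdxtree_insert(&space->is_reverse_map,
				       KEY(obj), entry);
	}

	static inline ipc_entry_t
	ipc_reverse_lookup(ipc_space_t space, ipc_object_t obj)
	{
		return rdxtree_lookup(&space->is_reverse_map, KEY(obj));
	}

	static inline ipc_entry_t
	ipc_reverse_remove(ipc_space_t space, ipc_object_t obj)
	{
		return rdxtree_remove(&space->is_reverse_map, KEY(obj));
	}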
+diff --git a/ipc/ipc_hash.h b/ipc/ipc_hash.h
+deleted file mode 100644
+index 929ba77..0000000
+--- a/ipc/ipc_hash.h
++++ /dev/null
+@@ -1,96 +0,0 @@
+-/*
+- * Mach Operating System
+- * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+- * All Rights Reserved.
+- *
+- * Permission to use, copy, modify and distribute this software and its
+- * documentation is hereby granted, provided that both the copyright
+- * notice and this permission notice appear in all copies of the
+- * software, derivative works or modified versions, and any portions
+- * thereof, and that both notices appear in supporting documentation.
+- *
+- * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+- * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+- * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+- *
+- * Carnegie Mellon requests users of this software to return to
+- *
+- * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+- * School of Computer Science
+- * Carnegie Mellon University
+- * Pittsburgh PA 15213-3890
+- *
+- * any improvements or extensions that they make and grant Carnegie Mellon
+- * the rights to redistribute these changes.
+- */
+-/*
+- * File: ipc/ipc_hash.h
+- * Author: Rich Draves
+- * Date: 1989
+- *
+- * Declarations of entry hash table operations.
+- */
+-
+-#ifndef _IPC_IPC_HASH_H_
+-#define _IPC_IPC_HASH_H_
+-
+-#include <mach/boolean.h>
+-#include <mach/kern_return.h>
+-
+-typedef natural_t ipc_hash_index_t;
+-
+-extern void
+-ipc_hash_init(void);
+-
+-#if MACH_IPC_DEBUG
+-
+-extern ipc_hash_index_t
+-ipc_hash_info(hash_info_bucket_t *, mach_msg_type_number_t);
+-
+-#endif /* MACH_IPC_DEBUG */
+-
+-extern boolean_t
+-ipc_hash_lookup(ipc_space_t space, ipc_object_t obj,
+- mach_port_t *namep, ipc_entry_t *entryp);
+-
+-extern void
+-ipc_hash_insert(ipc_space_t space, ipc_object_t obj,
+- mach_port_t name, ipc_entry_t entry);
+-
+-extern void
+-ipc_hash_delete(ipc_space_t space, ipc_object_t obj,
+- mach_port_t name, ipc_entry_t entry);
+-
+-/*
+- * For use by functions that know what they're doing:
+- * the global primitives, for splay tree entries,
+- * and the local primitives, for table entries.
+- */
+-
+-#define IPC_HASH_GLOBAL_SIZE 256
+-
+-extern boolean_t
+-ipc_hash_global_lookup(ipc_space_t space, ipc_object_t obj,
+- mach_port_t *namep, ipc_tree_entry_t *entryp);
+-
+-extern void
+-ipc_hash_global_insert(ipc_space_t space, ipc_object_t obj,
+- mach_port_t name, ipc_tree_entry_t entry);
+-
+-extern void
+-ipc_hash_global_delete(ipc_space_t space, ipc_object_t obj,
+- mach_port_t name, ipc_tree_entry_t entry);
+-
+-extern boolean_t
+-ipc_hash_local_lookup(ipc_space_t space, ipc_object_t obj,
+- mach_port_t *namep, ipc_entry_t *entryp);
+-
+-extern void
+-ipc_hash_local_insert(ipc_space_t space, ipc_object_t obj,
+- mach_port_index_t index, ipc_entry_t entry);
+-
+-extern void
+-ipc_hash_local_delete(ipc_space_t space, ipc_object_t obj,
+- mach_port_index_t index, ipc_entry_t entry);
+-
+-#endif /* _IPC_IPC_HASH_H_ */
+diff --git a/ipc/ipc_init.c b/ipc/ipc_init.c
+index debda47..2c58a6e 100644
+--- a/ipc/ipc_init.c
++++ b/ipc/ipc_init.c
+@@ -47,7 +47,6 @@
+ #include <ipc/ipc_marequest.h>
+ #include <ipc/ipc_notify.h>
+ #include <ipc/ipc_kmsg.h>
+-#include <ipc/ipc_hash.h>
+ #include <ipc/ipc_init.h>
+
+
+@@ -76,8 +75,8 @@ ipc_bootstrap(void)
+ kmem_cache_init(&ipc_space_cache, "ipc_space",
+ sizeof(struct ipc_space), 0, NULL, NULL, NULL, 0);
+
+- kmem_cache_init(&ipc_tree_entry_cache, "ipc_tree_entry",
+- sizeof(struct ipc_tree_entry), 0, NULL, NULL, NULL, 0);
++ kmem_cache_init(&ipc_entry_cache, "ipc_entry",
++ sizeof(struct ipc_entry), 0, NULL, NULL, NULL, 0);
+
+ kmem_cache_init(&ipc_object_caches[IOT_PORT], "ipc_port",
+ sizeof(struct ipc_port), 0, NULL, NULL, NULL, 0);
+@@ -97,7 +96,6 @@ ipc_bootstrap(void)
+
+ ipc_table_init();
+ ipc_notify_init();
+- ipc_hash_init();
+ ipc_marequest_init();
+ }
+
+diff --git a/ipc/ipc_kmsg.c b/ipc/ipc_kmsg.c
+index cae700f..5076809 100644
+--- a/ipc/ipc_kmsg.c
++++ b/ipc/ipc_kmsg.c
+@@ -50,7 +50,6 @@
+ #include <vm/vm_user.h>
+ #include <ipc/port.h>
+ #include <ipc/ipc_entry.h>
+-#include <ipc/ipc_hash.h>
+ #include <ipc/ipc_kmsg.h>
+ #include <ipc/ipc_thread.h>
+ #include <ipc/ipc_marequest.h>
+@@ -1774,7 +1773,7 @@ ipc_kmsg_copyout_header(
+ break;
+
+ is_write_lock(space);
+- if (!space->is_active || space->is_table->ie_next == 0) {
++ if (!space->is_active || space->is_free_list == NULL) {
+ is_write_unlock(space);
+ break;
+ }
+@@ -2003,28 +2002,20 @@ ipc_kmsg_copyout_header(
+ goto copyout_dest;
+ }
+
+- kr = ipc_entry_get(space, &reply_name, &entry);
++ kr = ipc_entry_alloc(space, &reply_name, &entry);
+ if (kr != KERN_SUCCESS) {
+ ip_unlock(reply);
+
+ if (notify_port != IP_NULL)
+ ipc_port_release_sonce(notify_port);
+
+- /* space is locked */
+- kr = ipc_entry_grow_table(space);
+- if (kr != KERN_SUCCESS) {
+- /* space is unlocked */
+-
+- if (kr == KERN_RESOURCE_SHORTAGE)
+- return (MACH_RCV_HEADER_ERROR|
+- MACH_MSG_IPC_KERNEL);
+- else
+- return (MACH_RCV_HEADER_ERROR|
+- MACH_MSG_IPC_SPACE);
+- }
+- /* space is locked again; start over */
+-
+- continue;
++ is_write_unlock(space);
++ if (kr == KERN_RESOURCE_SHORTAGE)
++ return (MACH_RCV_HEADER_ERROR|
++ MACH_MSG_IPC_KERNEL);
++ else
++ return (MACH_RCV_HEADER_ERROR|
++ MACH_MSG_IPC_SPACE);
+ }
+
+ assert(IE_BITS_TYPE(entry->ie_bits)
+@@ -2266,12 +2257,13 @@ ipc_kmsg_copyout_object(
+
+ ip_lock(port);
+ if (!ip_active(port) ||
+- !ipc_hash_local_lookup(space, (ipc_object_t) port,
+- namep, &entry)) {
++ (entry = ipc_reverse_lookup(space,
++ (ipc_object_t) port)) == NULL) {
+ ip_unlock(port);
+ is_write_unlock(space);
+ goto slow_copyout;
+ }
++ *namep = entry->ie_name;
+
+ /*
+ * Copyout the send right, incrementing urefs
+diff --git a/ipc/ipc_object.c b/ipc/ipc_object.c
+index db6ef01..2d84cf5 100644
+--- a/ipc/ipc_object.c
++++ b/ipc/ipc_object.c
+@@ -41,7 +41,6 @@
+ #include <ipc/ipc_space.h>
+ #include <ipc/ipc_entry.h>
+ #include <ipc/ipc_object.h>
+-#include <ipc/ipc_hash.h>
+ #include <ipc/ipc_right.h>
+ #include <ipc/ipc_notify.h>
+ #include <ipc/ipc_pset.h>
+@@ -630,15 +629,10 @@ ipc_object_copyout(
+ break;
+ }
+
+- kr = ipc_entry_get(space, &name, &entry);
++ kr = ipc_entry_alloc(space, &name, &entry);
+ if (kr != KERN_SUCCESS) {
+- /* unlocks/locks space, so must start again */
+-
+- kr = ipc_entry_grow_table(space);
+- if (kr != KERN_SUCCESS)
+- return kr; /* space is unlocked */
+-
+- continue;
++ is_write_unlock(space);
++ return kr;
+ }
+
+ assert(IE_BITS_TYPE(entry->ie_bits) == MACH_PORT_TYPE_NONE);
+@@ -691,15 +685,10 @@ ipc_object_copyout_multiname(space, object, namep)
+ return KERN_INVALID_TASK;
+ }
+
+- kr = ipc_entry_get(space, &name, &entry);
++ kr = ipc_entry_alloc(space, &name, &entry);
+ if (kr != KERN_SUCCESS) {
+- /* unlocks/locks space, so must start again */
+-
+- kr = ipc_entry_grow_table(space);
+- if (kr != KERN_SUCCESS)
+- return kr; /* space is unlocked */
+-
+- continue;
++ is_write_unlock(space);
++ return kr; /* space is unlocked */
+ }
+
+ assert(IE_BITS_TYPE(entry->ie_bits) == MACH_PORT_TYPE_NONE);
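Note the simplification in both call sites above: the old
ipc_entry_get/ipc_entry_grow_table pair unlocked the space, grew the
table and looped, whereas ipc_entry_alloc either yields an unused entry
or fails outright.  A sketch of the resulting caller pattern
(example_make_right is a hypothetical caller, not code from the
series):

	kern_return_t
	example_make_right(ipc_space_t space, mach_port_t *namep)
	{
		ipc_entry_t entry;
		kern_return_t kr;

		is_write_lock(space);
		kr = ipc_entry_alloc(space, namep, &entry);
		if (kr != KERN_SUCCESS) {
			/* No retry loop to unwind any more.  */
			is_write_unlock(space);
			return kr;
		}
		/* ... initialize the entry, space still write-locked ... */
		is_write_unlock(space);
		return KERN_SUCCESS;
	}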
+diff --git a/ipc/ipc_right.c b/ipc/ipc_right.c
+index 35061c9..773b3b1 100644
+--- a/ipc/ipc_right.c
++++ b/ipc/ipc_right.c
+@@ -43,7 +43,6 @@
+ #include <ipc/ipc_entry.h>
+ #include <ipc/ipc_space.h>
+ #include <ipc/ipc_object.h>
+-#include <ipc/ipc_hash.h>
+ #include <ipc/ipc_port.h>
+ #include <ipc/ipc_pset.h>
+ #include <ipc/ipc_marequest.h>
+@@ -142,7 +141,8 @@ ipc_right_reverse(
+ return TRUE;
+ }
+
+- if (ipc_hash_lookup(space, (ipc_object_t) port, namep, entryp)) {
++ if ((*entryp = ipc_reverse_lookup(space, (ipc_object_t) port))) {
++ *namep = (*entryp)->ie_name;
+ assert((entry = *entryp) != IE_NULL);
+ assert(IE_BITS_TYPE(entry->ie_bits) == MACH_PORT_TYPE_SEND);
+ assert(port == (ipc_port_t) entry->ie_object);
+@@ -392,7 +392,7 @@ ipc_right_check(
+ ipc_marequest_cancel(space, name);
+ }
+
+- ipc_hash_delete(space, (ipc_object_t) port, name, entry);
++ ipc_reverse_remove(space, (ipc_object_t) port);
+ } else {
+ assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_SEND_ONCE);
+ assert(IE_BITS_UREFS(bits) == 1);
+@@ -609,8 +609,7 @@ ipc_right_destroy(
+ }
+
+ if (type == MACH_PORT_TYPE_SEND)
+- ipc_hash_delete(space, (ipc_object_t) port,
+- name, entry);
++ ipc_reverse_remove(space, (ipc_object_t) port);
+
+ ip_lock(port);
+
+@@ -789,8 +788,7 @@ ipc_right_dealloc(
+ dnrequest = ipc_right_dncancel_macro(space, port,
+ name, entry);
+
+- ipc_hash_delete(space, (ipc_object_t) port,
+- name, entry);
++ ipc_reverse_remove(space, (ipc_object_t) port);
+
+ if (bits & IE_BITS_MAREQUEST)
+ ipc_marequest_cancel(space, name);
+@@ -1134,8 +1132,7 @@ ipc_right_delta(
+ dnrequest = ipc_right_dncancel_macro(
+ space, port, name, entry);
+
+- ipc_hash_delete(space, (ipc_object_t) port,
+- name, entry);
++ ipc_reverse_remove(space, (ipc_object_t) port);
+
+ if (bits & IE_BITS_MAREQUEST)
+ ipc_marequest_cancel(space, name);
+@@ -1410,8 +1407,8 @@ ipc_right_copyin(
+ assert(IE_BITS_UREFS(bits) > 0);
+ assert(port->ip_srights > 0);
+
+- ipc_hash_insert(space, (ipc_object_t) port,
+- name, entry);
++ entry->ie_name = name;
++ ipc_reverse_insert(space, (ipc_object_t) port, entry);
+
+ ip_reference(port);
+ } else {
+@@ -1534,8 +1531,7 @@ ipc_right_copyin(
+ dnrequest = ipc_right_dncancel_macro(
+ space, port, name, entry);
+
+- ipc_hash_delete(space, (ipc_object_t) port,
+- name, entry);
++ ipc_reverse_remove(space, (ipc_object_t) port);
+
+ if (bits & IE_BITS_MAREQUEST)
+ ipc_marequest_cancel(space, name);
+@@ -1796,8 +1792,7 @@ ipc_right_copyin_two(
+ dnrequest = ipc_right_dncancel_macro(space, port,
+ name, entry);
+
+- ipc_hash_delete(space, (ipc_object_t) port,
+- name, entry);
++ ipc_reverse_remove(space, (ipc_object_t) port);
+
+ if (bits & IE_BITS_MAREQUEST)
+ ipc_marequest_cancel(space, name);
+@@ -1921,8 +1916,8 @@ ipc_right_copyout(
+
+ /* entry is locked holding ref, so can use port */
+
+- ipc_hash_insert(space, (ipc_object_t) port,
+- name, entry);
++ entry->ie_name = name;
++ ipc_reverse_insert(space, (ipc_object_t) port, entry);
+ }
+
+ entry->ie_bits = (bits | MACH_PORT_TYPE_SEND) + 1;
+@@ -1956,8 +1951,7 @@ ipc_right_copyout(
+
+ /* entry is locked holding ref, so can use port */
+
+- ipc_hash_delete(space, (ipc_object_t) port,
+- name, entry);
++ ipc_reverse_remove(space, (ipc_object_t) port);
+ } else {
+ assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_NONE);
+ assert(IE_BITS_UREFS(bits) == 0);
+@@ -2083,7 +2077,7 @@ ipc_right_rename(
+ ipc_marequest_rename(space, oname, nname);
+ }
+
+- /* initialize nentry before letting ipc_hash_insert see it */
++ /* initialize nentry before letting ipc_reverse_insert see it */
+
+ assert((nentry->ie_bits & IE_BITS_RIGHT_MASK) == 0);
+ nentry->ie_bits |= bits & IE_BITS_RIGHT_MASK;
+@@ -2097,8 +2091,9 @@ ipc_right_rename(
+ port = (ipc_port_t) object;
+ assert(port != IP_NULL);
+
+- ipc_hash_delete(space, (ipc_object_t) port, oname, oentry);
+- ipc_hash_insert(space, (ipc_object_t) port, nname, nentry);
++ ipc_reverse_remove(space, (ipc_object_t) port);
++ nentry->ie_name = nname;
++ ipc_reverse_insert(space, (ipc_object_t) port, nentry);
+ break;
+ }
+
+diff --git a/ipc/ipc_space.c b/ipc/ipc_space.c
+index dcc96b3..ea3cb3b 100644
+--- a/ipc/ipc_space.c
++++ b/ipc/ipc_space.c
+@@ -46,8 +46,6 @@
+ #include <kern/slab.h>
+ #include <ipc/port.h>
+ #include <ipc/ipc_entry.h>
+-#include <ipc/ipc_splay.h>
+-#include <ipc/ipc_hash.h>
+ #include <ipc/ipc_table.h>
+ #include <ipc/ipc_port.h>
+ #include <ipc/ipc_space.h>
+@@ -82,6 +80,9 @@ ipc_space_release(
+ ipc_space_release_macro(space);
+ }
+
++/* A place-holder object for the zeroth entry. */
++struct ipc_entry zero_entry;
++
+ /*
+ * Routine: ipc_space_create
+ * Purpose:
+@@ -102,53 +103,24 @@ ipc_space_create(
+ ipc_space_t *spacep)
+ {
+ ipc_space_t space;
+- ipc_entry_t table;
+- ipc_entry_num_t new_size;
+- mach_port_index_t index;
+
+ space = is_alloc();
+ if (space == IS_NULL)
+ return KERN_RESOURCE_SHORTAGE;
+
+- table = it_entries_alloc(initial);
+- if (table == IE_NULL) {
+- is_free(space);
+- return KERN_RESOURCE_SHORTAGE;
+- }
+-
+- new_size = initial->its_size;
+- memset((void *) table, 0, new_size * sizeof(struct ipc_entry));
+-
+- /*
+- * Initialize the free list in the table.
+- * Add the entries in reverse order, and
+- * set the generation number to -1, so that
+- * initial allocations produce "natural" names.
+- */
+-
+- for (index = 0; index < new_size; index++) {
+- ipc_entry_t entry = &table[index];
+-
+- entry->ie_bits = IE_BITS_GEN_MASK;
+- entry->ie_next = index+1;
+- }
+- table[new_size-1].ie_next = 0;
+-
+ is_ref_lock_init(space);
+ space->is_references = 2;
+
+ is_lock_init(space);
+ space->is_active = TRUE;
+- space->is_growing = FALSE;
+- space->is_table = table;
+- space->is_table_size = new_size;
+- space->is_table_next = initial+1;
+-
+- ipc_splay_tree_init(&space->is_tree);
+- space->is_tree_total = 0;
+- space->is_tree_small = 0;
+- space->is_tree_hash = 0;
++
++ rdxtree_init(&space->is_map);
+ rdxtree_init(&space->is_reverse_map);
++ /* The zeroth entry is reserved. */
++ rdxtree_insert(&space->is_map, 0, &zero_entry);
++ space->is_size = 1;
++ space->is_free_list = NULL;
++ space->is_free_list_size = 0;
+
+ *spacep = space;
+ return KERN_SUCCESS;
+@@ -202,10 +174,6 @@ void
+ ipc_space_destroy(
+ ipc_space_t space)
+ {
+- ipc_tree_entry_t tentry;
+- ipc_entry_t table;
+- ipc_entry_num_t size;
+- mach_port_index_t index;
+ boolean_t active;
+
+ assert(space != IS_NULL);
+@@ -218,60 +186,24 @@ ipc_space_destroy(
+ if (!active)
+ return;
+
+- /*
+- * If somebody is trying to grow the table,
+- * we must wait until they finish and figure
+- * out the space died.
+- */
++ ipc_entry_t entry;
++ struct rdxtree_iter iter;
++ rdxtree_for_each(&space->is_map, &iter, entry) {
++ if (entry->ie_name == MACH_PORT_NULL)
++ continue;
+
+- is_read_lock(space);
+- while (space->is_growing) {
+- assert_wait((event_t) space, FALSE);
+- is_read_unlock(space);
+- thread_block((void (*)(void)) 0);
+- is_read_lock(space);
+- }
+- is_read_unlock(space);
+-
+- /*
+- * Now we can futz with it without having it locked.
+- */
+-
+- table = space->is_table;
+- size = space->is_table_size;
+-
+- for (index = 0; index < size; index++) {
+- ipc_entry_t entry = &table[index];
+ mach_port_type_t type = IE_BITS_TYPE(entry->ie_bits);
+
+ if (type != MACH_PORT_TYPE_NONE) {
+ mach_port_t name =
+- MACH_PORT_MAKEB(index, entry->ie_bits);
++ MACH_PORT_MAKEB(entry->ie_name, entry->ie_bits);
+
+ ipc_right_clean(space, name, entry);
+ }
+- }
+
+- it_entries_free(space->is_table_next-1, table);
+-
+- for (tentry = ipc_splay_traverse_start(&space->is_tree);
+- tentry != ITE_NULL;
+- tentry = ipc_splay_traverse_next(&space->is_tree, TRUE)) {
+- mach_port_type_t type = IE_BITS_TYPE(tentry->ite_bits);
+- mach_port_t name = tentry->ite_name;
+-
+- assert(type != MACH_PORT_TYPE_NONE);
+-
+- /* use object before ipc_right_clean releases ref */
+-
+- if (type == MACH_PORT_TYPE_SEND)
+- ipc_hash_global_delete(space, tentry->ite_object,
+- name, tentry);
+-
+- ipc_right_clean(space, name, &tentry->ite_entry);
++ ie_free(entry);
+ }
+- ipc_splay_traverse_finish(&space->is_tree);
+-
++ rdxtree_remove_all(&space->is_map);
+ rdxtree_remove_all(&space->is_reverse_map);
+
+ /*
+diff --git a/ipc/ipc_space.h b/ipc/ipc_space.h
+index f41c64c..20b9d57 100644
+--- a/ipc/ipc_space.h
++++ b/ipc/ipc_space.h
+@@ -47,7 +47,7 @@
+ #include <kern/lock.h>
+ #include <kern/rdxtree.h>
+ #include <kern/slab.h>
+-#include <ipc/ipc_splay.h>
++#include <ipc/ipc_entry.h>
+ #include <ipc/ipc_types.h>
+
+ /*
+@@ -73,18 +73,16 @@ struct ipc_space {
+
+ decl_simple_lock_data(,is_lock_data)
+ boolean_t is_active; /* is the space alive? */
+- boolean_t is_growing; /* is the space growing? */
+- ipc_entry_t is_table; /* an array of entries */
+- ipc_entry_num_t is_table_size; /* current size of table */
+- struct ipc_table_size *is_table_next; /* info for larger table */
+- struct ipc_splay_tree is_tree; /* a splay tree of entries */
+- ipc_entry_num_t is_tree_total; /* number of entries in the tree */
+- ipc_entry_num_t is_tree_small; /* # of small entries in the tree */
+- ipc_entry_num_t is_tree_hash; /* # of hashed entries in the tree */
++ struct rdxtree is_map; /* a map of entries */
++ size_t is_size; /* number of entries */
+ struct rdxtree is_reverse_map; /* maps objects to entries */
+-
++ ipc_entry_t is_free_list; /* a linked list of free entries */
++ size_t is_free_list_size; /* number of free entries */
++#define IS_FREE_LIST_SIZE_LIMIT 64 /* maximum number of entries
++ in the free list */
+ };
+
++
+ #define IS_NULL ((ipc_space_t) 0)
+
+ extern struct kmem_cache ipc_space_cache;
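The free list caches up to IS_FREE_LIST_SIZE_LIMIT spare entries so
that allocating a right does not always hit the slab allocator.  A
minimal sketch of the intended push/pop discipline, assuming a link
field named ie_next_free and callers holding the space lock; the
series' real helpers may differ:

	static ipc_entry_t
	ie_alloc_cached(ipc_space_t space)
	{
		ipc_entry_t entry = space->is_free_list;

		if (entry != IE_NULL) {
			space->is_free_list = entry->ie_next_free;
			space->is_free_list_size -= 1;
			return entry;
		}
		return (ipc_entry_t) kmem_cache_alloc(&ipc_entry_cache);
	}

	static void
	ie_free_cached(ipc_space_t space, ipc_entry_t entry)
	{
		if (space->is_free_list_size < IS_FREE_LIST_SIZE_LIMIT) {
			entry->ie_next_free = space->is_free_list;
			space->is_free_list = entry;
			space->is_free_list_size += 1;
		} else
			kmem_cache_free(&ipc_entry_cache,
					(vm_offset_t) entry);
	}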
+diff --git a/ipc/ipc_splay.c b/ipc/ipc_splay.c
+deleted file mode 100644
+index 062a69f..0000000
+--- a/ipc/ipc_splay.c
++++ /dev/null
+@@ -1,920 +0,0 @@
+-/*
+- * Mach Operating System
+- * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+- * All Rights Reserved.
+- *
+- * Permission to use, copy, modify and distribute this software and its
+- * documentation is hereby granted, provided that both the copyright
+- * notice and this permission notice appear in all copies of the
+- * software, derivative works or modified versions, and any portions
+- * thereof, and that both notices appear in supporting documentation.
+- *
+- * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+- * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+- * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+- *
+- * Carnegie Mellon requests users of this software to return to
+- *
+- * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+- * School of Computer Science
+- * Carnegie Mellon University
+- * Pittsburgh PA 15213-3890
+- *
+- * any improvements or extensions that they make and grant Carnegie Mellon
+- * the rights to redistribute these changes.
+- */
+-/*
+- */
+-/*
+- * File: ipc/ipc_splay.c
+- * Author: Rich Draves
+- * Date: 1989
+- *
+- * Primitive splay tree operations.
+- */
+-
+-#include <mach/port.h>
+-#include <kern/assert.h>
+-#include <kern/macros.h>
+-#include <ipc/ipc_entry.h>
+-#include <ipc/ipc_splay.h>
+-
+-/*
+- * Splay trees are self-adjusting binary search trees.
+- * They have the following attractive properties:
+- * 1) Space efficient; only two pointers per entry.
+- * 2) Robust performance; amortized O(log n) per operation.
+- * 3) Recursion not needed.
+- * This makes them a good fall-back data structure for those
+- * entries that don't fit into the lookup table.
+- *
+- * The paper by Sleator and Tarjan, JACM v. 32, no. 3, pp. 652-686,
+- * describes the splaying operation. ipc_splay_prim_lookup
+- * and ipc_splay_prim_assemble implement the top-down splay
+- * described on p. 669.
+- *
+- * The tree is stored in an unassembled form. If ist_root is null,
+- * then the tree has no entries. Otherwise, ist_name records
+- * the value used for the last lookup. ist_root points to the
+- * middle tree obtained from the top-down splay. ist_ltree and
+- * ist_rtree point to left and right subtrees, whose entries
+- * are all smaller (larger) than those in the middle tree.
+- * ist_ltreep and ist_rtreep are pointers to fields in the
+- * left and right subtrees. ist_ltreep points to the rchild field
+- * of the largest entry in ltree, and ist_rtreep points to the
+- * lchild field of the smallest entry in rtree. The pointed-to
+- * fields aren't initialized. If the left (right) subtree is null,
+- * then ist_ltreep (ist_rtreep) points to the ist_ltree (ist_rtree)
+- * field in the splay structure itself.
+- *
+- * The primary advantage of the unassembled form is that repeated
+- * unsuccessful lookups are efficient. In particular, an unsuccessful
+- * lookup followed by an insert only requires one splaying operation.
+- *
+- * The traversal algorithm works via pointer inversion.
+- * When descending down the tree, child pointers are reversed
+- * to point back to the parent entry. When ascending,
+- * the pointers are restored to their original value.
+- *
+- * The biggest potential problem with the splay tree implementation
+- * is that the operations, even lookup, require an exclusive lock.
+- * If IPC spaces are protected with exclusive locks, then
+- * the splay tree doesn't require its own lock, and ist_lock/ist_unlock
+- * needn't do anything. If IPC spaces are protected with read/write
+- * locks then ist_lock/ist_unlock should provide exclusive access.
+- *
+- * If it becomes important to let lookups run in parallel,
+- * or if the restructuring makes lookups too expensive, then
+- * there is hope. Use a read/write lock on the splay tree.
+- * Keep track of the number of entries in the tree. When doing
+- * a lookup, first try a non-restructuring lookup with a read lock held,
+- * with a bound (based on log of size of the tree) on the number of
+- * entries to traverse. If the lookup runs up against the bound,
+- * then take a write lock and do a reorganizing lookup.
+- * This way, if lookups only access roughly balanced parts
+- * of the tree, then lookups run in parallel and do no restructuring.
+- *
+- * The traversal algorithm currently requires an exclusive lock.
+- * If that is a problem, the tree could be changed from an lchild/rchild
+- * representation to a leftmost child/right sibling representation.
+- * In conjunction with non-restructuring lookups, this would let
+- * lookups and traversals all run in parallel. But this representation
+- * is more complicated and would slow down the operations.
+- */
+-
+-/*
+- * Boundary values to hand to ipc_splay_prim_lookup:
+- */
+-
+-#define MACH_PORT_SMALLEST ((mach_port_t) 0)
+-#define MACH_PORT_LARGEST ((mach_port_t) ~0)
+-
+-/*
+- * Routine: ipc_splay_prim_lookup
+- * Purpose:
+- * Searches for the node labeled name in the splay tree.
+- * Returns three nodes (treep, ltreep, rtreep) and
+- * two pointers to nodes (ltreepp, rtreepp).
+- *
+- * ipc_splay_prim_lookup splits the supplied tree into
+- * three subtrees, left, middle, and right, returned
+- * in ltreep, treep, and rtreep.
+- *
+- * If name is present in the tree, then it is at
+- * the root of the middle tree. Otherwise, the root
+- * of the middle tree is the last node traversed.
+- *
+- * ipc_splay_prim_lookup returns a pointer into
+- * the left subtree, to the rchild field of its
+- * largest node, in ltreepp. It returns a pointer
+- * into the right subtree, to the lchild field of its
+- * smallest node, in rtreepp.
+- */
+-
+-static void
+-ipc_splay_prim_lookup(
+- mach_port_t name,
+- ipc_tree_entry_t tree,
+- ipc_tree_entry_t *treep,
+- ipc_tree_entry_t *ltreep,
+- ipc_tree_entry_t **ltreepp,
+- ipc_tree_entry_t *rtreep,
+- ipc_tree_entry_t **rtreepp)
+-{
+- mach_port_t tname; /* temp name */
+- ipc_tree_entry_t lchild, rchild; /* temp child pointers */
+-
+- assert(tree != ITE_NULL);
+-
+-#define link_left \
+-MACRO_BEGIN \
+- *ltreep = tree; \
+- ltreep = &tree->ite_rchild; \
+- tree = *ltreep; \
+-MACRO_END
+-
+-#define link_right \
+-MACRO_BEGIN \
+- *rtreep = tree; \
+- rtreep = &tree->ite_lchild; \
+- tree = *rtreep; \
+-MACRO_END
+-
+-#define rotate_left \
+-MACRO_BEGIN \
+- ipc_tree_entry_t temp = tree; \
+- \
+- tree = temp->ite_rchild; \
+- temp->ite_rchild = tree->ite_lchild; \
+- tree->ite_lchild = temp; \
+-MACRO_END
+-
+-#define rotate_right \
+-MACRO_BEGIN \
+- ipc_tree_entry_t temp = tree; \
+- \
+- tree = temp->ite_lchild; \
+- temp->ite_lchild = tree->ite_rchild; \
+- tree->ite_rchild = temp; \
+-MACRO_END
+-
+- while (name != (tname = tree->ite_name)) {
+- if (name < tname) {
+- /* descend to left */
+-
+- lchild = tree->ite_lchild;
+- if (lchild == ITE_NULL)
+- break;
+- tname = lchild->ite_name;
+-
+- if ((name < tname) &&
+- (lchild->ite_lchild != ITE_NULL))
+- rotate_right;
+- link_right;
+- if ((name > tname) &&
+- (lchild->ite_rchild != ITE_NULL))
+- link_left;
+- } else {
+- /* descend to right */
+-
+- rchild = tree->ite_rchild;
+- if (rchild == ITE_NULL)
+- break;
+- tname = rchild->ite_name;
+-
+- if ((name > tname) &&
+- (rchild->ite_rchild != ITE_NULL))
+- rotate_left;
+- link_left;
+- if ((name < tname) &&
+- (rchild->ite_lchild != ITE_NULL))
+- link_right;
+- }
+-
+- assert(tree != ITE_NULL);
+- }
+-
+- *treep = tree;
+- *ltreepp = ltreep;
+- *rtreepp = rtreep;
+-
+-#undef link_left
+-#undef link_right
+-#undef rotate_left
+-#undef rotate_right
+-}
+-
+-/*
+- * Routine: ipc_splay_prim_assemble
+- * Purpose:
+- * Assembles the results of ipc_splay_prim_lookup
+- * into a splay tree with the found node at the root.
+- *
+- * ltree and rtree are by-reference so storing
+- * through ltreep and rtreep can change them.
+- */
+-
+-static void
+-ipc_splay_prim_assemble(
+- ipc_tree_entry_t tree,
+- ipc_tree_entry_t *ltree,
+- ipc_tree_entry_t *ltreep,
+- ipc_tree_entry_t *rtree,
+- ipc_tree_entry_t *rtreep)
+-{
+- assert(tree != ITE_NULL);
+-
+- *ltreep = tree->ite_lchild;
+- *rtreep = tree->ite_rchild;
+-
+- tree->ite_lchild = *ltree;
+- tree->ite_rchild = *rtree;
+-}
+-
+-/*
+- * Routine: ipc_splay_tree_init
+- * Purpose:
+- * Initialize a raw splay tree for use.
+- */
+-
+-void
+-ipc_splay_tree_init(
+- ipc_splay_tree_t splay)
+-{
+- splay->ist_root = ITE_NULL;
+-}
+-
+-/*
+- * Routine: ipc_splay_tree_pick
+- * Purpose:
+- * Picks and returns a random entry in a splay tree.
+- * Returns FALSE if the splay tree is empty.
+- */
+-
+-boolean_t
+-ipc_splay_tree_pick(
+- ipc_splay_tree_t splay,
+- mach_port_t *namep,
+- ipc_tree_entry_t *entryp)
+-{
+- ipc_tree_entry_t root;
+-
+- ist_lock(splay);
+-
+- root = splay->ist_root;
+- if (root != ITE_NULL) {
+- *namep = root->ite_name;
+- *entryp = root;
+- }
+-
+- ist_unlock(splay);
+-
+- return root != ITE_NULL;
+-}
+-
+-/*
+- * Routine: ipc_splay_tree_lookup
+- * Purpose:
+- * Finds an entry in a splay tree.
+- * Returns ITE_NULL if not found.
+- */
+-
+-ipc_tree_entry_t
+-ipc_splay_tree_lookup(
+- ipc_splay_tree_t splay,
+- mach_port_t name)
+-{
+- ipc_tree_entry_t root;
+-
+- ist_lock(splay);
+-
+- root = splay->ist_root;
+- if (root != ITE_NULL) {
+- if (splay->ist_name != name) {
+- ipc_splay_prim_assemble(root,
+- &splay->ist_ltree, splay->ist_ltreep,
+- &splay->ist_rtree, splay->ist_rtreep);
+- ipc_splay_prim_lookup(name, root, &root,
+- &splay->ist_ltree, &splay->ist_ltreep,
+- &splay->ist_rtree, &splay->ist_rtreep);
+- splay->ist_name = name;
+- splay->ist_root = root;
+- }
+-
+- if (name != root->ite_name)
+- root = ITE_NULL;
+- }
+-
+- ist_unlock(splay);
+-
+- return root;
+-}
+-
+-/*
+- * Routine: ipc_splay_tree_insert
+- * Purpose:
+- * Inserts a new entry into a splay tree.
+- * The caller supplies a new entry.
+- * The name can't already be present in the tree.
+- */
+-
+-void
+-ipc_splay_tree_insert(
+- ipc_splay_tree_t splay,
+- mach_port_t name,
+- ipc_tree_entry_t entry)
+-{
+- ipc_tree_entry_t root;
+-
+- assert(entry != ITE_NULL);
+-
+- ist_lock(splay);
+-
+- root = splay->ist_root;
+- if (root == ITE_NULL) {
+- entry->ite_lchild = ITE_NULL;
+- entry->ite_rchild = ITE_NULL;
+- } else {
+- if (splay->ist_name != name) {
+- ipc_splay_prim_assemble(root,
+- &splay->ist_ltree, splay->ist_ltreep,
+- &splay->ist_rtree, splay->ist_rtreep);
+- ipc_splay_prim_lookup(name, root, &root,
+- &splay->ist_ltree, &splay->ist_ltreep,
+- &splay->ist_rtree, &splay->ist_rtreep);
+- }
+-
+- assert(root->ite_name != name);
+-
+- if (name < root->ite_name) {
+- assert(root->ite_lchild == ITE_NULL);
+-
+- *splay->ist_ltreep = ITE_NULL;
+- *splay->ist_rtreep = root;
+- } else {
+- assert(root->ite_rchild == ITE_NULL);
+-
+- *splay->ist_ltreep = root;
+- *splay->ist_rtreep = ITE_NULL;
+- }
+-
+- entry->ite_lchild = splay->ist_ltree;
+- entry->ite_rchild = splay->ist_rtree;
+- }
+-
+- entry->ite_name = name;
+- splay->ist_root = entry;
+- splay->ist_name = name;
+- splay->ist_ltreep = &splay->ist_ltree;
+- splay->ist_rtreep = &splay->ist_rtree;
+-
+- ist_unlock(splay);
+-}
+-
+-/*
+- * Routine: ipc_splay_tree_delete
+- * Purpose:
+- * Deletes an entry from a splay tree.
+- * The name must be present in the tree.
+- * Frees the entry.
+- *
+- * The "entry" argument isn't currently used.
+- * Other implementations might want it, though.
+- */
+-
+-void
+-ipc_splay_tree_delete(
+- ipc_splay_tree_t splay,
+- mach_port_t name,
+- ipc_tree_entry_t entry)
+-{
+- ipc_tree_entry_t root, saved;
+-
+- ist_lock(splay);
+-
+- root = splay->ist_root;
+- assert(root != ITE_NULL);
+-
+- if (splay->ist_name != name) {
+- ipc_splay_prim_assemble(root,
+- &splay->ist_ltree, splay->ist_ltreep,
+- &splay->ist_rtree, splay->ist_rtreep);
+- ipc_splay_prim_lookup(name, root, &root,
+- &splay->ist_ltree, &splay->ist_ltreep,
+- &splay->ist_rtree, &splay->ist_rtreep);
+- }
+-
+- assert(root->ite_name == name);
+- assert(root == entry);
+-
+- *splay->ist_ltreep = root->ite_lchild;
+- *splay->ist_rtreep = root->ite_rchild;
+- ite_free(root);
+-
+- root = splay->ist_ltree;
+- saved = splay->ist_rtree;
+-
+- if (root == ITE_NULL)
+- root = saved;
+- else if (saved != ITE_NULL) {
+- /*
+- * Find the largest node in the left subtree, and splay it
+- * to the root. Then add the saved right subtree.
+- */
+-
+- ipc_splay_prim_lookup(MACH_PORT_LARGEST, root, &root,
+- &splay->ist_ltree, &splay->ist_ltreep,
+- &splay->ist_rtree, &splay->ist_rtreep);
+- ipc_splay_prim_assemble(root,
+- &splay->ist_ltree, splay->ist_ltreep,
+- &splay->ist_rtree, splay->ist_rtreep);
+-
+- assert(root->ite_rchild == ITE_NULL);
+- root->ite_rchild = saved;
+- }
+-
+- splay->ist_root = root;
+- if (root != ITE_NULL) {
+- splay->ist_name = root->ite_name;
+- splay->ist_ltreep = &splay->ist_ltree;
+- splay->ist_rtreep = &splay->ist_rtree;
+- }
+-
+- ist_unlock(splay);
+-}
+-
+-/*
+- * Routine: ipc_splay_tree_split
+- * Purpose:
+- * Split a splay tree. Puts all entries smaller than "name"
+- * into a new tree, "small".
+- *
+- * Doesn't do locking on "small", because nobody else
+- * should be fiddling with the uninitialized tree.
+- */
+-
+-void
+-ipc_splay_tree_split(
+- ipc_splay_tree_t splay,
+- mach_port_t name,
+- ipc_splay_tree_t small)
+-{
+- ipc_tree_entry_t root;
+-
+- ipc_splay_tree_init(small);
+-
+- ist_lock(splay);
+-
+- root = splay->ist_root;
+- if (root != ITE_NULL) {
+- /* lookup name, to get it (or last traversed) to the top */
+-
+- if (splay->ist_name != name) {
+- ipc_splay_prim_assemble(root,
+- &splay->ist_ltree, splay->ist_ltreep,
+- &splay->ist_rtree, splay->ist_rtreep);
+- ipc_splay_prim_lookup(name, root, &root,
+- &splay->ist_ltree, &splay->ist_ltreep,
+- &splay->ist_rtree, &splay->ist_rtreep);
+- }
+-
+- if (root->ite_name < name) {
+- /* root goes into small */
+-
+- *splay->ist_ltreep = root->ite_lchild;
+- *splay->ist_rtreep = ITE_NULL;
+- root->ite_lchild = splay->ist_ltree;
+- assert(root->ite_rchild == ITE_NULL);
+-
+- small->ist_root = root;
+- small->ist_name = root->ite_name;
+- small->ist_ltreep = &small->ist_ltree;
+- small->ist_rtreep = &small->ist_rtree;
+-
+- /* rtree goes into splay */
+-
+- root = splay->ist_rtree;
+- splay->ist_root = root;
+- if (root != ITE_NULL) {
+- splay->ist_name = root->ite_name;
+- splay->ist_ltreep = &splay->ist_ltree;
+- splay->ist_rtreep = &splay->ist_rtree;
+- }
+- } else {
+- /* root stays in splay */
+-
+- *splay->ist_ltreep = root->ite_lchild;
+- root->ite_lchild = ITE_NULL;
+-
+- splay->ist_root = root;
+- splay->ist_name = name;
+- splay->ist_ltreep = &splay->ist_ltree;
+-
+- /* ltree goes into small */
+-
+- root = splay->ist_ltree;
+- small->ist_root = root;
+- if (root != ITE_NULL) {
+- small->ist_name = root->ite_name;
+- small->ist_ltreep = &small->ist_ltree;
+- small->ist_rtreep = &small->ist_rtree;
+- }
+- }
+- }
+-
+- ist_unlock(splay);
+-}
+-
+-/*
+- * Routine: ipc_splay_tree_join
+- * Purpose:
+- * Joins two splay trees. Merges the entries in "small",
+- * which must all be smaller than the entries in "splay",
+- * into "splay".
+- */
+-
+-void
+-ipc_splay_tree_join(
+- ipc_splay_tree_t splay,
+- ipc_splay_tree_t small)
+-{
+- ipc_tree_entry_t sroot;
+-
+- /* pull entries out of small */
+-
+- ist_lock(small);
+-
+- sroot = small->ist_root;
+- if (sroot != ITE_NULL) {
+- ipc_splay_prim_assemble(sroot,
+- &small->ist_ltree, small->ist_ltreep,
+- &small->ist_rtree, small->ist_rtreep);
+- small->ist_root = ITE_NULL;
+- }
+-
+- ist_unlock(small);
+-
+- /* put entries, if any, into splay */
+-
+- if (sroot != ITE_NULL) {
+- ipc_tree_entry_t root;
+-
+- ist_lock(splay);
+-
+- root = splay->ist_root;
+- if (root == ITE_NULL) {
+- root = sroot;
+- } else {
+- /* get smallest entry in splay tree to top */
+-
+- if (splay->ist_name != MACH_PORT_SMALLEST) {
+- ipc_splay_prim_assemble(root,
+- &splay->ist_ltree, splay->ist_ltreep,
+- &splay->ist_rtree, splay->ist_rtreep);
+- ipc_splay_prim_lookup(MACH_PORT_SMALLEST,
+- root, &root,
+- &splay->ist_ltree, &splay->ist_ltreep,
+- &splay->ist_rtree, &splay->ist_rtreep);
+- }
+-
+- ipc_splay_prim_assemble(root,
+- &splay->ist_ltree, splay->ist_ltreep,
+- &splay->ist_rtree, splay->ist_rtreep);
+-
+- assert(root->ite_lchild == ITE_NULL);
+- assert(sroot->ite_name < root->ite_name);
+- root->ite_lchild = sroot;
+- }
+-
+- splay->ist_root = root;
+- splay->ist_name = root->ite_name;
+- splay->ist_ltreep = &splay->ist_ltree;
+- splay->ist_rtreep = &splay->ist_rtree;
+-
+- ist_unlock(splay);
+- }
+-}
+-
+-/*
+- * Routine: ipc_splay_tree_bounds
+- * Purpose:
+- * Given a name, returns the largest value present
+- * in the tree that is smaller than or equal to the name,
+- * or ~0 if no such value exists. Similarly, returns
+- * the smallest value present that is greater than or
+- * equal to the name, or 0 if no such value exists.
+- *
+- * Hence, if
+- * lower = upper, then lower = name = upper
+- * and name is present in the tree
+- * lower = ~0 and upper = 0,
+- * then the tree is empty
+- * lower = ~0 and upper > 0, then name < upper
+- * and upper is smallest value in tree
+- * lower < ~0 and upper = 0, then lower < name
+- * and lower is largest value in tree
+- * lower < ~0 and upper > 0, then lower < name < upper
+- * and they are tight bounds on name
+- *
+- * (Note MACH_PORT_SMALLEST = 0 and MACH_PORT_LARGEST = ~0.)
+- */
+-
+-void
+-ipc_splay_tree_bounds(
+- ipc_splay_tree_t splay,
+- mach_port_t name,
+- mach_port_t *lowerp,
+- mach_port_t *upperp)
+-{
+- ipc_tree_entry_t root;
+-
+- ist_lock(splay);
+-
+- root = splay->ist_root;
+- if (root == ITE_NULL) {
+- *lowerp = MACH_PORT_LARGEST;
+- *upperp = MACH_PORT_SMALLEST;
+- } else {
+- mach_port_t rname;
+-
+- if (splay->ist_name != name) {
+- ipc_splay_prim_assemble(root,
+- &splay->ist_ltree, splay->ist_ltreep,
+- &splay->ist_rtree, splay->ist_rtreep);
+- ipc_splay_prim_lookup(name, root, &root,
+- &splay->ist_ltree, &splay->ist_ltreep,
+- &splay->ist_rtree, &splay->ist_rtreep);
+- splay->ist_name = name;
+- splay->ist_root = root;
+- }
+-
+- rname = root->ite_name;
+-
+- /*
+- * OK, it's a hack. We convert the ltreep and rtreep
+- * pointers back into real entry pointers,
+- * so we can pick the names out of the entries.
+- */
+-
+- if (rname <= name)
+- *lowerp = rname;
+- else if (splay->ist_ltreep == &splay->ist_ltree)
+- *lowerp = MACH_PORT_LARGEST;
+- else {
+- ipc_tree_entry_t entry;
+-
+- entry = (ipc_tree_entry_t)
+- ((char *)splay->ist_ltreep -
+- ((char *)&root->ite_rchild -
+- (char *)root));
+- *lowerp = entry->ite_name;
+- }
+-
+- if (rname >= name)
+- *upperp = rname;
+- else if (splay->ist_rtreep == &splay->ist_rtree)
+- *upperp = MACH_PORT_SMALLEST;
+- else {
+- ipc_tree_entry_t entry;
+-
+- entry = (ipc_tree_entry_t)
+- ((char *)splay->ist_rtreep -
+- ((char *)&root->ite_lchild -
+- (char *)root));
+- *upperp = entry->ite_name;
+- }
+- }
+-
+- ist_unlock(splay);
+-}
+-
+-/*
+- * Routine: ipc_splay_traverse_start
+- * Routine: ipc_splay_traverse_next
+- * Routine: ipc_splay_traverse_finish
+- * Purpose:
+- * Perform a symmetric order traversal of a splay tree.
+- * Usage:
+- * for (entry = ipc_splay_traverse_start(splay);
+- * entry != ITE_NULL;
+- * entry = ipc_splay_traverse_next(splay, delete)) {
+- * do something with entry
+- * }
+- * ipc_splay_traverse_finish(splay);
+- *
+- * If "delete" is TRUE, then the current entry
+- * is removed from the tree and deallocated.
+- *
+- * During the traversal, the splay tree is locked.
+- */
+-
+-ipc_tree_entry_t
+-ipc_splay_traverse_start(
+- ipc_splay_tree_t splay)
+-{
+- ipc_tree_entry_t current, parent;
+-
+- ist_lock(splay);
+-
+- current = splay->ist_root;
+- if (current != ITE_NULL) {
+- ipc_splay_prim_assemble(current,
+- &splay->ist_ltree, splay->ist_ltreep,
+- &splay->ist_rtree, splay->ist_rtreep);
+-
+- parent = ITE_NULL;
+-
+- while (current->ite_lchild != ITE_NULL) {
+- ipc_tree_entry_t next;
+-
+- next = current->ite_lchild;
+- current->ite_lchild = parent;
+- parent = current;
+- current = next;
+- }
+-
+- splay->ist_ltree = current;
+- splay->ist_rtree = parent;
+- }
+-
+- return current;
+-}
+-
+-ipc_tree_entry_t
+-ipc_splay_traverse_next(
+- ipc_splay_tree_t splay,
+- boolean_t delete)
+-{
+- ipc_tree_entry_t current, parent;
+-
+- /* pick up where traverse_entry left off */
+-
+- current = splay->ist_ltree;
+- parent = splay->ist_rtree;
+- assert(current != ITE_NULL);
+-
+- if (!delete)
+- goto traverse_right;
+-
+- /* we must delete current and patch the tree */
+-
+- if (current->ite_lchild == ITE_NULL) {
+- if (current->ite_rchild == ITE_NULL) {
+- /* like traverse_back, but with deletion */
+-
+- if (parent == ITE_NULL) {
+- ite_free(current);
+-
+- splay->ist_root = ITE_NULL;
+- return ITE_NULL;
+- }
+-
+- if (current->ite_name < parent->ite_name) {
+- ite_free(current);
+-
+- current = parent;
+- parent = current->ite_lchild;
+- current->ite_lchild = ITE_NULL;
+- goto traverse_entry;
+- } else {
+- ite_free(current);
+-
+- current = parent;
+- parent = current->ite_rchild;
+- current->ite_rchild = ITE_NULL;
+- goto traverse_back;
+- }
+- } else {
+- ipc_tree_entry_t prev;
+-
+- prev = current;
+- current = current->ite_rchild;
+- ite_free(prev);
+- goto traverse_left;
+- }
+- } else {
+- if (current->ite_rchild == ITE_NULL) {
+- ipc_tree_entry_t prev;
+-
+- prev = current;
+- current = current->ite_lchild;
+- ite_free(prev);
+- goto traverse_back;
+- } else {
+- ipc_tree_entry_t prev;
+- ipc_tree_entry_t ltree, rtree;
+- ipc_tree_entry_t *ltreep, *rtreep;
+-
+- /* replace current with largest of left children */
+-
+- prev = current;
+- ipc_splay_prim_lookup(MACH_PORT_LARGEST,
+- current->ite_lchild, &current,
+- &ltree, &ltreep, &rtree, &rtreep);
+- ipc_splay_prim_assemble(current,
+- &ltree, ltreep, &rtree, rtreep);
+-
+- assert(current->ite_rchild == ITE_NULL);
+- current->ite_rchild = prev->ite_rchild;
+- ite_free(prev);
+- goto traverse_right;
+- }
+- }
+- /*NOTREACHED*/
+-
+- /*
+- * A state machine: for each entry, we
+- * 1) traverse left subtree
+- * 2) traverse the entry
+- * 3) traverse right subtree
+- * 4) traverse back to parent
+- */
+-
+- traverse_left:
+- if (current->ite_lchild != ITE_NULL) {
+- ipc_tree_entry_t next;
+-
+- next = current->ite_lchild;
+- current->ite_lchild = parent;
+- parent = current;
+- current = next;
+- goto traverse_left;
+- }
+-
+- traverse_entry:
+- splay->ist_ltree = current;
+- splay->ist_rtree = parent;
+- return current;
+-
+- traverse_right:
+- if (current->ite_rchild != ITE_NULL) {
+- ipc_tree_entry_t next;
+-
+- next = current->ite_rchild;
+- current->ite_rchild = parent;
+- parent = current;
+- current = next;
+- goto traverse_left;
+- }
+-
+- traverse_back:
+- if (parent == ITE_NULL) {
+- splay->ist_root = current;
+- return ITE_NULL;
+- }
+-
+- if (current->ite_name < parent->ite_name) {
+- ipc_tree_entry_t prev;
+-
+- prev = current;
+- current = parent;
+- parent = current->ite_lchild;
+- current->ite_lchild = prev;
+- goto traverse_entry;
+- } else {
+- ipc_tree_entry_t prev;
+-
+- prev = current;
+- current = parent;
+- parent = current->ite_rchild;
+- current->ite_rchild = prev;
+- goto traverse_back;
+- }
+-}
+-
+-void
+-ipc_splay_traverse_finish(
+- ipc_splay_tree_t splay)
+-{
+- ipc_tree_entry_t root;
+-
+- root = splay->ist_root;
+- if (root != ITE_NULL) {
+- splay->ist_name = root->ite_name;
+- splay->ist_ltreep = &splay->ist_ltree;
+- splay->ist_rtreep = &splay->ist_rtree;
+- }
+-
+- ist_unlock(splay);
+-}
+-
+diff --git a/ipc/ipc_splay.h b/ipc/ipc_splay.h
+deleted file mode 100644
+index 42e5a80..0000000
+--- a/ipc/ipc_splay.h
++++ /dev/null
+@@ -1,114 +0,0 @@
+-/*
+- * Mach Operating System
+- * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+- * All Rights Reserved.
+- *
+- * Permission to use, copy, modify and distribute this software and its
+- * documentation is hereby granted, provided that both the copyright
+- * notice and this permission notice appear in all copies of the
+- * software, derivative works or modified versions, and any portions
+- * thereof, and that both notices appear in supporting documentation.
+- *
+- * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+- * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+- * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+- *
+- * Carnegie Mellon requests users of this software to return to
+- *
+- * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+- * School of Computer Science
+- * Carnegie Mellon University
+- * Pittsburgh PA 15213-3890
+- *
+- * any improvements or extensions that they make and grant Carnegie Mellon
+- * the rights to redistribute these changes.
+- */
+-/*
+- */
+-/*
+- * File: ipc/ipc_splay.h
+- * Author: Rich Draves
+- * Date: 1989
+- *
+- * Declarations of primitive splay tree operations.
+- */
+-
+-#ifndef _IPC_IPC_SPLAY_H_
+-#define _IPC_IPC_SPLAY_H_
+-
+-#include <mach/port.h>
+-#include <kern/assert.h>
+-#include <kern/macros.h>
+-#include <ipc/ipc_entry.h>
+-
+-typedef struct ipc_splay_tree {
+- mach_port_t ist_name; /* name used in last lookup */
+- ipc_tree_entry_t ist_root; /* root of middle tree */
+- ipc_tree_entry_t ist_ltree; /* root of left tree */
+- ipc_tree_entry_t *ist_ltreep; /* pointer into left tree */
+- ipc_tree_entry_t ist_rtree; /* root of right tree */
+- ipc_tree_entry_t *ist_rtreep; /* pointer into right tree */
+-} *ipc_splay_tree_t;
+-
+-#define ist_lock(splay) /* no locking */
+-#define ist_unlock(splay) /* no locking */
+-
+-/* Initialize a raw splay tree */
+-extern void ipc_splay_tree_init(
+- ipc_splay_tree_t splay);
+-
+-/* Pick a random entry in a splay tree */
+-extern boolean_t ipc_splay_tree_pick(
+- ipc_splay_tree_t splay,
+- mach_port_t *namep,
+- ipc_tree_entry_t *entryp);
+-
+-/* Find an entry in a splay tree */
+-extern ipc_tree_entry_t ipc_splay_tree_lookup(
+- ipc_splay_tree_t splay,
+- mach_port_t name);
+-
+-/* Insert a new entry into a splay tree */
+-extern void ipc_splay_tree_insert(
+- ipc_splay_tree_t splay,
+- mach_port_t name,
+- ipc_tree_entry_t entry);
+-
+-/* Delete an entry from a splay tree */
+-extern void ipc_splay_tree_delete(
+- ipc_splay_tree_t splay,
+- mach_port_t name,
+- ipc_tree_entry_t entry);
+-
+-/* Split a splay tree */
+-extern void ipc_splay_tree_split(
+- ipc_splay_tree_t splay,
+- mach_port_t name,
+- ipc_splay_tree_t entry);
+-
+-/* Join two splay trees */
+-extern void ipc_splay_tree_join(
+- ipc_splay_tree_t splay,
+- ipc_splay_tree_t small);
+-
+-/* Do a bounded splay tree lookup */
+-extern void ipc_splay_tree_bounds(
+- ipc_splay_tree_t splay,
+- mach_port_t name,
+- mach_port_t *lowerp,
+- mach_port_t *upperp);
+-
+-/* Initialize a symmetric order traversal of a splay tree */
+-extern ipc_tree_entry_t ipc_splay_traverse_start(
+- ipc_splay_tree_t splay);
+-
+-/* Return the next entry in a symmetric order traversal of a splay tree */
+-extern ipc_tree_entry_t ipc_splay_traverse_next(
+- ipc_splay_tree_t splay,
+- boolean_t delete);
+-
+-/* Terminate a symmetric order traversal of a splay tree */
+-extern void ipc_splay_traverse_finish(
+- ipc_splay_tree_t splay);
+-
+-#endif /* _IPC_IPC_SPLAY_H_ */
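With both the splay tree and the fixed table gone, a name lookup is a
single probe of the space's radix-tree map.  A minimal sketch of
ipc_entry_lookup (declared near the top of this patch), assuming the
raw name is used as the key and that unused placeholder slots are
filtered by their type bits; the real definition in the series may
differ in both respects:

	ipc_entry_t
	ipc_entry_lookup(ipc_space_t space, mach_port_t name)
	{
		ipc_entry_t entry;

		entry = rdxtree_lookup(&space->is_map,
				       (rdxtree_key_t) name);
		if (entry != IE_NULL
		    && IE_BITS_TYPE(entry->ie_bits) == MACH_PORT_TYPE_NONE)
			entry = IE_NULL;  /* reserved slot, e.g. index 0 */

		return entry;
	}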
+diff --git a/ipc/mach_debug.c b/ipc/mach_debug.c
+index eb52e1c..efb07a4 100644
+--- a/ipc/mach_debug.c
++++ b/ipc/mach_debug.c
+@@ -46,7 +46,6 @@
+ #include <vm/vm_kern.h>
+ #include <ipc/ipc_space.h>
+ #include <ipc/ipc_port.h>
+-#include <ipc/ipc_hash.h>
+ #include <ipc/ipc_marequest.h>
+ #include <ipc/ipc_table.h>
+ #include <ipc/ipc_right.h>
+@@ -94,85 +93,6 @@ mach_port_get_srights(
+ }
+
+ /*
+- * Routine: host_ipc_hash_info
+- * Purpose:
+- * Return information about the global reverse hash table.
+- * Conditions:
+- * Nothing locked. Obeys CountInOut protocol.
+- * Returns:
+- * KERN_SUCCESS Returned information.
+- * KERN_INVALID_HOST The host is null.
+- * KERN_RESOURCE_SHORTAGE Couldn't allocate memory.
+- */
+-
+-kern_return_t
+-host_ipc_hash_info(
+- host_t host,
+- hash_info_bucket_array_t *infop,
+- mach_msg_type_number_t *countp)
+-{
+- vm_offset_t addr;
+- vm_size_t size = 0; /* Suppress gcc warning */
+- hash_info_bucket_t *info;
+- unsigned int potential, actual;
+- kern_return_t kr;
+-
+- if (host == HOST_NULL)
+- return KERN_INVALID_HOST;
+-
+- /* start with in-line data */
+-
+- info = *infop;
+- potential = *countp;
+-
+- for (;;) {
+- actual = ipc_hash_info(info, potential);
+- if (actual <= potential)
+- break;
+-
+- /* allocate more memory */
+-
+- if (info != *infop)
+- kmem_free(ipc_kernel_map, addr, size);
+-
+- size = round_page(actual * sizeof *info);
+- kr = kmem_alloc_pageable(ipc_kernel_map, &addr, size);
+- if (kr != KERN_SUCCESS)
+- return KERN_RESOURCE_SHORTAGE;
+-
+- info = (hash_info_bucket_t *) addr;
+- potential = size/sizeof *info;
+- }
+-
+- if (info == *infop) {
+- /* data fit in-line; nothing to deallocate */
+-
+- *countp = actual;
+- } else if (actual == 0) {
+- kmem_free(ipc_kernel_map, addr, size);
+-
+- *countp = 0;
+- } else {
+- vm_map_copy_t copy;
+- vm_size_t used;
+-
+- used = round_page(actual * sizeof *info);
+-
+- if (used != size)
+- kmem_free(ipc_kernel_map, addr + used, size - used);
+-
+- kr = vm_map_copyin(ipc_kernel_map, addr, used,
+- TRUE, &copy);
+- assert(kr == KERN_SUCCESS);
+-
+- *infop = (hash_info_bucket_t *) copy;
+- *countp = actual;
+- }
+-
+- return KERN_SUCCESS;
+-}
+-
+-/*
+ * Routine: host_ipc_marequest_info
+ * Purpose:
+ * Return information about the marequest hash table.
+@@ -253,251 +173,6 @@ host_ipc_marequest_info(
+ }
+
+ /*
+- * Routine: mach_port_space_info
+- * Purpose:
+- * Returns information about an IPC space.
+- * Conditions:
+- * Nothing locked. Obeys CountInOut protocol.
+- * Returns:
+- * KERN_SUCCESS Returned information.
+- * KERN_INVALID_TASK The space is null.
+- * KERN_INVALID_TASK The space is dead.
+- * KERN_RESOURCE_SHORTAGE Couldn't allocate memory.
+- */
+-
+-kern_return_t
+-mach_port_space_info(
+- ipc_space_t space,
+- ipc_info_space_t *infop,
+- ipc_info_name_array_t *tablep,
+- mach_msg_type_number_t *tableCntp,
+- ipc_info_tree_name_array_t *treep,
+- mach_msg_type_number_t *treeCntp)
+-{
+- ipc_info_name_t *table_info;
+- unsigned int table_potential, table_actual;
+- vm_offset_t table_addr;
+- vm_size_t table_size = 0; /* Suppress gcc warning */
+- ipc_info_tree_name_t *tree_info;
+- unsigned int tree_potential, tree_actual;
+- vm_offset_t tree_addr;
+- vm_size_t tree_size = 0; /* Suppress gcc warning */
+- ipc_tree_entry_t tentry;
+- ipc_entry_t table;
+- ipc_entry_num_t tsize;
+- mach_port_index_t index;
+- kern_return_t kr;
+-
+- if (space == IS_NULL)
+- return KERN_INVALID_TASK;
+-
+- /* start with in-line memory */
+-
+- table_info = *tablep;
+- table_potential = *tableCntp;
+- tree_info = *treep;
+- tree_potential = *treeCntp;
+-
+- for (;;) {
+- is_read_lock(space);
+- if (!space->is_active) {
+- is_read_unlock(space);
+- if (table_info != *tablep)
+- kmem_free(ipc_kernel_map,
+- table_addr, table_size);
+- if (tree_info != *treep)
+- kmem_free(ipc_kernel_map,
+- tree_addr, tree_size);
+- return KERN_INVALID_TASK;
+- }
+-
+- table_actual = space->is_table_size;
+- tree_actual = space->is_tree_total;
+-
+- if ((table_actual <= table_potential) &&
+- (tree_actual <= tree_potential))
+- break;
+-
+- is_read_unlock(space);
+-
+- if (table_actual > table_potential) {
+- if (table_info != *tablep)
+- kmem_free(ipc_kernel_map,
+- table_addr, table_size);
+-
+- table_size = round_page(table_actual *
+- sizeof *table_info);
+- kr = kmem_alloc(ipc_kernel_map,
+- &table_addr, table_size);
+- if (kr != KERN_SUCCESS) {
+- if (tree_info != *treep)
+- kmem_free(ipc_kernel_map,
+- tree_addr, tree_size);
+-
+- return KERN_RESOURCE_SHORTAGE;
+- }
+-
+- table_info = (ipc_info_name_t *) table_addr;
+- table_potential = table_size/sizeof *table_info;
+- }
+-
+- if (tree_actual > tree_potential) {
+- if (tree_info != *treep)
+- kmem_free(ipc_kernel_map,
+- tree_addr, tree_size);
+-
+- tree_size = round_page(tree_actual *
+- sizeof *tree_info);
+- kr = kmem_alloc(ipc_kernel_map,
+- &tree_addr, tree_size);
+- if (kr != KERN_SUCCESS) {
+- if (table_info != *tablep)
+- kmem_free(ipc_kernel_map,
+- table_addr, table_size);
+-
+- return KERN_RESOURCE_SHORTAGE;
+- }
+-
+- tree_info = (ipc_info_tree_name_t *) tree_addr;
+- tree_potential = tree_size/sizeof *tree_info;
+- }
+- }
+- /* space is read-locked and active; we have enough wired memory */
+-
+- infop->iis_genno_mask = MACH_PORT_NGEN(MACH_PORT_DEAD);
+- infop->iis_table_size = space->is_table_size;
+- infop->iis_table_next = space->is_table_next->its_size;
+- infop->iis_tree_size = space->is_tree_total;
+- infop->iis_tree_small = space->is_tree_small;
+- infop->iis_tree_hash = space->is_tree_hash;
+-
+- table = space->is_table;
+- tsize = space->is_table_size;
+-
+- for (index = 0; index < tsize; index++) {
+- ipc_info_name_t *iin = &table_info[index];
+- ipc_entry_t entry = &table[index];
+- ipc_entry_bits_t bits = entry->ie_bits;
+-
+- iin->iin_name = MACH_PORT_MAKEB(index, bits);
+- iin->iin_collision = (bits & IE_BITS_COLLISION) ? TRUE : FALSE;
+- iin->iin_compat = FALSE;
+- iin->iin_marequest = (bits & IE_BITS_MAREQUEST) ? TRUE : FALSE;
+- iin->iin_type = IE_BITS_TYPE(bits);
+- iin->iin_urefs = IE_BITS_UREFS(bits);
+- iin->iin_object = (vm_offset_t) entry->ie_object;
+- iin->iin_next = entry->ie_next;
+- iin->iin_hash = entry->ie_index;
+- }
+-
+- for (tentry = ipc_splay_traverse_start(&space->is_tree), index = 0;
+- tentry != ITE_NULL;
+- tentry = ipc_splay_traverse_next(&space->is_tree, FALSE)) {
+- ipc_info_tree_name_t *iitn = &tree_info[index++];
+- ipc_info_name_t *iin = &iitn->iitn_name;
+- ipc_entry_t entry = &tentry->ite_entry;
+- ipc_entry_bits_t bits = entry->ie_bits;
+-
+- assert(IE_BITS_TYPE(bits) != MACH_PORT_TYPE_NONE);
+-
+- iin->iin_name = tentry->ite_name;
+- iin->iin_collision = (bits & IE_BITS_COLLISION) ? TRUE : FALSE;
+- iin->iin_compat = FALSE;
+- iin->iin_marequest = (bits & IE_BITS_MAREQUEST) ? TRUE : FALSE;
+- iin->iin_type = IE_BITS_TYPE(bits);
+- iin->iin_urefs = IE_BITS_UREFS(bits);
+- iin->iin_object = (vm_offset_t) entry->ie_object;
+- iin->iin_next = entry->ie_next;
+- iin->iin_hash = entry->ie_index;
+-
+- if (tentry->ite_lchild == ITE_NULL)
+- iitn->iitn_lchild = MACH_PORT_NULL;
+- else
+- iitn->iitn_lchild = tentry->ite_lchild->ite_name;
+-
+- if (tentry->ite_rchild == ITE_NULL)
+- iitn->iitn_rchild = MACH_PORT_NULL;
+- else
+- iitn->iitn_rchild = tentry->ite_rchild->ite_name;
+-
+- }
+- ipc_splay_traverse_finish(&space->is_tree);
+- is_read_unlock(space);
+-
+- if (table_info == *tablep) {
+- /* data fit in-line; nothing to deallocate */
+-
+- *tableCntp = table_actual;
+- } else if (table_actual == 0) {
+- kmem_free(ipc_kernel_map, table_addr, table_size);
+-
+- *tableCntp = 0;
+- } else {
+- vm_size_t size_used, rsize_used;
+- vm_map_copy_t copy;
+-
+- /* kmem_alloc doesn't zero memory */
+-
+- size_used = table_actual * sizeof *table_info;
+- rsize_used = round_page(size_used);
+-
+- if (rsize_used != table_size)
+- kmem_free(ipc_kernel_map,
+- table_addr + rsize_used,
+- table_size - rsize_used);
+-
+- if (size_used != rsize_used)
+- memset((void *) (table_addr + size_used), 0,
+- rsize_used - size_used);
+-
+- kr = vm_map_copyin(ipc_kernel_map, table_addr, rsize_used,
+- TRUE, &copy);
+-
+- assert(kr == KERN_SUCCESS);
+-
+- *tablep = (ipc_info_name_t *) copy;
+- *tableCntp = table_actual;
+- }
+-
+- if (tree_info == *treep) {
+- /* data fit in-line; nothing to deallocate */
+-
+- *treeCntp = tree_actual;
+- } else if (tree_actual == 0) {
+- kmem_free(ipc_kernel_map, tree_addr, tree_size);
+-
+- *treeCntp = 0;
+- } else {
+- vm_size_t size_used, rsize_used;
+- vm_map_copy_t copy;
+-
+- /* kmem_alloc doesn't zero memory */
+-
+- size_used = tree_actual * sizeof *tree_info;
+- rsize_used = round_page(size_used);
+-
+- if (rsize_used != tree_size)
+- kmem_free(ipc_kernel_map,
+- tree_addr + rsize_used,
+- tree_size - rsize_used);
+-
+- if (size_used != rsize_used)
+- memset((void *) (tree_addr + size_used), 0,
+- rsize_used - size_used);
+-
+- kr = vm_map_copyin(ipc_kernel_map, tree_addr, rsize_used,
+- TRUE, &copy);
+-
+- assert(kr == KERN_SUCCESS);
+-
+- *treep = (ipc_info_tree_name_t *) copy;
+- *treeCntp = tree_actual;
+- }
+-
+- return KERN_SUCCESS;
+-}
+-
+-/*
+ * Routine: mach_port_dnrequest_info
+ * Purpose:
+ * Returns information about the dead-name requests
+diff --git a/ipc/mach_port.c b/ipc/mach_port.c
+index 4e89527..93a1248 100644
+--- a/ipc/mach_port.c
++++ b/ipc/mach_port.c
+@@ -150,10 +150,6 @@ mach_port_names(
+ mach_port_type_t **typesp,
+ mach_msg_type_number_t *typesCnt)
+ {
+- ipc_tree_entry_t tentry;
+- ipc_entry_t table;
+- ipc_entry_num_t tsize;
+- mach_port_index_t index;
+ ipc_entry_num_t actual; /* this many names */
+ ipc_port_timestamp_t timestamp; /* logical time of this operation */
+ mach_port_t *names;
+@@ -190,7 +186,7 @@ mach_port_names(
+
+ /* upper bound on number of names in the space */
+
+- bound = space->is_table_size + space->is_tree_total;
++ bound = space->is_size;
+ size_needed = round_page(bound * sizeof(mach_port_t));
+
+ if (size_needed <= size)
+@@ -235,33 +231,16 @@ mach_port_names(
+
+ timestamp = ipc_port_timestamp();
+
+- table = space->is_table;
+- tsize = space->is_table_size;
+-
+- for (index = 0; index < tsize; index++) {
+- ipc_entry_t entry = &table[index];
++ ipc_entry_t entry;
++ struct rdxtree_iter iter;
++ rdxtree_for_each(&space->is_map, &iter, entry) {
+ ipc_entry_bits_t bits = entry->ie_bits;
+
+ if (IE_BITS_TYPE(bits) != MACH_PORT_TYPE_NONE) {
+- mach_port_t name = MACH_PORT_MAKEB(index, bits);
+-
+- mach_port_names_helper(timestamp, entry, name,
++ mach_port_names_helper(timestamp, entry, entry->ie_name,
+ names, types, &actual);
+ }
+ }
+-
+- for (tentry = ipc_splay_traverse_start(&space->is_tree);
+- tentry != ITE_NULL;
+- tentry = ipc_splay_traverse_next(&space->is_tree, FALSE)) {
+- ipc_entry_t entry = &tentry->ite_entry;
+- mach_port_t name = tentry->ite_name;
+-
+- assert(IE_BITS_TYPE(tentry->ite_bits) != MACH_PORT_TYPE_NONE);
+-
+- mach_port_names_helper(timestamp, entry, name,
+- names, types, &actual);
+- }
+- ipc_splay_traverse_finish(&space->is_tree);
+ is_read_unlock(space);
+
+ if (actual == 0) {
+@@ -946,10 +925,7 @@ mach_port_get_set_status(
+ size = PAGE_SIZE; /* initial guess */
+
+ for (;;) {
+- ipc_tree_entry_t tentry;
+- ipc_entry_t entry, table;
+- ipc_entry_num_t tsize;
+- mach_port_index_t index;
++ ipc_entry_t entry;
+ mach_port_t *names;
+ ipc_pset_t pset;
+
+@@ -986,11 +962,9 @@ mach_port_get_set_status(
+ maxnames = size / sizeof(mach_port_t);
+ actual = 0;
+
+- table = space->is_table;
+- tsize = space->is_table_size;
+-
+- for (index = 0; index < tsize; index++) {
+- ipc_entry_t ientry = &table[index];
++ ipc_entry_t ientry;
++ struct rdxtree_iter iter;
++ rdxtree_for_each(&space->is_map, &iter, ientry) {
+ ipc_entry_bits_t bits = ientry->ie_bits;
+
+ if (bits & MACH_PORT_TYPE_RECEIVE) {
+@@ -1002,22 +976,6 @@ mach_port_get_set_status(
+ }
+ }
+
+- for (tentry = ipc_splay_traverse_start(&space->is_tree);
+- tentry != ITE_NULL;
+- tentry = ipc_splay_traverse_next(&space->is_tree,FALSE)) {
+- ipc_entry_bits_t bits = tentry->ite_bits;
+-
+- assert(IE_BITS_TYPE(bits) != MACH_PORT_TYPE_NONE);
+-
+- if (bits & MACH_PORT_TYPE_RECEIVE) {
+- ipc_port_t port =
+- (ipc_port_t) tentry->ite_object;
+-
+- mach_port_gst_helper(pset, port, maxnames,
+- names, &actual);
+- }
+- }
+- ipc_splay_traverse_finish(&space->is_tree);
+ is_read_unlock(space);
+
+ if (actual <= maxnames)
+diff --git a/ipc/port.h b/ipc/port.h
+index d359115..49af6e2 100644
+--- a/ipc/port.h
++++ b/ipc/port.h
+@@ -45,10 +45,7 @@
+ * mach_port_t must be an unsigned type. Port values
+ * have two parts, a generation number and an index.
+ * These macros encapsulate all knowledge of how
+- * a mach_port_t is laid out. However, ipc/ipc_entry.c
+- * implicitly assumes when it uses the splay tree functions
+- * that the generation number is in the low bits, so that
+- * names are ordered first by index and then by generation.
++ * a mach_port_t is laid out.
+ *
+ * If the size of generation numbers changes,
+ * be sure to update IE_BITS_GEN_MASK and friends
+--
+2.1.4
+
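The mach_port.c hunks above collapse the old two-pass scan (fixed-size table first, then splay tree) into a single walk over the space's radix tree. A minimal sketch of the resulting iteration pattern, assuming the `rdxtree_for_each' macro from patch 2 and the entry layout from patch 5; the `count_active' helper itself is hypothetical:

/* Hypothetical helper: count the active entries in an IPC space.
 * Assumes ipc/ipc_space.h and kern/rdxtree.h from this series, and
 * that the space is read-locked and active, as in mach_port_names. */
static unsigned int
count_active(ipc_space_t space)
{
	ipc_entry_t entry;
	struct rdxtree_iter iter;
	unsigned int count = 0;

	rdxtree_for_each(&space->is_map, &iter, entry) {
		/* Free-listed slots stay in the tree with type NONE
		 * (see ipc_entry_dealloc), so the type check remains
		 * necessary. */
		if (IE_BITS_TYPE(entry->ie_bits) != MACH_PORT_TYPE_NONE)
			count += 1;
	}
	return count;
}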
diff --git a/debian/patches/0006-ipc-inline-key-ipc-entry-lookup-functions.patch b/debian/patches/0006-ipc-inline-key-ipc-entry-lookup-functions.patch
new file mode 100644
index 0000000..5b4712a
--- /dev/null
+++ b/debian/patches/0006-ipc-inline-key-ipc-entry-lookup-functions.patch
@@ -0,0 +1,316 @@
+From 4956458e0d0095a235973a2f5341db2b68ec4a3f Mon Sep 17 00:00:00 2001
+From: Justus Winter <4winter@informatik.uni-hamburg.de>
+Date: Wed, 1 Apr 2015 17:17:01 +0200
+Subject: [PATCH gnumach 6/6] ipc: inline key ipc entry lookup functions
+
+Declare the IPC entry lookup functions, which were previously inlined
+by hand, as `static inline', so that the compiler can inline them into
+the fast paths.
+
+* ipc/ipc_entry.c (ipc_entry_lookup, ipc_entry_get,
+ipc_entry_dealloc): Move functions...
+* ipc/ipc_space.h: ... here, and declare them as `static inline'.
+* ipc/ipc_entry.h: Drop associated declarations.
+---
+ ipc/ipc_entry.c | 119 -------------------------------------------------------
+ ipc/ipc_entry.h | 9 -----
+ ipc/ipc_space.h | 120 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+ 3 files changed, 120 insertions(+), 128 deletions(-)
+
+diff --git a/ipc/ipc_entry.c b/ipc/ipc_entry.c
+index 2d6e665..a5fe319 100644
+--- a/ipc/ipc_entry.c
++++ b/ipc/ipc_entry.c
+@@ -52,93 +52,6 @@
+ struct kmem_cache ipc_entry_cache;
+
+ /*
+- * Routine: ipc_entry_lookup
+- * Purpose:
+- * Searches for an entry, given its name.
+- * Conditions:
+- * The space must be read or write locked throughout.
+- * The space must be active.
+- */
+-
+-ipc_entry_t
+-ipc_entry_lookup(
+- ipc_space_t space,
+- mach_port_t name)
+-{
+- ipc_entry_t entry;
+-
+- assert(space->is_active);
+- entry = rdxtree_lookup(&space->is_map, (rdxtree_key_t) name);
+- if (entry != IE_NULL
+- && IE_BITS_TYPE(entry->ie_bits) == MACH_PORT_TYPE_NONE)
+- entry = NULL;
+- assert((entry == IE_NULL) || IE_BITS_TYPE(entry->ie_bits));
+- return entry;
+-}
+-
+-/*
+- * Routine: ipc_entry_get
+- * Purpose:
+- * Tries to allocate an entry out of the space.
+- * Conditions:
+- * The space is write-locked and active throughout.
+- * An object may be locked. Will not allocate memory.
+- * Returns:
+- * KERN_SUCCESS A free entry was found.
+- * KERN_NO_SPACE No entry allocated.
+- */
+-
+-kern_return_t
+-ipc_entry_get(
+- ipc_space_t space,
+- mach_port_t *namep,
+- ipc_entry_t *entryp)
+-{
+- mach_port_t new_name;
+- ipc_entry_t free_entry;
+-
+- assert(space->is_active);
+-
+- /* Get entry from the free list. */
+- free_entry = space->is_free_list;
+- if (free_entry == IE_NULL)
+- return KERN_NO_SPACE;
+-
+- space->is_free_list = free_entry->ie_next_free;
+- space->is_free_list_size -= 1;
+-
+- /*
+- * Initialize the new entry. We need only
+- * increment the generation number and clear ie_request.
+- */
+-
+- {
+- mach_port_gen_t gen;
+-
+- assert((free_entry->ie_bits &~ IE_BITS_GEN_MASK) == 0);
+- gen = free_entry->ie_bits + IE_BITS_GEN_ONE;
+- free_entry->ie_bits = gen;
+- free_entry->ie_request = 0;
+- new_name = MACH_PORT_MAKE(free_entry->ie_name, gen);
+- }
+-
+- /*
+- * The new name can't be MACH_PORT_NULL because index
+- * is non-zero. It can't be MACH_PORT_DEAD because
+- * the table isn't allowed to grow big enough.
+- * (See comment in ipc/ipc_table.h.)
+- */
+-
+- assert(MACH_PORT_VALID(new_name));
+- assert(free_entry->ie_object == IO_NULL);
+-
+- space->is_size += 1;
+- *namep = new_name;
+- *entryp = free_entry;
+- return KERN_SUCCESS;
+-}
+-
+-/*
+ * Routine: ipc_entry_alloc
+ * Purpose:
+ * Allocate an entry out of the space.
+@@ -293,38 +206,6 @@ ipc_entry_alloc_name(
+ return KERN_SUCCESS;
+ }
+
+-/*
+- * Routine: ipc_entry_dealloc
+- * Purpose:
+- * Deallocates an entry from a space.
+- * Conditions:
+- * The space must be write-locked throughout.
+- * The space must be active.
+- */
+-
+-void
+-ipc_entry_dealloc(
+- ipc_space_t space,
+- mach_port_t name,
+- ipc_entry_t entry)
+-{
+- assert(space->is_active);
+- assert(entry->ie_object == IO_NULL);
+- assert(entry->ie_request == 0);
+-
+- if (space->is_free_list_size < IS_FREE_LIST_SIZE_LIMIT) {
+- space->is_free_list_size += 1;
+- entry->ie_bits &= IE_BITS_GEN_MASK;
+- entry->ie_next_free = space->is_free_list;
+- space->is_free_list = entry;
+- } else {
+- rdxtree_remove(&space->is_map, (rdxtree_key_t) name);
+- ie_free(entry);
+- }
+- space->is_size -= 1;
+-}
+-
+-
+ #if MACH_KDB
+ #include <ddb/db_output.h>
+ #include <kern/task.h>
+diff --git a/ipc/ipc_entry.h b/ipc/ipc_entry.h
+index 5c1f8fd..b429984 100644
+--- a/ipc/ipc_entry.h
++++ b/ipc/ipc_entry.h
+@@ -96,21 +96,12 @@ extern struct kmem_cache ipc_entry_cache;
+ #define ie_alloc() ((ipc_entry_t) kmem_cache_alloc(&ipc_entry_cache))
+ #define ie_free(e) kmem_cache_free(&ipc_entry_cache, (vm_offset_t) (e))
+
+-extern ipc_entry_t
+-ipc_entry_lookup(ipc_space_t space, mach_port_t name);
+-
+-extern kern_return_t
+-ipc_entry_get(ipc_space_t space, mach_port_t *namep, ipc_entry_t *entryp);
+-
+ extern kern_return_t
+ ipc_entry_alloc(ipc_space_t space, mach_port_t *namep, ipc_entry_t *entryp);
+
+ extern kern_return_t
+ ipc_entry_alloc_name(ipc_space_t space, mach_port_t name, ipc_entry_t *entryp);
+
+-extern void
+-ipc_entry_dealloc(ipc_space_t space, mach_port_t name, ipc_entry_t entry);
+-
+ ipc_entry_t
+ db_ipc_object_by_name(
+ task_t task,
+diff --git a/ipc/ipc_space.h b/ipc/ipc_space.h
+index 20b9d57..58fe47c 100644
+--- a/ipc/ipc_space.h
++++ b/ipc/ipc_space.h
+@@ -137,6 +137,126 @@ kern_return_t ipc_space_create(ipc_table_size_t, ipc_space_t *);
+ kern_return_t ipc_space_create_special(struct ipc_space **);
+ void ipc_space_destroy(struct ipc_space *);
+
++/* IPC entry lookups. */
++
++/*
++ * Routine: ipc_entry_lookup
++ * Purpose:
++ * Searches for an entry, given its name.
++ * Conditions:
++ * The space must be read or write locked throughout.
++ * The space must be active.
++ */
++
++static inline ipc_entry_t
++ipc_entry_lookup(
++ ipc_space_t space,
++ mach_port_t name)
++{
++ ipc_entry_t entry;
++
++ assert(space->is_active);
++ entry = rdxtree_lookup(&space->is_map, (rdxtree_key_t) name);
++ if (entry != IE_NULL
++ && IE_BITS_TYPE(entry->ie_bits) == MACH_PORT_TYPE_NONE)
++ entry = NULL;
++ assert((entry == IE_NULL) || IE_BITS_TYPE(entry->ie_bits));
++ return entry;
++}
++
++/*
++ * Routine: ipc_entry_get
++ * Purpose:
++ * Tries to allocate an entry out of the space.
++ * Conditions:
++ * The space is write-locked and active throughout.
++ * An object may be locked. Will not allocate memory.
++ * Returns:
++ * KERN_SUCCESS A free entry was found.
++ * KERN_NO_SPACE No entry allocated.
++ */
++
++static inline kern_return_t
++ipc_entry_get(
++ ipc_space_t space,
++ mach_port_t *namep,
++ ipc_entry_t *entryp)
++{
++ mach_port_t new_name;
++ ipc_entry_t free_entry;
++
++ assert(space->is_active);
++
++ /* Get entry from the free list. */
++ free_entry = space->is_free_list;
++ if (free_entry == IE_NULL)
++ return KERN_NO_SPACE;
++
++ space->is_free_list = free_entry->ie_next_free;
++ space->is_free_list_size -= 1;
++
++ /*
++ * Initialize the new entry. We need only
++ * increment the generation number and clear ie_request.
++ */
++
++ {
++ mach_port_gen_t gen;
++
++ assert((free_entry->ie_bits &~ IE_BITS_GEN_MASK) == 0);
++ gen = free_entry->ie_bits + IE_BITS_GEN_ONE;
++ free_entry->ie_bits = gen;
++ free_entry->ie_request = 0;
++ new_name = MACH_PORT_MAKE(free_entry->ie_name, gen);
++ }
++
++ /*
++ * The new name can't be MACH_PORT_NULL because index
++ * is non-zero. It can't be MACH_PORT_DEAD because
++ * the table isn't allowed to grow big enough.
++ * (See comment in ipc/ipc_table.h.)
++ */
++
++ assert(MACH_PORT_VALID(new_name));
++ assert(free_entry->ie_object == IO_NULL);
++
++ space->is_size += 1;
++ *namep = new_name;
++ *entryp = free_entry;
++ return KERN_SUCCESS;
++}
++
++/*
++ * Routine: ipc_entry_dealloc
++ * Purpose:
++ * Deallocates an entry from a space.
++ * Conditions:
++ * The space must be write-locked throughout.
++ * The space must be active.
++ */
++
++static inline void
++ipc_entry_dealloc(
++ ipc_space_t space,
++ mach_port_t name,
++ ipc_entry_t entry)
++{
++ assert(space->is_active);
++ assert(entry->ie_object == IO_NULL);
++ assert(entry->ie_request == 0);
++
++ if (space->is_free_list_size < IS_FREE_LIST_SIZE_LIMIT) {
++ space->is_free_list_size += 1;
++ entry->ie_bits &= IE_BITS_GEN_MASK;
++ entry->ie_next_free = space->is_free_list;
++ space->is_free_list = entry;
++ } else {
++ rdxtree_remove(&space->is_map, (rdxtree_key_t) name);
++ ie_free(entry);
++ }
++ space->is_size -= 1;
++}
++
+ /* Reverse lookups. */
+
+ /* Cast a pointer to a suitable key. */
+--
+2.1.4
+
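Patch 6 relies on a plain C idiom: a function defined `static inline' in a header gives every including translation unit its own inlinable copy, whereas the previous extern definition in ipc_entry.c forced an out-of-line call on the fast paths. A minimal sketch of the idiom, with hypothetical names not taken from the patch:

/* lookup.h -- hypothetical header illustrating the `static inline'
 * idiom.  Every translation unit that includes this gets its own
 * definition, which the compiler is free to inline at each call site;
 * no out-of-line copy is emitted unless one is actually needed. */
#ifndef LOOKUP_H
#define LOOKUP_H

struct table {
	const int *slots;
	unsigned int size;
};

static inline int
table_lookup(const struct table *t, unsigned int i)
{
	return i < t->size ? t->slots[i] : -1;
}

#endif /* LOOKUP_H */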
diff --git a/debian/patches/series b/debian/patches/series
index 400b7a4..315c2bc 100644
--- a/debian/patches/series
+++ b/debian/patches/series
@@ -12,3 +12,9 @@ error-handling0001-kern-gracefully-handle-resource-shortage.patch
error-handling0002-vm-gracefully-handle-resource-shortage.patch
error-handling0003-kern-gracefully-handle-resource-shortage.patch
sysenter0001-yyy-sysenter-prototype.patch
+0001-kern-import-macros.h-from-x15.patch
+0002-kern-add-radix-tree-library.patch
+0003-ipc-undo-manual-inlining-of-ipc_entry_X-functions.patch
+0004-ipc-replace-reverse-hash-table-with-a-radix-tree.patch
+0005-ipc-replace-the-IPC-table-with-a-radix-tree.patch
+0006-ipc-inline-key-ipc-entry-lookup-functions.patch
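quilt, and dpkg-source when the package uses the `3.0 (quilt)' source format, apply the files listed in debian/patches/series from top to bottom, so the six new patches land after the existing error-handling and sysenter ones; running `quilt push -a' in the unpacked source applies the whole stack in that order.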