| author | Justus Winter <4winter@informatik.uni-hamburg.de> | 2015-03-18 12:28:28 +0100 |
|---|---|---|
| committer | Justus Winter <4winter@informatik.uni-hamburg.de> | 2015-03-18 12:28:28 +0100 |
| commit | 58e4660f73968c1c6c348b46249407d96a8c3e55 | |
| tree | f4f2fd6e26b05aca660e48299a0390636e831625 | |
| parent | d87040e8479431e25dcc6e7735a320c786e4f0d6 | |
add patch series
Diffstat (limited to 'debian')
| -rw-r--r-- | debian/patches/0001-kern-add-radix-tree-library.patch | 1303 |
| -rw-r--r-- | debian/patches/0002-ipc-undo-manual-inlining-of-ipc_entry_X-functions.patch | 535 |
| -rw-r--r-- | debian/patches/0003-XXX-Unused-constant.patch | 24 |
| -rw-r--r-- | debian/patches/0004-ipc-replace-reverse-hash-table-with-a-radix-tree.patch | 380 |
| -rw-r--r-- | debian/patches/series | 4 |
5 files changed, 2246 insertions, 0 deletions
diff --git a/debian/patches/0001-kern-add-radix-tree-library.patch b/debian/patches/0001-kern-add-radix-tree-library.patch new file mode 100644 index 0000000..6e7f69a --- /dev/null +++ b/debian/patches/0001-kern-add-radix-tree-library.patch @@ -0,0 +1,1303 @@ +From dc1b08d00b277898fd56767c7c6c7426869f7129 Mon Sep 17 00:00:00 2001 +From: Justus Winter <4winter@informatik.uni-hamburg.de> +Date: Mon, 16 Mar 2015 11:37:06 +0100 +Subject: [PATCH gnumach 1/4] kern: add radix tree library + +* Makefile.am (clib_routines): Steal `__ffsdi2'. +* Makefrag.am (libkernel_a_SOURCES): Add new files. +* kern/list.h: Do not define `structof', include `macros.h' instead. +* kern/rbtree.h: Likewise. +* kern/macros.h: New file. +* kern/rdxtree.c: Likewise. +* kern/rdxtree.h: Likewise. +* kern/rdxtree_i.h: Likewise. +* kern/startup.c (setup_main): Initialize radix tree library. +--- + Makefile.am | 1 + + Makefrag.am | 4 + + kern/list.h | 4 +- + kern/macros.h | 94 +++++++ + kern/rbtree.h | 5 +- + kern/rdxtree.c | 816 +++++++++++++++++++++++++++++++++++++++++++++++++++++++ + kern/rdxtree.h | 184 +++++++++++++ + kern/rdxtree_i.h | 65 +++++ + kern/startup.c | 2 + + 9 files changed, 1168 insertions(+), 7 deletions(-) + create mode 100644 kern/macros.h + create mode 100644 kern/rdxtree.c + create mode 100644 kern/rdxtree.h + create mode 100644 kern/rdxtree_i.h + +diff --git a/Makefile.am b/Makefile.am +index 1afddab..3438ec1 100644 +--- a/Makefile.am ++++ b/Makefile.am +@@ -161,6 +161,7 @@ clib_routines := memcmp memcpy memmove \ + htonl htons ntohl ntohs \ + udivdi3 __udivdi3 \ + __rel_iplt_start __rel_iplt_end \ ++ __ffsdi2 \ + _START _start etext _edata end _end # actually ld magic, not libc. + gnumach-undef: gnumach.$(OBJEXT) + $(NM_V) $(NM) -u $< | sed 's/ *U *//' | sort -u > $@ +diff --git a/Makefrag.am b/Makefrag.am +index 9166143..15f422f 100644 +--- a/Makefrag.am ++++ b/Makefrag.am +@@ -172,6 +172,7 @@ libkernel_a_SOURCES += \ + kern/machine.c \ + kern/machine.h \ + kern/macro_help.h \ ++ kern/macros.h \ + kern/pc_sample.c \ + kern/pc_sample.h \ + kern/printf.c \ +@@ -186,6 +187,9 @@ libkernel_a_SOURCES += \ + kern/rbtree.c \ + kern/rbtree.h \ + kern/rbtree_i.h \ ++ kern/rdxtree.c \ ++ kern/rdxtree.h \ ++ kern/rdxtree_i.h \ + kern/refcount.h \ + kern/slab.c \ + kern/slab.h \ +diff --git a/kern/list.h b/kern/list.h +index ad782a8..be92762 100644 +--- a/kern/list.h ++++ b/kern/list.h +@@ -31,9 +31,7 @@ + + #include <stddef.h> + #include <sys/types.h> +- +-#define structof(ptr, type, member) \ +- ((type *)((char *)ptr - offsetof(type, member))) ++#include <kern/macros.h> + + /* + * Structure used as both head and node. +diff --git a/kern/macros.h b/kern/macros.h +new file mode 100644 +index 0000000..f6e88dd +--- /dev/null ++++ b/kern/macros.h +@@ -0,0 +1,94 @@ ++/* ++ * Copyright (c) 2009-2015 Richard Braun. ++ * All rights reserved. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions ++ * are met: ++ * 1. Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * 2. Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. 
++ * ++ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR ++ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES ++ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ++ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, ++ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT ++ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF ++ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ * ++ * ++ * Helper macros. ++ */ ++ ++#ifndef _MACROS_H ++#define _MACROS_H ++ ++#if !defined(__GNUC__) || (__GNUC__ < 4) ++#error "GCC 4+ required" ++#endif ++ ++#include <stddef.h> ++ ++#define MACRO_BEGIN ({ ++#define MACRO_END }) ++ ++#define __QUOTE(x) #x ++#define QUOTE(x) __QUOTE(x) ++ ++#define STRLEN(x) (sizeof(x) - 1) ++#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) ++ ++#define MIN(a, b) ((a) < (b) ? (a) : (b)) ++#define MAX(a, b) ((a) > (b) ? (a) : (b)) ++ ++#define DIV_CEIL(n, d) (((n) + (d) - 1) / (d)) ++ ++#define P2ALIGNED(x, a) (((x) & ((a) - 1)) == 0) ++#define ISP2(x) P2ALIGNED(x, x) ++#define P2ALIGN(x, a) ((x) & -(a)) ++#define P2ROUND(x, a) (-(-(x) & -(a))) ++#define P2END(x, a) (-(~(x) & -(a))) ++ ++#define structof(ptr, type, member) \ ++ ((type *)((char *)(ptr) - offsetof(type, member))) ++ ++#define alignof(x) __alignof__(x) ++ ++#define likely(expr) __builtin_expect(!!(expr), 1) ++#define unlikely(expr) __builtin_expect(!!(expr), 0) ++ ++#define barrier() asm volatile("" : : : "memory") ++ ++#define __noreturn __attribute__((noreturn)) ++#define __alias(x) __attribute__((alias(x))) ++ ++#define __format_printf(fmt, args) \ ++ __attribute__((format(printf, fmt, args))) ++ ++/* ++ * The following macros may be provided by the C environment. ++ */ ++ ++#ifndef __aligned ++#define __aligned(x) __attribute__((aligned(x))) ++#endif ++ ++#ifndef __always_inline ++#define __always_inline inline __attribute__((always_inline)) ++#endif ++ ++#ifndef __section ++#define __section(x) __attribute__((section(x))) ++#endif ++ ++#ifndef __packed ++#define __packed __attribute__((packed)) ++#endif ++ ++#endif /* _MACROS_H */ +diff --git a/kern/rbtree.h b/kern/rbtree.h +index f577f7e..4ee0e15 100644 +--- a/kern/rbtree.h ++++ b/kern/rbtree.h +@@ -31,12 +31,9 @@ + + #include <stddef.h> + #include <kern/assert.h> +-#include <kern/macro_help.h> ++#include <kern/macros.h> + #include <sys/types.h> + +-#define structof(ptr, type, member) \ +- ((type *)((char *)ptr - offsetof(type, member))) +- + /* + * Indexes of the left and right nodes in the children array of a node. + */ +diff --git a/kern/rdxtree.c b/kern/rdxtree.c +new file mode 100644 +index 0000000..1233291 +--- /dev/null ++++ b/kern/rdxtree.c +@@ -0,0 +1,816 @@ ++/* ++ * Copyright (c) 2011-2014 Richard Braun. ++ * All rights reserved. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions ++ * are met: ++ * 1. Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * 2. 
Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR ++ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES ++ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ++ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, ++ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT ++ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF ++ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ */ ++ ++#include <kern/assert.h> ++#include <kern/slab.h> ++#include <mach/kern_return.h> ++#include <sys/types.h> ++//#include <limits.h> ++#include <stddef.h> ++//#include <stdlib.h> ++#include <string.h> ++ ++//#include "error.h" ++#include "macros.h" ++#include "rdxtree.h" ++#include "rdxtree_i.h" ++ ++#define CHAR_BIT 8U ++#define ERR_SUCCESS KERN_SUCCESS ++//XXX ++#define ERR_BUSY KERN_NO_SPACE ++#define ERR_NOMEM KERN_RESOURCE_SHORTAGE ++ ++/* ++ * Mask applied on an entry to obtain its address. ++ */ ++#define RDXTREE_ENTRY_ADDR_MASK (~0x3UL) ++ ++/* ++ * Global properties used to shape radix trees. ++ */ ++#define RDXTREE_RADIX 6 ++#define RDXTREE_RADIX_SIZE (1UL << RDXTREE_RADIX) ++#define RDXTREE_RADIX_MASK (RDXTREE_RADIX_SIZE - 1) ++ ++#if RDXTREE_RADIX < 6 ++typedef unsigned long rdxtree_bm_t; ++#define rdxtree_ffs(x) __builtin_ffsl(x) ++#elif RDXTREE_RADIX == 6 /* RDXTREE_RADIX < 6 */ ++typedef unsigned long long rdxtree_bm_t; ++#define rdxtree_ffs(x) __builtin_ffsll(x) ++#else /* RDXTREE_RADIX < 6 */ ++#error "radix too high" ++#endif /* RDXTREE_RADIX < 6 */ ++ ++/* ++ * Allocation bitmap size in bits. ++ */ ++#define RDXTREE_BM_SIZE (sizeof(rdxtree_bm_t) * CHAR_BIT) ++ ++/* ++ * Empty/full allocation bitmap words. ++ */ ++#define RDXTREE_BM_EMPTY ((rdxtree_bm_t)0) ++#define RDXTREE_BM_FULL \ ++ ((~(rdxtree_bm_t)0) >> (RDXTREE_BM_SIZE - RDXTREE_RADIX_SIZE)) ++ ++/* ++ * These macros can be replaced by actual functions in an environment ++ * that provides lockless synchronization such as RCU. ++ */ ++#define llsync_assign_ptr(ptr, value) ((ptr) = (value)) ++#define llsync_read_ptr(ptr) (ptr) ++ ++/* ++ * Radix tree node. ++ * ++ * The height of a tree is the number of nodes to traverse until stored ++ * pointers are reached. A height of 0 means the entries of a node (or the ++ * tree root) directly point to stored pointers. ++ * ++ * The index is valid if and only if the parent isn't NULL. ++ * ++ * Concerning the allocation bitmap, a bit is set when the node it denotes, ++ * or one of its children, can be used to allocate an entry. Conversely, a bit ++ * is clear when the matching node and all of its children have no free entry. ++ * ++ * In order to support safe lockless lookups, in particular during a resize, ++ * each node includes the height of its subtree, which is invariant during ++ * the entire node lifetime. Since the tree height does vary, it can't be ++ * used to determine whether the tree root is a node or a stored pointer. 
++ * This implementation assumes that all nodes and stored pointers are at least ++ * 4-byte aligned, and uses the least significant bit of entries to indicate ++ * the pointer type. This bit is set for internal nodes, and clear for stored ++ * pointers so that they can be accessed from slots without conversion. ++ */ ++struct rdxtree_node { ++ struct rdxtree_node *parent; ++ unsigned int index; ++ unsigned int height; ++ unsigned int nr_entries; ++ rdxtree_bm_t alloc_bm; ++ void *entries[RDXTREE_RADIX_SIZE]; ++}; ++ ++/* ++ * We allocate nodes using the slab allocator. ++ */ ++static struct kmem_cache rdxtree_node_cache; ++ ++void ++rdxtree_cache_init(void) ++{ ++ kmem_cache_init(&rdxtree_node_cache, "rdxtree_node", ++ sizeof(struct rdxtree_node), 0, NULL, NULL, NULL, 0); ++ struct rdxtree tree = RDXTREE_INITIALIZER; ++ int i; ++ for (i = 1; i < 1000; i++) ++ rdxtree_insert (&tree, i, i*4); ++} ++ ++#ifdef RDXTREE_ENABLE_NODE_CREATION_FAILURES ++unsigned int rdxtree_fail_node_creation_threshold; ++unsigned int rdxtree_nr_node_creations; ++#endif /* RDXTREE_ENABLE_NODE_CREATION_FAILURES */ ++ ++static inline int ++rdxtree_check_alignment(const void *ptr) ++{ ++ return ((unsigned long)ptr & ~RDXTREE_ENTRY_ADDR_MASK) == 0; ++} ++ ++static inline void * ++rdxtree_entry_addr(void *entry) ++{ ++ return (void *)((unsigned long)entry & RDXTREE_ENTRY_ADDR_MASK); ++} ++ ++static inline int ++rdxtree_entry_is_node(const void *entry) ++{ ++ return ((unsigned long)entry & 1) != 0; ++} ++ ++static inline void * ++rdxtree_node_to_entry(struct rdxtree_node *node) ++{ ++ return (void *)((unsigned long)node | 1); ++} ++ ++static int ++rdxtree_node_create(struct rdxtree_node **nodep, unsigned int height) ++{ ++ struct rdxtree_node *node; ++ ++#ifdef RDXTREE_ENABLE_NODE_CREATION_FAILURES ++ if (rdxtree_fail_node_creation_threshold != 0) { ++ rdxtree_nr_node_creations++; ++ ++ if (rdxtree_nr_node_creations == rdxtree_fail_node_creation_threshold) ++ return ERR_NOMEM; ++ } ++#endif /* RDXTREE_ENABLE_NODE_CREATION_FAILURES */ ++ ++ node = kmem_cache_alloc(&rdxtree_node_cache); ++ ++ if (node == NULL) ++ return ERR_NOMEM; ++ ++ assert(rdxtree_check_alignment(node)); ++ node->parent = NULL; ++ node->height = height; ++ node->nr_entries = 0; ++ node->alloc_bm = RDXTREE_BM_FULL; ++ memset(node->entries, 0, sizeof(node->entries)); ++ *nodep = node; ++ return 0; ++} ++ ++static void ++rdxtree_node_schedule_destruction(struct rdxtree_node *node) ++{ ++ /* ++ * This function is intended to use the appropriate interface to defer ++ * destruction until all read-side references are dropped in an ++ * environment that provides lockless synchronization. ++ * ++ * Otherwise, it simply "schedules" destruction immediately. 
++ */ ++ kmem_cache_free(&rdxtree_node_cache, (vm_offset_t) node); ++} ++ ++static inline void ++rdxtree_node_link(struct rdxtree_node *node, struct rdxtree_node *parent, ++ unsigned int index) ++{ ++ node->parent = parent; ++ node->index = index; ++} ++ ++static inline void ++rdxtree_node_unlink(struct rdxtree_node *node) ++{ ++ assert(node->parent != NULL); ++ node->parent = NULL; ++} ++ ++static inline int ++rdxtree_node_full(struct rdxtree_node *node) ++{ ++ return (node->nr_entries == ARRAY_SIZE(node->entries)); ++} ++ ++static inline int ++rdxtree_node_empty(struct rdxtree_node *node) ++{ ++ return (node->nr_entries == 0); ++} ++ ++static inline void ++rdxtree_node_insert(struct rdxtree_node *node, unsigned int index, ++ void *entry) ++{ ++ assert(index < ARRAY_SIZE(node->entries)); ++ assert(node->entries[index] == NULL); ++ ++ node->nr_entries++; ++ llsync_assign_ptr(node->entries[index], entry); ++} ++ ++static inline void ++rdxtree_node_insert_node(struct rdxtree_node *node, unsigned int index, ++ struct rdxtree_node *child) ++{ ++ rdxtree_node_insert(node, index, rdxtree_node_to_entry(child)); ++} ++ ++static inline void ++rdxtree_node_remove(struct rdxtree_node *node, unsigned int index) ++{ ++ assert(index < ARRAY_SIZE(node->entries)); ++ assert(node->entries[index] != NULL); ++ ++ node->nr_entries--; ++ llsync_assign_ptr(node->entries[index], NULL); ++} ++ ++static inline void * ++rdxtree_node_find(struct rdxtree_node *node, unsigned int index, int get_slot) ++{ ++ void *ptr; ++ ++ while (index < ARRAY_SIZE(node->entries)) { ++ ptr = rdxtree_entry_addr(node->entries[index]); ++ ++ if (ptr != NULL) ++ return get_slot ? (void *)&node->entries[index] : ptr; ++ ++ index++; ++ } ++ ++ return NULL; ++} ++ ++static inline void ++rdxtree_node_bm_set(struct rdxtree_node *node, unsigned int index) ++{ ++ node->alloc_bm |= (rdxtree_bm_t)1 << index; ++} ++ ++static inline void ++rdxtree_node_bm_clear(struct rdxtree_node *node, unsigned int index) ++{ ++ node->alloc_bm &= ~((rdxtree_bm_t)1 << index); ++} ++ ++static inline int ++rdxtree_node_bm_is_set(struct rdxtree_node *node, unsigned int index) ++{ ++ return (node->alloc_bm & ((rdxtree_bm_t)1 << index)); ++} ++ ++static inline int ++rdxtree_node_bm_empty(struct rdxtree_node *node) ++{ ++ return (node->alloc_bm == RDXTREE_BM_EMPTY); ++} ++ ++static inline unsigned int ++rdxtree_node_bm_first(struct rdxtree_node *node) ++{ ++ return rdxtree_ffs(node->alloc_bm) - 1; ++} ++ ++static inline unsigned long long ++rdxtree_max_key(unsigned int height) ++{ ++ size_t shift; ++ ++ shift = RDXTREE_RADIX * height; ++ ++ if (likely(shift < (sizeof(unsigned long long) * CHAR_BIT))) ++ return (1ULL << shift) - 1; ++ else ++ return ~0ULL; ++} ++ ++static void ++rdxtree_shrink(struct rdxtree *tree) ++{ ++ struct rdxtree_node *node; ++ void *entry; ++ ++ while (tree->height > 0) { ++ node = rdxtree_entry_addr(tree->root); ++ ++ if (node->nr_entries != 1) ++ break; ++ ++ entry = node->entries[0]; ++ ++ if (entry == NULL) ++ break; ++ ++ tree->height--; ++ ++ if (tree->height > 0) ++ rdxtree_node_unlink(rdxtree_entry_addr(entry)); ++ ++ llsync_assign_ptr(tree->root, entry); ++ rdxtree_node_schedule_destruction(node); ++ } ++} ++ ++static int ++rdxtree_grow(struct rdxtree *tree, unsigned long long key) ++{ ++ struct rdxtree_node *root, *node; ++ unsigned int new_height; ++ int error; ++ ++ new_height = tree->height + 1; ++ ++ while (key > rdxtree_max_key(new_height)) ++ new_height++; ++ ++ if (tree->root == NULL) { ++ tree->height = new_height; ++ return 
ERR_SUCCESS; ++ } ++ ++ root = rdxtree_entry_addr(tree->root); ++ ++ do { ++ error = rdxtree_node_create(&node, tree->height); ++ ++ if (error) { ++ rdxtree_shrink(tree); ++ return error; ++ } ++ ++ if (tree->height == 0) ++ rdxtree_node_bm_clear(node, 0); ++ else { ++ rdxtree_node_link(root, node, 0); ++ ++ if (rdxtree_node_bm_empty(root)) ++ rdxtree_node_bm_clear(node, 0); ++ } ++ ++ rdxtree_node_insert(node, 0, tree->root); ++ tree->height++; ++ llsync_assign_ptr(tree->root, rdxtree_node_to_entry(node)); ++ root = node; ++ } while (new_height > tree->height); ++ ++ return ERR_SUCCESS; ++} ++ ++static void ++rdxtree_cleanup(struct rdxtree *tree, struct rdxtree_node *node) ++{ ++ struct rdxtree_node *prev; ++ ++ for (;;) { ++ if (likely(!rdxtree_node_empty(node))) { ++ if (unlikely(node->parent == NULL)) ++ rdxtree_shrink(tree); ++ ++ break; ++ } ++ ++ if (node->parent == NULL) { ++ tree->height = 0; ++ llsync_assign_ptr(tree->root, NULL); ++ rdxtree_node_schedule_destruction(node); ++ break; ++ } ++ ++ prev = node; ++ node = node->parent; ++ rdxtree_node_unlink(prev); ++ rdxtree_node_remove(node, prev->index); ++ rdxtree_node_schedule_destruction(prev); ++ } ++} ++ ++static void ++rdxtree_insert_bm_clear(struct rdxtree_node *node, unsigned int index) ++{ ++ for (;;) { ++ rdxtree_node_bm_clear(node, index); ++ ++ if (!rdxtree_node_full(node) || (node->parent == NULL)) ++ break; ++ ++ index = node->index; ++ node = node->parent; ++ } ++} ++ ++int ++rdxtree_insert_common(struct rdxtree *tree, unsigned long long key, ++ void *ptr, void ***slotp) ++{ ++ struct rdxtree_node *node, *prev; ++ unsigned int height, shift, index = index; ++ int error; ++ ++ assert(ptr != NULL); ++ assert(rdxtree_check_alignment(ptr)); ++ ++ if (unlikely(key > rdxtree_max_key(tree->height))) { ++ error = rdxtree_grow(tree, key); ++ ++ if (error) ++ return error; ++ } ++ ++ height = tree->height; ++ ++ if (unlikely(height == 0)) { ++ if (tree->root != NULL) ++ return ERR_BUSY; ++ ++ llsync_assign_ptr(tree->root, ptr); ++ ++ if (slotp != NULL) ++ *slotp = &tree->root; ++ ++ return ERR_SUCCESS; ++ } ++ ++ node = rdxtree_entry_addr(tree->root); ++ shift = (height - 1) * RDXTREE_RADIX; ++ prev = NULL; ++ ++ do { ++ if (node == NULL) { ++ error = rdxtree_node_create(&node, height - 1); ++ ++ if (error) { ++ if (prev == NULL) ++ tree->height = 0; ++ else ++ rdxtree_cleanup(tree, prev); ++ ++ return error; ++ } ++ ++ if (prev == NULL) ++ llsync_assign_ptr(tree->root, rdxtree_node_to_entry(node)); ++ else { ++ rdxtree_node_link(node, prev, index); ++ rdxtree_node_insert_node(prev, index, node); ++ } ++ } ++ ++ prev = node; ++ index = (unsigned int)(key >> shift) & RDXTREE_RADIX_MASK; ++ node = rdxtree_entry_addr(prev->entries[index]); ++ shift -= RDXTREE_RADIX; ++ height--; ++ } while (height > 0); ++ ++ if (unlikely(node != NULL)) ++ return ERR_BUSY; ++ ++ rdxtree_node_insert(prev, index, ptr); ++ rdxtree_insert_bm_clear(prev, index); ++ ++ if (slotp != NULL) ++ *slotp = &prev->entries[index]; ++ ++ return ERR_SUCCESS; ++} ++ ++int ++rdxtree_insert_alloc_common(struct rdxtree *tree, void *ptr, ++ unsigned long long *keyp, void ***slotp) ++{ ++ struct rdxtree_node *node, *prev; ++ unsigned long long key; ++ unsigned int height, shift, index = index; ++ int error; ++ ++ assert(ptr != NULL); ++ assert(rdxtree_check_alignment(ptr)); ++ ++ height = tree->height; ++ ++ if (unlikely(height == 0)) { ++ if (tree->root == NULL) { ++ llsync_assign_ptr(tree->root, ptr); ++ *keyp = 0; ++ ++ if (slotp != NULL) ++ *slotp = &tree->root; ++ 
++ return ERR_SUCCESS; ++ } ++ ++ goto grow; ++ } ++ ++ node = rdxtree_entry_addr(tree->root); ++ key = 0; ++ shift = (height - 1) * RDXTREE_RADIX; ++ prev = NULL; ++ ++ do { ++ if (node == NULL) { ++ error = rdxtree_node_create(&node, height - 1); ++ ++ if (error) { ++ rdxtree_cleanup(tree, prev); ++ return error; ++ } ++ ++ rdxtree_node_link(node, prev, index); ++ rdxtree_node_insert_node(prev, index, node); ++ } ++ ++ prev = node; ++ index = rdxtree_node_bm_first(node); ++ ++ if (index == (unsigned int)-1) ++ goto grow; ++ ++ key |= (unsigned long long)index << shift; ++ node = rdxtree_entry_addr(node->entries[index]); ++ shift -= RDXTREE_RADIX; ++ height--; ++ } while (height > 0); ++ ++ rdxtree_node_insert(prev, index, ptr); ++ rdxtree_insert_bm_clear(prev, index); ++ ++ if (slotp != NULL) ++ *slotp = &prev->entries[index]; ++ ++ goto out; ++ ++grow: ++ key = rdxtree_max_key(height) + 1; ++ error = rdxtree_insert_common(tree, key, ptr, slotp); ++ ++ if (error) ++ return error; ++ ++out: ++ *keyp = key; ++ return ERR_SUCCESS; ++} ++ ++static void ++rdxtree_remove_bm_set(struct rdxtree_node *node, unsigned int index) ++{ ++ do { ++ rdxtree_node_bm_set(node, index); ++ ++ if (node->parent == NULL) ++ break; ++ ++ index = node->index; ++ node = node->parent; ++ } while (!rdxtree_node_bm_is_set(node, index)); ++} ++ ++void * ++rdxtree_remove(struct rdxtree *tree, unsigned long long key) ++{ ++ struct rdxtree_node *node, *prev; ++ unsigned int height, shift, index; ++ ++ height = tree->height; ++ ++ if (unlikely(key > rdxtree_max_key(height))) ++ return NULL; ++ ++ node = rdxtree_entry_addr(tree->root); ++ ++ if (unlikely(height == 0)) { ++ llsync_assign_ptr(tree->root, NULL); ++ return node; ++ } ++ ++ shift = (height - 1) * RDXTREE_RADIX; ++ ++ do { ++ if (node == NULL) ++ return NULL; ++ ++ prev = node; ++ index = (unsigned int)(key >> shift) & RDXTREE_RADIX_MASK; ++ node = rdxtree_entry_addr(node->entries[index]); ++ shift -= RDXTREE_RADIX; ++ height--; ++ } while (height > 0); ++ ++ if (node == NULL) ++ return NULL; ++ ++ rdxtree_node_remove(prev, index); ++ rdxtree_remove_bm_set(prev, index); ++ rdxtree_cleanup(tree, prev); ++ return node; ++} ++ ++void * ++rdxtree_lookup_common(const struct rdxtree *tree, unsigned long long key, ++ int get_slot) ++{ ++ struct rdxtree_node *node, *prev; ++ unsigned int height, shift, index; ++ void *entry; ++ ++ entry = llsync_read_ptr(tree->root); ++ ++ if (entry == NULL) { ++ node = NULL; ++ height = 0; ++ } else { ++ node = rdxtree_entry_addr(entry); ++ height = rdxtree_entry_is_node(entry) ? node->height + 1 : 0; ++ } ++ ++ if (key > rdxtree_max_key(height)) ++ return NULL; ++ ++ if (height == 0) { ++ if (node == NULL) ++ return NULL; ++ ++ return get_slot ? (void *)&tree->root : node; ++ } ++ ++ shift = (height - 1) * RDXTREE_RADIX; ++ ++ do { ++ if (node == NULL) ++ return NULL; ++ ++ prev = node; ++ index = (unsigned int)(key >> shift) & RDXTREE_RADIX_MASK; ++ entry = llsync_read_ptr(node->entries[index]); ++ node = rdxtree_entry_addr(entry); ++ shift -= RDXTREE_RADIX; ++ height--; ++ } while (height > 0); ++ ++ if (node == NULL) ++ return NULL; ++ ++ return get_slot ? 
(void *)&prev->entries[index] : node; ++} ++ ++void * ++rdxtree_replace_slot(void **slot, void *ptr) ++{ ++ void *old; ++ ++ assert(ptr != NULL); ++ assert(rdxtree_check_alignment(ptr)); ++ ++ old = *slot; ++ assert(old != NULL); ++ assert(rdxtree_check_alignment(old)); ++ llsync_assign_ptr(*slot, ptr); ++ return old; ++} ++ ++static struct rdxtree_node * ++rdxtree_walk(struct rdxtree *tree, struct rdxtree_node *node) ++{ ++ struct rdxtree_node *prev, *child; ++ unsigned int height, index; ++ ++ if (node == NULL) { ++ height = tree->height; ++ node = rdxtree_entry_addr(tree->root); ++ ++ while (height > 1) { ++ node = rdxtree_node_find(node, 0, 0); ++ height--; ++ } ++ ++ return node; ++ } ++ ++ height = 0; ++ ++ for (;;) { ++ prev = node->parent; ++ ++ if (prev == NULL) ++ return NULL; ++ ++ index = node->index; ++ child = rdxtree_node_find(prev, index + 1, 0); ++ ++ if (child != NULL) ++ break; ++ ++ height++; ++ node = prev; ++ } ++ ++ node = child; ++ ++ while (height > 0) { ++ node = rdxtree_node_find(node, 0, 0); ++ height--; ++ } ++ ++ return node; ++} ++ ++void * ++rdxtree_iter_next(struct rdxtree *tree, struct rdxtree_iter *iter) ++{ ++ unsigned int index; ++ ++ if (tree->height == 0) { ++ if (iter->slot != NULL) ++ return NULL; ++ ++ iter->slot = &tree->root; ++ return *iter->slot; ++ } ++ ++ if (iter->node != NULL) { ++ index = iter->slot - ((struct rdxtree_node *)iter->node)->entries; ++ iter->slot = rdxtree_node_find(iter->node, index + 1, 1); ++ } ++ ++ if (iter->slot == NULL) { ++ iter->node = rdxtree_walk(tree, iter->node); ++ ++ if (iter->node != NULL) ++ iter->slot = rdxtree_node_find(iter->node, 0, 1); ++ } ++ ++ if (iter->slot == NULL) ++ return NULL; ++ ++ return *iter->slot; ++} ++ ++void ++rdxtree_remove_all(struct rdxtree *tree) ++{ ++ struct rdxtree_node *node, *parent, *next; ++ unsigned int height, index; ++ ++ height = tree->height; ++ ++ if (height == 0) { ++ if (tree->root != NULL) ++ llsync_assign_ptr(tree->root, NULL); ++ ++ return; ++ } ++ ++ node = rdxtree_walk(tree, NULL); ++ ++ do { ++ next = rdxtree_walk(tree, node); ++ ++ parent = node->parent; ++ ++ if (parent != NULL) { ++ index = node->index; ++ rdxtree_node_remove(parent, index); ++ rdxtree_remove_bm_set(parent, index); ++ rdxtree_cleanup(tree, parent); ++ node->parent = NULL; ++ } ++ ++ rdxtree_node_schedule_destruction(node); ++ ++ node = next; ++ } while (node != NULL); ++} +diff --git a/kern/rdxtree.h b/kern/rdxtree.h +new file mode 100644 +index 0000000..c834627 +--- /dev/null ++++ b/kern/rdxtree.h +@@ -0,0 +1,184 @@ ++/* ++ * Copyright (c) 2011-2014 Richard Braun. ++ * All rights reserved. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions ++ * are met: ++ * 1. Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * 2. Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in the ++ * documentation and/or other materials provided with the distribution. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR ++ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES ++ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
++ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, ++ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT ++ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF ++ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ * ++ * ++ * Radix tree. ++ * ++ * In addition to the standard insertion operation, this implementation ++ * can allocate keys for the caller at insertion time. ++ */ ++ ++#ifndef _RDXTREE_H ++#define _RDXTREE_H ++ ++#include <stddef.h> ++ ++/* ++ * Initialize the node cache. ++ */ ++void ++rdxtree_cache_init(void); ++ ++/* ++ * Radix tree. ++ */ ++struct rdxtree; ++ ++/* ++ * Radix tree iterator. ++ */ ++struct rdxtree_iter; ++ ++/* ++ * Static tree initializer. ++ */ ++#define RDXTREE_INITIALIZER { 0, NULL } ++ ++#include "rdxtree_i.h" ++ ++/* ++ * Initialize a tree. ++ */ ++static inline void ++rdxtree_init(struct rdxtree *tree) ++{ ++ tree->height = 0; ++ tree->root = NULL; ++} ++ ++/* ++ * Insert a pointer in a tree. ++ * ++ * The ptr parameter must not be NULL. ++ */ ++static inline int ++rdxtree_insert(struct rdxtree *tree, unsigned long long key, void *ptr) ++{ ++ return rdxtree_insert_common(tree, key, ptr, NULL); ++} ++ ++/* ++ * Insert a pointer in a tree and obtain its slot. ++ * ++ * The ptr and slotp parameters must not be NULL. If successful, the slot of ++ * the newly inserted pointer is stored at the address pointed to by the slotp ++ * parameter. ++ */ ++static inline int ++rdxtree_insert_slot(struct rdxtree *tree, unsigned long long key, void *ptr, ++ void ***slotp) ++{ ++ return rdxtree_insert_common(tree, key, ptr, slotp); ++} ++ ++/* ++ * Insert a pointer in a tree, for which a new key is allocated. ++ * ++ * The ptr and keyp parameters must not be NULL. The newly allocated key is ++ * stored at the address pointed to by the keyp parameter. ++ */ ++static inline int ++rdxtree_insert_alloc(struct rdxtree *tree, void *ptr, unsigned long long *keyp) ++{ ++ return rdxtree_insert_alloc_common(tree, ptr, keyp, NULL); ++} ++ ++/* ++ * Insert a pointer in a tree, for which a new key is allocated, and obtain ++ * its slot. ++ * ++ * The ptr, keyp and slotp parameters must not be NULL. The newly allocated ++ * key is stored at the address pointed to by the keyp parameter while the ++ * slot of the inserted pointer is stored at the address pointed to by the ++ * slotp parameter. ++ */ ++static inline int ++rdxtree_insert_alloc_slot(struct rdxtree *tree, void *ptr, ++ unsigned long long *keyp, void ***slotp) ++{ ++ return rdxtree_insert_alloc_common(tree, ptr, keyp, slotp); ++} ++ ++/* ++ * Remove a pointer from a tree. ++ * ++ * The matching pointer is returned if successful, NULL otherwise. ++ */ ++void * rdxtree_remove(struct rdxtree *tree, unsigned long long key); ++ ++/* ++ * Look up a pointer in a tree. ++ * ++ * The matching pointer is returned if successful, NULL otherwise. ++ */ ++static inline void * ++rdxtree_lookup(const struct rdxtree *tree, unsigned long long key) ++{ ++ return rdxtree_lookup_common(tree, key, 0); ++} ++ ++/* ++ * Look up a slot in a tree. ++ * ++ * A slot is a pointer to a stored pointer in a tree. It can be used as ++ * a placeholder for fast replacements to avoid multiple lookups on the same ++ * key. 
++ * ++ * A slot for the matching pointer is returned if successful, NULL otherwise. ++ * ++ * See rdxtree_replace_slot(). ++ */ ++static inline void ** ++rdxtree_lookup_slot(const struct rdxtree *tree, unsigned long long key) ++{ ++ return rdxtree_lookup_common(tree, key, 1); ++} ++ ++/* ++ * Replace a pointer in a tree. ++ * ++ * The ptr parameter must not be NULL. The previous pointer is returned. ++ * ++ * See rdxtree_lookup_slot(). ++ */ ++void * rdxtree_replace_slot(void **slot, void *ptr); ++ ++/* ++ * Forge a loop to process all pointers of a tree. ++ */ ++#define rdxtree_for_each(tree, iter, ptr) \ ++for (rdxtree_iter_init(iter), ptr = rdxtree_iter_next(tree, iter); \ ++ ptr != NULL; \ ++ ptr = rdxtree_iter_next(tree, iter)) ++ ++/* ++ * Remove all pointers from a tree. ++ * ++ * The common way to destroy a tree and its pointers is to loop over all ++ * the pointers using rdxtree_for_each(), freeing them, then call this ++ * function. ++ */ ++void rdxtree_remove_all(struct rdxtree *tree); ++ ++#endif /* _RDXTREE_H */ +diff --git a/kern/rdxtree_i.h b/kern/rdxtree_i.h +new file mode 100644 +index 0000000..4ba27d8 +--- /dev/null ++++ b/kern/rdxtree_i.h +@@ -0,0 +1,65 @@ ++/* ++ * Copyright (c) 2013-2014 Richard Braun. ++ * ++ * This program is free software: you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation, either version 3 of the License, or ++ * (at your option) any later version. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program. If not, see <http://www.gnu.org/licenses/>. ++ */ ++ ++#ifndef _RDXTREE_I_H ++#define _RDXTREE_I_H ++ ++/* ++ * Radix tree. ++ */ ++struct rdxtree { ++ unsigned int height; ++ void *root; ++}; ++ ++/* ++ * Radix tree iterator. ++ */ ++struct rdxtree_iter { ++ void *node; ++ void **slot; ++}; ++ ++/* ++ * Initialize an iterator. ++ */ ++static inline void ++rdxtree_iter_init(struct rdxtree_iter *iter) ++{ ++ iter->node = NULL; ++ iter->slot = NULL; ++} ++ ++int rdxtree_insert_common(struct rdxtree *tree, unsigned long long key, ++ void *ptr, void ***slotp); ++ ++int rdxtree_insert_alloc_common(struct rdxtree *tree, void *ptr, ++ unsigned long long *keyp, void ***slotp); ++ ++void * rdxtree_lookup_common(const struct rdxtree *tree, unsigned long long key, ++ int get_slot); ++ ++/* ++ * Walk over pointers in a tree. ++ * ++ * Move the iterator to the next pointer in the given tree. ++ * ++ * The next pointer is returned if there is one, NULL otherwise. 
++ */ ++void * rdxtree_iter_next(struct rdxtree *tree, struct rdxtree_iter *iter); ++ ++#endif /* _RDXTREE_I_H */ +diff --git a/kern/startup.c b/kern/startup.c +index 71cd04d..f9f0c34 100644 +--- a/kern/startup.c ++++ b/kern/startup.c +@@ -41,6 +41,7 @@ + #include <kern/mach_clock.h> + #include <kern/printf.h> + #include <kern/processor.h> ++#include <kern/rdxtree.h> + #include <kern/sched_prim.h> + #include <kern/task.h> + #include <kern/thread.h> +@@ -112,6 +113,7 @@ void setup_main(void) + + sched_init(); + vm_mem_bootstrap(); ++ rdxtree_cache_init(); + ipc_bootstrap(); + vm_mem_init(); + ipc_init(); +-- +2.1.4 + diff --git a/debian/patches/0002-ipc-undo-manual-inlining-of-ipc_entry_X-functions.patch b/debian/patches/0002-ipc-undo-manual-inlining-of-ipc_entry_X-functions.patch new file mode 100644 index 0000000..7b5bcf8 --- /dev/null +++ b/debian/patches/0002-ipc-undo-manual-inlining-of-ipc_entry_X-functions.patch @@ -0,0 +1,535 @@ +From 070d9df51684793a02e027b6702cfede130d11c3 Mon Sep 17 00:00:00 2001 +From: Justus Winter <4winter@informatik.uni-hamburg.de> +Date: Mon, 16 Mar 2015 12:52:24 +0100 +Subject: [PATCH gnumach 2/4] ipc: undo manual inlining of `ipc_entry_X' + functions + +Today we can rely on the compiler to inline functions. Undoing this +manual optimization is a first step to replace the ipc tables. + +* ipc/mach_msg.c (mach_msg_trap): Undo the manual inlining of +`ipc_entry_lookup', `ipc_entry_dealloc', and `ipc_entry_get'. +* ipc/ipc_kmsg.c (ipc_kmsg_copyin_header, ipc_kmsg_copyout_header): Likewise. +* kern/exception.c (exception_raise): Likewise. +* kern/ipc_mig.c (fast_send_right_lookup): Likewise. +--- + ipc/ipc_kmsg.c | 107 +++++++++++------------------------------- + ipc/mach_msg.c | 139 ++++++++----------------------------------------------- + kern/exception.c | 18 ++----- + kern/ipc_mig.c | 14 +++--- + 4 files changed, 57 insertions(+), 221 deletions(-) + +diff --git a/ipc/ipc_kmsg.c b/ipc/ipc_kmsg.c +index 66643fd..a4a3b42 100644 +--- a/ipc/ipc_kmsg.c ++++ b/ipc/ipc_kmsg.c +@@ -698,24 +698,14 @@ ipc_kmsg_copyin_header( + if (!space->is_active) + goto abort_async; + +- /* optimized ipc_entry_lookup */ +- +- { +- mach_port_index_t index = MACH_PORT_INDEX(dest_name); +- mach_port_gen_t gen = MACH_PORT_GEN(dest_name); +- +- if (index >= space->is_table_size) ++ entry = ipc_entry_lookup (space, dest_name); ++ if (entry == IE_NULL) + goto abort_async; +- +- entry = &space->is_table[index]; + bits = entry->ie_bits; + +- /* check generation number and type bit */ +- +- if ((bits & (IE_BITS_GEN_MASK|MACH_PORT_TYPE_SEND)) != +- (gen | MACH_PORT_TYPE_SEND)) ++ /* check type bits */ ++ if (IE_BITS_TYPE (bits) != MACH_PORT_TYPE_SEND) + goto abort_async; +- } + + /* optimized ipc_right_copyin */ + +@@ -750,8 +740,6 @@ ipc_kmsg_copyin_header( + + case MACH_MSGH_BITS(MACH_MSG_TYPE_COPY_SEND, + MACH_MSG_TYPE_MAKE_SEND_ONCE): { +- ipc_entry_num_t size; +- ipc_entry_t table; + ipc_entry_t entry; + ipc_entry_bits_t bits; + ipc_port_t dest_port, reply_port; +@@ -762,51 +750,28 @@ ipc_kmsg_copyin_header( + if (!space->is_active) + goto abort_request; + +- size = space->is_table_size; +- table = space->is_table; +- +- /* optimized ipc_entry_lookup of dest_name */ +- +- { +- mach_port_index_t index = MACH_PORT_INDEX(dest_name); +- mach_port_gen_t gen = MACH_PORT_GEN(dest_name); +- +- if (index >= size) ++ entry = ipc_entry_lookup (space, dest_name); ++ if (entry == IE_NULL) + goto abort_request; +- +- entry = &table[index]; + bits = entry->ie_bits; + +- /* check generation number and 
type bit */ +- +- if ((bits & (IE_BITS_GEN_MASK|MACH_PORT_TYPE_SEND)) != +- (gen | MACH_PORT_TYPE_SEND)) ++ /* check type bits */ ++ if (IE_BITS_TYPE (bits) != MACH_PORT_TYPE_SEND) + goto abort_request; +- } + + assert(IE_BITS_UREFS(bits) > 0); + + dest_port = (ipc_port_t) entry->ie_object; + assert(dest_port != IP_NULL); + +- /* optimized ipc_entry_lookup of reply_name */ +- +- { +- mach_port_index_t index = MACH_PORT_INDEX(reply_name); +- mach_port_gen_t gen = MACH_PORT_GEN(reply_name); +- +- if (index >= size) ++ entry = ipc_entry_lookup (space, reply_name); ++ if (entry == IE_NULL) + goto abort_request; +- +- entry = &table[index]; + bits = entry->ie_bits; + +- /* check generation number and type bit */ +- +- if ((bits & (IE_BITS_GEN_MASK|MACH_PORT_TYPE_RECEIVE)) != +- (gen | MACH_PORT_TYPE_RECEIVE)) ++ /* check type bits */ ++ if (IE_BITS_TYPE (bits) != MACH_PORT_TYPE_RECEIVE) + goto abort_request; +- } + + reply_port = (ipc_port_t) entry->ie_object; + assert(reply_port != IP_NULL); +@@ -853,9 +818,6 @@ ipc_kmsg_copyin_header( + } + + case MACH_MSGH_BITS(MACH_MSG_TYPE_MOVE_SEND_ONCE, 0): { +- mach_port_index_t index; +- mach_port_gen_t gen; +- ipc_entry_t table; + ipc_entry_t entry; + ipc_entry_bits_t bits; + ipc_port_t dest_port; +@@ -869,24 +831,13 @@ ipc_kmsg_copyin_header( + if (!space->is_active) + goto abort_reply; + +- /* optimized ipc_entry_lookup */ +- +- table = space->is_table; +- +- index = MACH_PORT_INDEX(dest_name); +- gen = MACH_PORT_GEN(dest_name); +- +- if (index >= space->is_table_size) ++ entry = ipc_entry_lookup (space, dest_name); ++ if (entry == IE_NULL) + goto abort_reply; +- +- entry = &table[index]; + bits = entry->ie_bits; + +- /* check generation number, collision bit, and type bit */ +- +- if ((bits & (IE_BITS_GEN_MASK|IE_BITS_COLLISION| +- MACH_PORT_TYPE_SEND_ONCE)) != +- (gen | MACH_PORT_TYPE_SEND_ONCE)) ++ /* check and type bits */ ++ if (IE_BITS_TYPE (bits) != MACH_PORT_TYPE_SEND_ONCE) + goto abort_reply; + + /* optimized ipc_right_copyin */ +@@ -910,12 +861,8 @@ ipc_kmsg_copyin_header( + assert(dest_port->ip_sorights > 0); + ip_unlock(dest_port); + +- /* optimized ipc_entry_dealloc */ +- +- entry->ie_next = table->ie_next; +- table->ie_next = index; +- entry->ie_bits = gen; + entry->ie_object = IO_NULL; ++ ipc_entry_dealloc (space, dest_name, entry); + is_write_unlock(space); + + msg->msgh_bits = (MACH_MSGH_BITS_OTHER(mbits) | +@@ -1814,8 +1761,6 @@ ipc_kmsg_copyout_header( + + case MACH_MSGH_BITS(MACH_MSG_TYPE_PORT_SEND, + MACH_MSG_TYPE_PORT_SEND_ONCE): { +- ipc_entry_t table; +- mach_port_index_t index; + ipc_entry_t entry; + ipc_port_t reply = (ipc_port_t) msg->msgh_local_port; + mach_port_t dest_name, reply_name; +@@ -1827,8 +1772,7 @@ ipc_kmsg_copyout_header( + break; + + is_write_lock(space); +- if (!space->is_active || +- ((index = (table = space->is_table)->ie_next) == 0)) { ++ if (!space->is_active || space->is_table->ie_next == 0) { + is_write_unlock(space); + break; + } +@@ -1858,11 +1802,14 @@ ipc_kmsg_copyout_header( + assert(reply->ip_sorights > 0); + ip_unlock(reply); + +- /* optimized ipc_entry_get */ +- +- entry = &table[index]; +- table->ie_next = entry->ie_next; +- entry->ie_request = 0; ++ kern_return_t kr; ++ kr = ipc_entry_get (space, &reply_name, &entry); ++ if (kr) { ++ ip_unlock(reply); ++ ip_unlock(dest); ++ is_write_unlock(space); ++ break; ++ } + + { + mach_port_gen_t gen; +@@ -1870,8 +1817,6 @@ ipc_kmsg_copyout_header( + assert((entry->ie_bits &~ IE_BITS_GEN_MASK) == 0); + gen = entry->ie_bits + IE_BITS_GEN_ONE; + +- 
reply_name = MACH_PORT_MAKE(index, gen); +- + /* optimized ipc_right_copyout */ + + entry->ie_bits = gen | (MACH_PORT_TYPE_SEND_ONCE | 1); +diff --git a/ipc/mach_msg.c b/ipc/mach_msg.c +index 1e122c7..5fb9b53 100644 +--- a/ipc/mach_msg.c ++++ b/ipc/mach_msg.c +@@ -482,16 +482,7 @@ mach_msg_trap( + switch (kmsg->ikm_header.msgh_bits) { + case MACH_MSGH_BITS(MACH_MSG_TYPE_COPY_SEND, + MACH_MSG_TYPE_MAKE_SEND_ONCE): { +- ipc_entry_t table; +- ipc_entry_num_t size; + ipc_port_t reply_port; +- +- /* sending a request message */ +- +- { +- mach_port_index_t index; +- mach_port_gen_t gen; +- + { + mach_port_t reply_name = + kmsg->ikm_header.msgh_local_port; +@@ -499,68 +490,30 @@ mach_msg_trap( + if (reply_name != rcv_name) + goto slow_copyin; + +- /* optimized ipc_entry_lookup of reply_name */ +- +- index = MACH_PORT_INDEX(reply_name); +- gen = MACH_PORT_GEN(reply_name); +- } +- + is_read_lock(space); + assert(space->is_active); + +- size = space->is_table_size; +- table = space->is_table; +- +- if (index >= size) +- goto abort_request_copyin; +- +- { + ipc_entry_t entry; +- ipc_entry_bits_t bits; +- +- entry = &table[index]; +- bits = entry->ie_bits; +- +- /* check generation number and type bit */ +- +- if ((bits & (IE_BITS_GEN_MASK| +- MACH_PORT_TYPE_RECEIVE)) != +- (gen | MACH_PORT_TYPE_RECEIVE)) ++ entry = ipc_entry_lookup (space, reply_name); ++ if (entry == IE_NULL) + goto abort_request_copyin; +- + reply_port = (ipc_port_t) entry->ie_object; + assert(reply_port != IP_NULL); + } +- } +- +- /* optimized ipc_entry_lookup of dest_name */ +- +- { +- mach_port_index_t index; +- mach_port_gen_t gen; + + { + mach_port_t dest_name = + kmsg->ikm_header.msgh_remote_port; + +- index = MACH_PORT_INDEX(dest_name); +- gen = MACH_PORT_GEN(dest_name); +- } +- +- if (index >= size) +- goto abort_request_copyin; +- +- { + ipc_entry_t entry; + ipc_entry_bits_t bits; +- +- entry = &table[index]; ++ entry = ipc_entry_lookup (space, dest_name); ++ if (entry == IE_NULL) ++ goto abort_request_copyin; + bits = entry->ie_bits; + +- /* check generation number and type bit */ +- +- if ((bits & (IE_BITS_GEN_MASK|MACH_PORT_TYPE_SEND)) != +- (gen | MACH_PORT_TYPE_SEND)) ++ /* check type bits */ ++ if (IE_BITS_TYPE (bits) != MACH_PORT_TYPE_SEND) + goto abort_request_copyin; + + assert(IE_BITS_UREFS(bits) > 0); +@@ -568,7 +521,6 @@ mach_msg_trap( + dest_port = (ipc_port_t) entry->ie_object; + assert(dest_port != IP_NULL); + } +- } + + /* + * To do an atomic copyin, need simultaneous +@@ -649,9 +601,6 @@ mach_msg_trap( + } + + case MACH_MSGH_BITS(MACH_MSG_TYPE_MOVE_SEND_ONCE, 0): { +- ipc_entry_num_t size; +- ipc_entry_t table; +- + /* sending a reply message */ + + { +@@ -665,35 +614,18 @@ mach_msg_trap( + is_write_lock(space); + assert(space->is_active); + +- /* optimized ipc_entry_lookup */ +- +- size = space->is_table_size; +- table = space->is_table; +- + { + ipc_entry_t entry; +- mach_port_gen_t gen; +- mach_port_index_t index; +- +- { + mach_port_t dest_name = + kmsg->ikm_header.msgh_remote_port; + +- index = MACH_PORT_INDEX(dest_name); +- gen = MACH_PORT_GEN(dest_name); +- } +- +- if (index >= size) ++ entry = ipc_entry_lookup (space, dest_name); ++ if (entry == IE_NULL) + goto abort_reply_dest_copyin; + +- entry = &table[index]; +- +- /* check generation, collision bit, and type bit */ +- +- if ((entry->ie_bits & (IE_BITS_GEN_MASK| +- IE_BITS_COLLISION| +- MACH_PORT_TYPE_SEND_ONCE)) != +- (gen | MACH_PORT_TYPE_SEND_ONCE)) ++ /* check type bits */ ++ if (IE_BITS_TYPE (entry->ie_bits) != ++ 
MACH_PORT_TYPE_SEND_ONCE) + goto abort_reply_dest_copyin; + + /* optimized ipc_right_copyin */ +@@ -716,13 +648,8 @@ mach_msg_trap( + } + + assert(dest_port->ip_sorights > 0); +- +- /* optimized ipc_entry_dealloc */ +- +- entry->ie_next = table->ie_next; +- table->ie_next = index; +- entry->ie_bits = gen; + entry->ie_object = IO_NULL; ++ ipc_entry_dealloc (space, dest_name, entry); + } + + kmsg->ikm_header.msgh_bits = +@@ -735,31 +662,16 @@ mach_msg_trap( + + assert(dest_port->ip_receiver != ipc_space_kernel); + +- /* optimized ipc_entry_lookup/ipc_mqueue_copyin */ ++ /* optimized ipc_mqueue_copyin */ + + { + ipc_entry_t entry; + ipc_entry_bits_t bits; +- +- { +- mach_port_index_t index; +- mach_port_gen_t gen; +- +- index = MACH_PORT_INDEX(rcv_name); +- gen = MACH_PORT_GEN(rcv_name); +- +- if (index >= size) ++ entry = ipc_entry_lookup (space, rcv_name); ++ if (entry == IE_NULL) + goto abort_reply_rcv_copyin; +- +- entry = &table[index]; + bits = entry->ie_bits; + +- /* check generation number */ +- +- if ((bits & IE_BITS_GEN_MASK) != gen) +- goto abort_reply_rcv_copyin; +- } +- + /* check type bits; looking for receive or set */ + + if (bits & MACH_PORT_TYPE_PORT_SET) { +@@ -1072,21 +984,12 @@ mach_msg_trap( + ip_unlock(reply_port); + + { +- ipc_entry_t table; + ipc_entry_t entry; +- mach_port_index_t index; +- +- /* optimized ipc_entry_get */ +- +- table = space->is_table; +- index = table->ie_next; +- +- if (index == 0) ++ kern_return_t kr; ++ kr = ipc_entry_get (space, &reply_name, &entry); ++ if (kr) + goto abort_request_copyout; +- +- entry = &table[index]; +- table->ie_next = entry->ie_next; +- entry->ie_request = 0; ++ assert (entry != NULL); + + { + mach_port_gen_t gen; +@@ -1094,8 +997,6 @@ mach_msg_trap( + assert((entry->ie_bits &~ IE_BITS_GEN_MASK) == 0); + gen = entry->ie_bits + IE_BITS_GEN_ONE; + +- reply_name = MACH_PORT_MAKE(index, gen); +- + /* optimized ipc_right_copyout */ + + entry->ie_bits = gen | (MACH_PORT_TYPE_SEND_ONCE | 1); +diff --git a/kern/exception.c b/kern/exception.c +index 7954fba..6e84c0a 100644 +--- a/kern/exception.c ++++ b/kern/exception.c +@@ -603,30 +603,18 @@ exception_raise( + ip_unlock(reply_port); + + { +- ipc_entry_t table; ++ kern_return_t kr; + ipc_entry_t entry; +- mach_port_index_t index; +- +- /* optimized ipc_entry_get */ +- +- table = space->is_table; +- index = table->ie_next; + +- if (index == 0) ++ kr = ipc_entry_get (space, &exc->Head.msgh_remote_port, &entry); ++ if (kr) + goto abort_copyout; +- +- entry = &table[index]; +- table->ie_next = entry->ie_next; +- entry->ie_request = 0; +- + { + mach_port_gen_t gen; + + assert((entry->ie_bits &~ IE_BITS_GEN_MASK) == 0); + gen = entry->ie_bits + IE_BITS_GEN_ONE; + +- exc->Head.msgh_remote_port = MACH_PORT_MAKE(index, gen); +- + /* optimized ipc_right_copyout */ + + entry->ie_bits = gen | (MACH_PORT_TYPE_SEND_ONCE | 1); +diff --git a/kern/ipc_mig.c b/kern/ipc_mig.c +index cc61ec7..22dac42 100644 +--- a/kern/ipc_mig.c ++++ b/kern/ipc_mig.c +@@ -310,16 +310,18 @@ mig_strncpy(dest, src, len) + MACRO_BEGIN \ + ipc_space_t space = current_space(); \ + ipc_entry_t entry; \ +- mach_port_index_t index = MACH_PORT_INDEX(name); \ + \ + is_read_lock(space); \ + assert(space->is_active); \ + \ +- if ((index >= space->is_table_size) || \ +- (((entry = &space->is_table[index])->ie_bits & \ +- (IE_BITS_GEN_MASK|MACH_PORT_TYPE_SEND)) != \ +- (MACH_PORT_GEN(name) | MACH_PORT_TYPE_SEND))) { \ +- is_read_unlock(space); \ ++ entry = ipc_entry_lookup (space, name); \ ++ if (entry == IE_NULL) { \ ++ 
is_read_unlock (space); \ ++ abort; \ ++ } \ ++ \ ++ if (IE_BITS_TYPE (entry->ie_bits) != MACH_PORT_TYPE_SEND) { \ ++ is_read_unlock (space); \ + abort; \ + } \ + \ +-- +2.1.4 + diff --git a/debian/patches/0003-XXX-Unused-constant.patch b/debian/patches/0003-XXX-Unused-constant.patch new file mode 100644 index 0000000..5838e30 --- /dev/null +++ b/debian/patches/0003-XXX-Unused-constant.patch @@ -0,0 +1,24 @@ +From 3e1ab8a2956ea240a12c8efa15ccb5db0023da9d Mon Sep 17 00:00:00 2001 +From: Justus Winter <4winter@informatik.uni-hamburg.de> +Date: Mon, 16 Mar 2015 17:02:17 +0100 +Subject: [PATCH gnumach 3/4] XXX: Unused constant + +--- + ipc/ipc_entry.h | 1 + + 1 file changed, 1 insertion(+) + +diff --git a/ipc/ipc_entry.h b/ipc/ipc_entry.h +index cb6d3f9..f5251f7 100644 +--- a/ipc/ipc_entry.h ++++ b/ipc/ipc_entry.h +@@ -98,6 +98,7 @@ typedef struct ipc_entry { + + #define IE_BITS_MAREQUEST 0x00200000 /* 1 bit for msg-accepted */ + ++/* XXX: Unused. */ + #define IE_BITS_COMPAT 0x00400000 /* 1 bit for compatibility */ + + #define IE_BITS_COLLISION 0x00800000 /* 1 bit for collisions */ +-- +2.1.4 + diff --git a/debian/patches/0004-ipc-replace-reverse-hash-table-with-a-radix-tree.patch b/debian/patches/0004-ipc-replace-reverse-hash-table-with-a-radix-tree.patch new file mode 100644 index 0000000..e34145b --- /dev/null +++ b/debian/patches/0004-ipc-replace-reverse-hash-table-with-a-radix-tree.patch @@ -0,0 +1,380 @@ +From e95526191e43dbe32dba8cd3d5cb95cd468791c8 Mon Sep 17 00:00:00 2001 +From: Justus Winter <4winter@informatik.uni-hamburg.de> +Date: Wed, 18 Mar 2015 12:25:26 +0100 +Subject: [PATCH gnumach 4/4] ipc: replace reverse hash table with a radix tree + +* ipc/ipc_entry.c +* ipc/ipc_hash.c +* ipc/ipc_space.c +* ipc/ipc_space.h +--- + ipc/ipc_entry.c | 5 +- + ipc/ipc_hash.c | 184 ++++++++------------------------------------------------ + ipc/ipc_space.c | 3 + + ipc/ipc_space.h | 57 ++++++++++++++++++ + 4 files changed, 87 insertions(+), 162 deletions(-) + +diff --git a/ipc/ipc_entry.c b/ipc/ipc_entry.c +index e78f74e..5b9fd98 100644 +--- a/ipc/ipc_entry.c ++++ b/ipc/ipc_entry.c +@@ -618,6 +618,7 @@ ipc_entry_grow_table(ipc_space_t space) + is_write_unlock(space); + thread_wakeup((event_t) space); + it_entries_free(its, table); ++ ipc_reverse_remove_all(space); + is_write_lock(space); + return KERN_SUCCESS; + } +@@ -641,9 +642,6 @@ ipc_entry_grow_table(ipc_space_t space) + memcpy(table, otable, + osize * sizeof(struct ipc_entry)); + +- for (i = 0; i < osize; i++) +- table[i].ie_index = 0; +- + (void) memset((void *) (table + osize), 0, + (size - osize) * sizeof(struct ipc_entry)); + +@@ -651,6 +649,7 @@ ipc_entry_grow_table(ipc_space_t space) + * Put old entries into the reverse hash table. + */ + ++ ipc_reverse_remove_all(space); + for (i = 0; i < osize; i++) { + ipc_entry_t entry = &table[i]; + +diff --git a/ipc/ipc_hash.c b/ipc/ipc_hash.c +index c2c6d6e..87952a7 100644 +--- a/ipc/ipc_hash.c ++++ b/ipc/ipc_hash.c +@@ -296,37 +296,19 @@ ipc_hash_global_delete( + } + + /* +- * Each space has a local reverse hash table, which holds +- * entries from the space's table. In fact, the hash table +- * just uses a field (ie_index) in the table itself. +- * +- * The local hash table is an open-addressing hash table, +- * which means that when a collision occurs, instead of +- * throwing the entry into a bucket, the entry is rehashed +- * to another position in the table. In this case the rehash +- * is very simple: linear probing (ie, just increment the position). 
+- * This simple rehash makes deletions tractable (they're still a pain), +- * but it means that collisions tend to build up into clumps. +- * +- * Because at least one entry in the table (index 0) is always unused, +- * there will always be room in the reverse hash table. If a table +- * with n slots gets completely full, the reverse hash table will +- * have one giant clump of n-1 slots and one free slot somewhere. +- * Because entries are only entered into the reverse table if they +- * are pure send rights (not receive, send-once, port-set, +- * or dead-name rights), and free entries of course aren't entered, +- * I expect the reverse hash table won't get unreasonably full. +- * +- * Ordered hash tables (Amble & Knuth, Computer Journal, v. 17, no. 2, +- * pp. 135-142.) may be desirable here. They can dramatically help +- * unsuccessful lookups. But unsuccessful lookups are almost always +- * followed by insertions, and those slow down somewhat. They +- * also can help deletions somewhat. Successful lookups aren't affected. +- * So possibly a small win; probably nothing significant. ++ * Each space has a local reverse mapping from objects to entries ++ * from the space's table. This used to be a hash table. + */ + +-#define IH_LOCAL_HASH(obj, size) \ +- ((((mach_port_index_t) (vm_offset_t) (obj)) >> 6) & (size - 1)) ++#define IPC_LOCAL_HASH_INVARIANT(S, O, N, E) \ ++ MACRO_BEGIN \ ++ assert(IE_BITS_TYPE((E)->ie_bits) == MACH_PORT_TYPE_SEND || \ ++ IE_BITS_TYPE((E)->ie_bits) == MACH_PORT_TYPE_SEND_RECEIVE || \ ++ IE_BITS_TYPE((E)->ie_bits) == MACH_PORT_TYPE_NONE); \ ++ assert((E)->ie_object == (O)); \ ++ assert((E)->ie_index == (N)); \ ++ assert(&(S)->is_table[N] == (E)); \ ++ MACRO_END + + /* + * Routine: ipc_hash_local_lookup +@@ -345,37 +327,15 @@ ipc_hash_local_lookup( + mach_port_t *namep, + ipc_entry_t *entryp) + { +- ipc_entry_t table; +- ipc_entry_num_t size; +- mach_port_index_t hindex, index; +- + assert(space != IS_NULL); + assert(obj != IO_NULL); + +- table = space->is_table; +- size = space->is_table_size; +- hindex = IH_LOCAL_HASH(obj, size); +- +- /* +- * Ideally, table[hindex].ie_index is the name we want. +- * However, must check ie_object to verify this, +- * because collisions can happen. In case of a collision, +- * search farther along in the clump. +- */ +- +- while ((index = table[hindex].ie_index) != 0) { +- ipc_entry_t entry = &table[index]; +- +- if (entry->ie_object == obj) { +- *namep = MACH_PORT_MAKEB(index, entry->ie_bits); +- *entryp = entry; +- return TRUE; +- } +- +- if (++hindex == size) +- hindex = 0; ++ *entryp = ipc_reverse_lookup(space, obj); ++ if (*entryp != IE_NULL) { ++ *namep = (*entryp)->ie_index; ++ IPC_LOCAL_HASH_INVARIANT(space, obj, *namep, *entryp); ++ return TRUE; + } +- + return FALSE; + } + +@@ -394,33 +354,15 @@ ipc_hash_local_insert( + mach_port_index_t index, + ipc_entry_t entry) + { +- ipc_entry_t table; +- ipc_entry_num_t size; +- mach_port_index_t hindex; +- ++ kern_return_t kr; + assert(index != 0); + assert(space != IS_NULL); + assert(obj != IO_NULL); + +- table = space->is_table; +- size = space->is_table_size; +- hindex = IH_LOCAL_HASH(obj, size); +- +- assert(entry == &table[index]); +- assert(entry->ie_object == obj); +- +- /* +- * We want to insert at hindex, but there may be collisions. +- * If a collision occurs, search for the end of the clump +- * and insert there. 
+- */ +- +- while (table[hindex].ie_index != 0) { +- if (++hindex == size) +- hindex = 0; +- } +- +- table[hindex].ie_index = index; ++ entry->ie_index = index; ++ IPC_LOCAL_HASH_INVARIANT(space, obj, index, entry); ++ kr = ipc_reverse_insert(space, obj, entry); ++ assert(kr == 0); + } + + /* +@@ -438,90 +380,14 @@ ipc_hash_local_delete( + mach_port_index_t index, + ipc_entry_t entry) + { +- ipc_entry_t table; +- ipc_entry_num_t size; +- mach_port_index_t hindex, dindex; +- ++ ipc_entry_t removed; + assert(index != MACH_PORT_NULL); + assert(space != IS_NULL); + assert(obj != IO_NULL); + +- table = space->is_table; +- size = space->is_table_size; +- hindex = IH_LOCAL_HASH(obj, size); +- +- assert(entry == &table[index]); +- assert(entry->ie_object == obj); +- +- /* +- * First check we have the right hindex for this index. +- * In case of collision, we have to search farther +- * along in this clump. +- */ +- +- while (table[hindex].ie_index != index) { +- if (table[hindex].ie_index == 0) +- { +- static int gak = 0; +- if (gak == 0) +- { +- printf("gak! entry wasn't in hash table!\n"); +- gak = 1; +- } +- return; +- } +- if (++hindex == size) +- hindex = 0; +- } +- +- /* +- * Now we want to set table[hindex].ie_index = 0. +- * But if we aren't the last index in a clump, +- * this might cause problems for lookups of objects +- * farther along in the clump that are displaced +- * due to collisions. Searches for them would fail +- * at hindex instead of succeeding. +- * +- * So we must check the clump after hindex for objects +- * that are so displaced, and move one up to the new hole. +- * +- * hindex - index of new hole in the clump +- * dindex - index we are checking for a displaced object +- * +- * When we move a displaced object up into the hole, +- * it creates a new hole, and we have to repeat the process +- * until we get to the end of the clump. +- */ +- +- for (dindex = hindex; index != 0; hindex = dindex) { +- for (;;) { +- mach_port_index_t tindex; +- ipc_object_t tobj; +- +- if (++dindex == size) +- dindex = 0; +- assert(dindex != hindex); +- +- /* are we at the end of the clump? */ +- +- index = table[dindex].ie_index; +- if (index == 0) +- break; +- +- /* is this a displaced object? */ +- +- tobj = table[index].ie_object; +- assert(tobj != IO_NULL); +- tindex = IH_LOCAL_HASH(tobj, size); +- +- if ((dindex < hindex) ? +- ((dindex < tindex) && (tindex <= hindex)) : +- ((dindex < tindex) || (tindex <= hindex))) +- break; +- } +- +- table[hindex].ie_index = index; +- } ++ IPC_LOCAL_HASH_INVARIANT(space, obj, index, entry); ++ removed = ipc_reverse_remove(space, obj); ++ assert(removed == entry); + } + + /* +diff --git a/ipc/ipc_space.c b/ipc/ipc_space.c +index ab55e83..dcc96b3 100644 +--- a/ipc/ipc_space.c ++++ b/ipc/ipc_space.c +@@ -148,6 +148,7 @@ ipc_space_create( + space->is_tree_total = 0; + space->is_tree_small = 0; + space->is_tree_hash = 0; ++ rdxtree_init(&space->is_reverse_map); + + *spacep = space; + return KERN_SUCCESS; +@@ -271,6 +272,8 @@ ipc_space_destroy( + } + ipc_splay_traverse_finish(&space->is_tree); + ++ rdxtree_remove_all(&space->is_reverse_map); ++ + /* + * Because the space is now dead, + * we must release the "active" reference for it. 
+diff --git a/ipc/ipc_space.h b/ipc/ipc_space.h +index c4683d2..51c093b 100644 +--- a/ipc/ipc_space.h ++++ b/ipc/ipc_space.h +@@ -44,6 +44,7 @@ + #include <mach/mach_types.h> + #include <kern/macro_help.h> + #include <kern/lock.h> ++#include <kern/rdxtree.h> + #include <kern/slab.h> + #include <ipc/ipc_splay.h> + #include <ipc/ipc_types.h> +@@ -79,6 +80,8 @@ struct ipc_space { + ipc_entry_num_t is_tree_total; /* number of entries in the tree */ + ipc_entry_num_t is_tree_small; /* # of small entries in the tree */ + ipc_entry_num_t is_tree_hash; /* # of hashed entries in the tree */ ++ struct rdxtree is_reverse_map; /* maps objects to entries */ ++ + }; + + #define IS_NULL ((ipc_space_t) 0) +@@ -135,4 +138,58 @@ kern_return_t ipc_space_create(ipc_table_size_t, ipc_space_t *); + kern_return_t ipc_space_create_special(struct ipc_space **); + void ipc_space_destroy(struct ipc_space *); + ++/* Reverse lookups. */ ++ ++/* Cast a pointer to a suitable key. */ ++#define KEY(X) ((unsigned long long) (unsigned long) (X)) ++ ++/* Insert (OBJ, ENTRY) pair into the reverse mapping. SPACE must ++ be write-locked. */ ++static inline kern_return_t ++ipc_reverse_insert(ipc_space_t space, ++ ipc_object_t obj, ++ ipc_entry_t entry) ++{ ++ assert(space != IS_NULL); ++ assert(obj != IO_NULL); ++ return (kern_return_t) rdxtree_insert(&space->is_reverse_map, ++ KEY(obj), entry); ++} ++ ++/* Remove OBJ from the reverse mapping. SPACE must be ++ write-locked. */ ++static inline ipc_entry_t ++ipc_reverse_remove(ipc_space_t space, ++ ipc_object_t obj) ++{ ++ assert(space != IS_NULL); ++ assert(obj != IO_NULL); ++ return rdxtree_remove(&space->is_reverse_map, KEY(obj)); ++} ++ ++/* Remove all entries from the reverse mapping. SPACE must be ++ write-locked. */ ++static inline void ++ipc_reverse_remove_all(ipc_space_t space) ++{ ++ assert(space != IS_NULL); ++ rdxtree_remove_all(&space->is_reverse_map); ++ assert(space->is_reverse_map.height == 0); ++ assert(space->is_reverse_map.root == NULL); ++} ++ ++/* Return ENTRY related to OBJ, or NULL if no such entry is found in ++ the reverse mapping. SPACE must be read-locked or ++ write-locked. */ ++static inline ipc_entry_t ++ipc_reverse_lookup(ipc_space_t space, ++ ipc_object_t obj) ++{ ++ assert(space != IS_NULL); ++ assert(obj != IO_NULL); ++ return rdxtree_lookup(&space->is_reverse_map, KEY(obj)); ++} ++ ++#undef KEY ++ + #endif /* _IPC_IPC_SPACE_H_ */ +-- +2.1.4 + diff --git a/debian/patches/series b/debian/patches/series index 530b368..dcd851b 100644 --- a/debian/patches/series +++ b/debian/patches/series @@ -8,3 +8,7 @@ Add-some-padding-to-make-objects-fit-a-single-cache-.patch vm_cache_policy.patch vm_page_cleanq.patch task-load.patch +0001-kern-add-radix-tree-library.patch +0002-ipc-undo-manual-inlining-of-ipc_entry_X-functions.patch +0003-XXX-Unused-constant.patch +0004-ipc-replace-reverse-hash-table-with-a-radix-tree.patch |
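Patch 0001 centralizes the `structof' (container-of) idiom previously duplicated in kern/list.h and kern/rbtree.h, together with the power-of-two helpers, in the new kern/macros.h. A minimal standalone illustration of both; the macro definitions are copied from the patch, while `struct task' and its embedded list node are hypothetical:

```c
#include <assert.h>
#include <stddef.h>

/* Definitions as in kern/macros.h (patch 0001). */
#define structof(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))
#define P2ALIGNED(x, a) (((x) & ((a) - 1)) == 0)
#define P2ROUND(x, a)   (-(-(x) & -(a)))

struct list { struct list *prev, *next; };

/* Hypothetical embedding: recover the enclosing object from a
 * pointer to the list node stored inside it. */
struct task {
    int id;
    struct list node;
};

int main(void)
{
    struct task t = { .id = 7 };
    struct list *n = &t.node;

    assert(structof(n, struct task, node) == &t);

    /* P2ROUND rounds up to a power-of-two boundary:
     * -(-(x) & -(a)) == (x + a - 1) & ~(a - 1) for power-of-two a. */
    assert(P2ROUND(13UL, 8UL) == 16UL);
    assert(P2ALIGNED(P2ROUND(13UL, 8UL), 8UL));
    return 0;
}
```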
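The insert/lookup walks in kern/rdxtree.c consume RDXTREE_RADIX (6) bits of the key per level, most significant chunk first, so a tree of height h covers keys up to 2^(6h) - 1 (see rdxtree_max_key). A hypothetical, hosted-environment trace of that decomposition, reusing the patch's shift arithmetic:

```c
#include <stdio.h>

/* Per-level slot indices for a key, computed exactly as in
 * rdxtree_lookup_common() from patch 0001. */
#define RDXTREE_RADIX      6
#define RDXTREE_RADIX_MASK ((1UL << RDXTREE_RADIX) - 1)

static void
print_key_path(unsigned long long key, unsigned int height)
{
    unsigned int shift = (height - 1) * RDXTREE_RADIX;

    while (height > 0) {
        unsigned int index = (unsigned int)(key >> shift) & RDXTREE_RADIX_MASK;

        printf("height %u: slot %u\n", height, index);
        shift -= RDXTREE_RADIX;
        height--;
    }
}

int main(void)
{
    /* Key 1000 in a height-2 tree: 1000 = 15*64 + 40, so the walk
     * takes slot 15 at the root, then slot 40 in the leaf node. */
    print_key_path(1000, 2);
    return 0;
}
```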
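The `__ffsdi2' symbol stolen in Makefile.am fits the bitmap width: with RDXTREE_RADIX == 6 the per-node allocation bitmap is 64 bits and rdxtree_ffs maps to `__builtin_ffsll', for which GCC may emit a libgcc call on 32-bit targets (an inference from the patch, not stated in it). Free-slot selection then reduces to a single find-first-set, as in this sketch of rdxtree_node_bm_first:

```c
/* Free-slot selection against a node's allocation bitmap, per
 * patch 0001: a set bit means the slot, or some descendant under
 * it, can still take an entry. */
typedef unsigned long long rdxtree_bm_t;

static unsigned int
bm_first_free(rdxtree_bm_t alloc_bm)
{
    /* ffsll is 1-based and returns 0 for an empty bitmap, so this
     * yields (unsigned int)-1 when the node is full -- exactly the
     * "grow the tree" signal tested in rdxtree_insert_alloc_common(). */
    return __builtin_ffsll(alloc_bm) - 1;
}
```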
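As an orientation for the interface declared in kern/rdxtree.h, here is a minimal usage sketch. It only makes sense inside the gnumach tree with patch 0001 applied; `struct my_obj' and the wrapper functions are hypothetical, and the error codes are the kern_return_t values the patch maps (KERN_NO_SPACE for an occupied key, KERN_RESOURCE_SHORTAGE on allocation failure):

```c
#include <kern/rdxtree.h>

/* Hypothetical payload; stored pointers must be at least 4-byte
 * aligned, which any slab-allocated object satisfies. */
struct my_obj {
    unsigned long long key;
    int value;
};

static struct rdxtree obj_map = RDXTREE_INITIALIZER;

/* Insert under a caller-chosen key; fails if the key is taken. */
static int
obj_map_add(struct my_obj *obj)
{
    return rdxtree_insert(&obj_map, obj->key, obj);
}

/* Insert and have the tree allocate the lowest free key. */
static int
obj_map_add_auto(struct my_obj *obj)
{
    return rdxtree_insert_alloc(&obj_map, obj, &obj->key);
}

static struct my_obj *
obj_map_find(unsigned long long key)
{
    return rdxtree_lookup(&obj_map, key);
}

static unsigned int
obj_map_count(void)
{
    struct rdxtree_iter iter;
    struct my_obj *obj;
    unsigned int n = 0;

    /* To destroy a populated tree, free each stored pointer in a
     * loop like this, then call rdxtree_remove_all(&obj_map). */
    rdxtree_for_each(&obj_map, &iter, obj)
        n++;

    return n;
}
```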
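On the consumer side of patch 0004, the reverse map returns the entry directly, and the entry's `ie_index' field (set in ipc_hash_local_insert) caches the name recorded at insert time, so reverse lookups no longer probe a hash clump. A sketch restating the pattern of the patched ipc_hash_local_lookup; locking is the caller's responsibility, as documented in ipc_space.h:

```c
/* Reverse (object -> entry) lookup after patch 0004: the radix
 * tree replaces the open-addressed probe, and ie_index holds the
 * name cached when the pair was inserted.  The space must be
 * read- or write-locked by the caller. */
boolean_t
reverse_lookup_sketch(ipc_space_t space, ipc_object_t obj,
                      mach_port_t *namep, ipc_entry_t *entryp)
{
    *entryp = ipc_reverse_lookup(space, obj);

    if (*entryp == IE_NULL)
        return FALSE;

    *namep = (*entryp)->ie_index;
    return TRUE;
}
```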