summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorJustus Winter <4winter@informatik.uni-hamburg.de>2014-05-29 15:04:27 +0200
committerJustus Winter <4winter@informatik.uni-hamburg.de>2014-05-29 15:04:27 +0200
commit85d26d010c0e27888f070418e293679fafe4b915 (patch)
tree6cefb247905f6b6ad39891ad0cdb03f05c8db9a3
parent05d82e6d32e1305e5a9c1903410ee0e616c2c2b4 (diff)
add patch series
-rw-r--r--debian/patches/0001-libdiskfs-add-diskfs_make_node_alloc-to-allocate-fat.patch149
-rw-r--r--debian/patches/0002-libnetfs-add-netfs_make_node_alloc-to-allocate-fat-n.patch138
-rw-r--r--debian/patches/0003-trans-fakeroot-use-fat-nodes-to-simplify-the-node-ca.patch149
-rw-r--r--debian/patches/0004-trans-fakeroot-use-netfs_node_netnode-instead-of-np-.patch386
-rw-r--r--debian/patches/0005-libports-use-a-global-hash-table-for-the-lookups.patch668
-rw-r--r--debian/patches/0006-libports-lock-less-reference-counting-for-port_info-.patch344
-rw-r--r--debian/patches/0007-ext2fs-use-a-seperate-lock-to-protect-nodehash.patch210
-rw-r--r--debian/patches/0008-fatfs-use-a-seperate-lock-to-protect-nodehash.patch253
-rw-r--r--debian/patches/0009-isofs-use-a-seperate-lock-to-protect-node_cache.patch229
-rw-r--r--debian/patches/0010-tmpfs-use-a-seperate-lock-to-protect-all_nodes.patch298
-rw-r--r--debian/patches/0011-libdiskfs-lock-less-reference-counting-of-nodes.patch531
-rw-r--r--debian/patches/0012-tmpfs-use-a-thread-timeout.patch36
-rw-r--r--debian/patches/0013-libdiskfs-remove-the-statistics-code-from-the-name-c.patch147
-rw-r--r--debian/patches/0014-libdiskfs-use-a-hash-table-for-the-name-cache.patch451
-rw-r--r--debian/patches/series14
15 files changed, 4003 insertions, 0 deletions
diff --git a/debian/patches/0001-libdiskfs-add-diskfs_make_node_alloc-to-allocate-fat.patch b/debian/patches/0001-libdiskfs-add-diskfs_make_node_alloc-to-allocate-fat.patch
new file mode 100644
index 00000000..6a873cdf
--- /dev/null
+++ b/debian/patches/0001-libdiskfs-add-diskfs_make_node_alloc-to-allocate-fat.patch
@@ -0,0 +1,149 @@
+From 80485401a9a5e9df03bd3a1503bc5e59d1f2e5c1 Mon Sep 17 00:00:00 2001
+From: Justus Winter <4winter@informatik.uni-hamburg.de>
+Date: Fri, 16 May 2014 23:06:33 +0200
+Subject: [PATCH 01/14] libdiskfs: add diskfs_make_node_alloc to allocate fat
+ nodes
+
+libdiskfs has two kinds of nodes, struct node and struct netnode.
+struct node is used to store libdiskfs specific data, while struct
+netnode contains user supplied data. Previously, both objects were
+allocated separately, and a pointer from the node to the netnode
+provided a mapping from the former to the latter.
+
+Provide a function diskfs_make_node_alloc that allocates both nodes in
+a contiguous region.
+
+This reduces the memory allocation overhead when creating nodes. It
+also makes the relation between node and netnode a simple offset
+calculation. Provide two functions to compute the netnode address
+from the node address and vice-versa.
+
+Most notably, this makes implementing a cache on top of libdiskfs
+easier. Auxiliary data for the cache can be stored in the
+user-defined netnode, and the fat node can be used as the value.
+
+* libdiskfs/node-make.c (init_node): Move initialization here.
+(diskfs_make_node): Use init_node.
+(diskfs_make_node_alloc): New function to allocate fat nodes.
+* libdiskfs/diskfs.h (diskfs_make_node_alloc): New declaration.
+(_diskfs_sizeof_struct_node): Likewise.
+(diskfs_node_disknode): Compute disknode address from node address.
+(diskfs_disknode_node): And vice-versa.
+* libdiskfs/init-init.c (_diskfs_sizeof_struct_node): New variable.
+---
+ libdiskfs/diskfs.h | 27 +++++++++++++++++++++++++++
+ libdiskfs/init-init.c | 4 ++++
+ libdiskfs/node-make.c | 39 ++++++++++++++++++++++++++++++---------
+ 3 files changed, 61 insertions(+), 9 deletions(-)
+
+diff --git a/libdiskfs/diskfs.h b/libdiskfs/diskfs.h
+index ae1a150..2c68aa3 100644
+--- a/libdiskfs/diskfs.h
++++ b/libdiskfs/diskfs.h
+@@ -686,6 +686,33 @@ diskfs_notice_filechange (struct node *np, enum file_changed_type type,
+ The new node will have one hard reference and no light references. */
+ struct node *diskfs_make_node (struct disknode *dn);
+
++/* Create a new node structure. Also allocate SIZE bytes for the
++ disknode. The address of the disknode can be obtained using
++ diskfs_node_disknode. The new node will have one hard reference
++ and no light references. */
++struct node *diskfs_make_node_alloc (size_t size);
++
++/* To avoid breaking the ABI whenever sizeof (struct node) changes, we
++ explicitly provide the size. The following two functions will use
++ this value for offset calculations. */
++extern const size_t _diskfs_sizeof_struct_node;
++
++/* Return the address of the disknode for NODE. NODE must have been
++ allocated using diskfs_make_node_alloc. */
++static inline struct disknode *
++diskfs_node_disknode (struct node *node)
++{
++ return (struct disknode *) ((char *) node + _diskfs_sizeof_struct_node);
++}
++
++/* Return the address of the node for DISKNODE. DISKNODE must have
++ been allocated using diskfs_make_node_alloc. */
++static inline struct node *
++diskfs_disknode_node (struct disknode *disknode)
++{
++ return (struct node *) ((char *) disknode - _diskfs_sizeof_struct_node);
++}
++
+
+ /* The library also exports the following functions; they are not generally
+ useful unless you are redefining other functions the library provides. */
+diff --git a/libdiskfs/init-init.c b/libdiskfs/init-init.c
+index 35be7ed..7a7f248 100644
+--- a/libdiskfs/init-init.c
++++ b/libdiskfs/init-init.c
+@@ -25,6 +25,10 @@ the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
+ #include <stdio.h>
+ #include <maptime.h>
+
++/* For safe inlining of diskfs_node_disknode and
++ diskfs_disknode_node. */
++size_t const _diskfs_sizeof_struct_node = sizeof (struct node);
++
+ mach_port_t diskfs_default_pager;
+ mach_port_t diskfs_auth_server_port;
+ volatile struct mapped_time_value *diskfs_mtime;
+diff --git a/libdiskfs/node-make.c b/libdiskfs/node-make.c
+index 2b6ef2a..ff0cc0d 100644
+--- a/libdiskfs/node-make.c
++++ b/libdiskfs/node-make.c
+@@ -19,16 +19,9 @@
+ #include <fcntl.h>
+
+
+-/* Create a and return new node structure with DN as its physical disknode.
+- The node will have one hard reference and no light references. */
+-struct node *
+-diskfs_make_node (struct disknode *dn)
++static struct node *
++init_node (struct node *np, struct disknode *dn)
+ {
+- struct node *np = malloc (sizeof (struct node));
+-
+- if (np == 0)
+- return 0;
+-
+ np->dn = dn;
+ np->dn_set_ctime = 0;
+ np->dn_set_atime = 0;
+@@ -52,3 +45,31 @@ diskfs_make_node (struct disknode *dn)
+
+ return np;
+ }
++
++/* Create and return a new node structure with DN as its physical disknode.
++ The node will have one hard reference and no light references. */
++struct node *
++diskfs_make_node (struct disknode *dn)
++{
++ struct node *np = malloc (sizeof (struct node));
++
++ if (np == 0)
++ return 0;
++
++ return init_node (np, dn);
++}
++
++/* Create a new node structure. Also allocate SIZE bytes for the
++ disknode. The address of the disknode can be obtained using
++ diskfs_node_disknode. The new node will have one hard reference
++ and no light references. */
++struct node *
++diskfs_make_node_alloc (size_t size)
++{
++ struct node *np = malloc (sizeof (struct node) + size);
++
++ if (np == NULL)
++ return NULL;
++
++ return init_node (np, diskfs_node_disknode (np));
++}
+--
+2.0.0.rc2
+
diff --git a/debian/patches/0002-libnetfs-add-netfs_make_node_alloc-to-allocate-fat-n.patch b/debian/patches/0002-libnetfs-add-netfs_make_node_alloc-to-allocate-fat-n.patch
new file mode 100644
index 00000000..a760b7ff
--- /dev/null
+++ b/debian/patches/0002-libnetfs-add-netfs_make_node_alloc-to-allocate-fat-n.patch
@@ -0,0 +1,138 @@
+From 94fecd72f41542c8dfa82bdf7b47742f8c29b321 Mon Sep 17 00:00:00 2001
+From: Justus Winter <4winter@informatik.uni-hamburg.de>
+Date: Sun, 18 May 2014 13:34:12 +0200
+Subject: [PATCH 02/14] libnetfs: add netfs_make_node_alloc to allocate fat
+ nodes
+
+libnetfs has two kinds of nodes, struct node and struct netnode.
+struct node is used to store libnetfs specific data, while struct
+netnode contains user supplied data. Previously, both objects were
+allocated separately, and a pointer from the node to the netnode
+provided a mapping from the former to the latter.
+
+Provide a function netfs_make_node_alloc that allocates both nodes in
+a contiguous region.
+
+This reduces the memory allocation overhead when creating nodes. It
+also makes the relation between node and netnode a simple offset
+calculation. Provide two functions to compute the netnode address
+from the node address and vice-versa.
+
+Most notably, this makes implementing a cache on top of libnetfs
+easier. Auxiliary data for the cache can be stored in the
+user-defined netnode, and the fat node can be used as the value.
+
+* libnetfs/make-node.c (init_node): Move initialization here.
+(netfs_make_node): Use init_node.
+(netfs_make_node_alloc): New function to allocate fat nodes.
+* libnetfs/netfs.h (netfs_make_node_alloc): New declaration.
+(_netfs_sizeof_struct_node): Likewise.
+(netfs_node_netnode): Compute netnode address from node address.
+(netfs_netnode_node): And vice-versa.
+* libnetfs/init-init.c (_netfs_sizeof_struct_node): New variable.
+---
+ libnetfs/init-init.c | 3 +++
+ libnetfs/make-node.c | 29 +++++++++++++++++++++++------
+ libnetfs/netfs.h | 27 +++++++++++++++++++++++++++
+ 3 files changed, 53 insertions(+), 6 deletions(-)
+
+diff --git a/libnetfs/init-init.c b/libnetfs/init-init.c
+index e98b656..a088ad5 100644
+--- a/libnetfs/init-init.c
++++ b/libnetfs/init-init.c
+@@ -21,6 +21,9 @@
+
+ #include "netfs.h"
+
++/* For safe inlining of netfs_node_netnode and netfs_netnode_node. */
++size_t const _netfs_sizeof_struct_node = sizeof (struct node);
++
+ pthread_spinlock_t netfs_node_refcnt_lock = PTHREAD_SPINLOCK_INITIALIZER;
+
+ struct node *netfs_root_node = 0;
+diff --git a/libnetfs/make-node.c b/libnetfs/make-node.c
+index f20ada1..6bd8109 100644
+--- a/libnetfs/make-node.c
++++ b/libnetfs/make-node.c
+@@ -21,13 +21,9 @@
+ #include "netfs.h"
+ #include <hurd/fshelp.h>
+
+-struct node *
+-netfs_make_node (struct netnode *nn)
++static struct node *
++init_node (struct node *np, struct netnode *nn)
+ {
+- struct node *np = malloc (sizeof (struct node));
+- if (! np)
+- return NULL;
+-
+ np->nn = nn;
+
+ pthread_mutex_init (&np->lock, NULL);
+@@ -40,3 +36,24 @@ netfs_make_node (struct netnode *nn)
+
+ return np;
+ }
++
++struct node *
++netfs_make_node (struct netnode *nn)
++{
++ struct node *np = malloc (sizeof (struct node));
++ if (! np)
++ return NULL;
++
++ return init_node (np, nn);
++}
++
++struct node *
++netfs_make_node_alloc (size_t size)
++{
++ struct node *np = malloc (sizeof (struct node) + size);
++
++ if (np == NULL)
++ return NULL;
++
++ return init_node (np, netfs_node_netnode (np));
++}
+diff --git a/libnetfs/netfs.h b/libnetfs/netfs.h
+index aef4a3d..fbe2c60 100644
+--- a/libnetfs/netfs.h
++++ b/libnetfs/netfs.h
+@@ -372,6 +372,33 @@ extern int netfs_maxsymlinks;
+ If an error occurs, NULL is returned. */
+ struct node *netfs_make_node (struct netnode *);
+
++/* Create a new node structure. Also allocate SIZE bytes for the
++ netnode. The address of the netnode can be obtained using
++ netfs_node_netnode. The new node will have one hard reference and
++ no light references. If an error occurs, NULL is returned. */
++struct node *netfs_make_node_alloc (size_t size);
++
++/* To avoid breaking the ABI whenever sizeof (struct node) changes, we
++ explicitly provide the size. The following two functions will use
++ this value for offset calculations. */
++extern const size_t _netfs_sizeof_struct_node;
++
++/* Return the address of the netnode for NODE. NODE must have been
++ allocated using netfs_make_node_alloc. */
++static inline struct netnode *
++netfs_node_netnode (struct node *node)
++{
++ return (struct netnode *) ((char *) node + _netfs_sizeof_struct_node);
++}
++
++/* Return the address of the node for NETNODE. NETNODE must have been
++ allocated using netfs_make_node_alloc. */
++static inline struct node *
++netfs_netnode_node (struct netnode *netnode)
++{
++ return (struct node *) ((char *) netnode - _netfs_sizeof_struct_node);
++}
++
+ /* Whenever node->references is to be touched, this lock must be
+ held. Cf. netfs_nrele, netfs_nput, netfs_nref and netfs_drop_node. */
+ extern pthread_spinlock_t netfs_node_refcnt_lock;
+--
+2.0.0.rc2
+
diff --git a/debian/patches/0003-trans-fakeroot-use-fat-nodes-to-simplify-the-node-ca.patch b/debian/patches/0003-trans-fakeroot-use-fat-nodes-to-simplify-the-node-ca.patch
new file mode 100644
index 00000000..2f106fba
--- /dev/null
+++ b/debian/patches/0003-trans-fakeroot-use-fat-nodes-to-simplify-the-node-ca.patch
@@ -0,0 +1,149 @@
+From 6349d15921134adb4491eb9ce87720a0281a7bd6 Mon Sep 17 00:00:00 2001
+From: Justus Winter <4winter@informatik.uni-hamburg.de>
+Date: Sun, 18 May 2014 13:45:14 +0200
+Subject: [PATCH 03/14] trans/fakeroot: use fat nodes to simplify the node
+ cache
+
+Previously, fakeroot stored netnodes in the hash table. But we are
+not interested in a cache for netnodes, we need a node cache. So
+fakeroot kept pointers to the associated node object in each netnode
+object.
+
+Use fat netfs nodes, which combine node and netnode objects.
+
+* trans/fakeroot.c (struct netnode): Remove np.
+(idport_ihash): Fix ihash location pointer offset.
+(new_node): Allocate fat nodes, store the node pointer in the hash
+table.
+(netfs_node_norefs): Adjust accordingly.
+(netfs_S_dir_lookup): Likewise.
+---
+ trans/fakeroot.c | 36 ++++++++++++------------------------
+ 1 file changed, 12 insertions(+), 24 deletions(-)
+
+diff --git a/trans/fakeroot.c b/trans/fakeroot.c
+index 4175b55..59f8a86 100644
+--- a/trans/fakeroot.c
++++ b/trans/fakeroot.c
+@@ -47,7 +47,6 @@ static auth_t fakeroot_auth_port;
+
+ struct netnode
+ {
+- struct node *np; /* our node */
+ hurd_ihash_locp_t idport_locp;/* easy removal pointer in idport ihash */
+ mach_port_t idport; /* port from io_identity */
+ int openmodes; /* O_READ | O_WRITE | O_EXEC */
+@@ -64,7 +63,8 @@ struct netnode
+
+ pthread_mutex_t idport_ihash_lock = PTHREAD_MUTEX_INITIALIZER;
+ struct hurd_ihash idport_ihash
+- = HURD_IHASH_INITIALIZER (offsetof (struct netnode, idport_locp));
++= HURD_IHASH_INITIALIZER (sizeof (struct node)
++ + offsetof (struct netnode, idport_locp));
+
+
+ /* Make a new virtual node. Always consumes the ports. If
+@@ -74,8 +74,9 @@ new_node (file_t file, mach_port_t idport, int locked, int openmodes,
+ struct node **np)
+ {
+ error_t err;
+- struct netnode *nn = calloc (1, sizeof *nn);
+- if (nn == 0)
++ struct netnode *nn;
++ *np = netfs_make_node_alloc (sizeof *nn);
++ if (*np == 0)
+ {
+ mach_port_deallocate (mach_task_self (), file);
+ if (idport != MACH_PORT_NULL)
+@@ -84,6 +85,7 @@ new_node (file_t file, mach_port_t idport, int locked, int openmodes,
+ pthread_mutex_unlock (&idport_ihash_lock);
+ return ENOMEM;
+ }
++ nn = netfs_node_netnode (*np);
+ nn->file = file;
+ nn->openmodes = openmodes;
+ if (idport != MACH_PORT_NULL)
+@@ -97,35 +99,26 @@ new_node (file_t file, mach_port_t idport, int locked, int openmodes,
+ if (err)
+ {
+ mach_port_deallocate (mach_task_self (), file);
+- free (nn);
++ free (*np);
+ return err;
+ }
+ }
+
+ if (!locked)
+ pthread_mutex_lock (&idport_ihash_lock);
+- err = hurd_ihash_add (&idport_ihash, nn->idport, nn);
++ err = hurd_ihash_add (&idport_ihash, nn->idport, *np);
+ if (err)
+ goto lose;
+
+- *np = nn->np = netfs_make_node (nn);
+- if (*np == 0)
+- {
+- err = ENOMEM;
+- goto lose_hash;
+- }
+-
+ pthread_mutex_lock (&(*np)->lock);
+ pthread_mutex_unlock (&idport_ihash_lock);
+ return 0;
+
+- lose_hash:
+- hurd_ihash_locp_remove (&idport_ihash, nn->idport_locp);
+ lose:
+ pthread_mutex_unlock (&idport_ihash_lock);
+ mach_port_deallocate (mach_task_self (), nn->idport);
+ mach_port_deallocate (mach_task_self (), file);
+- free (nn);
++ free (*np);
+ return err;
+ }
+
+@@ -161,8 +154,6 @@ set_faked_attribute (struct node *np, unsigned int faked)
+ void
+ netfs_node_norefs (struct node *np)
+ {
+- assert (np->nn->np == np);
+-
+ pthread_mutex_unlock (&np->lock);
+ pthread_spin_unlock (&netfs_node_refcnt_lock);
+
+@@ -172,7 +163,6 @@ netfs_node_norefs (struct node *np)
+
+ mach_port_deallocate (mach_task_self (), np->nn->file);
+ mach_port_deallocate (mach_task_self (), np->nn->idport);
+- free (np->nn);
+ free (np);
+
+ pthread_spin_lock (&netfs_node_refcnt_lock);
+@@ -358,13 +348,12 @@ netfs_S_dir_lookup (struct protid *diruser,
+ refcount lock so that, if a node is found, its reference counter cannot
+ drop to 0 before we get our own reference. */
+ pthread_spin_lock (&netfs_node_refcnt_lock);
+- struct netnode *nn = hurd_ihash_find (&idport_ihash, idport);
+- if (nn != NULL)
++ np = hurd_ihash_find (&idport_ihash, idport);
++ if (np != NULL)
+ {
+- assert (nn->np->nn == nn);
+ /* We already know about this node. */
+
+- if (nn->np->references == 0)
++ if (np->references == 0)
+ {
+ /* But it might be in the process of being released. If so,
+ unlock the hash table to give the node a chance to actually
+@@ -376,7 +365,6 @@ netfs_S_dir_lookup (struct protid *diruser,
+ }
+
+ /* Otherwise, reference it right away. */
+- np = nn->np;
+ np->references++;
+ pthread_spin_unlock (&netfs_node_refcnt_lock);
+
+--
+2.0.0.rc2
+
diff --git a/debian/patches/0004-trans-fakeroot-use-netfs_node_netnode-instead-of-np-.patch b/debian/patches/0004-trans-fakeroot-use-netfs_node_netnode-instead-of-np-.patch
new file mode 100644
index 00000000..feec7f16
--- /dev/null
+++ b/debian/patches/0004-trans-fakeroot-use-netfs_node_netnode-instead-of-np-.patch
@@ -0,0 +1,386 @@
+From f6730b267a90ad73116e50b027e869cbe0b01211 Mon Sep 17 00:00:00 2001
+From: Justus Winter <4winter@informatik.uni-hamburg.de>
+Date: Sun, 18 May 2014 14:06:30 +0200
+Subject: [PATCH 04/14] trans/fakeroot: use netfs_node_netnode instead of
+ np->nn
+
+When using fat nodes, expressions of the form E->nn can be rewritten
+as netfs_node_netnode (E). This is much faster as it only involves a
+offset calculation. For reference, I used the following semantic
+patch to create the patch:
+
+@@
+expression E;
+@@
+
+- E->nn
++ netfs_node_netnode (E)
+
+* trans/fakeroot.c: Use netfs_node_netnode instead of np->nn.
+---
+ trans/fakeroot.c | 99 ++++++++++++++++++++++++++++++--------------------------
+ 1 file changed, 53 insertions(+), 46 deletions(-)
+
+diff --git a/trans/fakeroot.c b/trans/fakeroot.c
+index 59f8a86..32a34ec 100644
+--- a/trans/fakeroot.c
++++ b/trans/fakeroot.c
+@@ -125,7 +125,7 @@ new_node (file_t file, mach_port_t idport, int locked, int openmodes,
+ static void
+ set_default_attributes (struct node *np)
+ {
+- np->nn->faked = FAKE_UID | FAKE_GID | FAKE_DEFAULT;
++ netfs_node_netnode (np)->faked = FAKE_UID | FAKE_GID | FAKE_DEFAULT;
+ np->nn_stat.st_uid = 0;
+ np->nn_stat.st_gid = 0;
+ }
+@@ -133,9 +133,9 @@ set_default_attributes (struct node *np)
+ static void
+ set_faked_attribute (struct node *np, unsigned int faked)
+ {
+- np->nn->faked |= faked;
++ netfs_node_netnode (np)->faked |= faked;
+
+- if (np->nn->faked & FAKE_DEFAULT)
++ if (netfs_node_netnode (np)->faked & FAKE_DEFAULT)
+ {
+ /* Now that the node has non-default faked attributes, they have to be
+ retained for future accesses. Account for the hash table reference.
+@@ -146,7 +146,7 @@ set_faked_attribute (struct node *np, unsigned int faked)
+ easy enough if it's ever needed, although scalability could be
+ improved. */
+ netfs_nref (np);
+- np->nn->faked &= ~FAKE_DEFAULT;
++ netfs_node_netnode (np)->faked &= ~FAKE_DEFAULT;
+ }
+ }
+
+@@ -158,11 +158,11 @@ netfs_node_norefs (struct node *np)
+ pthread_spin_unlock (&netfs_node_refcnt_lock);
+
+ pthread_mutex_lock (&idport_ihash_lock);
+- hurd_ihash_locp_remove (&idport_ihash, np->nn->idport_locp);
++ hurd_ihash_locp_remove (&idport_ihash, netfs_node_netnode (np)->idport_locp);
+ pthread_mutex_unlock (&idport_ihash_lock);
+
+- mach_port_deallocate (mach_task_self (), np->nn->file);
+- mach_port_deallocate (mach_task_self (), np->nn->idport);
++ mach_port_deallocate (mach_task_self (), netfs_node_netnode (np)->file);
++ mach_port_deallocate (mach_task_self (), netfs_node_netnode (np)->idport);
+ free (np);
+
+ pthread_spin_lock (&netfs_node_refcnt_lock);
+@@ -245,7 +245,8 @@ error_t
+ netfs_check_open_permissions (struct iouser *user, struct node *np,
+ int flags, int newnode)
+ {
+- return check_openmodes (np->nn, flags & (O_RDWR|O_EXEC), MACH_PORT_NULL);
++ return check_openmodes (netfs_node_netnode (np),
++ flags & (O_RDWR|O_EXEC), MACH_PORT_NULL);
+ }
+
+ error_t
+@@ -271,12 +272,12 @@ netfs_S_dir_lookup (struct protid *diruser,
+
+ dnp = diruser->po->np;
+
+- mach_port_t dir = dnp->nn->file;
++ mach_port_t dir = netfs_node_netnode (dnp)->file;
+ redo_lookup:
+ err = dir_lookup (dir, filename,
+ flags & (O_NOLINK|O_RDWR|O_EXEC|O_CREAT|O_EXCL|O_NONBLOCK),
+ mode, do_retry, retry_name, &file);
+- if (dir != dnp->nn->file)
++ if (dir != netfs_node_netnode (dnp)->file)
+ mach_port_deallocate (mach_task_self (), dir);
+ if (err)
+ return err;
+@@ -380,7 +381,8 @@ netfs_S_dir_lookup (struct protid *diruser,
+ pthread_mutex_unlock (&dnp->lock);
+ }
+
+- err = check_openmodes (np->nn, (flags & (O_RDWR|O_EXEC)), file);
++ err = check_openmodes (netfs_node_netnode (np),
++ (flags & (O_RDWR|O_EXEC)), file);
+ pthread_mutex_unlock (&idport_ihash_lock);
+ }
+ else
+@@ -448,17 +450,17 @@ error_t
+ netfs_validate_stat (struct node *np, struct iouser *cred)
+ {
+ struct stat st;
+- error_t err = io_stat (np->nn->file, &st);
++ error_t err = io_stat (netfs_node_netnode (np)->file, &st);
+ if (err)
+ return err;
+
+- if (np->nn->faked & FAKE_UID)
++ if (netfs_node_netnode (np)->faked & FAKE_UID)
+ st.st_uid = np->nn_stat.st_uid;
+- if (np->nn->faked & FAKE_GID)
++ if (netfs_node_netnode (np)->faked & FAKE_GID)
+ st.st_gid = np->nn_stat.st_gid;
+- if (np->nn->faked & FAKE_AUTHOR)
++ if (netfs_node_netnode (np)->faked & FAKE_AUTHOR)
+ st.st_author = np->nn_stat.st_author;
+- if (np->nn->faked & FAKE_MODE)
++ if (netfs_node_netnode (np)->faked & FAKE_MODE)
+ st.st_mode = np->nn_stat.st_mode;
+
+ np->nn_stat = st;
+@@ -528,7 +530,7 @@ netfs_attempt_chmod (struct iouser *cred, struct node *np, mode_t mode)
+
+ /* We don't bother with error checking since the fake mode change should
+ always succeed--worst case a later open will get EACCES. */
+- (void) file_chmod (np->nn->file, mode);
++ (void) file_chmod (netfs_node_netnode (np)->file, mode);
+ set_faked_attribute (np, FAKE_MODE);
+ np->nn_stat.st_mode = mode;
+ return 0;
+@@ -543,7 +545,7 @@ netfs_attempt_mksymlink (struct iouser *cred, struct node *np, char *name)
+ char trans[sizeof _HURD_SYMLINK + namelen];
+ memcpy (trans, _HURD_SYMLINK, sizeof _HURD_SYMLINK);
+ memcpy (&trans[sizeof _HURD_SYMLINK], name, namelen);
+- return file_set_translator (np->nn->file,
++ return file_set_translator (netfs_node_netnode (np)->file,
+ FS_TRANS_EXCL|FS_TRANS_SET,
+ FS_TRANS_EXCL|FS_TRANS_SET, 0,
+ trans, sizeof trans,
+@@ -562,7 +564,7 @@ netfs_attempt_mkdev (struct iouser *cred, struct node *np,
+ return ENOMEM;
+ else
+ {
+- error_t err = file_set_translator (np->nn->file,
++ error_t err = file_set_translator (netfs_node_netnode (np)->file,
+ FS_TRANS_EXCL|FS_TRANS_SET,
+ FS_TRANS_EXCL|FS_TRANS_SET, 0,
+ trans, translen + 1,
+@@ -576,7 +578,7 @@ netfs_attempt_mkdev (struct iouser *cred, struct node *np,
+ error_t
+ netfs_attempt_chflags (struct iouser *cred, struct node *np, int flags)
+ {
+- return file_chflags (np->nn->file, flags);
++ return file_chflags (netfs_node_netnode (np)->file, flags);
+ }
+
+ error_t
+@@ -602,25 +604,25 @@ netfs_attempt_utimes (struct iouser *cred, struct node *np,
+ else
+ m.tv.tv_sec = m.tv.tv_usec = -1;
+
+- return file_utimes (np->nn->file, a.tvt, m.tvt);
++ return file_utimes (netfs_node_netnode (np)->file, a.tvt, m.tvt);
+ }
+
+ error_t
+ netfs_attempt_set_size (struct iouser *cred, struct node *np, off_t size)
+ {
+- return file_set_size (np->nn->file, size);
++ return file_set_size (netfs_node_netnode (np)->file, size);
+ }
+
+ error_t
+ netfs_attempt_statfs (struct iouser *cred, struct node *np, struct statfs *st)
+ {
+- return file_statfs (np->nn->file, st);
++ return file_statfs (netfs_node_netnode (np)->file, st);
+ }
+
+ error_t
+ netfs_attempt_sync (struct iouser *cred, struct node *np, int wait)
+ {
+- return file_sync (np->nn->file, wait, 0);
++ return file_sync (netfs_node_netnode (np)->file, wait, 0);
+ }
+
+ error_t
+@@ -633,7 +635,7 @@ error_t
+ netfs_attempt_mkdir (struct iouser *user, struct node *dir,
+ char *name, mode_t mode)
+ {
+- return dir_mkdir (dir->nn->file, name, mode | S_IRWXU);
++ return dir_mkdir (netfs_node_netnode (dir)->file, name, mode | S_IRWXU);
+ }
+
+
+@@ -645,7 +647,7 @@ netfs_attempt_mkdir (struct iouser *user, struct node *dir,
+ error_t
+ netfs_attempt_unlink (struct iouser *user, struct node *dir, char *name)
+ {
+- return dir_unlink (dir->nn->file, name);
++ return dir_unlink (netfs_node_netnode (dir)->file, name);
+ }
+
+ error_t
+@@ -653,22 +655,22 @@ netfs_attempt_rename (struct iouser *user, struct node *fromdir,
+ char *fromname, struct node *todir,
+ char *toname, int excl)
+ {
+- return dir_rename (fromdir->nn->file, fromname,
+- todir->nn->file, toname, excl);
++ return dir_rename (netfs_node_netnode (fromdir)->file, fromname,
++ netfs_node_netnode (todir)->file, toname, excl);
+ }
+
+ error_t
+ netfs_attempt_rmdir (struct iouser *user,
+ struct node *dir, char *name)
+ {
+- return dir_rmdir (dir->nn->file, name);
++ return dir_rmdir (netfs_node_netnode (dir)->file, name);
+ }
+
+ error_t
+ netfs_attempt_link (struct iouser *user, struct node *dir,
+ struct node *file, char *name, int excl)
+ {
+- return dir_link (dir->nn->file, file->nn->file, name, excl);
++ return dir_link (netfs_node_netnode (dir)->file, netfs_node_netnode (file)->file, name, excl);
+ }
+
+ error_t
+@@ -676,7 +678,7 @@ netfs_attempt_mkfile (struct iouser *user, struct node *dir,
+ mode_t mode, struct node **np)
+ {
+ file_t newfile;
+- error_t err = dir_mkfile (dir->nn->file, O_RDWR|O_EXEC,
++ error_t err = dir_mkfile (netfs_node_netnode (dir)->file, O_RDWR|O_EXEC,
+ real_from_fake_mode (mode), &newfile);
+ pthread_mutex_unlock (&dir->lock);
+ if (err == 0)
+@@ -692,7 +694,8 @@ netfs_attempt_readlink (struct iouser *user, struct node *np, char *buf)
+ char transbuf[sizeof _HURD_SYMLINK + np->nn_stat.st_size + 1];
+ char *trans = transbuf;
+ size_t translen = sizeof transbuf;
+- error_t err = file_get_translator (np->nn->file, &trans, &translen);
++ error_t err = file_get_translator (netfs_node_netnode (np)->file,
++ &trans, &translen);
+ if (err == 0)
+ {
+ if (translen < sizeof _HURD_SYMLINK
+@@ -715,7 +718,8 @@ netfs_attempt_read (struct iouser *cred, struct node *np,
+ off_t offset, size_t *len, void *data)
+ {
+ char *buf = data;
+- error_t err = io_read (np->nn->file, &buf, len, offset, *len);
++ error_t err = io_read (netfs_node_netnode (np)->file,
++ &buf, len, offset, *len);
+ if (err == 0 && buf != data)
+ {
+ memcpy (data, buf, *len);
+@@ -728,7 +732,7 @@ error_t
+ netfs_attempt_write (struct iouser *cred, struct node *np,
+ off_t offset, size_t *len, void *data)
+ {
+- return io_write (np->nn->file, data, *len, offset, len);
++ return io_write (netfs_node_netnode (np)->file, data, *len, offset, len);
+ }
+
+ error_t
+@@ -744,7 +748,7 @@ netfs_get_dirents (struct iouser *cred, struct node *dir,
+ mach_msg_type_number_t *datacnt,
+ vm_size_t bufsize, int *amt)
+ {
+- return dir_readdir (dir->nn->file, data, datacnt,
++ return dir_readdir (netfs_node_netnode (dir)->file, data, datacnt,
+ entry, nentries, bufsize, amt);
+ }
+
+@@ -762,7 +766,7 @@ netfs_file_get_storage_info (struct iouser *cred,
+ mach_msg_type_number_t *data_len)
+ {
+ *ports_type = MACH_MSG_TYPE_MOVE_SEND;
+- return file_get_storage_info (np->nn->file,
++ return file_get_storage_info (netfs_node_netnode (np)->file,
+ ports, num_ports,
+ ints, num_ints,
+ offsets, num_offsets,
+@@ -795,8 +799,9 @@ netfs_S_file_exec (struct protid *user,
+ return EOPNOTSUPP;
+
+ pthread_mutex_lock (&user->po->np->lock);
+- err = check_openmodes (user->po->np->nn, O_EXEC, MACH_PORT_NULL);
+- file = user->po->np->nn->file;
++ err = check_openmodes (netfs_node_netnode (user->po->np),
++ O_EXEC, MACH_PORT_NULL);
++ file = netfs_node_netnode (user->po->np)->file;
+ if (!err)
+ err = mach_port_mod_refs (mach_task_self (),
+ file, MACH_PORT_RIGHT_SEND, 1);
+@@ -806,7 +811,8 @@ netfs_S_file_exec (struct protid *user,
+ {
+ /* We cannot use MACH_MSG_TYPE_MOVE_SEND because we might need to
+ retry an interrupted call that would have consumed the rights. */
+- err = file_exec (user->po->np->nn->file, task, flags, argv, argvlen,
++ err = file_exec (netfs_node_netnode (user->po->np)->file,
++ task, flags, argv, argvlen,
+ envp, envplen, fds, MACH_MSG_TYPE_COPY_SEND, fdslen,
+ portarray, MACH_MSG_TYPE_COPY_SEND, portarraylen,
+ intarray, intarraylen, deallocnames, deallocnameslen,
+@@ -838,7 +844,7 @@ netfs_S_io_map (struct protid *user,
+ *rdobjtype = *wrobjtype = MACH_MSG_TYPE_MOVE_SEND;
+
+ pthread_mutex_lock (&user->po->np->lock);
+- err = io_map (user->po->np->nn->file, rdobj, wrobj);
++ err = io_map (netfs_node_netnode (user->po->np)->file, rdobj, wrobj);
+ pthread_mutex_unlock (&user->po->np->lock);
+ return err;
+ }
+@@ -855,7 +861,7 @@ netfs_S_io_map_cntl (struct protid *user,
+ *objtype = MACH_MSG_TYPE_MOVE_SEND;
+
+ pthread_mutex_lock (&user->po->np->lock);
+- err = io_map_cntl (user->po->np->nn->file, obj);
++ err = io_map_cntl (netfs_node_netnode (user->po->np)->file, obj);
+ pthread_mutex_unlock (&user->po->np->lock);
+ return err;
+ }
+@@ -876,7 +882,8 @@ netfs_S_io_identity (struct protid *user,
+ *idtype = *fsystype = MACH_MSG_TYPE_MOVE_SEND;
+
+ pthread_mutex_lock (&user->po->np->lock);
+- err = io_identity (user->po->np->nn->file, id, fsys, fileno);
++ err = io_identity (netfs_node_netnode (user->po->np)->file,
++ id, fsys, fileno);
+ pthread_mutex_unlock (&user->po->np->lock);
+ return err;
+ }
+@@ -891,7 +898,7 @@ netfs_S_##name (struct protid *user) \
+ return EOPNOTSUPP; \
+ \
+ pthread_mutex_lock (&user->po->np->lock); \
+- err = name (user->po->np->nn->file); \
++ err = name (netfs_node_netnode (user->po->np)->file); \
+ pthread_mutex_unlock (&user->po->np->lock); \
+ return err; \
+ }
+@@ -913,7 +920,7 @@ netfs_S_io_prenotify (struct protid *user,
+ return EOPNOTSUPP;
+
+ pthread_mutex_lock (&user->po->np->lock);
+- err = io_prenotify (user->po->np->nn->file, start, stop);
++ err = io_prenotify (netfs_node_netnode (user->po->np)->file, start, stop);
+ pthread_mutex_unlock (&user->po->np->lock);
+ return err;
+ }
+@@ -928,7 +935,7 @@ netfs_S_io_postnotify (struct protid *user,
+ return EOPNOTSUPP;
+
+ pthread_mutex_lock (&user->po->np->lock);
+- err = io_postnotify (user->po->np->nn->file, start, stop);
++ err = io_postnotify (netfs_node_netnode (user->po->np)->file, start, stop);
+ pthread_mutex_unlock (&user->po->np->lock);
+ return err;
+ }
+@@ -971,7 +978,7 @@ netfs_demuxer (mach_msg_header_t *inp,
+ | MACH_MSGH_BITS (MACH_MSG_TYPE_COPY_SEND,
+ MACH_MSGH_BITS_REMOTE (inp->msgh_bits));
+ inp->msgh_local_port = inp->msgh_remote_port; /* reply port */
+- inp->msgh_remote_port = cred->po->np->nn->file;
++ inp->msgh_remote_port = netfs_node_netnode (cred->po->np)->file;
+ err = mach_msg (inp, MACH_SEND_MSG, inp->msgh_size, 0,
+ MACH_PORT_NULL, MACH_MSG_TIMEOUT_NONE,
+ MACH_PORT_NULL);
+--
+2.0.0.rc2
+
diff --git a/debian/patches/0005-libports-use-a-global-hash-table-for-the-lookups.patch b/debian/patches/0005-libports-use-a-global-hash-table-for-the-lookups.patch
new file mode 100644
index 00000000..e21aab7a
--- /dev/null
+++ b/debian/patches/0005-libports-use-a-global-hash-table-for-the-lookups.patch
@@ -0,0 +1,668 @@
+From 5ad95e3dcbe92c2a333f3a8ff504f2dc3f893ac4 Mon Sep 17 00:00:00 2001
+From: Justus Winter <4winter@informatik.uni-hamburg.de>
+Date: Sat, 3 May 2014 03:53:41 +0200
+Subject: [PATCH 05/14] libports: use a global hash table for the lookups
+
+Previously, libports used a hash table per port bucket. This makes
+looking up a port difficult if one does not know the port bucket, as
+one has to iterate over all buckets and do a hash table lookup each.
+
+Having to iterate over the buckets makes it necessary to keep a list
+of all buckets, which has to be updated and protected by a lock as
+well.
+
+Also, the current code in _ports_bucket_class_iterate iterates over
+the hash table associated with the bucket given. When
+ports_class_iterate calls this common function, it obtains a reference
+to the bucket from one of the ports in the given class. This will not
+work if a class contains ports in different port buckets. This
+limitation is not documented as far as I can see. Again, having to
+maintain this list has its cost and requires serialization.
+
+Use a global hash table for lookups instead. Keep the per-bucket hash
+tables for efficient iteration over buckets. Furthermore, serialize
+access to all hash tables using a separate lock. Remove the linked
+lists of all buckets and all ports in a class.
+
+* libports/bucket-iterate.c (ports_bucket_iterate): Acquire
+_ports_htable_lock. Also, generalize ports_bucket_iterate so that it
+takes a pointer to a hash table as first argument.
+(ports_bucket_iterate): Ajust call to former function accordingly.
+* libports/class-iterate.c (ports_class_iterate): Just call the
+generalized _ports_bucket_class_iterate with the global hash table as
+argument.
+* libports/ports.h (struct port_info): Remove the port class links.
+(struct port_bucket): Remove the hash table, and the all buckets link.
+(_ports_all_buckets): Remove declaration.
+(_ports_htable): New global hash table.
+(_ports_htable_lock): Protected by this lock.
+* libports/claim-right.c: Adjust accordingly.
+* libports/complete-deallocate.c: Likewise.
+* libports/create-bucket.c: Likewise.
+* libports/create-class.c: Likewise.
+* libports/create-internal.c: Likewise.
+* libports/destroy-right.c: Likewise.
+* libports/import-port.c: Likewise.
+* libports/lookup-port.c: Likewise.
+* libports/reallocate-from-external.c: Likewise.
+* libports/reallocate-port.c: Likewise.
+* libports/transfer-right.c: Likewise.
+* libports/inhibit-all-rpcs.c: Iterate over the hash table.
+* libports/inhibit-bucket-rpcs.c: Likewise, but filter using bucket.
+* libports/inhibit-class-rpcs.c: Likewise, but filter using class.
+* libports/init.c (_ports_htable): Initialize.
+(_ports_htable_lock): Likewise.
+---
+ libports/bucket-iterate.c | 22 +++++++++++++++-------
+ libports/claim-right.c | 5 ++++-
+ libports/class-iterate.c | 10 +---------
+ libports/complete-deallocate.c | 7 +++----
+ libports/create-bucket.c | 6 ------
+ libports/create-class.c | 1 -
+ libports/create-internal.c | 19 +++++++++++++------
+ libports/destroy-right.c | 5 +++--
+ libports/import-port.c | 19 +++++++++++++------
+ libports/inhibit-all-rpcs.c | 27 +++++++++++++--------------
+ libports/inhibit-bucket-rpcs.c | 3 ++-
+ libports/inhibit-class-rpcs.c | 27 ++++++++++++++++++---------
+ libports/init.c | 7 ++++++-
+ libports/lookup-port.c | 23 +++++++++--------------
+ libports/ports.h | 22 +++++++++++++++++-----
+ libports/reallocate-from-external.c | 15 +++++++++++----
+ libports/reallocate-port.c | 9 ++++++++-
+ libports/transfer-right.c | 18 ++++++++++++++----
+ 18 files changed, 150 insertions(+), 95 deletions(-)
+
+diff --git a/libports/bucket-iterate.c b/libports/bucket-iterate.c
+index babc204..88f082f 100644
+--- a/libports/bucket-iterate.c
++++ b/libports/bucket-iterate.c
+@@ -25,7 +25,7 @@
+ /* Internal entrypoint for both ports_bucket_iterate and ports_class_iterate.
+ If CLASS is non-null, call FUN only for ports in that class. */
+ error_t
+-_ports_bucket_class_iterate (struct port_bucket *bucket,
++_ports_bucket_class_iterate (struct hurd_ihash *ht,
+ struct port_class *class,
+ error_t (*fun)(void *))
+ {
+@@ -36,23 +36,24 @@ _ports_bucket_class_iterate (struct port_bucket *bucket,
+ error_t err;
+
+ pthread_mutex_lock (&_ports_lock);
++ pthread_rwlock_rdlock (&_ports_htable_lock);
+
+- if (bucket->htable.nr_items == 0)
++ if (ht->nr_items == 0)
+ {
+- pthread_mutex_unlock (&_ports_lock);
++ pthread_rwlock_unlock (&_ports_htable_lock);
+ return 0;
+ }
+
+- nr_items = bucket->htable.nr_items;
++ nr_items = ht->nr_items;
+ p = malloc (nr_items * sizeof *p);
+ if (p == NULL)
+ {
+- pthread_mutex_unlock (&_ports_lock);
++ pthread_rwlock_unlock (&_ports_htable_lock);
+ return ENOMEM;
+ }
+
+ n = 0;
+- HURD_IHASH_ITERATE (&bucket->htable, arg)
++ HURD_IHASH_ITERATE (ht, arg)
+ {
+ struct port_info *const pi = arg;
+
+@@ -63,8 +64,15 @@ _ports_bucket_class_iterate (struct port_bucket *bucket,
+ n++;
+ }
+ }
++ pthread_rwlock_unlock (&_ports_htable_lock);
+ pthread_mutex_unlock (&_ports_lock);
+
++ if (n == 0)
++ {
++ free (p);
++ return 0;
++ }
++
+ if (n != nr_items)
+ {
+ /* We allocated too much. Release unused memory. */
+@@ -89,5 +97,5 @@ error_t
+ ports_bucket_iterate (struct port_bucket *bucket,
+ error_t (*fun)(void *))
+ {
+- return _ports_bucket_class_iterate (bucket, 0, fun);
++ return _ports_bucket_class_iterate (&bucket->htable, NULL, fun);
+ }
+diff --git a/libports/claim-right.c b/libports/claim-right.c
+index 4851ea3..85592ff 100644
+--- a/libports/claim-right.c
++++ b/libports/claim-right.c
+@@ -34,10 +34,13 @@ ports_claim_right (void *portstruct)
+ if (ret == MACH_PORT_NULL)
+ return ret;
+
+- pthread_mutex_lock (&_ports_lock);
++ pthread_rwlock_wrlock (&_ports_htable_lock);
++ hurd_ihash_locp_remove (&_ports_htable, pi->ports_htable_entry);
+ hurd_ihash_locp_remove (&pi->bucket->htable, pi->hentry);
++ pthread_rwlock_unlock (&_ports_htable_lock);
+ err = mach_port_move_member (mach_task_self (), ret, MACH_PORT_NULL);
+ assert_perror (err);
++ pthread_mutex_lock (&_ports_lock);
+ pi->port_right = MACH_PORT_NULL;
+ if (pi->flags & PORT_HAS_SENDRIGHTS)
+ {
+diff --git a/libports/class-iterate.c b/libports/class-iterate.c
+index 1f8878a..df33818 100644
+--- a/libports/class-iterate.c
++++ b/libports/class-iterate.c
+@@ -23,13 +23,5 @@ error_t
+ ports_class_iterate (struct port_class *class,
+ error_t (*fun)(void *))
+ {
+- pthread_mutex_lock (&_ports_lock);
+- if (class->ports != 0)
+- {
+- struct port_bucket *bucket = class->ports->bucket;
+- pthread_mutex_unlock (&_ports_lock);
+- return _ports_bucket_class_iterate (bucket, class, fun);
+- }
+- pthread_mutex_unlock (&_ports_lock);
+- return 0;
++ return _ports_bucket_class_iterate (&_ports_htable, class, fun);
+ }
+diff --git a/libports/complete-deallocate.c b/libports/complete-deallocate.c
+index 8ce095b..4768dab 100644
+--- a/libports/complete-deallocate.c
++++ b/libports/complete-deallocate.c
+@@ -29,16 +29,15 @@ _ports_complete_deallocate (struct port_info *pi)
+
+ if (pi->port_right)
+ {
++ pthread_rwlock_wrlock (&_ports_htable_lock);
++ hurd_ihash_locp_remove (&_ports_htable, pi->ports_htable_entry);
+ hurd_ihash_locp_remove (&pi->bucket->htable, pi->hentry);
++ pthread_rwlock_unlock (&_ports_htable_lock);
+ mach_port_mod_refs (mach_task_self (), pi->port_right,
+ MACH_PORT_RIGHT_RECEIVE, -1);
+ pi->port_right = MACH_PORT_NULL;
+ }
+
+- *pi->prevp = pi->next;
+- if (pi->next)
+- pi->next->prevp = pi->prevp;
+-
+ pi->bucket->count--;
+ pi->class->count--;
+
+diff --git a/libports/create-bucket.c b/libports/create-bucket.c
+index 52d50c3..2c5f1b6 100644
+--- a/libports/create-bucket.c
++++ b/libports/create-bucket.c
+@@ -48,11 +48,5 @@ ports_create_bucket ()
+
+ hurd_ihash_init (&ret->htable, offsetof (struct port_info, hentry));
+ ret->rpcs = ret->flags = ret->count = 0;
+-
+- pthread_mutex_lock (&_ports_lock);
+- ret->next = _ports_all_buckets;
+- _ports_all_buckets = ret;
+- pthread_mutex_unlock (&_ports_lock);
+-
+ return ret;
+ }
+diff --git a/libports/create-class.c b/libports/create-class.c
+index 12c8add..782f52b 100644
+--- a/libports/create-class.c
++++ b/libports/create-class.c
+@@ -39,7 +39,6 @@ ports_create_class (void (*clean_routine)(void *),
+ cl->dropweak_routine = dropweak_routine;
+ cl->flags = 0;
+ cl->rpcs = 0;
+- cl->ports = NULL;
+ cl->count = 0;
+ cl->uninhibitable_rpcs = ports_default_uninhibitable_rpcs;
+
+diff --git a/libports/create-internal.c b/libports/create-internal.c
+index 8551297..8543986 100644
+--- a/libports/create-internal.c
++++ b/libports/create-internal.c
+@@ -81,15 +81,22 @@ _ports_create_port_internal (struct port_class *class,
+ goto loop;
+ }
+
++ pthread_rwlock_wrlock (&_ports_htable_lock);
++ err = hurd_ihash_add (&_ports_htable, port, pi);
++ if (err)
++ {
++ pthread_rwlock_unlock (&_ports_htable_lock);
++ goto lose;
++ }
+ err = hurd_ihash_add (&bucket->htable, port, pi);
+ if (err)
+- goto lose;
++ {
++ hurd_ihash_locp_remove (&_ports_htable, pi->ports_htable_entry);
++ pthread_rwlock_unlock (&_ports_htable_lock);
++ goto lose;
++ }
++ pthread_rwlock_unlock (&_ports_htable_lock);
+
+- pi->next = class->ports;
+- pi->prevp = &class->ports;
+- if (class->ports)
+- class->ports->prevp = &pi->next;
+- class->ports = pi;
+ bucket->count++;
+ class->count++;
+ pthread_mutex_unlock (&_ports_lock);
+diff --git a/libports/destroy-right.c b/libports/destroy-right.c
+index 65e19c7..448b379 100644
+--- a/libports/destroy-right.c
++++ b/libports/destroy-right.c
+@@ -30,12 +30,13 @@ ports_destroy_right (void *portstruct)
+
+ if (pi->port_right != MACH_PORT_NULL)
+ {
+- pthread_mutex_lock (&_ports_lock);
++ pthread_rwlock_wrlock (&_ports_htable_lock);
++ hurd_ihash_locp_remove (&_ports_htable, pi->ports_htable_entry);
+ hurd_ihash_locp_remove (&pi->bucket->htable, pi->hentry);
++ pthread_rwlock_unlock (&_ports_htable_lock);
+ err = mach_port_mod_refs (mach_task_self (), pi->port_right,
+ MACH_PORT_RIGHT_RECEIVE, -1);
+ assert_perror (err);
+- pthread_mutex_unlock (&_ports_lock);
+
+ pi->port_right = MACH_PORT_NULL;
+
+diff --git a/libports/import-port.c b/libports/import-port.c
+index 226f47e..2660672 100644
+--- a/libports/import-port.c
++++ b/libports/import-port.c
+@@ -75,15 +75,22 @@ ports_import_port (struct port_class *class, struct port_bucket *bucket,
+ goto loop;
+ }
+
++ pthread_rwlock_wrlock (&_ports_htable_lock);
++ err = hurd_ihash_add (&_ports_htable, port, pi);
++ if (err)
++ {
++ pthread_rwlock_unlock (&_ports_htable_lock);
++ goto lose;
++ }
+ err = hurd_ihash_add (&bucket->htable, port, pi);
+ if (err)
+- goto lose;
++ {
++ hurd_ihash_locp_remove (&_ports_htable, pi->ports_htable_entry);
++ pthread_rwlock_unlock (&_ports_htable_lock);
++ goto lose;
++ }
++ pthread_rwlock_unlock (&_ports_htable_lock);
+
+- pi->next = class->ports;
+- pi->prevp = &class->ports;
+- if (class->ports)
+- class->ports->prevp = &pi->next;
+- class->ports = pi;
+ bucket->count++;
+ class->count++;
+ pthread_mutex_unlock (&_ports_lock);
+diff --git a/libports/inhibit-all-rpcs.c b/libports/inhibit-all-rpcs.c
+index d4a54ba..27e2ec5 100644
+--- a/libports/inhibit-all-rpcs.c
++++ b/libports/inhibit-all-rpcs.c
+@@ -36,24 +36,23 @@ ports_inhibit_all_rpcs ()
+ struct port_bucket *bucket;
+ int this_one = 0;
+
+- for (bucket = _ports_all_buckets; bucket; bucket = bucket->next)
++ pthread_rwlock_rdlock (&_ports_htable_lock);
++ HURD_IHASH_ITERATE (&_ports_htable, portstruct)
+ {
+- HURD_IHASH_ITERATE (&bucket->htable, portstruct)
++ struct rpc_info *rpc;
++ struct port_info *pi = portstruct;
++
++ for (rpc = pi->current_rpcs; rpc; rpc = rpc->next)
+ {
+- struct rpc_info *rpc;
+- struct port_info *pi = portstruct;
+-
+- for (rpc = pi->current_rpcs; rpc; rpc = rpc->next)
+- {
+- /* Avoid cancelling the calling thread if it's currently
+- handling a RPC. */
+- if (rpc->thread == hurd_thread_self ())
+- this_one = 1;
+- else
+- hurd_thread_cancel (rpc->thread);
+- }
++ /* Avoid cancelling the calling thread if it's currently
++ handling a RPC. */
++ if (rpc->thread == hurd_thread_self ())
++ this_one = 1;
++ else
++ hurd_thread_cancel (rpc->thread);
+ }
+ }
++ pthread_rwlock_unlock (&_ports_htable_lock);
+
+ while (_ports_total_rpcs > this_one)
+ {
+diff --git a/libports/inhibit-bucket-rpcs.c b/libports/inhibit-bucket-rpcs.c
+index 965aa03..82efdf5 100644
+--- a/libports/inhibit-bucket-rpcs.c
++++ b/libports/inhibit-bucket-rpcs.c
+@@ -35,6 +35,7 @@ ports_inhibit_bucket_rpcs (struct port_bucket *bucket)
+ {
+ int this_one = 0;
+
++ pthread_rwlock_rdlock (&_ports_htable_lock);
+ HURD_IHASH_ITERATE (&bucket->htable, portstruct)
+ {
+ struct rpc_info *rpc;
+@@ -49,7 +50,7 @@ ports_inhibit_bucket_rpcs (struct port_bucket *bucket)
+ hurd_thread_cancel (rpc->thread);
+ }
+ }
+-
++ pthread_rwlock_unlock (&_ports_htable_lock);
+
+ while (bucket->rpcs > this_one)
+ {
+diff --git a/libports/inhibit-class-rpcs.c b/libports/inhibit-class-rpcs.c
+index 7ee8653..9a87a5f 100644
+--- a/libports/inhibit-class-rpcs.c
++++ b/libports/inhibit-class-rpcs.c
+@@ -36,15 +36,24 @@ ports_inhibit_class_rpcs (struct port_class *class)
+ struct rpc_info *rpc;
+ int this_one = 0;
+
+- for (pi = class->ports; pi; pi = pi->next)
+- for (rpc = pi->current_rpcs; rpc; rpc = rpc->next)
+- {
+- /* Avoid cancelling the calling thread. */
+- if (rpc->thread == hurd_thread_self ())
+- this_one = 1;
+- else
+- hurd_thread_cancel (rpc->thread);
+- }
++ pthread_rwlock_rdlock (&_ports_htable_lock);
++ HURD_IHASH_ITERATE (&_ports_htable, portstruct)
++ {
++ struct rpc_info *rpc;
++ struct port_info *pi = portstruct;
++ if (pi->class != class)
++ continue;
++
++ for (rpc = pi->current_rpcs; rpc; rpc = rpc->next)
++ {
++ /* Avoid cancelling the calling thread. */
++ if (rpc->thread == hurd_thread_self ())
++ this_one = 1;
++ else
++ hurd_thread_cancel (rpc->thread);
++ }
++ }
++ pthread_rwlock_unlock (&_ports_htable_lock);
+
+ while (class->rpcs > this_one)
+ {
+diff --git a/libports/init.c b/libports/init.c
+index 3ef5388..4a68cb8 100644
+--- a/libports/init.c
++++ b/libports/init.c
+@@ -19,9 +19,14 @@
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+ #include "ports.h"
++#include <stddef.h>
+
+ pthread_mutex_t _ports_lock = PTHREAD_MUTEX_INITIALIZER;
+ pthread_cond_t _ports_block = PTHREAD_COND_INITIALIZER;
+-struct port_bucket *_ports_all_buckets;
++
++struct hurd_ihash _ports_htable =
++ HURD_IHASH_INITIALIZER (offsetof (struct port_info, ports_htable_entry));
++pthread_rwlock_t _ports_htable_lock = PTHREAD_RWLOCK_INITIALIZER;
++
+ int _ports_total_rpcs;
+ int _ports_flags;
+diff --git a/libports/lookup-port.c b/libports/lookup-port.c
+index f79f6f0..858ee11 100644
+--- a/libports/lookup-port.c
++++ b/libports/lookup-port.c
+@@ -26,27 +26,22 @@ ports_lookup_port (struct port_bucket *bucket,
+ mach_port_t port,
+ struct port_class *class)
+ {
+- struct port_info *pi = 0;
+-
++ struct port_info *pi;
++
+ pthread_mutex_lock (&_ports_lock);
++ pthread_rwlock_rdlock (&_ports_htable_lock);
+
+- if (bucket)
+- pi = hurd_ihash_find (&bucket->htable, port);
+- else
+- for (bucket = _ports_all_buckets; bucket; bucket = bucket->next)
+- {
+- pi = hurd_ihash_find (&bucket->htable, port);
+- if (pi)
+- break;
+- }
+-
+- if (pi && class && pi->class != class)
++ pi = hurd_ihash_find (&_ports_htable, port);
++ if (pi
++ && ((class && pi->class != class)
++ || (bucket && pi->bucket != bucket)))
+ pi = 0;
+
+ if (pi)
+ pi->refcnt++;
+
++ pthread_rwlock_unlock (&_ports_htable_lock);
+ pthread_mutex_unlock (&_ports_lock);
+-
++
+ return pi;
+ }
+diff --git a/libports/ports.h b/libports/ports.h
+index 7f13124..6922162 100644
+--- a/libports/ports.h
++++ b/libports/ports.h
+@@ -48,7 +48,7 @@ struct port_info
+ struct rpc_info *current_rpcs;
+ struct port_bucket *bucket;
+ hurd_ihash_locp_t hentry;
+- struct port_info *next, **prevp; /* links on port_class list */
++ hurd_ihash_locp_t ports_htable_entry;
+ };
+ typedef struct port_info *port_info_t;
+
+@@ -61,11 +61,12 @@ typedef struct port_info *port_info_t;
+ struct port_bucket
+ {
+ mach_port_t portset;
++ /* Per-bucket hash table used for fast iteration. Access must be
++ serialized using _ports_htable_lock. */
+ struct hurd_ihash htable;
+ int rpcs;
+ int flags;
+ int count;
+- struct port_bucket *next;
+ };
+ /* FLAGS above are the following: */
+ #define PORT_BUCKET_INHIBITED PORTS_INHIBITED
+@@ -78,7 +79,6 @@ struct port_class
+ {
+ int flags;
+ int rpcs;
+- struct port_info *ports;
+ int count;
+ void (*clean_routine) (void *);
+ void (*dropweak_routine) (void *);
+@@ -277,7 +277,7 @@ error_t ports_class_iterate (struct port_class *class,
+ error_t (*fun)(void *port));
+
+ /* Internal entrypoint for above two. */
+-error_t _ports_bucket_class_iterate (struct port_bucket *bucket,
++error_t _ports_bucket_class_iterate (struct hurd_ihash *ht,
+ struct port_class *class,
+ error_t (*fun)(void *port));
+
+@@ -402,7 +402,19 @@ extern kern_return_t
+ /* Private data */
+ extern pthread_mutex_t _ports_lock;
+ extern pthread_cond_t _ports_block;
+-extern struct port_bucket *_ports_all_buckets;
++
++/* A global hash table mapping port names to port_info objects. This
++ table is used for port lookups and to iterate over classes.
++
++ A port in this hash table carries an implicit light reference.
++ When the reference counts reach zero, we call
++ _ports_complete_deallocate. There we reacquire our lock
++ momentarily to check whether someone else reacquired a reference
++ through the hash table. */
++extern struct hurd_ihash _ports_htable;
++/* Access to the hash table is protected by this lock. */
++extern pthread_rwlock_t _ports_htable_lock;
++
+ extern int _ports_total_rpcs;
+ extern int _ports_flags;
+ #define _PORTS_INHIBITED PORTS_INHIBITED
+diff --git a/libports/reallocate-from-external.c b/libports/reallocate-from-external.c
+index 8cccb2a..9944b39 100644
+--- a/libports/reallocate-from-external.c
++++ b/libports/reallocate-from-external.c
+@@ -43,8 +43,11 @@ ports_reallocate_from_external (void *portstruct, mach_port_t receive)
+ MACH_PORT_RIGHT_RECEIVE, -1);
+ assert_perror (err);
+
++ pthread_rwlock_wrlock (&_ports_htable_lock);
++ hurd_ihash_locp_remove (&_ports_htable, pi->ports_htable_entry);
+ hurd_ihash_locp_remove (&pi->bucket->htable, pi->hentry);
+-
++ pthread_rwlock_unlock (&_ports_htable_lock);
++
+ if ((pi->flags & PORT_HAS_SENDRIGHTS) && !stat.mps_srights)
+ {
+ dropref = 1;
+@@ -59,11 +62,15 @@ ports_reallocate_from_external (void *portstruct, mach_port_t receive)
+ pi->port_right = receive;
+ pi->cancel_threshold = 0;
+ pi->mscount = stat.mps_mscount;
+-
+- err = hurd_ihash_add (&pi->bucket->htable, receive, pi);
++
++ pthread_rwlock_wrlock (&_ports_htable_lock);
++ err = hurd_ihash_add (&_ports_htable, receive, pi);
+ assert_perror (err);
++ err = hurd_ihash_add (&pi->bucket->htable, receive, pi);
++ pthread_rwlock_unlock (&_ports_htable_lock);
+ pthread_mutex_unlock (&_ports_lock);
+-
++ assert_perror (err);
++
+ mach_port_move_member (mach_task_self (), receive, pi->bucket->portset);
+
+ if (stat.mps_srights)
+diff --git a/libports/reallocate-port.c b/libports/reallocate-port.c
+index d2adaeb..cc534eb 100644
+--- a/libports/reallocate-port.c
++++ b/libports/reallocate-port.c
+@@ -36,7 +36,10 @@ ports_reallocate_port (void *portstruct)
+ MACH_PORT_RIGHT_RECEIVE, -1);
+ assert_perror (err);
+
++ pthread_rwlock_wrlock (&_ports_htable_lock);
++ hurd_ihash_locp_remove (&_ports_htable, pi->ports_htable_entry);
+ hurd_ihash_locp_remove (&pi->bucket->htable, pi->hentry);
++ pthread_rwlock_unlock (&_ports_htable_lock);
+
+ err = mach_port_allocate (mach_task_self (), MACH_PORT_RIGHT_RECEIVE,
+ &pi->port_right);
+@@ -48,9 +51,13 @@ ports_reallocate_port (void *portstruct)
+ }
+ pi->cancel_threshold = 0;
+ pi->mscount = 0;
+- err = hurd_ihash_add (&pi->bucket->htable, pi->port_right, pi);
++ pthread_rwlock_wrlock (&_ports_htable_lock);
++ err = hurd_ihash_add (&_ports_htable, pi->port_right, pi);
+ assert_perror (err);
++ err = hurd_ihash_add (&pi->bucket->htable, pi->port_right, pi);
++ pthread_rwlock_unlock (&_ports_htable_lock);
+ pthread_mutex_unlock (&_ports_lock);
++ assert_perror (err);
+
+ err = mach_port_move_member (mach_task_self (), pi->port_right,
+ pi->bucket->portset);
+diff --git a/libports/transfer-right.c b/libports/transfer-right.c
+index 72488a9..3f48290 100644
+--- a/libports/transfer-right.c
++++ b/libports/transfer-right.c
+@@ -41,7 +41,10 @@ ports_transfer_right (void *tostruct,
+ port = frompi->port_right;
+ if (port != MACH_PORT_NULL)
+ {
++ pthread_rwlock_wrlock (&_ports_htable_lock);
++ hurd_ihash_locp_remove (&_ports_htable, frompi->ports_htable_entry);
+ hurd_ihash_locp_remove (&frompi->bucket->htable, frompi->hentry);
++ pthread_rwlock_unlock (&_ports_htable_lock);
+ frompi->port_right = MACH_PORT_NULL;
+ if (frompi->flags & PORT_HAS_SENDRIGHTS)
+ {
+@@ -54,7 +57,10 @@ ports_transfer_right (void *tostruct,
+ /* Destroy the existing right in TOPI. */
+ if (topi->port_right != MACH_PORT_NULL)
+ {
++ pthread_rwlock_wrlock (&_ports_htable_lock);
++ hurd_ihash_locp_remove (&_ports_htable, topi->ports_htable_entry);
+ hurd_ihash_locp_remove (&topi->bucket->htable, topi->hentry);
++ pthread_rwlock_unlock (&_ports_htable_lock);
+ err = mach_port_mod_refs (mach_task_self (), topi->port_right,
+ MACH_PORT_RIGHT_RECEIVE, -1);
+ assert_perror (err);
+@@ -74,10 +80,16 @@ ports_transfer_right (void *tostruct,
+ topi->port_right = port;
+ topi->cancel_threshold = frompi->cancel_threshold;
+ topi->mscount = frompi->mscount;
+-
++
++ pthread_mutex_unlock (&_ports_lock);
++
+ if (port)
+ {
++ pthread_rwlock_wrlock (&_ports_htable_lock);
++ err = hurd_ihash_add (&_ports_htable, port, topi);
++ assert_perror (err);
+ err = hurd_ihash_add (&topi->bucket->htable, port, topi);
++ pthread_rwlock_unlock (&_ports_htable_lock);
+ assert_perror (err);
+ if (topi->bucket != frompi->bucket)
+ {
+@@ -86,9 +98,7 @@ ports_transfer_right (void *tostruct,
+ assert_perror (err);
+ }
+ }
+-
+- pthread_mutex_unlock (&_ports_lock);
+-
++
+ /* Take care of any lowered reference counts. */
+ if (dereffrompi)
+ ports_port_deref (frompi);
+--
+2.0.0.rc2
+
diff --git a/debian/patches/0006-libports-lock-less-reference-counting-for-port_info-.patch b/debian/patches/0006-libports-lock-less-reference-counting-for-port_info-.patch
new file mode 100644
index 00000000..a637ae1d
--- /dev/null
+++ b/debian/patches/0006-libports-lock-less-reference-counting-for-port_info-.patch
@@ -0,0 +1,344 @@
+From f026f03c27951532a64439650aa98196e0a5457a Mon Sep 17 00:00:00 2001
+From: Justus Winter <4winter@informatik.uni-hamburg.de>
+Date: Sat, 3 May 2014 01:02:35 +0200
+Subject: [PATCH 06/14] libports: lock-less reference counting for port_info
+ objects
+
+* libports/ports.h (struct port_info): Use the new type.
+* libports/lookup-port.c: No need to lock _ports_lock anymore.
+* libports/bucket-iterate.c: Likewise.
+* libports/complete-deallocate.c: Check if someone reacquired a
+reference through a hash table lookup.
+* libports/create-internal.c: Use the new reference counting primitives.
+* libports/get-right.c: Likewise.
+* libports/import-port.c: Likewise.
+* libports/port-deref-weak.c: Likewise.
+* libports/port-deref.c: Likewise.
+* libports/port-ref-weak.c: Likewise.
+* libports/port-ref.c: Likewise.
+* libports/reallocate-from-external.c: Likewise.
+* libports/transfer-right.c: Likewise.
+* utils/rpctrace.c: Likewise.
+---
+ libports/bucket-iterate.c | 4 +---
+ libports/complete-deallocate.c | 14 ++++++++++++++
+ libports/create-internal.c | 3 +--
+ libports/get-right.c | 2 +-
+ libports/import-port.c | 3 +--
+ libports/lookup-port.c | 4 +---
+ libports/port-deref-weak.c | 10 +++-------
+ libports/port-deref.c | 34 ++++++++++++++++------------------
+ libports/port-ref-weak.c | 8 +++-----
+ libports/port-ref.c | 8 +++-----
+ libports/ports.h | 4 ++--
+ libports/reallocate-from-external.c | 2 +-
+ libports/transfer-right.c | 2 +-
+ utils/rpctrace.c | 10 ++++++++--
+ 14 files changed, 56 insertions(+), 52 deletions(-)
+
+diff --git a/libports/bucket-iterate.c b/libports/bucket-iterate.c
+index 88f082f..bf6857a 100644
+--- a/libports/bucket-iterate.c
++++ b/libports/bucket-iterate.c
+@@ -35,7 +35,6 @@ _ports_bucket_class_iterate (struct hurd_ihash *ht,
+ size_t i, n, nr_items;
+ error_t err;
+
+- pthread_mutex_lock (&_ports_lock);
+ pthread_rwlock_rdlock (&_ports_htable_lock);
+
+ if (ht->nr_items == 0)
+@@ -59,13 +58,12 @@ _ports_bucket_class_iterate (struct hurd_ihash *ht,
+
+ if (class == 0 || pi->class == class)
+ {
+- pi->refcnt++;
++ refcounts_ref (&pi->refcounts, NULL);
+ p[n] = pi;
+ n++;
+ }
+ }
+ pthread_rwlock_unlock (&_ports_htable_lock);
+- pthread_mutex_unlock (&_ports_lock);
+
+ if (n == 0)
+ {
+diff --git a/libports/complete-deallocate.c b/libports/complete-deallocate.c
+index 4768dab..0d852f5 100644
+--- a/libports/complete-deallocate.c
++++ b/libports/complete-deallocate.c
+@@ -29,15 +29,29 @@ _ports_complete_deallocate (struct port_info *pi)
+
+ if (pi->port_right)
+ {
++ struct references result;
++
+ pthread_rwlock_wrlock (&_ports_htable_lock);
++ refcounts_references (&pi->refcounts, &result);
++ if (result.hard > 0 || result.weak > 0)
++ {
++ /* A reference was reacquired through a hash table lookup.
++ It's fine, we didn't touch anything yet. */
++ pthread_mutex_unlock (&_ports_htable_lock);
++ return;
++ }
++
+ hurd_ihash_locp_remove (&_ports_htable, pi->ports_htable_entry);
+ hurd_ihash_locp_remove (&pi->bucket->htable, pi->hentry);
+ pthread_rwlock_unlock (&_ports_htable_lock);
++
+ mach_port_mod_refs (mach_task_self (), pi->port_right,
+ MACH_PORT_RIGHT_RECEIVE, -1);
+ pi->port_right = MACH_PORT_NULL;
+ }
+
++ pthread_mutex_lock (&_ports_lock);
++
+ pi->bucket->count--;
+ pi->class->count--;
+
+diff --git a/libports/create-internal.c b/libports/create-internal.c
+index 8543986..2d85931 100644
+--- a/libports/create-internal.c
++++ b/libports/create-internal.c
+@@ -54,8 +54,7 @@ _ports_create_port_internal (struct port_class *class,
+ }
+
+ pi->class = class;
+- pi->refcnt = 1;
+- pi->weakrefcnt = 0;
++ refcounts_init (&pi->refcounts, 1, 0);
+ pi->cancel_threshold = 0;
+ pi->mscount = 0;
+ pi->flags = 0;
+diff --git a/libports/get-right.c b/libports/get-right.c
+index 89050c6..8681f46 100644
+--- a/libports/get-right.c
++++ b/libports/get-right.c
+@@ -41,7 +41,7 @@ ports_get_right (void *port)
+ if ((pi->flags & PORT_HAS_SENDRIGHTS) == 0)
+ {
+ pi->flags |= PORT_HAS_SENDRIGHTS;
+- pi->refcnt++;
++ refcounts_ref (&pi->refcounts, NULL);
+ err = mach_port_request_notification (mach_task_self (),
+ pi->port_right,
+ MACH_NOTIFY_NO_SENDERS,
+diff --git a/libports/import-port.c b/libports/import-port.c
+index 2660672..c337c85 100644
+--- a/libports/import-port.c
++++ b/libports/import-port.c
+@@ -48,8 +48,7 @@ ports_import_port (struct port_class *class, struct port_bucket *bucket,
+ return ENOMEM;
+
+ pi->class = class;
+- pi->refcnt = 1 + !!stat.mps_srights;
+- pi->weakrefcnt = 0;
++ refcounts_init (&pi->refcounts, 1 + !!stat.mps_srights, 0);
+ pi->cancel_threshold = 0;
+ pi->mscount = stat.mps_mscount;
+ pi->flags = stat.mps_srights ? PORT_HAS_SENDRIGHTS : 0;
+diff --git a/libports/lookup-port.c b/libports/lookup-port.c
+index 858ee11..cff0546 100644
+--- a/libports/lookup-port.c
++++ b/libports/lookup-port.c
+@@ -28,7 +28,6 @@ ports_lookup_port (struct port_bucket *bucket,
+ {
+ struct port_info *pi;
+
+- pthread_mutex_lock (&_ports_lock);
+ pthread_rwlock_rdlock (&_ports_htable_lock);
+
+ pi = hurd_ihash_find (&_ports_htable, port);
+@@ -38,10 +37,9 @@ ports_lookup_port (struct port_bucket *bucket,
+ pi = 0;
+
+ if (pi)
+- pi->refcnt++;
++ ports_port_ref (pi);
+
+ pthread_rwlock_unlock (&_ports_htable_lock);
+- pthread_mutex_unlock (&_ports_lock);
+
+ return pi;
+ }
+diff --git a/libports/port-deref-weak.c b/libports/port-deref-weak.c
+index beb4842..8432660 100644
+--- a/libports/port-deref-weak.c
++++ b/libports/port-deref-weak.c
+@@ -25,12 +25,8 @@ void
+ ports_port_deref_weak (void *portstruct)
+ {
+ struct port_info *pi = portstruct;
+-
+- pthread_mutex_lock (&_ports_lock);
+- assert (pi->weakrefcnt);
+- pi->weakrefcnt--;
+- if (pi->refcnt == 0 && pi->weakrefcnt == 0)
++ struct references result;
++ refcounts_deref_weak (&pi->refcounts, &result);
++ if (result.hard == 0 && result.weak == 0)
+ _ports_complete_deallocate (pi);
+- else
+- pthread_mutex_unlock (&_ports_lock);
+ }
+diff --git a/libports/port-deref.c b/libports/port-deref.c
+index cf9b238..b97dd13 100644
+--- a/libports/port-deref.c
++++ b/libports/port-deref.c
+@@ -25,26 +25,24 @@ void
+ ports_port_deref (void *portstruct)
+ {
+ struct port_info *pi = portstruct;
+- int trieddroppingweakrefs = 0;
+-
+- retry:
+-
+- pthread_mutex_lock (&_ports_lock);
+-
+- if (pi->refcnt == 1 && pi->weakrefcnt
+- && pi->class->dropweak_routine && !trieddroppingweakrefs)
++ struct references result;
++
++ if (pi->class->dropweak_routine)
+ {
+- pthread_mutex_unlock (&_ports_lock);
+- (*pi->class->dropweak_routine) (pi);
+- trieddroppingweakrefs = 1;
+- goto retry;
++ /* If we need to call the dropweak routine, we need to hold one
++ reference while doing so. We use a weak reference for this
++ purpose, which we acquire by demoting our hard reference to a
++ weak one. */
++ refcounts_demote (&pi->refcounts, &result);
++
++ if (result.hard == 0 && result.weak > 1)
++ (*pi->class->dropweak_routine) (pi);
++
++ refcounts_deref_weak (&pi->refcounts, &result);
+ }
+-
+- assert (pi->refcnt);
++ else
++ refcounts_deref (&pi->refcounts, &result);
+
+- pi->refcnt--;
+- if (pi->refcnt == 0 && pi->weakrefcnt == 0)
++ if (result.hard == 0 && result.weak == 0)
+ _ports_complete_deallocate (pi);
+- else
+- pthread_mutex_unlock (&_ports_lock);
+ }
+diff --git a/libports/port-ref-weak.c b/libports/port-ref-weak.c
+index c7d3c69..e4b7fc8 100644
+--- a/libports/port-ref-weak.c
++++ b/libports/port-ref-weak.c
+@@ -25,9 +25,7 @@ void
+ ports_port_ref_weak (void *portstruct)
+ {
+ struct port_info *pi = portstruct;
+-
+- pthread_mutex_lock (&_ports_lock);
+- assert (pi->refcnt || pi->weakrefcnt);
+- pi->weakrefcnt++;
+- pthread_mutex_unlock (&_ports_lock);
++ struct references result;
++ refcounts_ref_weak (&pi->refcounts, &result);
++ assert (result.hard > 0 || result.weak > 1);
+ }
+diff --git a/libports/port-ref.c b/libports/port-ref.c
+index 92b7118..761c50f 100644
+--- a/libports/port-ref.c
++++ b/libports/port-ref.c
+@@ -25,9 +25,7 @@ void
+ ports_port_ref (void *portstruct)
+ {
+ struct port_info *pi = portstruct;
+-
+- pthread_mutex_lock (&_ports_lock);
+- assert (pi->refcnt || pi->weakrefcnt);
+- pi->refcnt++;
+- pthread_mutex_unlock (&_ports_lock);
++ struct references result;
++ refcounts_ref (&pi->refcounts, &result);
++ assert (result.hard > 1 || result.weak > 0);
+ }
+diff --git a/libports/ports.h b/libports/ports.h
+index 6922162..40d3b43 100644
+--- a/libports/ports.h
++++ b/libports/ports.h
+@@ -27,6 +27,7 @@
+ #include <hurd/ihash.h>
+ #include <mach/notify.h>
+ #include <pthread.h>
++#include <refcount.h>
+
+ /* These are global values for common flags used in the various structures.
+ Not all of these are meaningful in all flag fields. */
+@@ -39,8 +40,7 @@
+ struct port_info
+ {
+ struct port_class *class;
+- int refcnt;
+- int weakrefcnt;
++ refcounts_t refcounts;
+ mach_port_mscount_t mscount;
+ mach_msg_seqno_t cancel_threshold;
+ int flags;
+diff --git a/libports/reallocate-from-external.c b/libports/reallocate-from-external.c
+index 9944b39..7205bd9 100644
+--- a/libports/reallocate-from-external.c
++++ b/libports/reallocate-from-external.c
+@@ -56,7 +56,7 @@ ports_reallocate_from_external (void *portstruct, mach_port_t receive)
+ else if (((pi->flags & PORT_HAS_SENDRIGHTS) == 0) && stat.mps_srights)
+ {
+ pi->flags |= PORT_HAS_SENDRIGHTS;
+- pi->refcnt++;
++ refcounts_ref (&pi->refcounts, NULL);
+ }
+
+ pi->port_right = receive;
+diff --git a/libports/transfer-right.c b/libports/transfer-right.c
+index 3f48290..776a8d2 100644
+--- a/libports/transfer-right.c
++++ b/libports/transfer-right.c
+@@ -72,7 +72,7 @@ ports_transfer_right (void *tostruct,
+ else if (((topi->flags & PORT_HAS_SENDRIGHTS) == 0) && hassendrights)
+ {
+ topi->flags |= PORT_HAS_SENDRIGHTS;
+- topi->refcnt++;
++ refcounts_ref (&topi->refcounts, NULL);
+ }
+ }
+
+diff --git a/utils/rpctrace.c b/utils/rpctrace.c
+index fc913e3..b11fea4 100644
+--- a/utils/rpctrace.c
++++ b/utils/rpctrace.c
+@@ -431,7 +431,9 @@ destroy_receiver_info (struct receiver_info *info)
+ while (send_wrapper)
+ {
+ struct sender_info *next = send_wrapper->next;
+- assert (TRACED_INFO (send_wrapper)->pi.refcnt == 1);
++ assert (
++ refcounts_hard_references (&TRACED_INFO (send_wrapper)->pi.refcounts)
++ == 1);
+ /* Reset the receive_right of the send wrapper in advance to avoid
+ * destroy_receiver_info is called when the port info is destroyed. */
+ send_wrapper->receive_right = NULL;
+@@ -848,7 +850,11 @@ rewrite_right (mach_port_t *right, mach_msg_type_name_t *type,
+ hurd_ihash_locp_remove (&traced_names, receiver_info->locp);
+
+ send_wrapper2 = get_send_wrapper (receiver_info, dest, &rr);
+- assert (TRACED_INFO (send_wrapper2)->pi.refcnt == 1);
++ assert (
++ refcounts_hard_references (
++ &TRACED_INFO (send_wrapper2)->pi.refcounts)
++ == 1);
++
+ name = TRACED_INFO (send_wrapper2)->name;
+ TRACED_INFO (send_wrapper2)->name = NULL;
+ /* send_wrapper2 isn't destroyed normally, so we need to unlink
+--
+2.0.0.rc2
+
diff --git a/debian/patches/0007-ext2fs-use-a-seperate-lock-to-protect-nodehash.patch b/debian/patches/0007-ext2fs-use-a-seperate-lock-to-protect-nodehash.patch
new file mode 100644
index 00000000..726f8c62
--- /dev/null
+++ b/debian/patches/0007-ext2fs-use-a-seperate-lock-to-protect-nodehash.patch
@@ -0,0 +1,210 @@
+From b4fe5f7a878b0d2615d060ddd15fa442d8b2ec08 Mon Sep 17 00:00:00 2001
+From: Justus Winter <4winter@informatik.uni-hamburg.de>
+Date: Tue, 13 May 2014 13:09:15 +0200
+Subject: [PATCH 07/14] ext2fs: use a seperate lock to protect nodehash
+
+Previously, ext2fs used diskfs_node_refcnt_lock to serialize access to
+the nodehash.
+
+Use a separate lock to protect nodehash. Adjust the reference
+counting accordingly. Every node in the nodehash carries a light
+reference. When we are asked to give up that light reference, we
+reacquire our lock momentarily to check whether someone else
+reacquired a reference through the nodehash.
+
+* ext2fs/inode.c (nodecache_lock): New lock.
+(diskfs_cached_lookup): Use a separate lock to protect nodehash.
+Adjust the reference counting accordingly.
+(ifind): Likewise.
+(diskfs_node_iterate): Likewise.
+(diskfs_node_norefs): Move the code removing the node from nodehash...
+(diskfs_try_dropping_softrefs): ... here, where we check whether
+someone reacquired a reference, and if so hold on to our light
+reference.
+---
+ ext2fs/inode.c | 76 ++++++++++++++++++++++++++++++++++++++++------------------
+ 1 file changed, 53 insertions(+), 23 deletions(-)
+
+diff --git a/ext2fs/inode.c b/ext2fs/inode.c
+index ed78265..0b3fa8b 100644
+--- a/ext2fs/inode.c
++++ b/ext2fs/inode.c
+@@ -46,8 +46,18 @@
+ #define INOHASH(ino) (((unsigned)(ino))%INOHSZ)
+ #endif
+
++/* The nodehash is a cache of nodes.
++
++ Access to nodehash and nodehash_nr_items is protected by
++ nodecache_lock.
++
++ Every node in the nodehash carries a light reference. When we are
++ asked to give up that light reference, we reacquire our lock
++ momentarily to check whether someone else reacquired a reference
++ through the nodehash. */
+ static struct node *nodehash[INOHSZ];
+ static size_t nodehash_nr_items;
++static pthread_rwlock_t nodecache_lock = PTHREAD_RWLOCK_INITIALIZER;
+
+ static error_t read_node (struct node *np);
+
+@@ -71,24 +81,22 @@ diskfs_cached_lookup (ino_t inum, struct node **npp)
+ struct node *np;
+ struct disknode *dn;
+
+- pthread_spin_lock (&diskfs_node_refcnt_lock);
++ pthread_rwlock_rdlock (&nodecache_lock);
+ for (np = nodehash[INOHASH(inum)]; np; np = np->dn->hnext)
+ if (np->cache_id == inum)
+ {
+- np->references++;
+- pthread_spin_unlock (&diskfs_node_refcnt_lock);
++ diskfs_nref (np);
++ pthread_rwlock_unlock (&nodecache_lock);
+ pthread_mutex_lock (&np->lock);
+ *npp = np;
+ return 0;
+ }
++ pthread_rwlock_unlock (&nodecache_lock);
+
+ /* Format specific data for the new node. */
+ dn = malloc (sizeof (struct disknode));
+ if (! dn)
+- {
+- pthread_spin_unlock (&diskfs_node_refcnt_lock);
+- return ENOMEM;
+- }
++ return ENOMEM;
+ dn->dirents = 0;
+ dn->dir_idx = 0;
+ dn->pager = 0;
+@@ -102,14 +110,15 @@ diskfs_cached_lookup (ino_t inum, struct node **npp)
+ pthread_mutex_lock (&np->lock);
+
+ /* Put NP in NODEHASH. */
++ pthread_rwlock_wrlock (&nodecache_lock);
+ dn->hnext = nodehash[INOHASH(inum)];
+ if (dn->hnext)
+ dn->hnext->dn->hprevp = &dn->hnext;
+ dn->hprevp = &nodehash[INOHASH(inum)];
+ nodehash[INOHASH(inum)] = np;
++ diskfs_nref_light (np);
+ nodehash_nr_items += 1;
+-
+- pthread_spin_unlock (&diskfs_node_refcnt_lock);
++ pthread_rwlock_unlock (&nodecache_lock);
+
+ /* Get the contents of NP off disk. */
+ err = read_node (np);
+@@ -140,14 +149,13 @@ ifind (ino_t inum)
+ {
+ struct node *np;
+
+- pthread_spin_lock (&diskfs_node_refcnt_lock);
++ pthread_rwlock_rdlock (&nodecache_lock);
+ for (np = nodehash[INOHASH(inum)]; np; np = np->dn->hnext)
+ {
+ if (np->cache_id != inum)
+ continue;
+
+- assert (np->references);
+- pthread_spin_unlock (&diskfs_node_refcnt_lock);
++ pthread_rwlock_unlock (&nodecache_lock);
+ return np;
+ }
+ assert (0);
+@@ -158,11 +166,6 @@ ifind (ino_t inum)
+ void
+ diskfs_node_norefs (struct node *np)
+ {
+- *np->dn->hprevp = np->dn->hnext;
+- if (np->dn->hnext)
+- np->dn->hnext->dn->hprevp = np->dn->hprevp;
+- nodehash_nr_items -= 1;
+-
+ if (np->dn->dirents)
+ free (np->dn->dirents);
+ assert (!np->dn->pager);
+@@ -180,6 +183,33 @@ diskfs_node_norefs (struct node *np)
+ void
+ diskfs_try_dropping_softrefs (struct node *np)
+ {
++ pthread_rwlock_wrlock (&nodecache_lock);
++ if (np->dn->hnext != NULL)
++ {
++ /* Check if someone reacquired a reference through the
++ nodehash. */
++ unsigned int references;
++ pthread_spin_lock (&diskfs_node_refcnt_lock);
++ references = np->references;
++ pthread_spin_unlock (&diskfs_node_refcnt_lock);
++
++ if (references > 0)
++ {
++ /* A reference was reacquired through a hash table lookup.
++ It's fine, we didn't touch anything yet. */
++ pthread_rwlock_unlock (&nodecache_lock);
++ return;
++ }
++
++ *np->dn->hprevp = np->dn->hnext;
++ if (np->dn->hnext)
++ np->dn->hnext->dn->hprevp = np->dn->hprevp;
++ np->dn->hnext = NULL;
++ nodehash_nr_items -= 1;
++ diskfs_nrele_light (np);
++ }
++ pthread_rwlock_unlock (&nodecache_lock);
++
+ drop_pager_softrefs (np);
+ }
+
+@@ -556,12 +586,12 @@ diskfs_node_iterate (error_t (*fun)(struct node *))
+ size_t num_nodes;
+ struct node *node, **node_list, **p;
+
+- pthread_spin_lock (&diskfs_node_refcnt_lock);
++ pthread_rwlock_rdlock (&nodecache_lock);
+
+ /* We must copy everything from the hash table into another data structure
+ to avoid running into any problems with the hash-table being modified
+ during processing (normally we delegate access to hash-table with
+- diskfs_node_refcnt_lock, but we can't hold this while locking the
++ nodecache_lock, but we can't hold this while locking the
+ individual node locks). */
+ num_nodes = nodehash_nr_items;
+
+@@ -570,7 +600,7 @@ diskfs_node_iterate (error_t (*fun)(struct node *))
+ node_list = malloc (num_nodes * sizeof (struct node *));
+ if (node_list == NULL)
+ {
+- pthread_spin_unlock (&diskfs_node_refcnt_lock);
++ pthread_rwlock_unlock (&nodecache_lock);
+ ext2_debug ("unable to allocate temporary node table");
+ return ENOMEM;
+ }
+@@ -580,10 +610,10 @@ diskfs_node_iterate (error_t (*fun)(struct node *))
+ for (node = nodehash[n]; node; node = node->dn->hnext)
+ {
+ *p++ = node;
+- node->references++;
++ diskfs_nref_light (node);
+ }
+
+- pthread_spin_unlock (&diskfs_node_refcnt_lock);
++ pthread_rwlock_unlock (&nodecache_lock);
+
+ p = node_list;
+ while (num_nodes-- > 0)
+@@ -595,7 +625,7 @@ diskfs_node_iterate (error_t (*fun)(struct node *))
+ err = (*fun)(node);
+ pthread_mutex_unlock (&node->lock);
+ }
+- diskfs_nrele (node);
++ diskfs_nrele_light (node);
+ }
+
+ free (node_list);
+--
+2.0.0.rc2
+
diff --git a/debian/patches/0008-fatfs-use-a-seperate-lock-to-protect-nodehash.patch b/debian/patches/0008-fatfs-use-a-seperate-lock-to-protect-nodehash.patch
new file mode 100644
index 00000000..c38dd2a2
--- /dev/null
+++ b/debian/patches/0008-fatfs-use-a-seperate-lock-to-protect-nodehash.patch
@@ -0,0 +1,253 @@
+From 5230d1c491b179c04388bff4d47111d4e709a32c Mon Sep 17 00:00:00 2001
+From: Justus Winter <4winter@informatik.uni-hamburg.de>
+Date: Tue, 13 May 2014 15:14:53 +0200
+Subject: [PATCH 08/14] fatfs: use a seperate lock to protect nodehash
+
+Previously, fatfs used diskfs_node_refcnt_lock to serialize access to
+the nodehash.
+
+Use a separate lock to protect nodehash. Adjust the reference
+counting accordingly. Every node in the nodehash carries a light
+reference. When we are asked to give up that light reference, we
+reacquire our lock momentarily to check whether someone else
+reacquired a reference through the nodehash.
+
+* fatfs/inode.c (nodecache_lock): New lock.
+(diskfs_cached_lookup): Use a separate lock to protect nodehash.
+Adjust the reference counting accordingly.
+(ifind): Likewise.
+(diskfs_node_iterate): Likewise.
+(diskfs_node_norefs): Move the code removing the node from nodehash...
+(diskfs_try_dropping_softrefs): ... here, where we check whether
+someone reacquired a reference, and if so hold on to our light
+reference.
+---
+ fatfs/inode.c | 93 +++++++++++++++++++++++++++++++++++++++--------------------
+ 1 file changed, 62 insertions(+), 31 deletions(-)
+
+diff --git a/fatfs/inode.c b/fatfs/inode.c
+index ed6f3f0..1e5abdf 100644
+--- a/fatfs/inode.c
++++ b/fatfs/inode.c
+@@ -44,8 +44,18 @@
+ #define INOHASH(ino) (((unsigned)(ino))%INOHSZ)
+ #endif
+
++/* The nodehash is a cache of nodes.
++
++ Access to nodehash and nodehash_nr_items is protected by
++ nodecache_lock.
++
++ Every node in the nodehash carries a light reference. When we are
++ asked to give up that light reference, we reacquire our lock
++ momentarily to check whether someone else reacquired a reference
++ through the nodehash. */
+ static struct node *nodehash[INOHSZ];
+ static size_t nodehash_nr_items;
++static pthread_rwlock_t nodecache_lock = PTHREAD_RWLOCK_INITIALIZER;
+
+ static error_t read_node (struct node *np, vm_address_t buf);
+
+@@ -67,24 +77,23 @@ diskfs_cached_lookup (ino64_t inum, struct node **npp)
+ struct node *np;
+ struct disknode *dn;
+
+- pthread_spin_lock (&diskfs_node_refcnt_lock);
++ pthread_rwlock_rdlock (&nodecache_lock);
+ for (np = nodehash[INOHASH(inum)]; np; np = np->dn->hnext)
+ if (np->cache_id == inum)
+ {
+- np->references++;
+- pthread_spin_unlock (&diskfs_node_refcnt_lock);
++ diskfs_nref (np);
++ pthread_rwlock_unlock (&nodecache_lock);
+ pthread_mutex_lock (&np->lock);
+ *npp = np;
+ return 0;
+ }
++ pthread_rwlock_unlock (&nodecache_lock);
+
+ /* Format specific data for the new node. */
+ dn = malloc (sizeof (struct disknode));
+ if (! dn)
+- {
+- pthread_spin_unlock (&diskfs_node_refcnt_lock);
+- return ENOMEM;
+- }
++ return ENOMEM;
++
+ dn->pager = 0;
+ dn->first = 0;
+ dn->last = 0;
+@@ -102,15 +111,16 @@ diskfs_cached_lookup (ino64_t inum, struct node **npp)
+ pthread_mutex_lock (&np->lock);
+
+ /* Put NP in NODEHASH. */
++ pthread_rwlock_wrlock (&nodecache_lock);
+ dn->hnext = nodehash[INOHASH(inum)];
+ if (dn->hnext)
+ dn->hnext->dn->hprevp = &dn->hnext;
+ dn->hprevp = &nodehash[INOHASH(inum)];
+ nodehash[INOHASH(inum)] = np;
++ diskfs_nref_light (np);
+ nodehash_nr_items += 1;
++ pthread_rwlock_unlock (&nodecache_lock);
+
+- pthread_spin_unlock (&diskfs_node_refcnt_lock);
+-
+ /* Get the contents of NP off disk. */
+ err = read_node (np, 0);
+
+@@ -133,24 +143,23 @@ diskfs_cached_lookup_in_dirbuf (int inum, struct node **npp, vm_address_t buf)
+ struct node *np;
+ struct disknode *dn;
+
+- pthread_spin_lock (&diskfs_node_refcnt_lock);
++ pthread_rwlock_rdlock (&nodecache_lock);
+ for (np = nodehash[INOHASH(inum)]; np; np = np->dn->hnext)
+ if (np->cache_id == inum)
+ {
+- np->references++;
+- pthread_spin_unlock (&diskfs_node_refcnt_lock);
++ diskfs_nref (np);
++ pthread_rwlock_unlock (&nodecache_lock);
+ pthread_mutex_lock (&np->lock);
+ *npp = np;
+ return 0;
+ }
++ pthread_rwlock_unlock (&nodecache_lock);
+
+ /* Format specific data for the new node. */
+ dn = malloc (sizeof (struct disknode));
+ if (! dn)
+- {
+- pthread_spin_unlock (&diskfs_node_refcnt_lock);
+- return ENOMEM;
+- }
++ return ENOMEM;
++
+ dn->pager = 0;
+ dn->first = 0;
+ dn->last = 0;
+@@ -168,15 +177,16 @@ diskfs_cached_lookup_in_dirbuf (int inum, struct node **npp, vm_address_t buf)
+ pthread_mutex_lock (&np->lock);
+
+ /* Put NP in NODEHASH. */
++ pthread_rwlock_wrlock (&nodecache_lock);
+ dn->hnext = nodehash[INOHASH(inum)];
+ if (dn->hnext)
+ dn->hnext->dn->hprevp = &dn->hnext;
+ dn->hprevp = &nodehash[INOHASH(inum)];
+ nodehash[INOHASH(inum)] = np;
++ diskfs_nref_light (np);
+ nodehash_nr_items += 1;
++ pthread_rwlock_unlock (&nodecache_lock);
+
+- pthread_spin_unlock (&diskfs_node_refcnt_lock);
+-
+ /* Get the contents of NP off disk. */
+ err = read_node (np, buf);
+
+@@ -196,14 +206,13 @@ ifind (ino_t inum)
+ {
+ struct node *np;
+
+- pthread_spin_lock (&diskfs_node_refcnt_lock);
++ pthread_rwlock_rdlock (&nodecache_lock);
+ for (np = nodehash[INOHASH(inum)]; np; np = np->dn->hnext)
+ {
+ if (np->cache_id != inum)
+ continue;
+
+- assert (np->references);
+- pthread_spin_unlock (&diskfs_node_refcnt_lock);
++ pthread_rwlock_unlock (&nodecache_lock);
+ return np;
+ }
+ assert (0);
+@@ -216,11 +225,6 @@ diskfs_node_norefs (struct node *np)
+ {
+ struct cluster_chain *last = np->dn->first;
+
+- *np->dn->hprevp = np->dn->hnext;
+- if (np->dn->hnext)
+- np->dn->hnext->dn->hprevp = np->dn->hprevp;
+- nodehash_nr_items -= 1;
+-
+ while (last)
+ {
+ struct cluster_chain *next = last->next;
+@@ -251,6 +255,33 @@ diskfs_node_norefs (struct node *np)
+ void
+ diskfs_try_dropping_softrefs (struct node *np)
+ {
++ pthread_rwlock_wrlock (&nodecache_lock);
++ if (np->dn->hnext != NULL)
++ {
++ /* Check if someone reacquired a reference through the
++ nodehash. */
++ unsigned int references;
++ pthread_spin_lock (&diskfs_node_refcnt_lock);
++ references = np->references;
++ pthread_spin_unlock (&diskfs_node_refcnt_lock);
++
++ if (references > 0)
++ {
++ /* A reference was reacquired through a hash table lookup.
++ It's fine, we didn't touch anything yet. */
++ pthread_rwlock_unlock (&nodecache_lock);
++ return;
++ }
++
++ *np->dn->hprevp = np->dn->hnext;
++ if (np->dn->hnext)
++ np->dn->hnext->dn->hprevp = np->dn->hprevp;
++ np->dn->hnext = NULL;
++ nodehash_nr_items -= 1;
++ diskfs_nrele_light (np);
++ }
++ pthread_rwlock_unlock (&nodecache_lock);
++
+ drop_pager_softrefs (np);
+ }
+
+@@ -554,12 +585,12 @@ diskfs_node_iterate (error_t (*fun)(struct node *))
+ size_t num_nodes;
+ struct node *node, **node_list, **p;
+
+- pthread_spin_lock (&diskfs_node_refcnt_lock);
++ pthread_rwlock_rdlock (&nodecache_lock);
+
+ /* We must copy everything from the hash table into another data structure
+ to avoid running into any problems with the hash-table being modified
+ during processing (normally we delegate access to hash-table with
+- diskfs_node_refcnt_lock, but we can't hold this while locking the
++ nodecache_lock, but we can't hold this while locking the
+ individual node locks). */
+
+ num_nodes = nodehash_nr_items;
+@@ -570,10 +601,10 @@ diskfs_node_iterate (error_t (*fun)(struct node *))
+ for (node = nodehash[n]; node; node = node->dn->hnext)
+ {
+ *p++ = node;
+- node->references++;
++ diskfs_nref_light (node);
+ }
+
+- pthread_spin_unlock (&diskfs_node_refcnt_lock);
++ pthread_rwlock_unlock (&nodecache_lock);
+
+ p = node_list;
+ while (num_nodes-- > 0)
+@@ -585,7 +616,7 @@ diskfs_node_iterate (error_t (*fun)(struct node *))
+ err = (*fun)(node);
+ pthread_mutex_unlock (&node->lock);
+ }
+- diskfs_nrele (node);
++ diskfs_nrele_light (node);
+ }
+
+ return err;
+--
+2.0.0.rc2
+
diff --git a/debian/patches/0009-isofs-use-a-seperate-lock-to-protect-node_cache.patch b/debian/patches/0009-isofs-use-a-seperate-lock-to-protect-node_cache.patch
new file mode 100644
index 00000000..4d58af66
--- /dev/null
+++ b/debian/patches/0009-isofs-use-a-seperate-lock-to-protect-node_cache.patch
@@ -0,0 +1,229 @@
+From 149a8871ff7fee7a420fee27d86552a2ceb14e6b Mon Sep 17 00:00:00 2001
+From: Justus Winter <4winter@informatik.uni-hamburg.de>
+Date: Tue, 13 May 2014 15:16:31 +0200
+Subject: [PATCH 09/14] isofs: use a seperate lock to protect node_cache
+
+Previously, isofs used diskfs_node_refcnt_lock to serialize access to
+the node_cache.
+
+Use a separate lock to protect node_cache. Adjust the reference
+counting accordingly. Every node in the node_cache carries a light
+reference. When we are asked to give up that light reference, we
+reacquire our lock momentarily to check whether someone else
+reacquired a reference through the node_cache.
+
+* isofs/inode.c (nodecache_lock): New lock.
+(inode_cache_find): Use a separate lock to protect node_cache.
+Adjust the reference counting accordingly.
+(diskfs_cached_lookup): Likewise.
+(load_inode): Likewise.
+(cache_inode): Update comment accordingly.
+(diskfs_node_iterate): Likewise.
+(diskfs_node_norefs): Move the code removing the node from node_cache...
+(diskfs_try_dropping_softrefs): ... here, where we check whether
+someone reacquired a reference, and if so hold on to our light
+reference.
+---
+ isofs/inode.c | 81 +++++++++++++++++++++++++++++++++++++++++------------------
+ 1 file changed, 56 insertions(+), 25 deletions(-)
+
+diff --git a/isofs/inode.c b/isofs/inode.c
+index cdc05ae..ec874df 100644
+--- a/isofs/inode.c
++++ b/isofs/inode.c
+@@ -48,9 +48,19 @@ struct node_cache
+ struct node *np; /* if live */
+ };
+
++/* The node_cache is a cache of nodes.
++
++ Access to node_cache, node_cache_size, and node_cache_alloced is
++ protected by nodecache_lock.
++
++ Every node in the node_cache carries a light reference. When we
++ are asked to give up that light reference, we reacquire our lock
++ momentarily to check whether someone else reacquired a reference
++ through the node_cache. */
+ static int node_cache_size = 0;
+ static int node_cache_alloced = 0;
+ struct node_cache *node_cache = 0;
++static pthread_rwlock_t nodecache_lock = PTHREAD_RWLOCK_INITIALIZER;
+
+ /* Forward */
+ static error_t read_disknode (struct node *,
+@@ -58,7 +68,7 @@ static error_t read_disknode (struct node *,
+
+
+ /* See if node with identifier ID is in the cache. If so, return it,
+- with one additional reference. diskfs_node_refcnt_lock must be held
++ with one additional reference. nodecache_lock must be held
+ on entry to the call, and will be released iff the node was found
+ in the cache. */
+ void
+@@ -71,8 +81,8 @@ inode_cache_find (off_t id, struct node **npp)
+ && node_cache[i].np)
+ {
+ *npp = node_cache[i].np;
+- (*npp)->references++;
+- pthread_spin_unlock (&diskfs_node_refcnt_lock);
++ diskfs_nref (*npp);
++ pthread_rwlock_unlock (&nodecache_lock);
+ pthread_mutex_lock (&(*npp)->lock);
+ return;
+ }
+@@ -92,7 +102,7 @@ use_file_start_id (struct dirrect *record, struct rrip_lookup *rr)
+ }
+
+ /* Enter NP into the cache. The directory entry we used is DR, the
+- cached Rock-Ridge info RR. diskfs_node_refcnt_lock must be held. */
++ cached Rock-Ridge info RR. nodecache_lock must be held. */
+ void
+ cache_inode (struct node *np, struct dirrect *record,
+ struct rrip_lookup *rr)
+@@ -155,7 +165,7 @@ diskfs_cached_lookup (ino_t id, struct node **npp)
+ to avoid presenting zero cache ID's. */
+ id--;
+
+- pthread_spin_lock (&diskfs_node_refcnt_lock);
++ pthread_rwlock_rdlock (&nodecache_lock);
+ assert (id < node_cache_size);
+
+ np = node_cache[id].np;
+@@ -174,7 +184,7 @@ diskfs_cached_lookup (ino_t id, struct node **npp)
+ dn = malloc (sizeof (struct disknode));
+ if (!dn)
+ {
+- pthread_spin_unlock (&diskfs_node_refcnt_lock);
++ pthread_rwlock_unlock (&nodecache_lock);
+ release_rrip (&rr);
+ return ENOMEM;
+ }
+@@ -185,16 +195,17 @@ diskfs_cached_lookup (ino_t id, struct node **npp)
+ if (!np)
+ {
+ free (dn);
+- pthread_spin_unlock (&diskfs_node_refcnt_lock);
++ pthread_rwlock_unlock (&nodecache_lock);
+ release_rrip (&rr);
+ return ENOMEM;
+ }
+ np->cache_id = id + 1; /* see above for rationale for increment */
+ pthread_mutex_lock (&np->lock);
+ c->np = np;
+- pthread_spin_unlock (&diskfs_node_refcnt_lock);
++ diskfs_nref_light (np);
++ pthread_rwlock_unlock (&nodecache_lock);
+
+- err = read_disknode (np, node_cache[id].dr, &rr);
++ err = read_disknode (np, dn->dr, &rr);
+ if (!err)
+ *npp = np;
+
+@@ -204,8 +215,8 @@ diskfs_cached_lookup (ino_t id, struct node **npp)
+ }
+
+
+- np->references++;
+- pthread_spin_unlock (&diskfs_node_refcnt_lock);
++ diskfs_nref (np);
++ pthread_rwlock_unlock (&nodecache_lock);
+ pthread_mutex_lock (&np->lock);
+ *npp = np;
+ return 0;
+@@ -315,7 +326,7 @@ load_inode (struct node **npp, struct dirrect *record,
+ if (rr->valid & VALID_CL)
+ record = rr->realdirent;
+
+- pthread_spin_lock (&diskfs_node_refcnt_lock);
++ pthread_rwlock_rdlock (&nodecache_lock);
+
+ /* First check the cache */
+ if (use_file_start_id (record, rr))
+@@ -323,19 +334,16 @@ load_inode (struct node **npp, struct dirrect *record,
+ else
+ inode_cache_find ((off_t) ((void *) record - (void *) disk_image), npp);
+
++ pthread_rwlock_unlock (&nodecache_lock);
++
+ if (*npp)
+- {
+- pthread_spin_unlock (&diskfs_node_refcnt_lock);
+- return 0;
+- }
++ return 0;
+
+ /* Create a new node */
+ dn = malloc (sizeof (struct disknode));
+ if (!dn)
+- {
+- pthread_spin_unlock (&diskfs_node_refcnt_lock);
+- return ENOMEM;
+- }
++ return ENOMEM;
++
+ dn->fileinfo = 0;
+ dn->dr = record;
+ dn->file_start = file_start;
+@@ -344,14 +352,14 @@ load_inode (struct node **npp, struct dirrect *record,
+ if (!np)
+ {
+ free (dn);
+- pthread_spin_unlock (&diskfs_node_refcnt_lock);
+ return ENOMEM;
+ }
+
+ pthread_mutex_lock (&np->lock);
+
++ pthread_rwlock_wrlock (&nodecache_lock);
+ cache_inode (np, record, rr);
+- pthread_spin_unlock (&diskfs_node_refcnt_lock);
++ pthread_rwlock_unlock (&nodecache_lock);
+
+ err = read_disknode (np, record, rr);
+ *npp = np;
+@@ -505,9 +513,6 @@ error_t (*diskfs_read_symlink_hook) (struct node *, char *)
+ void
+ diskfs_node_norefs (struct node *np)
+ {
+- assert (node_cache[np->cache_id - 1].np == np);
+- node_cache[np->cache_id - 1].np = 0;
+-
+ if (np->dn->translator)
+ free (np->dn->translator);
+
+@@ -521,6 +526,32 @@ diskfs_node_norefs (struct node *np)
+ void
+ diskfs_try_dropping_softrefs (struct node *np)
+ {
++ pthread_rwlock_wrlock (&nodecache_lock);
++ if (np->cache_id != 0)
++ {
++ assert (node_cache[np->cache_id - 1].np == np);
++
++ /* Check if someone reacquired a reference through the
++ node_cache. */
++ unsigned int references;
++ pthread_spin_lock (&diskfs_node_refcnt_lock);
++ references = np->references;
++ pthread_spin_unlock (&diskfs_node_refcnt_lock);
++
++ if (references > 0)
++ {
++ /* A reference was reacquired through a hash table lookup.
++ It's fine, we didn't touch anything yet. */
++ pthread_rwlock_unlock (&nodecache_lock);
++ return;
++ }
++
++ node_cache[np->cache_id - 1].np = 0;
++ np->cache_id = 0;
++ diskfs_nrele_light (np);
++ }
++ pthread_rwlock_unlock (&nodecache_lock);
++
+ drop_pager_softrefs (np);
+ }
+
+--
+2.0.0.rc2
+
diff --git a/debian/patches/0010-tmpfs-use-a-seperate-lock-to-protect-all_nodes.patch b/debian/patches/0010-tmpfs-use-a-seperate-lock-to-protect-all_nodes.patch
new file mode 100644
index 00000000..f93713a2
--- /dev/null
+++ b/debian/patches/0010-tmpfs-use-a-seperate-lock-to-protect-all_nodes.patch
@@ -0,0 +1,298 @@
+From 722f4057d86d13af9373d135f6832213a5d226aa Mon Sep 17 00:00:00 2001
+From: Justus Winter <4winter@informatik.uni-hamburg.de>
+Date: Tue, 13 May 2014 15:35:42 +0200
+Subject: [PATCH 10/14] tmpfs: use a seperate lock to protect all_nodes
+
+Previously, tmpfs used diskfs_node_refcnt_lock to serialize access to
+the all_nodes and some other related global state related to memory
+consumption.
+
+Use a separate lock to protect all_nodes, and atomic operations to
+access the state related to memory consumption. Adjust the reference
+counting accordingly. Every node in the all_nodes carries a light
+reference. When we are asked to give up that light reference, we
+reacquire our lock momentarily to check whether someone else
+reacquired a reference through the all_nodes.
+
+* tmpfs/tmpfs.h (num_files, tmpfs_space_used): Use atomic operations
+for these variables.
+(adjust_used): Use atomic operations.
+(get_used): New convenience function to atomically retrieve
+tmpfs_space_used.
+* tmpfs/node.c (all_nodes_lock): New lock.
+(diskfs_alloc_node): Use a separate lock to protect all_nodes.
+Adjust the reference counting accordingly.
+(diskfs_free_node): Likewise.
+(diskfs_cached_lookup): Likewise.
+(diskfs_node_iterate): Likewise.
+(diskfs_node_norefs): Move the code removing the node from all_nodes...
+(diskfs_try_dropping_softrefs): ... here, where we check whether
+someone reacquired a reference, and if so hold on to our light
+reference.
+(diskfs_grow): Use atomic operations.
+* tmpfs/tmpfs.c (diskfs_set_statfs): Likewise.
+---
+ tmpfs/node.c | 87 ++++++++++++++++++++++++++++++++++++++++++-----------------
+ tmpfs/tmpfs.c | 6 ++---
+ tmpfs/tmpfs.h | 20 +++++++++-----
+ 3 files changed, 78 insertions(+), 35 deletions(-)
+
+diff --git a/tmpfs/node.c b/tmpfs/node.c
+index acc029a..3c8e66a 100644
+--- a/tmpfs/node.c
++++ b/tmpfs/node.c
+@@ -29,8 +29,18 @@ the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
+ unsigned int num_files;
+ static unsigned int gen;
+
++/* all_nodes is a cache of nodes.
++
++ Access to all_nodes and all_nodes_nr_items is protected by
++ all_nodes_lock.
++
++ Every node in the all_nodes carries a light reference. When we are
++ asked to give up that light reference, we reacquire our lock
++ momentarily to check whether someone else reacquired a reference
++ through the all_nodes. */
+ struct node *all_nodes;
+ static size_t all_nodes_nr_items;
++pthread_rwlock_t all_nodes_lock = PTHREAD_RWLOCK_INITIALIZER;
+
+ error_t
+ diskfs_alloc_node (struct node *dp, mode_t mode, struct node **npp)
+@@ -40,18 +50,17 @@ diskfs_alloc_node (struct node *dp, mode_t mode, struct node **npp)
+ dn = calloc (1, sizeof *dn);
+ if (dn == 0)
+ return ENOSPC;
+- pthread_spin_lock (&diskfs_node_refcnt_lock);
+- if (round_page (tmpfs_space_used + sizeof *dn) / vm_page_size
++
++ if (round_page (get_used () + sizeof *dn) / vm_page_size
+ > tmpfs_page_limit)
+ {
+- pthread_spin_unlock (&diskfs_node_refcnt_lock);
++ pthread_rwlock_unlock (&all_nodes_lock);
+ free (dn);
+ return ENOSPC;
+ }
+ dn->gen = gen++;
+- ++num_files;
+- tmpfs_space_used += sizeof *dn;
+- pthread_spin_unlock (&diskfs_node_refcnt_lock);
++ __atomic_add_fetch (&num_files, 1, __ATOMIC_RELAXED);
++ adjust_used (sizeof *dn);
+
+ dn->type = IFTODT (mode & S_IFMT);
+ return diskfs_cached_lookup ((ino_t) (uintptr_t) dn, npp);
+@@ -75,15 +84,18 @@ diskfs_free_node (struct node *np, mode_t mode)
+ free (np->dn->u.lnk);
+ break;
+ }
++
++ pthread_rwlock_wrlock (&all_nodes_lock);
+ *np->dn->hprevp = np->dn->hnext;
+ if (np->dn->hnext != 0)
+ np->dn->hnext->dn->hprevp = np->dn->hprevp;
+ all_nodes_nr_items -= 1;
++ __atomic_sub_fetch (&num_files, 1, __ATOMIC_RELAXED);
++ adjust_used (-sizeof *np->dn);
++ pthread_rwlock_unlock (&all_nodes_lock);
++
+ free (np->dn);
+ np->dn = 0;
+-
+- --num_files;
+- tmpfs_space_used -= sizeof *np->dn;
+ }
+
+ void
+@@ -117,14 +129,6 @@ diskfs_node_norefs (struct node *np)
+ np->dn->u.chr = np->dn_stat.st_rdev;
+ break;
+ }
+-
+- /* Remove this node from the cache list rooted at `all_nodes'. */
+- *np->dn->hprevp = np->dn->hnext;
+- if (np->dn->hnext != 0)
+- np->dn->hnext->dn->hprevp = np->dn->hprevp;
+- all_nodes_nr_items -= 1;
+- np->dn->hnext = 0;
+- np->dn->hprevp = 0;
+ }
+
+ free (np);
+@@ -167,11 +171,14 @@ diskfs_cached_lookup (ino_t inum, struct node **npp)
+
+ assert (npp);
+
++ pthread_rwlock_rdlock (&all_nodes_lock);
++
+ if (dn->hprevp != 0) /* There is already a node. */
+ {
+ np = *dn->hprevp;
+ assert (np->dn == dn);
+ assert (*dn->hprevp == np);
++ pthread_rwlock_unlock (&all_nodes_lock);
+
+ diskfs_nref (np);
+ }
+@@ -179,18 +186,20 @@ diskfs_cached_lookup (ino_t inum, struct node **npp)
+ /* Create the new node. */
+ {
+ struct stat *st;
++ pthread_rwlock_unlock (&all_nodes_lock);
+
+ np = diskfs_make_node (dn);
+ np->cache_id = (ino_t) (uintptr_t) dn;
+
+- pthread_spin_lock (&diskfs_node_refcnt_lock);
++ pthread_rwlock_wrlock (&all_nodes_lock);
+ dn->hnext = all_nodes;
+ if (dn->hnext)
+ dn->hnext->dn->hprevp = &dn->hnext;
+ dn->hprevp = &all_nodes;
+ all_nodes = np;
+ all_nodes_nr_items += 1;
+- pthread_spin_unlock (&diskfs_node_refcnt_lock);
++ diskfs_nref_light (np);
++ pthread_rwlock_unlock (&all_nodes_lock);
+
+ st = &np->dn_stat;
+ memset (st, 0, sizeof *st);
+@@ -229,12 +238,12 @@ diskfs_node_iterate (error_t (*fun) (struct node *))
+ size_t num_nodes;
+ struct node *node, **node_list, **p;
+
+- pthread_spin_lock (&diskfs_node_refcnt_lock);
++ pthread_rwlock_rdlock (&all_nodes_lock);
+
+ /* We must copy everything from the hash table into another data structure
+ to avoid running into any problems with the hash-table being modified
+ during processing (normally we delegate access to hash-table with
+- diskfs_node_refcnt_lock, but we can't hold this while locking the
++ all_nodes_lock, but we can't hold this while locking the
+ individual node locks). */
+
+ num_nodes = all_nodes_nr_items;
+@@ -243,10 +252,10 @@ diskfs_node_iterate (error_t (*fun) (struct node *))
+ for (node = all_nodes; node != 0; node = node->dn->hnext)
+ {
+ *p++ = node;
+- node->references++;
++ diskfs_nref_light (node);
+ }
+
+- pthread_spin_unlock (&diskfs_node_refcnt_lock);
++ pthread_rwlock_unlock (&all_nodes_lock);
+
+ p = node_list;
+ while (num_nodes-- > 0)
+@@ -258,7 +267,7 @@ diskfs_node_iterate (error_t (*fun) (struct node *))
+ err = (*fun) (node);
+ pthread_mutex_unlock (&node->lock);
+ }
+- diskfs_nrele (node);
++ diskfs_nrele_light (node);
+ }
+
+ return err;
+@@ -272,6 +281,34 @@ diskfs_node_iterate (error_t (*fun) (struct node *))
+ void
+ diskfs_try_dropping_softrefs (struct node *np)
+ {
++ pthread_rwlock_wrlock (&all_nodes_lock);
++ if (np->dn->hnext != NULL)
++ {
++ /* Check if someone reacquired a reference through the
++ all_nodes. */
++ unsigned int references;
++ pthread_spin_lock (&diskfs_node_refcnt_lock);
++ references = np->references;
++ pthread_spin_unlock (&diskfs_node_refcnt_lock);
++
++ if (references > 0)
++ {
++ /* A reference was reacquired through a hash table lookup.
++ It's fine, we didn't touch anything yet. */
++ pthread_rwlock_unlock (&all_nodes_lock);
++ return;
++ }
++
++ /* Remove this node from the cache list rooted at `all_nodes'. */
++ *np->dn->hprevp = np->dn->hnext;
++ if (np->dn->hnext != 0)
++ np->dn->hnext->dn->hprevp = np->dn->hprevp;
++ all_nodes_nr_items -= 1;
++ np->dn->hnext = NULL;
++ np->dn->hprevp = NULL;
++ diskfs_nrele_light (np);
++ }
++ pthread_rwlock_unlock (&all_nodes_lock);
+ }
+
+ /* The user must define this funcction. Node NP has some light
+@@ -447,7 +484,7 @@ diskfs_grow (struct node *np, off_t size, struct protid *cred)
+
+ off_t set_size = size;
+ size = round_page (size);
+- if (round_page (tmpfs_space_used + size - np->allocsize)
++ if (round_page (get_used () + size - np->allocsize)
+ / vm_page_size > tmpfs_page_limit)
+ return ENOSPC;
+
+diff --git a/tmpfs/tmpfs.c b/tmpfs/tmpfs.c
+index a45d343..5337e58 100644
+--- a/tmpfs/tmpfs.c
++++ b/tmpfs/tmpfs.c
+@@ -67,10 +67,8 @@ diskfs_set_statfs (struct statfs *st)
+ st->f_bsize = vm_page_size;
+ st->f_blocks = tmpfs_page_limit;
+
+- pthread_spin_lock (&diskfs_node_refcnt_lock);
+- st->f_files = num_files;
+- pages = round_page (tmpfs_space_used) / vm_page_size;
+- pthread_spin_unlock (&diskfs_node_refcnt_lock);
++ st->f_files = __atomic_load_n (&num_files, __ATOMIC_RELAXED);
++ pages = round_page (get_used ()) / vm_page_size;
+
+ st->f_bfree = pages < tmpfs_page_limit ? tmpfs_page_limit - pages : 0;
+ st->f_bavail = st->f_bfree;
+diff --git a/tmpfs/tmpfs.h b/tmpfs/tmpfs.h
+index b3c636d..ad47200 100644
+--- a/tmpfs/tmpfs.h
++++ b/tmpfs/tmpfs.h
+@@ -69,17 +69,25 @@ struct tmpfs_dirent
+ char name[0];
+ };
+
+-extern unsigned int num_files;
+-extern off_t tmpfs_page_limit, tmpfs_space_used;
+-
++extern off_t tmpfs_page_limit;
+ extern mach_port_t default_pager;
+
++/* These two must be accessed using atomic operations. */
++extern unsigned int num_files;
++extern off_t tmpfs_space_used;
++
++/* Convenience function to adjust tmpfs_space_used. */
+ static inline void
+ adjust_used (off_t change)
+ {
+- pthread_spin_lock (&diskfs_node_refcnt_lock);
+- tmpfs_space_used += change;
+- pthread_spin_unlock (&diskfs_node_refcnt_lock);
++ __atomic_add_fetch (&tmpfs_space_used, change, __ATOMIC_RELAXED);
++}
++
++/* Convenience function to get tmpfs_space_used. */
++static inline off_t
++get_used (void)
++{
++ return __atomic_load_n (&tmpfs_space_used, __ATOMIC_RELAXED);
+ }
+
+ #endif
+--
+2.0.0.rc2
+
diff --git a/debian/patches/0011-libdiskfs-lock-less-reference-counting-of-nodes.patch b/debian/patches/0011-libdiskfs-lock-less-reference-counting-of-nodes.patch
new file mode 100644
index 00000000..d6d59a68
--- /dev/null
+++ b/debian/patches/0011-libdiskfs-lock-less-reference-counting-of-nodes.patch
@@ -0,0 +1,531 @@
+From 7d6165636025dba6063f74e072f5133d444ed06b Mon Sep 17 00:00:00 2001
+From: Justus Winter <4winter@informatik.uni-hamburg.de>
+Date: Wed, 14 May 2014 11:19:35 +0200
+Subject: [PATCH 11/14] libdiskfs: lock-less reference counting of nodes
+
+* libdiskfs/diskfs.h (struct node): Use refcounts_t for reference counting.
+(diskfs_node_refcnt_lock): Remove.
+(diskfs_node_norefs,diskfs_drop_node): Change comments accordingly.
+* libdiskfs/init-init.c: Likewise.
+* libdiskfs/node-drop.c: Likewise.
+* libdiskfs/node-make.c: Likewise.
+* libdiskfs/node-nput.c: Likewise.
+* libdiskfs/node-nputl.c: Likewise.
+* libdiskfs/node-nref.c: Likewise.
+* libdiskfs/node-nrefl.c: Likewise.
+* libdiskfs/node-nrele.c: Likewise.
+* libdiskfs/node-nrelel.c: Likewise.
+* ext2fs/inode.c: Likewise.
+* fatfs/inode.c: Likewise.
+* isofs/inode.c: Likewise.
+* tmpfs/node.c: Likewise.
+* doc/hurd.texi: Likewise.
+---
+ doc/hurd.texi | 11 ++--------
+ ext2fs/inode.c | 9 +++------
+ fatfs/inode.c | 21 ++++++-------------
+ isofs/inode.c | 9 +++------
+ libdiskfs/diskfs.h | 12 ++++-------
+ libdiskfs/init-init.c | 2 --
+ libdiskfs/node-drop.c | 9 +++------
+ libdiskfs/node-make.c | 3 +--
+ libdiskfs/node-nput.c | 54 +++++++++++++++++++------------------------------
+ libdiskfs/node-nputl.c | 12 ++++-------
+ libdiskfs/node-nref.c | 10 ++++-----
+ libdiskfs/node-nrefl.c | 6 +++---
+ libdiskfs/node-nrele.c | 48 +++++++++++++++++++++----------------------
+ libdiskfs/node-nrelel.c | 9 +++------
+ tmpfs/node.c | 9 +++------
+ 15 files changed, 83 insertions(+), 141 deletions(-)
+
+diff --git a/doc/hurd.texi b/doc/hurd.texi
+index 07ddfb4..6cafdb9 100644
+--- a/doc/hurd.texi
++++ b/doc/hurd.texi
+@@ -3780,10 +3780,6 @@ new thread and (eventually) get rid of the old one; the old thread won't
+ do any more syncs, regardless.
+ @end deftypefun
+
+-@deftypevar spin_lock_t diskfs_node_refcnt_lock
+-Pager reference count lock.
+-@end deftypevar
+-
+ @deftypevar int diskfs_readonly
+ Set to zero if the filesystem is currently writable.
+ @end deftypevar
+@@ -3818,9 +3814,7 @@ Every file or directory is a diskfs @dfn{node}. The following functions
+ help your diskfs callbacks manage nodes and their references:
+
+ @deftypefun void diskfs_drop_node (@w{struct node *@var{np}})
+-Node @var{np} now has no more references; clean all state. The
+-@var{diskfs_node_refcnt_lock} must be held, and will be released upon
+-return. @var{np} must be locked.
++Node @var{np} now has no more references; clean all state.
+ @end deftypefun
+
+ @deftypefun void diskfs_node_update (@w{struct node *@var{np}}, @w{int @var{wait}})
+@@ -4236,14 +4230,13 @@ without real users.
+ @deftypefun void diskfs_try_dropping_softrefs (@w{struct node *@var{np}})
+ Node @var{np} has some light references, but has just lost its last hard
+ references. Take steps so that if any light references can be freed,
+-they are. Both @var{diskfs_node_refcnt_lock} and @var{np} are locked.
++they are. @var{np} is locked.
+ This function will be called after @code{diskfs_lost_hardrefs}.
+ @end deftypefun
+
+ @deftypefun void diskfs_node_norefs (@w{struct node *@var{np}})
+ Node @var{np} has no more references; free local state, including
+ @code{*@var{np}} if it shouldn't be retained.
+-@var{diskfs_node_refcnt_lock} is held.
+ @end deftypefun
+
+ @deftypefun error_t diskfs_set_hypermetadata (@w{int @var{wait}}, @w{int @var{clean}})
+diff --git a/ext2fs/inode.c b/ext2fs/inode.c
+index 0b3fa8b..0538957 100644
+--- a/ext2fs/inode.c
++++ b/ext2fs/inode.c
+@@ -188,12 +188,9 @@ diskfs_try_dropping_softrefs (struct node *np)
+ {
+ /* Check if someone reacquired a reference through the
+ nodehash. */
+- unsigned int references;
+- pthread_spin_lock (&diskfs_node_refcnt_lock);
+- references = np->references;
+- pthread_spin_unlock (&diskfs_node_refcnt_lock);
+-
+- if (references > 0)
++ struct references result;
++ refcounts_references (&np->refcounts, &result);
++ if (result.hard > 0 || result.weak > 1)
+ {
+ /* A reference was reacquired through a hash table lookup.
+ It's fine, we didn't touch anything yet. */
+diff --git a/fatfs/inode.c b/fatfs/inode.c
+index 1e5abdf..4089d32 100644
+--- a/fatfs/inode.c
++++ b/fatfs/inode.c
+@@ -235,14 +235,8 @@ diskfs_node_norefs (struct node *np)
+ if (np->dn->translator)
+ free (np->dn->translator);
+
+- /* It is safe to unlock diskfs_node_refcnt_lock here for a while because
+- all references to the node have been deleted. */
+ if (np->dn->dirnode)
+- {
+- pthread_spin_unlock (&diskfs_node_refcnt_lock);
+- diskfs_nrele (np->dn->dirnode);
+- pthread_spin_lock (&diskfs_node_refcnt_lock);
+- }
++ diskfs_nrele (np->dn->dirnode);
+
+ assert (!np->dn->pager);
+
+@@ -260,12 +254,9 @@ diskfs_try_dropping_softrefs (struct node *np)
+ {
+ /* Check if someone reacquired a reference through the
+ nodehash. */
+- unsigned int references;
+- pthread_spin_lock (&diskfs_node_refcnt_lock);
+- references = np->references;
+- pthread_spin_unlock (&diskfs_node_refcnt_lock);
+-
+- if (references > 0)
++ struct references result;
++ refcounts_references (&np->refcounts, &result);
++ if (result.hard > 0 || result.weak > 1)
+ {
+ /* A reference was reacquired through a hash table lookup.
+ It's fine, we didn't touch anything yet. */
+@@ -370,7 +361,7 @@ read_node (struct node *np, vm_address_t buf)
+ /* Files in fatfs depend on the directory that hold the file. */
+ np->dn->dirnode = dp;
+ if (dp)
+- dp->references++;
++ refcounts_ref (&dp->refcounts, NULL);
+
+ pthread_rwlock_rdlock (&np->dn->dirent_lock);
+
+@@ -812,7 +803,7 @@ diskfs_alloc_node (struct node *dir, mode_t mode, struct node **node)
+
+ /* FIXME: We know that readnode couldn't put this in. */
+ np->dn->dirnode = dir;
+- dir->references++;
++ refcounts_ref (&dir->refcounts, NULL);
+
+ *node = np;
+ return 0;
+diff --git a/isofs/inode.c b/isofs/inode.c
+index ec874df..179dc5b 100644
+--- a/isofs/inode.c
++++ b/isofs/inode.c
+@@ -533,12 +533,9 @@ diskfs_try_dropping_softrefs (struct node *np)
+
+ /* Check if someone reacquired a reference through the
+ node_cache. */
+- unsigned int references;
+- pthread_spin_lock (&diskfs_node_refcnt_lock);
+- references = np->references;
+- pthread_spin_unlock (&diskfs_node_refcnt_lock);
+-
+- if (references > 0)
++ struct references result;
++ refcounts_references (&np->refcounts, &result);
++ if (result.hard > 0 || result.weak > 1)
+ {
+ /* A reference was reacquired through a hash table lookup.
+ It's fine, we didn't touch anything yet. */
+diff --git a/libdiskfs/diskfs.h b/libdiskfs/diskfs.h
+index 2c68aa3..831a5ba 100644
+--- a/libdiskfs/diskfs.h
++++ b/libdiskfs/diskfs.h
+@@ -96,8 +96,7 @@ struct node
+
+ pthread_mutex_t lock;
+
+- int references; /* hard references */
+- int light_references; /* light references */
++ refcounts_t refcounts;
+
+ mach_port_t sockaddr; /* address for S_IFSOCK shortcut */
+
+@@ -198,8 +197,6 @@ extern volatile struct mapped_time_value *diskfs_mtime;
+ be done by format independent code. */
+ extern int diskfs_synchronous;
+
+-extern pthread_spinlock_t diskfs_node_refcnt_lock;
+-
+ extern int pager_port_type;
+
+ /* Whether the filesystem is currently writable or not. */
+@@ -448,7 +445,7 @@ error_t diskfs_alloc_node (struct node *dp, mode_t mode, struct node **np);
+ void diskfs_free_node (struct node *np, mode_t mode);
+
+ /* Node NP has no more references; free local state, including *NP
+- if it isn't to be retained. diskfs_node_refcnt_lock is held. */
++ if it isn't to be retained. */
+ void diskfs_node_norefs (struct node *np);
+
+ /* The user must define this function. Node NP has some light
+@@ -611,9 +608,8 @@ void diskfs_spawn_first_thread (ports_demuxer_type demuxer);
+ diskfs_init_completed once it has a valid proc and auth port. */
+ void diskfs_start_bootstrap ();
+
+-/* Node NP now has no more references; clean all state. The
+- _diskfs_node_refcnt_lock must be held, and will be released
+- upon return. NP must be locked. */
++/* Node NP now has no more references; clean all state. NP must be
++ locked. */
+ void diskfs_drop_node (struct node *np);
+
+ /* Set on disk fields from NP->dn_stat; update ctime, atime, and mtime
+diff --git a/libdiskfs/init-init.c b/libdiskfs/init-init.c
+index 7a7f248..6c94faa 100644
+--- a/libdiskfs/init-init.c
++++ b/libdiskfs/init-init.c
+@@ -41,8 +41,6 @@ int _diskfs_noatime;
+
+ struct hurd_port _diskfs_exec_portcell;
+
+-pthread_spinlock_t diskfs_node_refcnt_lock = PTHREAD_SPINLOCK_INITIALIZER;
+-
+ pthread_spinlock_t _diskfs_control_lock = PTHREAD_SPINLOCK_INITIALIZER;
+ int _diskfs_ncontrol_ports;
+
+diff --git a/libdiskfs/node-drop.c b/libdiskfs/node-drop.c
+index 83eb590..fab3cfa 100644
+--- a/libdiskfs/node-drop.c
++++ b/libdiskfs/node-drop.c
+@@ -31,9 +31,8 @@ free_modreqs (struct modreq *mr)
+ }
+
+
+-/* Node NP now has no more references; clean all state. The
+- diskfs_node_refcnt_lock must be held, and will be released
+- upon return. NP must be locked. */
++/* Node NP now has no more references; clean all state. NP must be
++ locked. */
+ void
+ diskfs_drop_node (struct node *np)
+ {
+@@ -60,8 +59,7 @@ diskfs_drop_node (struct node *np)
+ and an nput. The next time through, this routine
+ will notice that the size is zero, and not have to
+ do anything. */
+- np->references++;
+- pthread_spin_unlock (&diskfs_node_refcnt_lock);
++ refcounts_ref (&np->refcounts, NULL);
+ diskfs_truncate (np, 0);
+
+ /* Force allocsize to zero; if truncate consistently fails this
+@@ -94,5 +92,4 @@ diskfs_drop_node (struct node *np)
+ assert (!np->sockaddr);
+
+ diskfs_node_norefs (np);
+- pthread_spin_unlock (&diskfs_node_refcnt_lock);
+ }
+diff --git a/libdiskfs/node-make.c b/libdiskfs/node-make.c
+index ff0cc0d..c7ca3b0 100644
+--- a/libdiskfs/node-make.c
++++ b/libdiskfs/node-make.c
+@@ -29,8 +29,7 @@ init_node (struct node *np, struct disknode *dn)
+ np->dn_stat_dirty = 0;
+
+ pthread_mutex_init (&np->lock, NULL);
+- np->references = 1;
+- np->light_references = 0;
++ refcounts_init (&np->refcounts, 1, 0);
+ np->owner = 0;
+ np->sockaddr = MACH_PORT_NULL;
+
+diff --git a/libdiskfs/node-nput.c b/libdiskfs/node-nput.c
+index 5043ad1..2935ae2 100644
+--- a/libdiskfs/node-nput.c
++++ b/libdiskfs/node-nput.c
+@@ -26,56 +26,44 @@
+ void
+ diskfs_nput (struct node *np)
+ {
+- int tried_drop_softrefs = 0;
++ struct references result;
+
+- loop:
+- pthread_spin_lock (&diskfs_node_refcnt_lock);
+- assert (np->references);
+- np->references--;
+- if (np->references + np->light_references == 0)
+- diskfs_drop_node (np);
+- else if (np->references == 0 && !tried_drop_softrefs)
+- {
+- pthread_spin_unlock (&diskfs_node_refcnt_lock);
++ /* While we call the diskfs_try_dropping_softrefs, we need to hold
++ one reference. We use a weak reference for this purpose, which
++ we acquire by demoting our hard reference to a weak one. */
++ refcounts_demote (&np->refcounts, &result);
+
++ if (result.hard == 0)
++ {
+ /* This is our cue that something akin to "last process closes file"
+ in the POSIX.1 sense happened, so make sure any pending node time
+ updates now happen in a timely fashion. */
+ diskfs_set_node_times (np);
+-
+ diskfs_lost_hardrefs (np);
+ if (!np->dn_stat.st_nlink)
+ {
+- /* There are no links. If there are soft references that
+- can be dropped, we can't let them postpone deallocation.
+- So attempt to drop them. But that's a user-supplied
+- routine, which might result in further recursive calls to
+- the ref-counting system. So we have to reacquire our
+- reference around the call to forestall disaster. */
+- pthread_spin_lock (&diskfs_node_refcnt_lock);
+- np->references++;
+- pthread_spin_unlock (&diskfs_node_refcnt_lock);
+-
+ if (np->sockaddr != MACH_PORT_NULL)
+ {
+ mach_port_deallocate (mach_task_self (), np->sockaddr);
+ np->sockaddr = MACH_PORT_NULL;
+ }
+
++ /* There are no links. If there are soft references that
++ can be dropped, we can't let them postpone deallocation.
++ So attempt to drop them. But that's a user-supplied
++ routine, which might result in further recursive calls to
++ the ref-counting system. This is not a problem, as we
++ hold a weak reference ourselves. */
+ diskfs_try_dropping_softrefs (np);
+-
+- /* But there's no value in looping forever in this
+- routine; only try to drop soft refs once. */
+- tried_drop_softrefs = 1;
+-
+- /* Now we can drop the reference back... */
+- goto loop;
+ }
+ pthread_mutex_unlock (&np->lock);
+ }
+- else
+- {
+- pthread_spin_unlock (&diskfs_node_refcnt_lock);
+- pthread_mutex_unlock (&np->lock);
+- }
++
++ /* Finally get rid of our reference. */
++ refcounts_deref_weak (&np->refcounts, &result);
++
++ if (result.hard == 0 && result.weak == 0)
++ diskfs_drop_node (np);
++
++ pthread_mutex_unlock (&np->lock);
+ }
+diff --git a/libdiskfs/node-nputl.c b/libdiskfs/node-nputl.c
+index 1959665..8dac16e 100644
+--- a/libdiskfs/node-nputl.c
++++ b/libdiskfs/node-nputl.c
+@@ -25,14 +25,10 @@
+ void
+ diskfs_nput_light (struct node *np)
+ {
+- pthread_spin_lock (&diskfs_node_refcnt_lock);
+- assert (np->light_references);
+- np->light_references--;
+- if (np->references + np->light_references == 0)
++ struct references result;
++ refcounts_deref_weak (&np->refcounts, &result);
++ if (result.hard == 0 && result.weak == 0)
+ diskfs_drop_node (np);
+ else
+- {
+- pthread_spin_unlock (&diskfs_node_refcnt_lock);
+- pthread_mutex_unlock (&np->lock);
+- }
++ pthread_mutex_unlock (&np->lock);
+ }
+diff --git a/libdiskfs/node-nref.c b/libdiskfs/node-nref.c
+index 13cea05..89ffa4f 100644
+--- a/libdiskfs/node-nref.c
++++ b/libdiskfs/node-nref.c
+@@ -26,12 +26,10 @@
+ void
+ diskfs_nref (struct node *np)
+ {
+- int new_hardref;
+- pthread_spin_lock (&diskfs_node_refcnt_lock);
+- np->references++;
+- new_hardref = (np->references == 1);
+- pthread_spin_unlock (&diskfs_node_refcnt_lock);
+- if (new_hardref)
++ struct references result;
++ refcounts_ref (&np->refcounts, &result);
++ assert (result.hard > 1 || result.weak > 0);
++ if (result.hard == 1)
+ {
+ pthread_mutex_lock (&np->lock);
+ diskfs_new_hardrefs (np);
+diff --git a/libdiskfs/node-nrefl.c b/libdiskfs/node-nrefl.c
+index 9692247..b7af409 100644
+--- a/libdiskfs/node-nrefl.c
++++ b/libdiskfs/node-nrefl.c
+@@ -24,7 +24,7 @@
+ void
+ diskfs_nref_light (struct node *np)
+ {
+- pthread_spin_lock (&diskfs_node_refcnt_lock);
+- np->light_references++;
+- pthread_spin_unlock (&diskfs_node_refcnt_lock);
++ struct references result;
++ refcounts_ref_weak (&np->refcounts, &result);
++ assert (result.hard > 0 || result.weak > 1);
+ }
+diff --git a/libdiskfs/node-nrele.c b/libdiskfs/node-nrele.c
+index cc68089..d962846 100644
+--- a/libdiskfs/node-nrele.c
++++ b/libdiskfs/node-nrele.c
+@@ -28,38 +28,36 @@
+ void
+ diskfs_nrele (struct node *np)
+ {
+- int tried_drop_softrefs = 0;
++ struct references result;
+
+- loop:
+- pthread_spin_lock (&diskfs_node_refcnt_lock);
+- assert (np->references);
+- np->references--;
+- if (np->references + np->light_references == 0)
+- {
+- pthread_mutex_lock (&np->lock);
+- diskfs_drop_node (np);
+- }
+- else if (np->references == 0)
++ /* While we call the diskfs_try_dropping_softrefs, we need to hold
++ one reference. We use a weak reference for this purpose, which
++ we acquire by demoting our hard reference to a weak one. */
++ refcounts_demote (&np->refcounts, &result);
++
++ if (result.hard == 0)
+ {
+ pthread_mutex_lock (&np->lock);
+- pthread_spin_unlock (&diskfs_node_refcnt_lock);
+ diskfs_lost_hardrefs (np);
+- if (!np->dn_stat.st_nlink && !tried_drop_softrefs)
++ if (!np->dn_stat.st_nlink)
+ {
+- /* Same issue here as in nput; see that for explanation */
+- pthread_spin_lock (&diskfs_node_refcnt_lock);
+- np->references++;
+- pthread_spin_unlock (&diskfs_node_refcnt_lock);
+-
++ /* There are no links. If there are soft references that
++ can be dropped, we can't let them postpone deallocation.
++ So attempt to drop them. But that's a user-supplied
++ routine, which might result in further recursive calls to
++ the ref-counting system. This is not a problem, as we
++ hold a weak reference ourselves. */
+ diskfs_try_dropping_softrefs (np);
+- tried_drop_softrefs = 1;
+-
+- /* Now we can drop the reference back... */
+- pthread_mutex_unlock (&np->lock);
+- goto loop;
+ }
+ pthread_mutex_unlock (&np->lock);
+ }
+- else
+- pthread_spin_unlock (&diskfs_node_refcnt_lock);
++
++ /* Finally get rid of our reference. */
++ refcounts_deref_weak (&np->refcounts, &result);
++
++ if (result.hard == 0 && result.weak == 0)
++ {
++ pthread_mutex_lock (&np->lock);
++ diskfs_drop_node (np);
++ }
+ }
+diff --git a/libdiskfs/node-nrelel.c b/libdiskfs/node-nrelel.c
+index ee53b22..dc4f920 100644
+--- a/libdiskfs/node-nrelel.c
++++ b/libdiskfs/node-nrelel.c
+@@ -26,14 +26,11 @@
+ void
+ diskfs_nrele_light (struct node *np)
+ {
+- pthread_spin_lock (&diskfs_node_refcnt_lock);
+- assert (np->light_references);
+- np->light_references--;
+- if (np->references + np->light_references == 0)
++ struct references result;
++ refcounts_deref_weak (&np->refcounts, &result);
++ if (result.hard == 0 && result.weak == 0)
+ {
+ pthread_mutex_lock (&np->lock);
+ diskfs_drop_node (np);
+ }
+- else
+- pthread_spin_unlock (&diskfs_node_refcnt_lock);
+ }
+diff --git a/tmpfs/node.c b/tmpfs/node.c
+index 3c8e66a..3485ade 100644
+--- a/tmpfs/node.c
++++ b/tmpfs/node.c
+@@ -286,12 +286,9 @@ diskfs_try_dropping_softrefs (struct node *np)
+ {
+ /* Check if someone reacquired a reference through the
+ all_nodes. */
+- unsigned int references;
+- pthread_spin_lock (&diskfs_node_refcnt_lock);
+- references = np->references;
+- pthread_spin_unlock (&diskfs_node_refcnt_lock);
+-
+- if (references > 0)
++ struct references result;
++ refcounts_references (&np->refcounts, &result);
++ if (result.hard > 0 || result.weak > 1)
+ {
+ /* A reference was reacquired through a hash table lookup.
+ It's fine, we didn't touch anything yet. */
+--
+2.0.0.rc2
+
diff --git a/debian/patches/0012-tmpfs-use-a-thread-timeout.patch b/debian/patches/0012-tmpfs-use-a-thread-timeout.patch
new file mode 100644
index 00000000..b84b567d
--- /dev/null
+++ b/debian/patches/0012-tmpfs-use-a-thread-timeout.patch
@@ -0,0 +1,36 @@
+From eaa8829ec0cd494c18f92996d439a59fc27020e5 Mon Sep 17 00:00:00 2001
+From: Justus Winter <4winter@informatik.uni-hamburg.de>
+Date: Wed, 28 May 2014 16:18:23 +0200
+Subject: [PATCH 12/14] tmpfs: use a thread timeout
+
+There is no need to keep all the threads around, just the master
+thread.
+
+* tmpfs/tmpfs (diskfs_thread_function): Use a thread timeout.
+---
+ tmpfs/tmpfs.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/tmpfs/tmpfs.c b/tmpfs/tmpfs.c
+index 5337e58..0aace25 100644
+--- a/tmpfs/tmpfs.c
++++ b/tmpfs/tmpfs.c
+@@ -294,13 +294,14 @@ diskfs_append_args (char **argz, size_t *argz_len)
+ static void *
+ diskfs_thread_function (void *demuxer)
+ {
++ static int thread_timeout = 1000 * 60 * 2; /* two minutes */
+ error_t err;
+
+ do
+ {
+ ports_manage_port_operations_multithread (diskfs_port_bucket,
+ (ports_demuxer_type) demuxer,
+- 0,
++ thread_timeout,
+ 0,
+ 0);
+ err = diskfs_shutdown (0);
+--
+2.0.0.rc2
+
diff --git a/debian/patches/0013-libdiskfs-remove-the-statistics-code-from-the-name-c.patch b/debian/patches/0013-libdiskfs-remove-the-statistics-code-from-the-name-c.patch
new file mode 100644
index 00000000..a31323cd
--- /dev/null
+++ b/debian/patches/0013-libdiskfs-remove-the-statistics-code-from-the-name-c.patch
@@ -0,0 +1,147 @@
+From 95fd35da4e8485179ba71b9909f28d06f1c93a9b Mon Sep 17 00:00:00 2001
+From: Justus Winter <4winter@informatik.uni-hamburg.de>
+Date: Wed, 28 May 2014 16:48:04 +0200
+Subject: [PATCH 13/14] libdiskfs: remove the statistics code from the name
+ cache
+
+The current name cache lookup operation completes in O(n) time. This
+means that making the cache too large would decrease the performance.
+Therefore it was required to tune the size, hence the need for
+statistics.
+
+We will use a data structure with worst case constant lookup times in
+the future, removing the need to fine tune the cache size.
+
+* libdiskfs/name-cache.c: Remove the statistics code from the name
+cache.
+---
+ libdiskfs/name-cache.c | 66 +-------------------------------------------------
+ 1 file changed, 1 insertion(+), 65 deletions(-)
+
+diff --git a/libdiskfs/name-cache.c b/libdiskfs/name-cache.c
+index a212a6d..25b5d0d 100644
+--- a/libdiskfs/name-cache.c
++++ b/libdiskfs/name-cache.c
+@@ -45,29 +45,12 @@ struct lookup_cache
+
+ /* Strlen of NAME. If this is zero, it's an unused entry. */
+ size_t name_len;
+-
+- /* XXX */
+- int stati;
+ };
+
+ /* The contents of the cache in no particular order */
+ static struct cacheq lookup_cache = { sizeof (struct lookup_cache) };
+
+ static pthread_spinlock_t cache_lock = PTHREAD_SPINLOCK_INITIALIZER;
+-
+-/* Buffer to hold statistics */
+-static struct stats
+-{
+- long pos_hits;
+- long neg_hits;
+- long miss;
+- long fetch_errors;
+-} statistics;
+-
+-#define PARTIAL_THRESH 100
+-#define NPARTIALS MAXCACHE / PARTIAL_THRESH
+-struct stats partial_stats [NPARTIALS];
+-
+
+ /* If there's an entry for NAME, of length NAME_LEN, in directory DIR in the
+ cache, return its entry, otherwise 0. CACHE_LOCK must be held. */
+@@ -85,10 +68,7 @@ find_cache (struct node *dir, const char *name, size_t name_len)
+ if (c->name_len == name_len
+ && c->dir_cache_id == dir->cache_id
+ && c->name[0] == name[0] && strcmp (c->name, name) == 0)
+- {
+- c->stati = i / 100;
+- return c;
+- }
++ return c;
+
+ return 0;
+ }
+@@ -152,46 +132,6 @@ diskfs_purge_lookup_cache (struct node *dp, struct node *np)
+ pthread_spin_unlock (&cache_lock);
+ }
+
+-/* Register a negative hit for an entry in the Nth stat class */
+-void
+-register_neg_hit (int n)
+-{
+- int i;
+-
+- statistics.neg_hits++;
+-
+- for (i = 0; i < n; i++)
+- partial_stats[i].miss++;
+- for (; i < NPARTIALS; i++)
+- partial_stats[i].neg_hits++;
+-}
+-
+-/* Register a positive hit for an entry in the Nth stat class */
+-void
+-register_pos_hit (int n)
+-{
+- int i;
+-
+- statistics.pos_hits++;
+-
+- for (i = 0; i < n; i++)
+- partial_stats[i].miss++;
+- for (; i < NPARTIALS; i++)
+- partial_stats[i].pos_hits++;
+-}
+-
+-/* Register a miss */
+-void
+-register_miss ()
+-{
+- int i;
+-
+- statistics.miss++;
+- for (i = 0; i < NPARTIALS; i++)
+- partial_stats[i].miss++;
+-}
+-
+-
+
+ /* Scan the cache looking for NAME inside DIR. If we don't know
+ anything entry at all, then return 0. If the entry is confirmed to
+@@ -214,14 +154,12 @@ diskfs_check_lookup_cache (struct node *dir, const char *name)
+ if (id == 0)
+ /* A negative cache entry. */
+ {
+- register_neg_hit (c->stati);
+ pthread_spin_unlock (&cache_lock);
+ return (struct node *)-1;
+ }
+ else if (id == dir->cache_id)
+ /* The cached node is the same as DIR. */
+ {
+- register_pos_hit (c->stati);
+ pthread_spin_unlock (&cache_lock);
+ diskfs_nref (dir);
+ return dir;
+@@ -232,7 +170,6 @@ diskfs_check_lookup_cache (struct node *dir, const char *name)
+ struct node *np;
+ error_t err;
+
+- register_pos_hit (c->stati);
+ pthread_spin_unlock (&cache_lock);
+
+ if (name[0] == '.' && name[1] == '.' && name[2] == '\0')
+@@ -259,7 +196,6 @@ diskfs_check_lookup_cache (struct node *dir, const char *name)
+ }
+ }
+
+- register_miss ();
+ pthread_spin_unlock (&cache_lock);
+
+ return 0;
+--
+2.0.0.rc2
+
diff --git a/debian/patches/0014-libdiskfs-use-a-hash-table-for-the-name-cache.patch b/debian/patches/0014-libdiskfs-use-a-hash-table-for-the-name-cache.patch
new file mode 100644
index 00000000..ef8f720a
--- /dev/null
+++ b/debian/patches/0014-libdiskfs-use-a-hash-table-for-the-name-cache.patch
@@ -0,0 +1,451 @@
+From 66dce065442599ed9713b06ce5ef901e212b2d03 Mon Sep 17 00:00:00 2001
+From: Justus Winter <4winter@informatik.uni-hamburg.de>
+Date: Thu, 29 May 2014 02:03:03 +0200
+Subject: [PATCH 14/14] libdiskfs: use a hash table for the name cache
+
+* libdiskfs/name-cache.c: XXX.
+---
+ libdiskfs/name-cache.c | 344 +++++++++++++++++++++++++++++++++++--------------
+ 1 file changed, 247 insertions(+), 97 deletions(-)
+
+diff --git a/libdiskfs/name-cache.c b/libdiskfs/name-cache.c
+index 25b5d0d..25b230a 100644
+--- a/libdiskfs/name-cache.c
++++ b/libdiskfs/name-cache.c
+@@ -1,6 +1,6 @@
+ /* Directory name lookup caching
+
+- Copyright (C) 1996, 1997, 1998 Free Software Foundation, Inc.
++ Copyright (C) 1996, 1997, 1998, 2014 Free Software Foundation, Inc.
+ Written by Michael I. Bushnell, p/BSG, & Miles Bader.
+
+ This file is part of the GNU Hurd.
+@@ -20,118 +20,264 @@
+ Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111, USA. */
+
+ #include "priv.h"
++#include <assert.h>
+ #include <string.h>
+-#include <cacheq.h>
+
+-/* Maximum number of names to cache at once */
+-#define MAXCACHE 200
++/* The name-cache is implemented using a hash table.
+
+-/* Maximum length of file name we bother caching */
+-#define CACHE_NAME_LEN 100
++ Some design choices:
+
+-/* Cache entry */
+-struct lookup_cache
++ XXX
++ */
++
++/* Maximum number of names to cache. Must be a power of two. */
++#define CACHE_SIZE 256
++#define CACHE_MASK (CACHE_SIZE - 1)
++
++#define BUCKET_SIZE 4
++
++/* Cache bucket with BUCKET_SIZE entries. */
++struct cache_bucket
+ {
+- struct cacheq_hdr hdr;
++ /* Name of the node NODE_CACHE_ID in the directory DIR_CACHE_ID. If
++ NULL, the entry is unused. */
++ unsigned long name[BUCKET_SIZE];
+
+- /* Used to indentify nodes to the fs dependent code. 0 for NODE_CACHE_ID
+- means a `negative' entry -- recording that there's definitely no node with
+- this name. */
+- ino64_t dir_cache_id, node_cache_id;
++ /* The key. */
++ unsigned long key[BUCKET_SIZE];
+
+- /* Name of the node NODE_CACHE_ID in the directory DIR_CACHE_ID. Entries
+- with names too long to fit in this buffer aren't cached at all. */
+- char name[CACHE_NAME_LEN];
++  /* Used to identify nodes to the fs dependent code. */
++ ino64_t dir_cache_id[BUCKET_SIZE];
+
+- /* Strlen of NAME. If this is zero, it's an unused entry. */
+- size_t name_len;
++ /* 0 for NODE_CACHE_ID means a `negative' entry -- recording that
++ there's definitely no node with this name. */
++ ino64_t node_cache_id[BUCKET_SIZE];
+ };
+
+-/* The contents of the cache in no particular order */
+-static struct cacheq lookup_cache = { sizeof (struct lookup_cache) };
++static struct cache_bucket name_cache[CACHE_SIZE];
+
+-static pthread_spinlock_t cache_lock = PTHREAD_SPINLOCK_INITIALIZER;
++static pthread_mutex_t cache_lock = PTHREAD_MUTEX_INITIALIZER;
+
+-/* If there's an entry for NAME, of length NAME_LEN, in directory DIR in the
+- cache, return its entry, otherwise 0. CACHE_LOCK must be held. */
+-static struct lookup_cache *
+-find_cache (struct node *dir, const char *name, size_t name_len)
++static inline char *
++charp (unsigned long value)
+ {
+- struct lookup_cache *c;
+- int i;
+-
+- /* Search the list. All unused entries are contiguous at the end of the
+- list, so we can stop searching when we see the first one. */
+- for (i = 0, c = lookup_cache.mru;
+- c && c->name_len;
+- c = c->hdr.next, i++)
+- if (c->name_len == name_len
+- && c->dir_cache_id == dir->cache_id
+- && c->name[0] == name[0] && strcmp (c->name, name) == 0)
+- return c;
++ return (char *) (value & ~3L);
++}
+
+- return 0;
++static inline unsigned long
++frequ (unsigned long value)
++{
++ return value & 3;
+ }
+
+-/* Node NP has just been found in DIR with NAME. If NP is null, that
+- means that this name has been confirmed as absent in the directory. */
+-void
+-diskfs_enter_lookup_cache (struct node *dir, struct node *np, const char *name)
++static inline void
++add_entry (struct cache_bucket *b, int i,
++ const char *name, unsigned long key,
++ ino64_t dir_cache_id, ino64_t node_cache_id)
+ {
+- struct lookup_cache *c;
+- size_t name_len = strlen (name);
++ if (b->name[i])
++ free (charp (b->name[i]));
+
+- if (name_len > CACHE_NAME_LEN - 1)
++ b->name[i] = (unsigned long) strdup (name);
++ assert ((b->name[i] & 3) == 0);
++ if (b->name[i] == 0)
+ return;
+
+- pthread_spin_lock (&cache_lock);
++ b->key[i] = key;
++ b->dir_cache_id[i] = dir_cache_id;
++ b->node_cache_id[i] = node_cache_id;
++}
++
++static inline void
++remove_entry (struct cache_bucket *b, int i)
++{
++ if (b->name[i])
++ free (charp (b->name[i]));
++ b->name[i] = 0;
++}
++
++static inline int
++valid_entry (struct cache_bucket *b, int i)
++{
++ return b->name[i] != 0;
++}
++
++/* This is the Murmur3 hash algorithm. */
++
++#define FORCE_INLINE inline __attribute__((always_inline))
++
++inline uint32_t rotl32 ( uint32_t x, int8_t r )
++{
++ return (x << r) | (x >> (32 - r));
++}
++
++#define ROTL32(x,y) rotl32(x,y)
++
++/* Block read - if your platform needs to do endian-swapping or can
++ only handle aligned reads, do the conversion here. */
++
++FORCE_INLINE uint32_t getblock32 ( const uint32_t * p, int i )
++{
++ return p[i];
++}
++
++/* Finalization mix - force all bits of a hash block to avalanche. */
++
++FORCE_INLINE uint32_t fmix32 ( uint32_t h )
++{
++ h ^= h >> 16;
++ h *= 0x85ebca6b;
++ h ^= h >> 13;
++ h *= 0xc2b2ae35;
++ h ^= h >> 16;
++
++ return h;
++}
++
++/* The Murmur3 hash function. */
++void MurmurHash3_x86_32 ( const void * key, int len,
++ uint32_t seed, void * out )
++{
++ const uint8_t * data = (const uint8_t*)key;
++ const int nblocks = len / 4;
++
++ uint32_t h1 = seed;
++
++ const uint32_t c1 = 0xcc9e2d51;
++ const uint32_t c2 = 0x1b873593;
++
++ /* body */
++
++ const uint32_t * blocks = (const uint32_t *)(data + nblocks*4);
++
++ for(int i = -nblocks; i; i++)
++ {
++ uint32_t k1 = getblock32(blocks,i);
++
++ k1 *= c1;
++ k1 = ROTL32(k1,15);
++ k1 *= c2;
++
++ h1 ^= k1;
++ h1 = ROTL32(h1,13);
++ h1 = h1*5+0xe6546b64;
++ }
++
++ /* tail */
+
+- if (lookup_cache.length == 0)
+- /* There should always be an lru_cache; this being zero means that the
+- cache hasn't been initialized yet. Do so. */
+- cacheq_set_length (&lookup_cache, MAXCACHE);
++ const uint8_t * tail = (const uint8_t*)(data + nblocks*4);
+
+- /* See if there's an old entry for NAME in DIR. If not, replace the least
+- recently used entry. */
+- c = find_cache (dir, name, name_len) ?: lookup_cache.lru;
++ uint32_t k1 = 0;
+
+- /* Fill C with the new entry. */
+- c->dir_cache_id = dir->cache_id;
+- c->node_cache_id = np ? np->cache_id : 0;
+- strcpy (c->name, name);
+- c->name_len = name_len;
++ switch(len & 3)
++ {
++ case 3: k1 ^= tail[2] << 16;
++ case 2: k1 ^= tail[1] << 8;
++ case 1: k1 ^= tail[0];
++ k1 *= c1; k1 = ROTL32(k1,15); k1 *= c2; h1 ^= k1;
++ };
+
+- /* Now C becomes the MRU entry! */
+- cacheq_make_mru (&lookup_cache, c);
++ /* finalization */
+
+- pthread_spin_unlock (&cache_lock);
++ h1 ^= len;
++
++ h1 = fmix32(h1);
++
++ *(uint32_t*)out = h1;
+ }
+
+-/* Purge all references in the cache to NP as a node inside
+- directory DP. */
+-void
+-diskfs_purge_lookup_cache (struct node *dp, struct node *np)
++static int replace;
++
++static inline int
++lookup (ino64_t dir_cache_id, const char *name, unsigned long key,
++ struct cache_bucket **bucket, int *index)
+ {
+- struct lookup_cache *c, *next;
++ int i;
++ unsigned long best = 3;
++ struct cache_bucket *b = *bucket = &name_cache[key & CACHE_MASK];
+
+- pthread_spin_lock (&cache_lock);
+- for (c = lookup_cache.mru; c; c = next)
++ for (i = 0; i < BUCKET_SIZE; i++)
+ {
+- /* Save C->hdr.next, since we may move C from this position. */
+- next = c->hdr.next;
++ unsigned long f = frequ (b->name[i]);
++
++ if (valid_entry (b, i)
++ && b->key[i] == key
++ && b->dir_cache_id[i] == dir_cache_id
++ && strcmp (charp (b->name[i]), name) == 0)
++ {
++ if (f < 3)
++ b->name[i] += 1;
++
++ *index = i;
++ return 1;
++ }
+
+- if (c->name_len
+- && c->dir_cache_id == dp->cache_id
+- && c->node_cache_id == np->cache_id)
++ if (f < best)
+ {
+- c->name_len = 0;
+- cacheq_make_lru (&lookup_cache, c); /* Use C as the next free
+- entry. */
++ best = f;
++ *index = i;
+ }
+ }
+- pthread_spin_unlock (&cache_lock);
++
++ if (best == 3)
++ {
++ *index = replace;
++ replace = (replace + 1) & (BUCKET_SIZE - 1);
++ }
++
++ return 0;
++}
++
++/* Hash the directory cache_id and the name. */
++static inline unsigned long
++hash (ino64_t dir_cache_id, const char *name)
++{
++ unsigned long h;
++ MurmurHash3_x86_32 (&dir_cache_id, sizeof dir_cache_id, 0, &h);
++  MurmurHash3_x86_32 (name, strlen (name), h, &h);
++ return h;
++}
++
++/* Node NP has just been found in DIR with NAME. If NP is null, that
++ means that this name has been confirmed as absent in the directory. */
++void
++diskfs_enter_lookup_cache (struct node *dir, struct node *np, const char *name)
++{
++ unsigned long key = hash (dir->cache_id, name);
++ ino64_t value = np ? np->cache_id : 0;
++ struct cache_bucket *bucket;
++ int i = 0, found;
++
++ pthread_mutex_lock (&cache_lock);
++ found = lookup (dir->cache_id, name, key, &bucket, &i);
++ if (! found)
++ add_entry (bucket, i, name, key, dir->cache_id, value);
++ else
++ if (bucket->node_cache_id[i] != value)
++ bucket->node_cache_id[i] = value;
++
++ pthread_mutex_unlock (&cache_lock);
+ }
+
++/* Purge all references in the cache to NP as a node inside
++ directory DP. */
++void
++diskfs_purge_lookup_cache (struct node *dp, struct node *np)
++{
++ int i;
++ struct cache_bucket *b;
++
++ pthread_mutex_lock (&cache_lock);
++
++ for (b = &name_cache[0]; b < &name_cache[CACHE_SIZE]; b++)
++ for (i = 0; i < BUCKET_SIZE; i++)
++ if (valid_entry (b, i)
++ && b->dir_cache_id[i] == dp->cache_id
++ && b->node_cache_id[i] == np->cache_id)
++ remove_entry (b, i);
++
++ pthread_mutex_unlock (&cache_lock);
++}
+
+ /* Scan the cache looking for NAME inside DIR. If we don't know
+ anything entry at all, then return 0. If the entry is confirmed to
+@@ -140,27 +286,28 @@ diskfs_purge_lookup_cache (struct node *dp, struct node *np)
+ struct node *
+ diskfs_check_lookup_cache (struct node *dir, const char *name)
+ {
+- struct lookup_cache *c;
+-
+- pthread_spin_lock (&cache_lock);
+-
+- c = find_cache (dir, name, strlen (name));
+- if (c)
++ unsigned long key = hash (dir->cache_id, name);
++ int lookup_parent = name[0] == '.' && name[1] == '.' && name[2] == '\0';
++ struct cache_bucket *bucket;
++ int i, found;
++
++ if (lookup_parent && dir == diskfs_root_node)
++ /* This is outside our file system, return cache miss. */
++ return NULL;
++
++ pthread_mutex_lock (&cache_lock);
++ found = lookup (dir->cache_id, name, key, &bucket, &i);
++ if (found)
+ {
+- int id = c->node_cache_id;
+-
+- cacheq_make_mru (&lookup_cache, c); /* Record C as recently used. */
++ ino64_t id = bucket->node_cache_id[i];
++ pthread_mutex_unlock (&cache_lock);
+
+ if (id == 0)
+ /* A negative cache entry. */
+- {
+- pthread_spin_unlock (&cache_lock);
+- return (struct node *)-1;
+- }
++ return (struct node *) -1;
+ else if (id == dir->cache_id)
+ /* The cached node is the same as DIR. */
+ {
+- pthread_spin_unlock (&cache_lock);
+ diskfs_nref (dir);
+ return dir;
+ }
+@@ -170,9 +317,7 @@ diskfs_check_lookup_cache (struct node *dir, const char *name)
+ struct node *np;
+ error_t err;
+
+- pthread_spin_unlock (&cache_lock);
+-
+- if (name[0] == '.' && name[1] == '.' && name[2] == '\0')
++ if (lookup_parent)
+ {
+ pthread_mutex_unlock (&dir->lock);
+ err = diskfs_cached_lookup (id, &np);
+@@ -181,14 +326,19 @@ diskfs_check_lookup_cache (struct node *dir, const char *name)
+ /* In the window where DP was unlocked, we might
+ have lost. So check the cache again, and see
+ if it's still there; if so, then we win. */
+- c = find_cache (dir, "..", 2);
+- if (!c || c->node_cache_id != id)
++ pthread_mutex_lock (&cache_lock);
++ found = lookup (dir->cache_id, name, key, &bucket, &i);
++ if (! found
++	      || bucket->node_cache_id[i] != id)
+ {
++ pthread_mutex_unlock (&cache_lock);
++
+ /* Lose */
+ pthread_mutex_unlock (&np->lock);
+ diskfs_nrele (np);
+ return 0;
+ }
++ pthread_mutex_unlock (&cache_lock);
+ }
+ else
+ err = diskfs_cached_lookup (id, &np);
+@@ -196,7 +346,7 @@ diskfs_check_lookup_cache (struct node *dir, const char *name)
+ }
+ }
+
+- pthread_spin_unlock (&cache_lock);
++ pthread_mutex_unlock (&cache_lock);
+
+ return 0;
+ }
+--
+2.0.0.rc2
+
diff --git a/debian/patches/series b/debian/patches/series
index 2d703f80..ba48e9c4 100644
--- a/debian/patches/series
+++ b/debian/patches/series
@@ -41,3 +41,17 @@ xkb-compat.patch
mach-defpager-protected-payload.patch
+0001-libdiskfs-add-diskfs_make_node_alloc-to-allocate-fat.patch
+0002-libnetfs-add-netfs_make_node_alloc-to-allocate-fat-n.patch
+0003-trans-fakeroot-use-fat-nodes-to-simplify-the-node-ca.patch
+0004-trans-fakeroot-use-netfs_node_netnode-instead-of-np-.patch
+0005-libports-use-a-global-hash-table-for-the-lookups.patch
+0006-libports-lock-less-reference-counting-for-port_info-.patch
+0007-ext2fs-use-a-seperate-lock-to-protect-nodehash.patch
+0008-fatfs-use-a-seperate-lock-to-protect-nodehash.patch
+0009-isofs-use-a-seperate-lock-to-protect-node_cache.patch
+0010-tmpfs-use-a-seperate-lock-to-protect-all_nodes.patch
+0011-libdiskfs-lock-less-reference-counting-of-nodes.patch
+0012-tmpfs-use-a-thread-timeout.patch
+0013-libdiskfs-remove-the-statistics-code-from-the-name-c.patch
+0014-libdiskfs-use-a-hash-table-for-the-name-cache.patch