author     Justus Winter <4winter@informatik.uni-hamburg.de>   2014-05-13 20:09:05 +0200
committer  Justus Winter <4winter@informatik.uni-hamburg.de>   2014-05-13 20:09:05 +0200
commit     8ff7c917a8dbe541fb4bf797e21acc6d0edc7ceb (patch)
tree       baebeb11c8c44632601828f786b8eca1647e83ea
parent     2ab6147feee29f561592bcf07bb54c6688eccef7 (diff)
new huge patch series
-rw-r--r--  debian/patches/0001-libihash-fix-type-of-max_load.patch                          31
-rw-r--r--  debian/patches/0002-libihash-use-an-integer-hash-function-on-the-keys.patch      60
-rw-r--r--  debian/patches/0003-libihash-use-linear-probing-and-fast-modulo-operatio.patch  236
-rw-r--r--  debian/patches/0004-libihash-use-fast-binary-scaling-to-determine-the-lo.patch  116
-rw-r--r--  debian/patches/0005-include-add-lock-less-reference-counting-primitives.patch   213
-rw-r--r--  debian/patches/0006-libdiskfs-lock-less-reference-counting-for-peropen-o.patch  143
-rw-r--r--  debian/patches/0007-libtrivfs-lock-less-reference-counting-for-trivfs_pe.patch  172
-rw-r--r--  debian/patches/0008-libports-use-a-single-hash-table.patch                      619
-rw-r--r--  debian/patches/0009-libports-lock-less-reference-counting-for-port_info-.patch  345
-rw-r--r--  debian/patches/0010-ext2fs-improve-enable-disable-_caching.patch                 38
-rw-r--r--  debian/patches/0011-fatfs-improve-enable-disable-_caching.patch                  48
-rw-r--r--  debian/patches/0012-ext2fs-use-a-seperate-lock-to-protect-nodehash.patch        190
-rw-r--r--  debian/patches/0013-fatfs-use-a-seperate-lock-to-protect-nodehash.patch         222
-rw-r--r--  debian/patches/0014-isofs-use-a-seperate-lock-to-protect-node_cache.patch       222
-rw-r--r--  debian/patches/0015-tmpfs-use-a-seperate-lock-to-protect-all_nodes.patch        263
-rw-r--r--  debian/patches/series                                                            15
16 files changed, 2933 insertions, 0 deletions
diff --git a/debian/patches/0001-libihash-fix-type-of-max_load.patch b/debian/patches/0001-libihash-fix-type-of-max_load.patch
new file mode 100644
index 00000000..557f0374
--- /dev/null
+++ b/debian/patches/0001-libihash-fix-type-of-max_load.patch
@@ -0,0 +1,31 @@
+From 6dcf53606fc7d46447176aab15954a897e19d6e5 Mon Sep 17 00:00:00 2001
+From: Justus Winter <4winter@informatik.uni-hamburg.de>
+Date: Tue, 13 May 2014 19:00:04 +0200
+Subject: [PATCH 01/15] libihash: fix type of max_load
+
+Previously, int was used for the field max_load of struct hurd_ihash.
+There is no reason for this as far as I can tell. Furthermore,
+hurd_ihash_set_max_load takes an unsigned int max_load.
+
+* libihash/ihash.h (struct hurd_ihash): Use unsigned int for field
+max_load.
+---
+ libihash/ihash.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/libihash/ihash.h b/libihash/ihash.h
+index 6bdc925..a24f384 100644
+--- a/libihash/ihash.h
++++ b/libihash/ihash.h
+@@ -80,7 +80,7 @@ struct hurd_ihash
+ intptr_t locp_offset;
+
+ /* The maximum load factor in percent. */
+- int max_load;
++ unsigned int max_load;
+
+ /* When freeing or overwriting an element, this function is called
+ with the value as the first argument, and CLEANUP_DATA as the
+--
+2.0.0.rc0
+
diff --git a/debian/patches/0002-libihash-use-an-integer-hash-function-on-the-keys.patch b/debian/patches/0002-libihash-use-an-integer-hash-function-on-the-keys.patch
new file mode 100644
index 00000000..820d0869
--- /dev/null
+++ b/debian/patches/0002-libihash-use-an-integer-hash-function-on-the-keys.patch
@@ -0,0 +1,60 @@
+From 2d898893815a980f1b821fcec267eb8e7ded678e Mon Sep 17 00:00:00 2001
+From: Justus Winter <4winter@informatik.uni-hamburg.de>
+Date: Thu, 8 May 2014 15:45:00 +0200
+Subject: [PATCH 02/15] libihash: use an integer hash function on the keys
+
+Use an integer hash function to derive the index from the key. This
+should reduce the number of collisions.
+
+* libihash/ihash.c (murmur3_mix32): New function.
+(find_index): Use murmur3_mix32 on the key to derive the index.
+(add_one): Likewise.
+---
+ libihash/ihash.c | 17 +++++++++++++++--
+ 1 file changed, 15 insertions(+), 2 deletions(-)
+
+diff --git a/libihash/ihash.c b/libihash/ihash.c
+index d670fee..71ddc26 100644
+--- a/libihash/ihash.c
++++ b/libihash/ihash.c
+@@ -81,6 +81,19 @@ static const unsigned int ihash_nsizes = (sizeof ihash_sizes
+ / sizeof ihash_sizes[0]);
+
+
++/* This is the integer finalizer from MurmurHash3. */
++static inline uint32_t
++murmur3_mix32 (uint32_t h, unsigned int bits)
++{
++ h ^= h >> 16;
++ h *= 0x85ebca6b;
++ h ^= h >> 13;
++ h *= 0xc2b2ae35;
++ h ^= h >> 16;
++
++ return h >> (32 - bits);
++}
++
+ /* Return 1 if the slot with the index IDX in the hash table HT is
+ empty, and 0 otherwise. */
+ static inline int
+@@ -111,7 +124,7 @@ find_index (hurd_ihash_t ht, hurd_ihash_key_t key)
+ unsigned int up_idx;
+ unsigned int down_idx;
+
+- idx = key % ht->size;
++ idx = murmur3_mix32 (key, 32) % ht->size;
+
+ if (ht->items[idx].value == _HURD_IHASH_EMPTY || ht->items[idx].key == key)
+ return idx;
+@@ -264,7 +277,7 @@ add_one (hurd_ihash_t ht, hurd_ihash_key_t key, hurd_ihash_value_t value)
+ unsigned int idx;
+ unsigned int first_free;
+
+- idx = key % ht->size;
++ idx = murmur3_mix32 (key, 32) % ht->size;
+ first_free = idx;
+
+ if (ht->items[idx].value != _HURD_IHASH_EMPTY && ht->items[idx].key != key)
+--
+2.0.0.rc0
+
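As an aside, the mixing steps are easy to observe in isolation. Below is a minimal standalone sketch, with murmur3_mix32 copied from the patch and a purely illustrative main driver (not part of the series):

    #include <stdint.h>
    #include <stdio.h>

    /* Copied from the patch: the integer finalizer from MurmurHash3,
       truncated to the BITS most significant bits.  */
    static inline uint32_t
    murmur3_mix32 (uint32_t h, unsigned int bits)
    {
      h ^= h >> 16;
      h *= 0x85ebca6b;
      h ^= h >> 13;
      h *= 0xc2b2ae35;
      h ^= h >> 16;
      return h >> (32 - bits);
    }

    int
    main (void)
    {
      /* Sequential keys, as typically produced for Mach port names, are
         scattered across a 2^8-entry table instead of clustering in
         adjacent slots.  */
      for (uint32_t key = 1; key <= 4; key++)
        printf ("key %u -> slot %u\n", (unsigned) key,
                (unsigned) murmur3_mix32 (key, 8));
      return 0;
    }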
diff --git a/debian/patches/0003-libihash-use-linear-probing-and-fast-modulo-operatio.patch b/debian/patches/0003-libihash-use-linear-probing-and-fast-modulo-operatio.patch
new file mode 100644
index 00000000..e1cce7e5
--- /dev/null
+++ b/debian/patches/0003-libihash-use-linear-probing-and-fast-modulo-operatio.patch
@@ -0,0 +1,236 @@
+From aa4d444a81c32e2f1036041ad7474fb13b8bf693 Mon Sep 17 00:00:00 2001
+From: Justus Winter <4winter@informatik.uni-hamburg.de>
+Date: Thu, 8 May 2014 18:33:57 +0200
+Subject: [PATCH 03/15] libihash: use linear probing and fast modulo operation
+
+libihash uses open addressing. Previously, quadratic probing in both
+directions was used to resolve collisions. Quadratic probing might
+result in a less efficient use of caches.
+
+Also, prime numbers of the form 4 * i + 3 were used as array sizes.
+This was used in combination with the integer modulo operation for
+hashing. It has been known for some time that libihash suffers from
+collisions, so a integer hash function is now applied to the keys to
+derive the index.
+
+Use linear probing instead. Also, use powers of two for the array
+sizes, so a bit mask can be used for the modulo operation.
+
+* libihash/ihash.c (ihash_sizes, ihash_nsizes): Remove.
+(find_index): Use linear probing and fast modulo operation.
+(add_one): Likewise.
+* libihash/ihash.h (HURD_IHASH_MIN_SIZE): New macro.
+---
+ libihash/ihash.c | 125 +++++++------------------------------------------------
+ libihash/ihash.h | 4 ++
+ 2 files changed, 19 insertions(+), 110 deletions(-)
+
+diff --git a/libihash/ihash.c b/libihash/ihash.c
+index 71ddc26..d628d75 100644
+--- a/libihash/ihash.c
++++ b/libihash/ihash.c
+@@ -31,55 +31,6 @@
+ #include <assert.h>
+
+ #include "ihash.h"
+-
+-
+-/* The prime numbers of the form 4 * i + 3 for some i, all greater
+- than twice the previous one and smaller than 2^40 (for now). */
+-static const uint64_t ihash_sizes[] =
+-{
+- 3,
+- 7,
+- 19,
+- 43,
+- 103,
+- 211,
+- 431,
+- 863,
+- 1747,
+- 3499,
+- 7019,
+- 14051,
+- 28111,
+- 56239,
+- 112507,
+- 225023,
+- 450067,
+- 900139,
+- 1800311,
+- 3600659,
+- 7201351,
+- 14402743,
+- 28805519,
+- 57611039,
+- 115222091,
+- 230444239,
+- 460888499,
+- 921777067,
+- 1843554151,
+- UINT64_C (3687108307),
+- UINT64_C (7374216631),
+- UINT64_C (14748433279),
+- UINT64_C (29496866579),
+- UINT64_C (58993733159),
+- UINT64_C (117987466379),
+- UINT64_C (235974932759),
+- UINT64_C (471949865531),
+- UINT64_C (943899731087)
+-};
+-
+-static const unsigned int ihash_nsizes = (sizeof ihash_sizes
+- / sizeof ihash_sizes[0]);
+-
+
+ /* This is the integer finalizer from MurmurHash3. */
+ static inline uint32_t
+@@ -120,40 +71,24 @@ static inline int
+ find_index (hurd_ihash_t ht, hurd_ihash_key_t key)
+ {
+ unsigned int idx;
+- unsigned int i;
+ unsigned int up_idx;
+- unsigned int down_idx;
++ unsigned int mask = ht->size - 1;
+
+- idx = murmur3_mix32 (key, 32) % ht->size;
++ idx = murmur3_mix32 (key, __builtin_ctz (ht->size));
+
+ if (ht->items[idx].value == _HURD_IHASH_EMPTY || ht->items[idx].key == key)
+ return idx;
+
+- /* Instead of calculating idx + 1, idx + 4, idx + 9, ..., idx + i^2,
+- we add 1, 3, 5, 7, etc to the previous index. We do this in both
+- directions separately. */
+- i = 1;
+ up_idx = idx;
+- down_idx = idx;
+
+ do
+ {
+- up_idx = (up_idx + i) % ht->size;
++ up_idx = (up_idx + 1) & mask;
+ if (ht->items[up_idx].value == _HURD_IHASH_EMPTY
+ || ht->items[up_idx].key == key)
+ return up_idx;
+-
+- if (down_idx < i)
+- down_idx += ht->size;
+- down_idx = (down_idx - i) % ht->size;
+- if (ht->items[down_idx].value == _HURD_IHASH_EMPTY
+- || ht->items[down_idx].key == key)
+- return down_idx;
+-
+- /* After (ht->size - 1) / 2 iterations, this will be 0. */
+- i = (i + 2) % ht->size;
+ }
+- while (i);
++ while (up_idx != idx);
+
+ /* If we end up here, the item could not be found. Return any
+ invalid index. */
+@@ -277,52 +212,25 @@ add_one (hurd_ihash_t ht, hurd_ihash_key_t key, hurd_ihash_value_t value)
+ unsigned int idx;
+ unsigned int first_free;
+
+- idx = murmur3_mix32 (key, 32) % ht->size;
++ idx = murmur3_mix32 (key, __builtin_ctz (ht->size));
+ first_free = idx;
+
+ if (ht->items[idx].value != _HURD_IHASH_EMPTY && ht->items[idx].key != key)
+ {
+- /* Instead of calculating idx + 1, idx + 4, idx + 9, ..., idx +
+- i^2, we add 1, 3, 5, 7, ... 2 * i - 1 to the previous index.
+- We do this in both directions separately. */
+- unsigned int i = 1;
++ unsigned int mask = ht->size - 1;
+ unsigned int up_idx = idx;
+- unsigned int down_idx = idx;
+-
++
+ do
+ {
+- up_idx = (up_idx + i) % ht->size;
++ up_idx = (up_idx + 1) & mask;
+ if (ht->items[up_idx].value == _HURD_IHASH_EMPTY
+ || ht->items[up_idx].key == key)
+ {
+ idx = up_idx;
+ break;
+ }
+- if (first_free == idx
+- && ht->items[up_idx].value == _HURD_IHASH_DELETED)
+- first_free = up_idx;
+-
+- if (down_idx < i)
+- down_idx += ht->size;
+- down_idx = (down_idx - i) % ht->size;
+- if (down_idx < 0)
+- down_idx += ht->size;
+- else
+- down_idx %= ht->size;
+- if (ht->items[down_idx].value == _HURD_IHASH_EMPTY
+- || ht->items[down_idx].key == key)
+- {
+- idx = down_idx;
+- break;
+- }
+- if (first_free == idx
+- && ht->items[down_idx].value == _HURD_IHASH_DELETED)
+- first_free = down_idx;
+-
+- /* After (ht->size - 1) / 2 iterations, this will be 0. */
+- i = (i + 2) % ht->size;
+ }
+- while (i);
++ while (up_idx != idx);
+ }
+
+ /* Remove the old entry for this key if necessary. */
+@@ -365,21 +273,18 @@ hurd_ihash_add (hurd_ihash_t ht, hurd_ihash_key_t key, hurd_ihash_value_t item)
+ if (ht->size)
+ {
+ /* Only fill the hash table up to its maximum load factor. */
+- if (ht->nr_items * 100 / ht->size <= ht->max_load)
++ if ((ht->nr_items * 100) >> __builtin_ctz (ht->size) <= ht->max_load)
+ if (add_one (ht, key, item))
+ return 0;
+ }
+
+ /* The hash table is too small, and we have to increase it. */
+- for (i = 0; i < ihash_nsizes; i++)
+- if (ihash_sizes[i] > old_ht.size)
+- break;
+- if (i == ihash_nsizes
+- || ihash_sizes[i] > SIZE_MAX / sizeof (struct _hurd_ihash_item))
+- return ENOMEM; /* Surely will be true momentarily. */
+-
+ ht->nr_items = 0;
+- ht->size = ihash_sizes[i];
++ if (ht->size == 0)
++ ht->size = HURD_IHASH_MIN_SIZE;
++ else
++ ht->size <<= 1;
++
+ /* calloc() will initialize all values to _HURD_IHASH_EMPTY implicitly. */
+ ht->items = calloc (ht->size, sizeof (struct _hurd_ihash_item));
+
+diff --git a/libihash/ihash.h b/libihash/ihash.h
+index a24f384..8829e51 100644
+--- a/libihash/ihash.h
++++ b/libihash/ihash.h
+@@ -93,6 +93,10 @@ typedef struct hurd_ihash *hurd_ihash_t;
+
+ /* Construction and destruction of hash tables. */
+
++/* The size of the initial allocation in number of items. This must
++ be a power of two. */
++#define HURD_IHASH_MIN_SIZE 32
++
+ /* The default value for the maximum load factor in percent. */
+ #define HURD_IHASH_MAX_LOAD_DEFAULT 75
+
+--
+2.0.0.rc0
+
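Both tricks above, the bit-mask modulo and deriving log2 (size) via __builtin_ctz, rely on the table size being kept a power of two. A minimal standalone sketch (assuming a GCC-compatible compiler for the builtin):

    #include <assert.h>
    #include <stdio.h>

    int
    main (void)
    {
      unsigned int size = 32;             /* HURD_IHASH_MIN_SIZE */
      unsigned int mask = size - 1;       /* x % 32 == x & 31 for powers of two */
      unsigned int idx, i;

      assert ((size & (size - 1)) == 0);  /* holds only for powers of two */
      assert (__builtin_ctz (size) == 5); /* trailing zeros == log2 (size) */

      /* Linear probing visits successive slots, wrapping at the end.  */
      idx = 30;
      for (i = 0; i < 4; i++)
        {
          printf ("probe slot %u\n", idx); /* prints 30, 31, 0, 1 */
          idx = (idx + 1) & mask;
        }
      return 0;
    }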
diff --git a/debian/patches/0004-libihash-use-fast-binary-scaling-to-determine-the-lo.patch b/debian/patches/0004-libihash-use-fast-binary-scaling-to-determine-the-lo.patch
new file mode 100644
index 00000000..1a835a7f
--- /dev/null
+++ b/debian/patches/0004-libihash-use-fast-binary-scaling-to-determine-the-lo.patch
@@ -0,0 +1,116 @@
+From 47ce159fbfca6e14f8ce442573c1fa10365113ff Mon Sep 17 00:00:00 2001
+From: Justus Winter <4winter@informatik.uni-hamburg.de>
+Date: Tue, 13 May 2014 18:59:10 +0200
+Subject: [PATCH 04/15] libihash: use fast binary scaling to determine the load
+
+Expressing the maximum load in binary percent (where 128b% corresponds
+to 100%) allows us to use fast binary scaling to determine if the
+maximum load has been reached.
+
+* libihash/ihash.c (hurd_ihash_set_max_load): Update the comment.
+(hurd_ihash_add): Use fast binary scaling to determine the current
+load.
+* libihash/ihash.h (hurd_ihash_set_max_load): Update the comment.
+---
+ libihash/ihash.c | 37 +++++++++++++++++++++++++++----------
+ libihash/ihash.h | 22 ++++++++++++----------
+ 2 files changed, 39 insertions(+), 20 deletions(-)
+
+diff --git a/libihash/ihash.c b/libihash/ihash.c
+index d628d75..a4f92d1 100644
+--- a/libihash/ihash.c
++++ b/libihash/ihash.c
+@@ -180,14 +180,15 @@ hurd_ihash_set_cleanup (hurd_ihash_t ht, hurd_ihash_cleanup_t cleanup,
+ }
+
+
+-/* Set the maximum load factor in percent to MAX_LOAD, which should be
+- between 1 and 100. The default is HURD_IHASH_MAX_LOAD_DEFAULT.
+- New elements are only added to the hash table while the number of
+- hashed elements is that much percent of the total size of the hash
+- table. If more elements are added, the hash table is first
+- expanded and reorganized. A MAX_LOAD of 100 will always fill the
+- whole table before enlarging it, but note that this will increase
+- the cost of operations significantly when the table is almost full.
++/* Set the maximum load factor in binary percent to MAX_LOAD, which
++ should be between 64 and 128. The default is
++ HURD_IHASH_MAX_LOAD_DEFAULT. New elements are only added to the
++ hash table while the number of hashed elements is that much percent
++ of the total size of the hash table. If more elements are added,
++ the hash table is first expanded and reorganized. A MAX_LOAD of
++ 128 will always fill the whole table before enlarging it, but note
++ that this will increase the cost of operations significantly when
++ the table is almost full.
+
+ If the value is set to a smaller value than the current load
+ factor, the next reorganization will happen when a new item is
+@@ -272,8 +273,24 @@ hurd_ihash_add (hurd_ihash_t ht, hurd_ihash_key_t key, hurd_ihash_value_t item)
+
+ if (ht->size)
+ {
+- /* Only fill the hash table up to its maximum load factor. */
+- if ((ht->nr_items * 100) >> __builtin_ctz (ht->size) <= ht->max_load)
++ /* Only fill the hash table up to its maximum load factor given
++ as "binary percent", where 128b% corresponds to 100%. As the
++ size is always a power of two, and 128 is also, the quotient
++ of both is also a power of two. Therefore, we can use bit
++ shifts to scale the number of items.
++
++ load = nr_items * 128 / size
++ = nr_items * 2^{log2 (128) - log2 (size)}
++ = nr_items >> (log2 (size) - log2 (128))
++ -- if size >= 128
++ = nr_items << (log2 (128) - log2 (size))
++ -- otherwise
++ */
++ int d = __builtin_ctz (ht->size) - 7;
++ unsigned int load = d >= 0
++ ? ht->nr_items >> d
++ : ht->nr_items << -d;
++ if (load <= ht->max_load)
+ if (add_one (ht, key, item))
+ return 0;
+ }
+diff --git a/libihash/ihash.h b/libihash/ihash.h
+index 8829e51..e245db4 100644
+--- a/libihash/ihash.h
++++ b/libihash/ihash.h
+@@ -97,8 +97,9 @@ typedef struct hurd_ihash *hurd_ihash_t;
+ be a power of two. */
+ #define HURD_IHASH_MIN_SIZE 32
+
+-/* The default value for the maximum load factor in percent. */
+-#define HURD_IHASH_MAX_LOAD_DEFAULT 75
++/* The default value for the maximum load factor in binary percent.
++ 96b% is equivalent to 75%, 128b% to 100%. */
++#define HURD_IHASH_MAX_LOAD_DEFAULT 96
+
+ /* The LOCP_OFFS to use if no location pointer is available. */
+ #define HURD_IHASH_NO_LOCP INTPTR_MIN
+@@ -143,14 +144,15 @@ void hurd_ihash_free (hurd_ihash_t ht);
+ void hurd_ihash_set_cleanup (hurd_ihash_t ht, hurd_ihash_cleanup_t cleanup,
+ void *cleanup_data);
+
+-/* Set the maximum load factor in percent to MAX_LOAD, which should be
+- between 50 and 100. The default is HURD_IHASH_MAX_LOAD_DEFAULT.
+- New elements are only added to the hash table while the number of
+- hashed elements is that much percent of the total size of the hash
+- table. If more elements are added, the hash table is first
+- expanded and reorganized. A MAX_LOAD of 100 will always fill the
+- whole table before enlarging it, but note that this will increase
+- the cost of operations significantly when the table is almost full.
++/* Set the maximum load factor in binary percent to MAX_LOAD, which
++ should be between 64 and 128. The default is
++ HURD_IHASH_MAX_LOAD_DEFAULT. New elements are only added to the
++ hash table while the number of hashed elements is that much percent
++ of the total size of the hash table. If more elements are added,
++ the hash table is first expanded and reorganized. A MAX_LOAD of
++ 128 will always fill the whole table before enlarging it, but note
++ that this will increase the cost of operations significantly when
++ the table is almost full.
+
+ If the value is set to a smaller value than the current load
+ factor, the next reorganization will happen when a new item is
+--
+2.0.0.rc0
+
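The shift arithmetic is easy to check in isolation. In the following sketch, load_bpercent is a hypothetical helper mirroring the expression in hurd_ihash_add above (again assuming __builtin_ctz is available):

    #include <stdio.h>

    /* Hypothetical helper: load in binary percent, where 128b% == 100%.  */
    static unsigned int
    load_bpercent (unsigned int nr_items, unsigned int size)
    {
      int d = __builtin_ctz (size) - 7;   /* log2 (size) - log2 (128) */
      return d >= 0 ? nr_items >> d : nr_items << -d;
    }

    int
    main (void)
    {
      /* 24 items in a 32-slot table: 24 * 128 / 32 = 96b%, i.e. 75%,
         exactly HURD_IHASH_MAX_LOAD_DEFAULT.  */
      printf ("%ub%%\n", load_bpercent (24, 32));
      /* 192 items in a 256-slot table: 192 * 128 / 256 = 96b% as well.  */
      printf ("%ub%%\n", load_bpercent (192, 256));
      return 0;
    }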
diff --git a/debian/patches/0005-include-add-lock-less-reference-counting-primitives.patch b/debian/patches/0005-include-add-lock-less-reference-counting-primitives.patch
new file mode 100644
index 00000000..4a4dd9dc
--- /dev/null
+++ b/debian/patches/0005-include-add-lock-less-reference-counting-primitives.patch
@@ -0,0 +1,213 @@
+From f6ed9ea2b93816dff52e33b71d69ed6787a57841 Mon Sep 17 00:00:00 2001
+From: Justus Winter <4winter@informatik.uni-hamburg.de>
+Date: Tue, 6 May 2014 19:52:04 +0200
+Subject: [PATCH 05/15] include: add lock-less reference counting primitives
+
+* include/refcount.h: New file.
+---
+ include/refcount.h | 193 +++++++++++++++++++++++++++++++++++++++++++++++++++++
+ 1 file changed, 193 insertions(+)
+ create mode 100644 include/refcount.h
+
+diff --git a/include/refcount.h b/include/refcount.h
+new file mode 100644
+index 0000000..0816220
+--- /dev/null
++++ b/include/refcount.h
+@@ -0,0 +1,193 @@
++/* Lock-less reference counting primitives
++
++ Copyright (C) 2014 Free Software Foundation, Inc.
++
++ Written by Justus Winter <4winter@informatik.uni-hamburg.de>
++
++ This file is part of the GNU Hurd.
++
++ The GNU Hurd is free software; you can redistribute it and/or
++ modify it under the terms of the GNU General Public License as
++ published by the Free Software Foundation; either version 2, or (at
++ your option) any later version.
++
++ The GNU Hurd is distributed in the hope that it will be useful, but
++ WITHOUT ANY WARRANTY; without even the implied warranty of
++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ General Public License for more details.
++
++ You should have received a copy of the GNU General Public License
++ along with the GNU Hurd. If not, see <http://www.gnu.org/licenses/>. */
++
++#ifndef _HURD_REFCOUNT_H_
++#define _HURD_REFCOUNT_H_
++
++#include <assert.h>
++#include <stdint.h>
++
++/* Simple reference counting. */
++
++/* An opaque type. You must not access these values directly. */
++typedef unsigned int refcount_t;
++
++/* Initialize REF with REFERENCES. */
++static inline void
++refcount_init (refcount_t *ref, unsigned int references)
++{
++ *ref = references;
++}
++
++/* Increment REF. Return the result of the operation. This function
++ uses atomic operations. It is not required to serialize calls to
++ this function. */
++static inline unsigned int
++refcount_ref (refcount_t *ref)
++{
++ unsigned int r;
++ r = __atomic_add_fetch (ref, 1, __ATOMIC_RELAXED);
++ assert (r != UINT_MAX);
++ return r;
++}
++
++/* Decrement REF. Return the result of the operation. This function
++ uses atomic operations. It is not required to serialize calls to
++ this function. */
++static inline unsigned int
++refcount_deref (refcount_t *ref)
++{
++ unsigned int r;
++ r = __atomic_sub_fetch (ref, 1, __ATOMIC_RELAXED);
++ assert (r != UINT_MAX);
++ return r;
++}
++
++/* Return REF. This function uses atomic operations. It is not
++ required to serialize calls to this function. */
++static inline unsigned int
++refcount_references (refcount_t *ref)
++{
++ return __atomic_load_n (ref, __ATOMIC_RELAXED);
++}
++
++/* Reference counting with weak references. */
++
++/* An opaque type. You must not access these values directly. */
++typedef union _references refcounts_t;
++
++/* Instead, the functions manipulating refcounts_t values write the
++ results into this kind of objects. */
++struct references {
++ uint32_t hard;
++ uint32_t weak;
++};
++
++/* We use a union to convert struct references values to uint64_t which
++ we can manipulate atomically. While this behavior is not
++ guaranteed by the C standard, it is supported by all major
++ compilers. */
++union _references {
++ struct references references;
++ uint64_t value;
++};
++
++/* Initialize REF with HARD and WEAK references. */
++static inline void
++refcounts_init (refcounts_t *ref, uint32_t hard, uint32_t weak)
++{
++ ref->references = (struct references) { .hard = hard, .weak = weak };
++}
++
++/* Increment the hard reference count of REF. If RESULT is not NULL,
++ the result of the operation is written there. This function uses
++ atomic operations. It is not required to serialize calls to this
++ function. */
++static inline void
++refcounts_ref (refcounts_t *ref, struct references *result)
++{
++ const union _references op = { .references = { .hard = 1 } };
++ union _references r;
++ r.value = __atomic_add_fetch (&ref->value, op.value, __ATOMIC_RELAXED);
++ assert (r.references.hard != UINT32_MAX);
++ if (result)
++ *result = r.references;
++}
++
++/* Decrement the hard reference count of REF. If RESULT is not NULL,
++ the result of the operation is written there. This function uses
++ atomic operations. It is not required to serialize calls to this
++ function. */
++static inline void
++refcounts_deref (refcounts_t *ref, struct references *result)
++{
++ const union _references op = { .references = { .hard = 1 } };
++ union _references r;
++ r.value = __atomic_sub_fetch (&ref->value, op.value, __ATOMIC_RELAXED);
++ assert (r.references.hard != UINT32_MAX);
++ if (result)
++ *result = r.references;
++}
++
++/* Increment the weak reference count of REF. If RESULT is not NULL,
++ the result of the operation is written there. This function uses
++ atomic operations. It is not required to serialize calls to this
++ function. */
++static inline void
++refcounts_ref_weak (refcounts_t *ref, struct references *result)
++{
++ const union _references op = { .references = { .weak = 1 } };
++ union _references r;
++ r.value = __atomic_add_fetch (&ref->value, op.value, __ATOMIC_RELAXED);
++ assert (r.references.weak != UINT32_MAX);
++ if (result)
++ *result = r.references;
++}
++
++/* Decrement the weak reference count of REF. If RESULT is not NULL,
++ the result of the operation is written there. This function uses
++ atomic operations. It is not required to serialize calls to this
++ function. */
++static inline void
++refcounts_deref_weak (refcounts_t *ref, struct references *result)
++{
++ const union _references op = { .references = { .weak = 1 } };
++ union _references r;
++ r.value = __atomic_sub_fetch (&ref->value, op.value, __ATOMIC_RELAXED);
++ assert (r.references.weak != UINT32_MAX);
++ if (result)
++ *result = r.references;
++}
++
++/* Store the current reference counts of REF in RESULT. This function
++ uses atomic operations. It is not required to serialize calls to
++ this function. */
++static inline void
++refcounts_references (refcounts_t *ref, struct references *result)
++{
++ union _references r;
++ r.value = __atomic_load_n (&ref->value, __ATOMIC_RELAXED);
++ *result = r.references;
++}
++
++/* Return the hard reference count of REF. This function uses atomic
++ operations. It is not required to serialize calls to this
++ function. */
++static inline uint32_t
++refcounts_hard_references (refcounts_t *ref)
++{
++ struct references result;
++ refcounts_references (ref, &result);
++ return result.hard;
++}
++
++/* Return the weak reference count of REF. This function uses atomic
++ operations. It is not required to serialize calls to this
++ function. */
++static inline uint32_t
++refcounts_weak_references (refcounts_t *ref)
++{
++ struct references result;
++ refcounts_references (ref, &result);
++ return result.weak;
++}
++
++#endif /* _HURD_REFCOUNT_H_ */
+--
+2.0.0.rc0
+
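A hypothetical usage sketch for the refcount_t primitives above; the object type and both functions are illustrative, and <refcount.h> is assumed to be reachable via the Hurd tree's include path. The refcounts_t variant works the same way, except that it packs the hard and weak counters into one uint64_t so a combined update is still a single atomic operation:

    #include <stdlib.h>
    #include <refcount.h>

    struct obj
    {
      refcount_t refcnt;
      /* ... payload ... */
    };

    struct obj *
    obj_new (void)
    {
      struct obj *o = malloc (sizeof *o);
      if (o)
        refcount_init (&o->refcnt, 1);  /* the caller holds one reference */
      return o;
    }

    void
    obj_unref (struct obj *o)
    {
      /* No lock: the atomic decrement serializes concurrent releases,
         and exactly one caller observes the count dropping to zero.  */
      if (refcount_deref (&o->refcnt) == 0)
        free (o);
    }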
diff --git a/debian/patches/0006-libdiskfs-lock-less-reference-counting-for-peropen-o.patch b/debian/patches/0006-libdiskfs-lock-less-reference-counting-for-peropen-o.patch
new file mode 100644
index 00000000..d688b3e5
--- /dev/null
+++ b/debian/patches/0006-libdiskfs-lock-less-reference-counting-for-peropen-o.patch
@@ -0,0 +1,143 @@
+From 78fbd3e318474ea61bc31783bc471809910b9244 Mon Sep 17 00:00:00 2001
+From: Justus Winter <4winter@informatik.uni-hamburg.de>
+Date: Tue, 6 May 2014 18:58:10 +0200
+Subject: [PATCH 06/15] libdiskfs: lock-less reference counting for peropen
+ objects
+
+* libdiskfs/diskfs.h (struct peropen): Use refcount_t for field refcnt.
+* libdiskfs/peropen-make.c (diskfs_make_peropen): Initialize refcnt.
+* libdiskfs/peropen-rele.c (diskfs_release_peropen): Adjust accordingly.
+* libdiskfs/protid-make.c (diskfs_start_protid): Likewise. Also, the
+node no longer needs to be locked; adjust the comment accordingly.
+(diskfs_create_protid): Likewise.
+---
+ libdiskfs/diskfs.h | 7 ++++---
+ libdiskfs/peropen-make.c | 2 +-
+ libdiskfs/peropen-rele.c | 21 ++++++++++-----------
+ libdiskfs/protid-make.c | 8 ++++----
+ 4 files changed, 19 insertions(+), 19 deletions(-)
+
+diff --git a/libdiskfs/diskfs.h b/libdiskfs/diskfs.h
+index 8151ddc..ae1a150 100644
+--- a/libdiskfs/diskfs.h
++++ b/libdiskfs/diskfs.h
+@@ -28,6 +28,7 @@
+ #include <hurd/iohelp.h>
+ #include <idvec.h>
+ #include <features.h>
++#include <refcount.h>
+
+ #ifdef DISKFS_DEFINE_EXTERN_INLINE
+ #define DISKFS_EXTERN_INLINE
+@@ -57,7 +58,7 @@ struct peropen
+ {
+ int filepointer;
+ int lock_status;
+- int refcnt;
++ refcount_t refcnt;
+ int openstat;
+
+ struct node *np;
+@@ -792,12 +793,12 @@ diskfs_create_node (struct node *dir, const char *name, mode_t mode,
+ struct dirstat *ds);
+
+ /* Create and return a protid for an existing peropen PO in CRED,
+- referring to user USER. The node PO->np must be locked. */
++ referring to user USER. */
+ error_t diskfs_create_protid (struct peropen *po, struct iouser *user,
+ struct protid **cred);
+
+ /* Build and return in CRED a protid which has no user identification, for
+- peropen PO. The node PO->np must be locked. */
++ peropen PO. */
+ error_t diskfs_start_protid (struct peropen *po, struct protid **cred);
+
+ /* Finish building protid CRED started with diskfs_start_protid;
+diff --git a/libdiskfs/peropen-make.c b/libdiskfs/peropen-make.c
+index eba037f..6d5ca01 100644
+--- a/libdiskfs/peropen-make.c
++++ b/libdiskfs/peropen-make.c
+@@ -31,7 +31,7 @@ diskfs_make_peropen (struct node *np, int flags, struct peropen *context,
+
+ po->filepointer = 0;
+ po->lock_status = LOCK_UN;
+- po->refcnt = 0;
++ refcount_init (&po->refcnt, 0);
+ po->openstat = flags;
+ po->np = np;
+ po->path = NULL;
+diff --git a/libdiskfs/peropen-rele.c b/libdiskfs/peropen-rele.c
+index d3f7492..877137b 100644
+--- a/libdiskfs/peropen-rele.c
++++ b/libdiskfs/peropen-rele.c
+@@ -22,13 +22,8 @@
+ void
+ diskfs_release_peropen (struct peropen *po)
+ {
+- pthread_mutex_lock (&po->np->lock);
+-
+- if (--po->refcnt)
+- {
+- pthread_mutex_unlock (&po->np->lock);
+- return;
+- }
++ if (refcount_deref (&po->refcnt) > 0)
++ return;
+
+ if (po->root_parent)
+ mach_port_deallocate (mach_task_self (), po->root_parent);
+@@ -40,10 +35,14 @@ diskfs_release_peropen (struct peropen *po)
+ mach_port_deallocate (mach_task_self (), po->shadow_root_parent);
+
+ if (po->lock_status != LOCK_UN)
+- fshelp_acquire_lock (&po->np->userlock, &po->lock_status,
+- &po->np->lock, LOCK_UN);
+-
+- diskfs_nput (po->np);
++ {
++ pthread_mutex_lock (&po->np->lock);
++ fshelp_acquire_lock (&po->np->userlock, &po->lock_status,
++ &po->np->lock, LOCK_UN);
++ diskfs_nput (po->np);
++ }
++ else
++ diskfs_nrele (po->np);
+
+ free (po->path);
+ free (po);
+diff --git a/libdiskfs/protid-make.c b/libdiskfs/protid-make.c
+index b39b92a..22aaa2e 100644
+--- a/libdiskfs/protid-make.c
++++ b/libdiskfs/protid-make.c
+@@ -20,7 +20,7 @@
+ #include <assert.h>
+
+ /* Build and return in CRED a protid which has no user identification, for
+- peropen PO. The node PO->np must be locked. */
++ peropen PO. */
+ error_t
+ diskfs_start_protid (struct peropen *po, struct protid **cred)
+ {
+@@ -29,7 +29,7 @@ diskfs_start_protid (struct peropen *po, struct protid **cred)
+ sizeof (struct protid), cred);
+ if (! err)
+ {
+- po->refcnt++;
++ refcount_ref (&po->refcnt);
+ (*cred)->po = po;
+ (*cred)->shared_object = MACH_PORT_NULL;
+ (*cred)->mapped = 0;
+@@ -55,8 +55,8 @@ diskfs_finish_protid (struct protid *cred, struct iouser *user)
+ assert_perror (err);
+ }
+
+-/* Create and return a protid for an existing peropen PO in CRED for USER.
+- The node PO->np must be locked. */
++/* Create and return a protid for an existing peropen PO in CRED for
++ USER. */
+ error_t
+ diskfs_create_protid (struct peropen *po, struct iouser *user,
+ struct protid **cred)
+--
+2.0.0.rc0
+
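The shape of the new release path is worth spelling out: the decrement itself is lock-free, and the node lock is taken only on the last release, and then only if advisory-lock cleanup demands it. A standalone sketch with hypothetical stand-in types and helpers:

    #include <stdio.h>
    #include <stdlib.h>
    #include <refcount.h>

    struct node_like { int dummy; };    /* stands in for struct node */

    struct po_like
    {
      refcount_t refcnt;
      int locked;                       /* non-zero iff an advisory lock is held */
      struct node_like *np;
    };

    static void
    node_lock_and_cleanup (struct node_like *np)
    {
      /* Would lock NP, release the advisory lock, then diskfs_nput.  */
      (void) np;
      printf ("slow path: lock the node, clean up, nput\n");
    }

    static void
    node_release (struct node_like *np)
    {
      /* Would diskfs_nrele NP; no node lock required.  */
      (void) np;
      printf ("fast path: nrele without locking\n");
    }

    void
    po_release (struct po_like *po)
    {
      if (refcount_deref (&po->refcnt) > 0)
        return;                         /* not the last reference: no lock at all */

      if (po->locked)
        node_lock_and_cleanup (po->np);
      else
        node_release (po->np);
      free (po);
    }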
diff --git a/debian/patches/0007-libtrivfs-lock-less-reference-counting-for-trivfs_pe.patch b/debian/patches/0007-libtrivfs-lock-less-reference-counting-for-trivfs_pe.patch
new file mode 100644
index 00000000..7e3bf100
--- /dev/null
+++ b/debian/patches/0007-libtrivfs-lock-less-reference-counting-for-trivfs_pe.patch
@@ -0,0 +1,172 @@
+From c4bd59b5b1bda56338215fd509e9414ec8c7f052 Mon Sep 17 00:00:00 2001
+From: Justus Winter <4winter@informatik.uni-hamburg.de>
+Date: Tue, 6 May 2014 19:07:13 +0200
+Subject: [PATCH 07/15] libtrivfs: lock-less reference counting for
+ trivfs_peropen objects
+
+* libtrivfs/trivfs.h (struct trivfs_peropen): Use refcount_t for field
+refcnt.
+(struct trivfs_control): Remove unused field lock.
+* libtrivfs/cntl-create.c (trivfs_create_control): Drop the mutex
+initialization.
+* libtrivfs/io-reauthenticate.c (trivfs_S_io_reauthenticate): Adjust
+accordingly.
+* libtrivfs/io-restrict-auth.c (trivfs_S_io_restrict_auth): Likewise.
+* libtrivfs/open.c (trivfs_open): Initialize refcnt.
+* libtrivfs/protid-clean.c (trivfs_clean_protid): Adjust accordingly.
+* libtrivfs/protid-dup.c (trivfs_protid_dup): Likewise.
+---
+ libtrivfs/cntl-create.c | 1 -
+ libtrivfs/io-reauthenticate.c | 5 +----
+ libtrivfs/io-restrict-auth.c | 4 +---
+ libtrivfs/open.c | 2 +-
+ libtrivfs/protid-clean.c | 26 +++++++++++++++-----------
+ libtrivfs/protid-dup.c | 5 +----
+ libtrivfs/trivfs.h | 4 ++--
+ 7 files changed, 21 insertions(+), 26 deletions(-)
+
+diff --git a/libtrivfs/cntl-create.c b/libtrivfs/cntl-create.c
+index 910daf3..eb9a834 100644
+--- a/libtrivfs/cntl-create.c
++++ b/libtrivfs/cntl-create.c
+@@ -85,7 +85,6 @@ trivfs_create_control (mach_port_t underlying,
+ }
+
+ (*control)->hook = 0;
+- pthread_mutex_init (&(*control)->lock, NULL);
+ }
+
+ out:
+diff --git a/libtrivfs/io-reauthenticate.c b/libtrivfs/io-reauthenticate.c
+index 7677697..df0ed2e 100644
+--- a/libtrivfs/io-reauthenticate.c
++++ b/libtrivfs/io-reauthenticate.c
+@@ -62,11 +62,8 @@ trivfs_S_io_reauthenticate (struct trivfs_protid *cred,
+ newcred->isroot = 1;
+
+ newcred->hook = cred->hook;
+-
+- pthread_mutex_lock (&cred->po->cntl->lock);
+ newcred->po = cred->po;
+- newcred->po->refcnt++;
+- pthread_mutex_unlock (&cred->po->cntl->lock);
++ refcount_ref (&newcred->po->refcnt);
+
+ do
+ err = io_restrict_auth (newcred->po->cntl->underlying, &newcred->realnode,
+diff --git a/libtrivfs/io-restrict-auth.c b/libtrivfs/io-restrict-auth.c
+index 65b4fd6..39670fe 100644
+--- a/libtrivfs/io-restrict-auth.c
++++ b/libtrivfs/io-restrict-auth.c
+@@ -110,10 +110,8 @@ trivfs_S_io_restrict_auth (struct trivfs_protid *cred,
+ }
+
+ newcred->isroot = 0;
+- pthread_mutex_lock (&cred->po->cntl->lock);
+ newcred->po = cred->po;
+- newcred->po->refcnt++;
+- pthread_mutex_unlock (&cred->po->cntl->lock);
++ refcount_ref (&newcred->po->refcnt);
+ if (cred->isroot && idvec_contains (user->uids, 0))
+ newcred->isroot = 1;
+ newcred->user = user;
+diff --git a/libtrivfs/open.c b/libtrivfs/open.c
+index f64d2ff..97e70a1 100644
+--- a/libtrivfs/open.c
++++ b/libtrivfs/open.c
+@@ -40,7 +40,7 @@ trivfs_open (struct trivfs_control *cntl,
+
+ ports_port_ref (cntl);
+
+- po->refcnt = 1;
++ refcount_init (&po->refcnt, 1);
+ po->cntl = cntl;
+ po->openmodes = flags;
+ po->hook = 0;
+diff --git a/libtrivfs/protid-clean.c b/libtrivfs/protid-clean.c
+index f98da6a..cce736d 100644
+--- a/libtrivfs/protid-clean.c
++++ b/libtrivfs/protid-clean.c
+@@ -31,19 +31,23 @@ trivfs_clean_protid (void *arg)
+ (*trivfs_protid_destroy_hook) (cred);
+
+ /* If we hold the only reference to the peropen, try to get rid of it. */
+- pthread_mutex_lock (&cntl->lock);
+- if (cred->po->refcnt == 1 && trivfs_peropen_destroy_hook)
++ if (refcount_deref (&cred->po->refcnt) == 0)
+ {
+- pthread_mutex_unlock (&cntl->lock);
+- (*trivfs_peropen_destroy_hook) (cred->po);
+- pthread_mutex_lock (&cntl->lock);
++ if (trivfs_peropen_destroy_hook)
++ {
++ /* Reacquire a reference while we call the hook. */
++ refcount_ref (&cred->po->refcnt);
++ (*trivfs_peropen_destroy_hook) (cred->po);
++ if (refcount_deref (&cred->po->refcnt) == 0)
++ goto free_po;
++ }
++ else
++ {
++ free_po:
++ ports_port_deref (cntl);
++ free (cred->po);
++ }
+ }
+- if (--cred->po->refcnt == 0)
+- {
+- ports_port_deref (cntl);
+- free (cred->po);
+- }
+- pthread_mutex_unlock (&cntl->lock);
+
+ iohelp_free_iouser (cred->user);
+
+diff --git a/libtrivfs/protid-dup.c b/libtrivfs/protid-dup.c
+index 6169603..75f3ca8 100644
+--- a/libtrivfs/protid-dup.c
++++ b/libtrivfs/protid-dup.c
+@@ -35,11 +35,8 @@ trivfs_protid_dup (struct trivfs_protid *cred, struct trivfs_protid **dup)
+
+ if (! err)
+ {
+- pthread_mutex_lock (&cred->po->cntl->lock);
+ new->po = cred->po;
+- new->po->refcnt++;
+- pthread_mutex_unlock (&cred->po->cntl->lock);
+-
++ refcount_ref (&new->po->refcnt);
+ new->isroot = cred->isroot;
+
+ err = iohelp_dup_iouser (&new->user, cred->user);
+diff --git a/libtrivfs/trivfs.h b/libtrivfs/trivfs.h
+index bb456ff..8902338 100644
+--- a/libtrivfs/trivfs.h
++++ b/libtrivfs/trivfs.h
+@@ -24,6 +24,7 @@
+ #include <mach/mach.h>
+ #include <hurd/ports.h>
+ #include <hurd/iohelp.h>
++#include <refcount.h>
+
+ struct trivfs_protid
+ {
+@@ -41,14 +42,13 @@ struct trivfs_peropen
+ {
+ void *hook; /* for user use */
+ int openmodes;
+- int refcnt;
++ refcount_t refcnt;
+ struct trivfs_control *cntl;
+ };
+
+ struct trivfs_control
+ {
+ struct port_info pi;
+- pthread_mutex_t lock;
+ struct port_class *protid_class;
+ struct port_bucket *protid_bucket;
+ mach_port_t filesys_id;
+--
+2.0.0.rc0
+
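One idiom in trivfs_clean_protid above deserves isolation: when the count hits zero, the cleanup path reacquires a reference so that the user-supplied destroy hook runs on a live object, then re-checks the count, since the hook may legitimately have handed out new references. A standalone sketch with hypothetical names:

    #include <stdlib.h>
    #include <refcount.h>

    struct obj { refcount_t refcnt; };

    /* Optional user-supplied destructor, in the spirit of
       trivfs_peropen_destroy_hook.  */
    static void (*destroy_hook) (struct obj *);

    void
    obj_release (struct obj *o)
    {
      if (refcount_deref (&o->refcnt) > 0)
        return;                           /* still referenced elsewhere */

      if (destroy_hook)
        {
          /* Keep O alive while the hook runs.  */
          refcount_ref (&o->refcnt);
          destroy_hook (o);
          if (refcount_deref (&o->refcnt) > 0)
            return;                       /* the hook kept a reference */
        }
      free (o);
    }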
diff --git a/debian/patches/0008-libports-use-a-single-hash-table.patch b/debian/patches/0008-libports-use-a-single-hash-table.patch
new file mode 100644
index 00000000..b76029cd
--- /dev/null
+++ b/debian/patches/0008-libports-use-a-single-hash-table.patch
@@ -0,0 +1,619 @@
+From 5d9f77f93862d2f404704071ccb79efdd506e2a7 Mon Sep 17 00:00:00 2001
+From: Justus Winter <4winter@informatik.uni-hamburg.de>
+Date: Sat, 3 May 2014 03:53:41 +0200
+Subject: [PATCH 08/15] libports: use a single hash table
+
+Previously, libports used a hash table per port bucket. This makes
+looking up a port difficult if one does not know the port bucket, as
+one has to iterate over all buckets and do a hash table lookup in each.
+
+Having to iterate over the buckets makes it necessary to keep a list
+of all buckets, which has to be updated and protected by a lock as
+well.
+
+Also, the current code in _ports_bucket_class_iterate iterates over
+the hash table associated with the bucket given. When
+ports_class_iterate calls this common function, it obtains a reference
+to the bucket from one of the ports in the given class. This will not
+work if a class contains ports in different port buckets. This
+limitation is not documented as far as I can see. Again, having to
+maintain this list has its cost and requires serialization.
+
+Use a single hash table instead. Furthermore, serialize access to it
+using a separate lock. Remove the linked lists of all buckets and all
+ports in a class.
+
+* libports/bucket-iterate.c (_ports_bucket_class_iterate): Acquire
+_ports_htable_lock. Also, generalize _ports_bucket_class_iterate to
+treat the bucket argument the same way as the class argument.
+* libports/class-iterate.c (ports_class_iterate): Just call the
+generalized _ports_bucket_class_iterate with NULL as the bucket.
+* libports/ports.h (struct port_info): Remove the port class links.
+(struct port_bucket): Remove the hash table, and the all buckets link.
+(_ports_all_buckets): Remove declaration.
+(_ports_htable): New global hash table.
+(_ports_htable_lock): Protected by this lock.
+* libports/claim-right.c: Adjust accordingly.
+* libports/complete-deallocate.c: Likewise.
+* libports/create-bucket.c: Likewise.
+* libports/create-class.c: Likewise.
+* libports/create-internal.c: Likewise.
+* libports/destroy-right.c: Likewise.
+* libports/import-port.c: Likewise.
+* libports/lookup-port.c: Likewise.
+* libports/reallocate-from-external.c: Likewise.
+* libports/reallocate-port.c: Likewise.
+* libports/transfer-right.c: Likewise.
+* libports/inhibit-all-rpcs.c: Iterate over the hash table.
+* libports/inhibit-bucket-rpcs.c: Likewise, but filter using bucket.
+* libports/inhibit-class-rpcs.c: Likewise, but filter using class.
+* libports/init.c (_ports_htable): Initialize.
+(_ports_htable_lock): Likewise.
+---
+ libports/bucket-iterate.c | 21 +++++++++++++++------
+ libports/claim-right.c | 6 ++++--
+ libports/class-iterate.c | 10 +---------
+ libports/complete-deallocate.c | 8 +++-----
+ libports/create-bucket.c | 7 -------
+ libports/create-class.c | 1 -
+ libports/create-internal.c | 9 +++------
+ libports/destroy-right.c | 6 +++---
+ libports/import-port.c | 9 +++------
+ libports/inhibit-all-rpcs.c | 27 +++++++++++++--------------
+ libports/inhibit-bucket-rpcs.c | 7 +++++--
+ libports/inhibit-class-rpcs.c | 27 ++++++++++++++++++---------
+ libports/init.c | 7 ++++++-
+ libports/lookup-port.c | 21 ++++++++-------------
+ libports/ports.h | 11 ++++++-----
+ libports/reallocate-from-external.c | 16 ++++++++++------
+ libports/reallocate-port.c | 10 +++++++---
+ libports/transfer-right.c | 20 +++++++++++++-------
+ 18 files changed, 118 insertions(+), 105 deletions(-)
+
+diff --git a/libports/bucket-iterate.c b/libports/bucket-iterate.c
+index babc204..d5f590e 100644
+--- a/libports/bucket-iterate.c
++++ b/libports/bucket-iterate.c
+@@ -36,35 +36,44 @@ _ports_bucket_class_iterate (struct port_bucket *bucket,
+ error_t err;
+
+ pthread_mutex_lock (&_ports_lock);
++ pthread_mutex_lock (&_ports_htable_lock);
+
+- if (bucket->htable.nr_items == 0)
++ if (_ports_htable.nr_items == 0)
+ {
+- pthread_mutex_unlock (&_ports_lock);
++ pthread_mutex_unlock (&_ports_htable_lock);
+ return 0;
+ }
+
+- nr_items = bucket->htable.nr_items;
++ nr_items = _ports_htable.nr_items;
+ p = malloc (nr_items * sizeof *p);
+ if (p == NULL)
+ {
+- pthread_mutex_unlock (&_ports_lock);
++ pthread_mutex_unlock (&_ports_htable_lock);
+ return ENOMEM;
+ }
+
+ n = 0;
+- HURD_IHASH_ITERATE (&bucket->htable, arg)
++ HURD_IHASH_ITERATE (&_ports_htable, arg)
+ {
+ struct port_info *const pi = arg;
+
+- if (class == 0 || pi->class == class)
++ if ((bucket == NULL || pi->bucket == bucket)
++ && (class == NULL || pi->class == class))
+ {
+ pi->refcnt++;
+ p[n] = pi;
+ n++;
+ }
+ }
++ pthread_mutex_unlock (&_ports_htable_lock);
+ pthread_mutex_unlock (&_ports_lock);
+
++ if (n == 0)
++ {
++ free (p);
++ return 0;
++ }
++
+ if (n != nr_items)
+ {
+ /* We allocated too much. Release unused memory. */
+diff --git a/libports/claim-right.c b/libports/claim-right.c
+index 4851ea3..f453a82 100644
+--- a/libports/claim-right.c
++++ b/libports/claim-right.c
+@@ -34,10 +34,12 @@ ports_claim_right (void *portstruct)
+ if (ret == MACH_PORT_NULL)
+ return ret;
+
+- pthread_mutex_lock (&_ports_lock);
+- hurd_ihash_locp_remove (&pi->bucket->htable, pi->hentry);
++ pthread_mutex_lock (&_ports_htable_lock);
++ hurd_ihash_locp_remove (&_ports_htable, pi->hentry);
++ pthread_mutex_unlock (&_ports_htable_lock);
+ err = mach_port_move_member (mach_task_self (), ret, MACH_PORT_NULL);
+ assert_perror (err);
++ pthread_mutex_lock (&_ports_lock);
+ pi->port_right = MACH_PORT_NULL;
+ if (pi->flags & PORT_HAS_SENDRIGHTS)
+ {
+diff --git a/libports/class-iterate.c b/libports/class-iterate.c
+index 1f8878a..5cfcfc8 100644
+--- a/libports/class-iterate.c
++++ b/libports/class-iterate.c
+@@ -23,13 +23,5 @@ error_t
+ ports_class_iterate (struct port_class *class,
+ error_t (*fun)(void *))
+ {
+- pthread_mutex_lock (&_ports_lock);
+- if (class->ports != 0)
+- {
+- struct port_bucket *bucket = class->ports->bucket;
+- pthread_mutex_unlock (&_ports_lock);
+- return _ports_bucket_class_iterate (bucket, class, fun);
+- }
+- pthread_mutex_unlock (&_ports_lock);
+- return 0;
++ return _ports_bucket_class_iterate (NULL, class, fun);
+ }
+diff --git a/libports/complete-deallocate.c b/libports/complete-deallocate.c
+index 8ce095b..1371d92 100644
+--- a/libports/complete-deallocate.c
++++ b/libports/complete-deallocate.c
+@@ -29,16 +29,14 @@ _ports_complete_deallocate (struct port_info *pi)
+
+ if (pi->port_right)
+ {
+- hurd_ihash_locp_remove (&pi->bucket->htable, pi->hentry);
++ pthread_mutex_lock (&_ports_htable_lock);
++ hurd_ihash_locp_remove (&_ports_htable, pi->hentry);
++ pthread_mutex_unlock (&_ports_htable_lock);
+ mach_port_mod_refs (mach_task_self (), pi->port_right,
+ MACH_PORT_RIGHT_RECEIVE, -1);
+ pi->port_right = MACH_PORT_NULL;
+ }
+
+- *pi->prevp = pi->next;
+- if (pi->next)
+- pi->next->prevp = pi->prevp;
+-
+ pi->bucket->count--;
+ pi->class->count--;
+
+diff --git a/libports/create-bucket.c b/libports/create-bucket.c
+index 52d50c3..9f6821d 100644
+--- a/libports/create-bucket.c
++++ b/libports/create-bucket.c
+@@ -46,13 +46,6 @@ ports_create_bucket ()
+ return NULL;
+ }
+
+- hurd_ihash_init (&ret->htable, offsetof (struct port_info, hentry));
+ ret->rpcs = ret->flags = ret->count = 0;
+-
+- pthread_mutex_lock (&_ports_lock);
+- ret->next = _ports_all_buckets;
+- _ports_all_buckets = ret;
+- pthread_mutex_unlock (&_ports_lock);
+-
+ return ret;
+ }
+diff --git a/libports/create-class.c b/libports/create-class.c
+index 12c8add..782f52b 100644
+--- a/libports/create-class.c
++++ b/libports/create-class.c
+@@ -39,7 +39,6 @@ ports_create_class (void (*clean_routine)(void *),
+ cl->dropweak_routine = dropweak_routine;
+ cl->flags = 0;
+ cl->rpcs = 0;
+- cl->ports = NULL;
+ cl->count = 0;
+ cl->uninhibitable_rpcs = ports_default_uninhibitable_rpcs;
+
+diff --git a/libports/create-internal.c b/libports/create-internal.c
+index 8551297..e773dd6 100644
+--- a/libports/create-internal.c
++++ b/libports/create-internal.c
+@@ -81,15 +81,12 @@ _ports_create_port_internal (struct port_class *class,
+ goto loop;
+ }
+
+- err = hurd_ihash_add (&bucket->htable, port, pi);
++ pthread_mutex_lock (&_ports_htable_lock);
++ err = hurd_ihash_add (&_ports_htable, port, pi);
++ pthread_mutex_unlock (&_ports_htable_lock);
+ if (err)
+ goto lose;
+
+- pi->next = class->ports;
+- pi->prevp = &class->ports;
+- if (class->ports)
+- class->ports->prevp = &pi->next;
+- class->ports = pi;
+ bucket->count++;
+ class->count++;
+ pthread_mutex_unlock (&_ports_lock);
+diff --git a/libports/destroy-right.c b/libports/destroy-right.c
+index 65e19c7..6ba7302 100644
+--- a/libports/destroy-right.c
++++ b/libports/destroy-right.c
+@@ -30,12 +30,12 @@ ports_destroy_right (void *portstruct)
+
+ if (pi->port_right != MACH_PORT_NULL)
+ {
+- pthread_mutex_lock (&_ports_lock);
+- hurd_ihash_locp_remove (&pi->bucket->htable, pi->hentry);
++ pthread_mutex_lock (&_ports_htable_lock);
++ hurd_ihash_locp_remove (&_ports_htable, pi->hentry);
++ pthread_mutex_unlock (&_ports_htable_lock);
+ err = mach_port_mod_refs (mach_task_self (), pi->port_right,
+ MACH_PORT_RIGHT_RECEIVE, -1);
+ assert_perror (err);
+- pthread_mutex_unlock (&_ports_lock);
+
+ pi->port_right = MACH_PORT_NULL;
+
+diff --git a/libports/import-port.c b/libports/import-port.c
+index 226f47e..d0b2ea4 100644
+--- a/libports/import-port.c
++++ b/libports/import-port.c
+@@ -75,15 +75,12 @@ ports_import_port (struct port_class *class, struct port_bucket *bucket,
+ goto loop;
+ }
+
+- err = hurd_ihash_add (&bucket->htable, port, pi);
++ pthread_mutex_lock (&_ports_htable_lock);
++ err = hurd_ihash_add (&_ports_htable, port, pi);
++ pthread_mutex_unlock (&_ports_htable_lock);
+ if (err)
+ goto lose;
+
+- pi->next = class->ports;
+- pi->prevp = &class->ports;
+- if (class->ports)
+- class->ports->prevp = &pi->next;
+- class->ports = pi;
+ bucket->count++;
+ class->count++;
+ pthread_mutex_unlock (&_ports_lock);
+diff --git a/libports/inhibit-all-rpcs.c b/libports/inhibit-all-rpcs.c
+index d4a54ba..83c291f 100644
+--- a/libports/inhibit-all-rpcs.c
++++ b/libports/inhibit-all-rpcs.c
+@@ -36,24 +36,23 @@ ports_inhibit_all_rpcs ()
+ struct port_bucket *bucket;
+ int this_one = 0;
+
+- for (bucket = _ports_all_buckets; bucket; bucket = bucket->next)
++ pthread_mutex_lock (&_ports_htable_lock);
++ HURD_IHASH_ITERATE (&_ports_htable, portstruct)
+ {
+- HURD_IHASH_ITERATE (&bucket->htable, portstruct)
++ struct rpc_info *rpc;
++ struct port_info *pi = portstruct;
++
++ for (rpc = pi->current_rpcs; rpc; rpc = rpc->next)
+ {
+- struct rpc_info *rpc;
+- struct port_info *pi = portstruct;
+-
+- for (rpc = pi->current_rpcs; rpc; rpc = rpc->next)
+- {
+- /* Avoid cancelling the calling thread if it's currently
+- handling a RPC. */
+- if (rpc->thread == hurd_thread_self ())
+- this_one = 1;
+- else
+- hurd_thread_cancel (rpc->thread);
+- }
++ /* Avoid cancelling the calling thread if it's currently
++ handling a RPC. */
++ if (rpc->thread == hurd_thread_self ())
++ this_one = 1;
++ else
++ hurd_thread_cancel (rpc->thread);
+ }
+ }
++ pthread_mutex_unlock (&_ports_htable_lock);
+
+ while (_ports_total_rpcs > this_one)
+ {
+diff --git a/libports/inhibit-bucket-rpcs.c b/libports/inhibit-bucket-rpcs.c
+index 965aa03..ed3e29d 100644
+--- a/libports/inhibit-bucket-rpcs.c
++++ b/libports/inhibit-bucket-rpcs.c
+@@ -35,10 +35,13 @@ ports_inhibit_bucket_rpcs (struct port_bucket *bucket)
+ {
+ int this_one = 0;
+
+- HURD_IHASH_ITERATE (&bucket->htable, portstruct)
++ pthread_mutex_lock (&_ports_htable_lock);
++ HURD_IHASH_ITERATE (&_ports_htable, portstruct)
+ {
+ struct rpc_info *rpc;
+ struct port_info *pi = portstruct;
++ if (pi->bucket != bucket)
++ continue;
+
+ for (rpc = pi->current_rpcs; rpc; rpc = rpc->next)
+ {
+@@ -49,7 +52,7 @@ ports_inhibit_bucket_rpcs (struct port_bucket *bucket)
+ hurd_thread_cancel (rpc->thread);
+ }
+ }
+-
++ pthread_mutex_unlock (&_ports_htable_lock);
+
+ while (bucket->rpcs > this_one)
+ {
+diff --git a/libports/inhibit-class-rpcs.c b/libports/inhibit-class-rpcs.c
+index 7ee8653..1580bdb 100644
+--- a/libports/inhibit-class-rpcs.c
++++ b/libports/inhibit-class-rpcs.c
+@@ -36,15 +36,24 @@ ports_inhibit_class_rpcs (struct port_class *class)
+ struct rpc_info *rpc;
+ int this_one = 0;
+
+- for (pi = class->ports; pi; pi = pi->next)
+- for (rpc = pi->current_rpcs; rpc; rpc = rpc->next)
+- {
+- /* Avoid cancelling the calling thread. */
+- if (rpc->thread == hurd_thread_self ())
+- this_one = 1;
+- else
+- hurd_thread_cancel (rpc->thread);
+- }
++ pthread_mutex_lock (&_ports_htable_lock);
++ HURD_IHASH_ITERATE (&_ports_htable, portstruct)
++ {
++ struct rpc_info *rpc;
++ struct port_info *pi = portstruct;
++ if (pi->class != class)
++ continue;
++
++ for (rpc = pi->current_rpcs; rpc; rpc = rpc->next)
++ {
++ /* Avoid cancelling the calling thread. */
++ if (rpc->thread == hurd_thread_self ())
++ this_one = 1;
++ else
++ hurd_thread_cancel (rpc->thread);
++ }
++ }
++ pthread_mutex_unlock (&_ports_htable_lock);
+
+ while (class->rpcs > this_one)
+ {
+diff --git a/libports/init.c b/libports/init.c
+index 3ef5388..8fbc7e8 100644
+--- a/libports/init.c
++++ b/libports/init.c
+@@ -19,9 +19,14 @@
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+ #include "ports.h"
++#include <stddef.h>
+
+ pthread_mutex_t _ports_lock = PTHREAD_MUTEX_INITIALIZER;
+ pthread_cond_t _ports_block = PTHREAD_COND_INITIALIZER;
+-struct port_bucket *_ports_all_buckets;
++
++struct hurd_ihash _ports_htable =
++ HURD_IHASH_INITIALIZER (offsetof (struct port_info, hentry));
++pthread_mutex_t _ports_htable_lock = PTHREAD_MUTEX_INITIALIZER;
++
+ int _ports_total_rpcs;
+ int _ports_flags;
+diff --git a/libports/lookup-port.c b/libports/lookup-port.c
+index f79f6f0..fbb13ef 100644
+--- a/libports/lookup-port.c
++++ b/libports/lookup-port.c
+@@ -26,26 +26,21 @@ ports_lookup_port (struct port_bucket *bucket,
+ mach_port_t port,
+ struct port_class *class)
+ {
+- struct port_info *pi = 0;
+-
++ struct port_info *pi;
++
+ pthread_mutex_lock (&_ports_lock);
++ pthread_mutex_lock (&_ports_htable_lock);
+
+- if (bucket)
+- pi = hurd_ihash_find (&bucket->htable, port);
+- else
+- for (bucket = _ports_all_buckets; bucket; bucket = bucket->next)
+- {
+- pi = hurd_ihash_find (&bucket->htable, port);
+- if (pi)
+- break;
+- }
+-
+- if (pi && class && pi->class != class)
++ pi = hurd_ihash_find (&_ports_htable, port);
++ if (pi
++ && ((class && pi->class != class)
++ || (bucket && pi->bucket != bucket)))
+ pi = 0;
+
+ if (pi)
+ pi->refcnt++;
+
++ pthread_mutex_unlock (&_ports_htable_lock);
+ pthread_mutex_unlock (&_ports_lock);
+
+ return pi;
+diff --git a/libports/ports.h b/libports/ports.h
+index 7f13124..9c5f14d 100644
+--- a/libports/ports.h
++++ b/libports/ports.h
+@@ -48,7 +48,6 @@ struct port_info
+ struct rpc_info *current_rpcs;
+ struct port_bucket *bucket;
+ hurd_ihash_locp_t hentry;
+- struct port_info *next, **prevp; /* links on port_class list */
+ };
+ typedef struct port_info *port_info_t;
+
+@@ -61,11 +60,9 @@ typedef struct port_info *port_info_t;
+ struct port_bucket
+ {
+ mach_port_t portset;
+- struct hurd_ihash htable;
+ int rpcs;
+ int flags;
+ int count;
+- struct port_bucket *next;
+ };
+ /* FLAGS above are the following: */
+ #define PORT_BUCKET_INHIBITED PORTS_INHIBITED
+@@ -78,7 +75,6 @@ struct port_class
+ {
+ int flags;
+ int rpcs;
+- struct port_info *ports;
+ int count;
+ void (*clean_routine) (void *);
+ void (*dropweak_routine) (void *);
+@@ -402,7 +398,12 @@ extern kern_return_t
+ /* Private data */
+ extern pthread_mutex_t _ports_lock;
+ extern pthread_cond_t _ports_block;
+-extern struct port_bucket *_ports_all_buckets;
++
++/* A hash table mapping port names to port_info objects. */
++extern struct hurd_ihash _ports_htable;
++/* Access to the hash table is protected by this lock. */
++extern pthread_mutex_t _ports_htable_lock;
++
+ extern int _ports_total_rpcs;
+ extern int _ports_flags;
+ #define _PORTS_INHIBITED PORTS_INHIBITED
+diff --git a/libports/reallocate-from-external.c b/libports/reallocate-from-external.c
+index 8cccb2a..bbcf9ba 100644
+--- a/libports/reallocate-from-external.c
++++ b/libports/reallocate-from-external.c
+@@ -43,8 +43,10 @@ ports_reallocate_from_external (void *portstruct, mach_port_t receive)
+ MACH_PORT_RIGHT_RECEIVE, -1);
+ assert_perror (err);
+
+- hurd_ihash_locp_remove (&pi->bucket->htable, pi->hentry);
+-
++ pthread_mutex_lock (&_ports_htable_lock);
++ hurd_ihash_locp_remove (&_ports_htable, pi->hentry);
++ pthread_mutex_unlock (&_ports_htable_lock);
++
+ if ((pi->flags & PORT_HAS_SENDRIGHTS) && !stat.mps_srights)
+ {
+ dropref = 1;
+@@ -59,11 +61,13 @@ ports_reallocate_from_external (void *portstruct, mach_port_t receive)
+ pi->port_right = receive;
+ pi->cancel_threshold = 0;
+ pi->mscount = stat.mps_mscount;
+-
+- err = hurd_ihash_add (&pi->bucket->htable, receive, pi);
+- assert_perror (err);
++
++ pthread_mutex_lock (&_ports_htable_lock);
++ err = hurd_ihash_add (&_ports_htable, receive, pi);
++ pthread_mutex_unlock (&_ports_htable_lock);
+ pthread_mutex_unlock (&_ports_lock);
+-
++ assert_perror (err);
++
+ mach_port_move_member (mach_task_self (), receive, pi->bucket->portset);
+
+ if (stat.mps_srights)
+diff --git a/libports/reallocate-port.c b/libports/reallocate-port.c
+index d2adaeb..99c4041 100644
+--- a/libports/reallocate-port.c
++++ b/libports/reallocate-port.c
+@@ -36,7 +36,9 @@ ports_reallocate_port (void *portstruct)
+ MACH_PORT_RIGHT_RECEIVE, -1);
+ assert_perror (err);
+
+- hurd_ihash_locp_remove (&pi->bucket->htable, pi->hentry);
++ pthread_mutex_lock (&_ports_htable_lock);
++ hurd_ihash_locp_remove (&_ports_htable, pi->hentry);
++ pthread_mutex_unlock (&_ports_htable_lock);
+
+ err = mach_port_allocate (mach_task_self (), MACH_PORT_RIGHT_RECEIVE,
+ &pi->port_right);
+@@ -48,9 +50,11 @@ ports_reallocate_port (void *portstruct)
+ }
+ pi->cancel_threshold = 0;
+ pi->mscount = 0;
+- err = hurd_ihash_add (&pi->bucket->htable, pi->port_right, pi);
+- assert_perror (err);
++ pthread_mutex_lock (&_ports_htable_lock);
++ err = hurd_ihash_add (&_ports_htable, pi->port_right, pi);
++ pthread_mutex_unlock (&_ports_htable_lock);
+ pthread_mutex_unlock (&_ports_lock);
++ assert_perror (err);
+
+ err = mach_port_move_member (mach_task_self (), pi->port_right,
+ pi->bucket->portset);
+diff --git a/libports/transfer-right.c b/libports/transfer-right.c
+index 72488a9..5a7653d 100644
+--- a/libports/transfer-right.c
++++ b/libports/transfer-right.c
+@@ -41,7 +41,9 @@ ports_transfer_right (void *tostruct,
+ port = frompi->port_right;
+ if (port != MACH_PORT_NULL)
+ {
+- hurd_ihash_locp_remove (&frompi->bucket->htable, frompi->hentry);
++ pthread_mutex_lock (&_ports_htable_lock);
++ hurd_ihash_locp_remove (&_ports_htable, frompi->hentry);
++ pthread_mutex_unlock (&_ports_htable_lock);
+ frompi->port_right = MACH_PORT_NULL;
+ if (frompi->flags & PORT_HAS_SENDRIGHTS)
+ {
+@@ -54,7 +56,9 @@ ports_transfer_right (void *tostruct,
+ /* Destroy the existing right in TOPI. */
+ if (topi->port_right != MACH_PORT_NULL)
+ {
+- hurd_ihash_locp_remove (&topi->bucket->htable, topi->hentry);
++ pthread_mutex_lock (&_ports_htable_lock);
++ hurd_ihash_locp_remove (&_ports_htable, topi->hentry);
++ pthread_mutex_unlock (&_ports_htable_lock);
+ err = mach_port_mod_refs (mach_task_self (), topi->port_right,
+ MACH_PORT_RIGHT_RECEIVE, -1);
+ assert_perror (err);
+@@ -74,10 +78,14 @@ ports_transfer_right (void *tostruct,
+ topi->port_right = port;
+ topi->cancel_threshold = frompi->cancel_threshold;
+ topi->mscount = frompi->mscount;
+-
++
++ pthread_mutex_unlock (&_ports_lock);
++
+ if (port)
+ {
+- err = hurd_ihash_add (&topi->bucket->htable, port, topi);
++ pthread_mutex_lock (&_ports_htable_lock);
++ err = hurd_ihash_add (&_ports_htable, port, topi);
++ pthread_mutex_unlock (&_ports_htable_lock);
+ assert_perror (err);
+ if (topi->bucket != frompi->bucket)
+ {
+@@ -86,9 +94,7 @@ ports_transfer_right (void *tostruct,
+ assert_perror (err);
+ }
+ }
+-
+- pthread_mutex_unlock (&_ports_lock);
+-
++
+ /* Take care of any lowered reference counts. */
+ if (dereffrompi)
+ ports_port_deref (frompi);
+--
+2.0.0.rc0
+
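Taken together with the next patch (which drops _ports_lock from this path), the lookup discipline reduces to the following simplified rendition; this is a sketch of the idea, not the actual libports code, and assumes the libports internal header for _ports_htable and its lock:

    #include "ports.h"   /* libports internals: _ports_htable, _ports_htable_lock */

    struct port_info *
    lookup_port_sketch (struct port_bucket *bucket, mach_port_t port,
                        struct port_class *class)
    {
      struct port_info *pi;

      pthread_mutex_lock (&_ports_htable_lock);
      pi = hurd_ihash_find (&_ports_htable, port);
      if (pi
          && ((class && pi->class != class)
              || (bucket && pi->bucket != bucket)))
        pi = NULL;                /* right name, wrong bucket or class */
      if (pi)
        pi->refcnt++;             /* hand out a reference under the table
                                     lock; patch 09 turns this into an
                                     atomic refcounts_ref */
      pthread_mutex_unlock (&_ports_htable_lock);

      return pi;
    }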
diff --git a/debian/patches/0009-libports-lock-less-reference-counting-for-port_info-.patch b/debian/patches/0009-libports-lock-less-reference-counting-for-port_info-.patch
new file mode 100644
index 00000000..6c0f4eeb
--- /dev/null
+++ b/debian/patches/0009-libports-lock-less-reference-counting-for-port_info-.patch
@@ -0,0 +1,345 @@
+From 6f5ee0a47ed6bade01f30ceda2be793bcfbb8661 Mon Sep 17 00:00:00 2001
+From: Justus Winter <4winter@informatik.uni-hamburg.de>
+Date: Sat, 3 May 2014 01:02:35 +0200
+Subject: [PATCH 09/15] libports: lock-less reference counting for port_info
+ objects
+
+* libports/ports.h (struct port_info): Use the new type.
+* libports/lookup-port.c: No need to lock _ports_lock anymore.
+* libports/bucket-iterate.c: Likewise.
+* libports/complete-deallocate.c: Check if someone reacquired a
+reference through a hash table lookup.
+* libports/create-internal.c: Use the new reference counting primitives.
+* libports/get-right.c: Likewise.
+* libports/import-port.c: Likewise.
+* libports/port-deref-weak.c: Likewise.
+* libports/port-deref.c: Likewise.
+* libports/port-ref-weak.c: Likewise.
+* libports/port-ref.c: Likewise.
+* libports/reallocate-from-external.c: Likewise.
+* libports/transfer-right.c: Likewise.
+* utils/rpctrace.c: Likewise.
+---
+ libports/bucket-iterate.c | 4 +---
+ libports/complete-deallocate.c | 15 +++++++++++++++
+ libports/create-internal.c | 3 +--
+ libports/get-right.c | 2 +-
+ libports/import-port.c | 3 +--
+ libports/lookup-port.c | 6 ++----
+ libports/port-deref-weak.c | 10 +++-------
+ libports/port-deref.c | 34 ++++++++++++++++------------------
+ libports/port-ref-weak.c | 8 +++-----
+ libports/port-ref.c | 8 +++-----
+ libports/ports.h | 4 ++--
+ libports/reallocate-from-external.c | 2 +-
+ libports/transfer-right.c | 2 +-
+ utils/rpctrace.c | 10 ++++++++--
+ 14 files changed, 58 insertions(+), 53 deletions(-)
+
+diff --git a/libports/bucket-iterate.c b/libports/bucket-iterate.c
+index d5f590e..2a2b2b6 100644
+--- a/libports/bucket-iterate.c
++++ b/libports/bucket-iterate.c
+@@ -35,7 +35,6 @@ _ports_bucket_class_iterate (struct port_bucket *bucket,
+ size_t i, n, nr_items;
+ error_t err;
+
+- pthread_mutex_lock (&_ports_lock);
+ pthread_mutex_lock (&_ports_htable_lock);
+
+ if (_ports_htable.nr_items == 0)
+@@ -60,13 +59,12 @@ _ports_bucket_class_iterate (struct port_bucket *bucket,
+ if ((bucket == NULL || pi->bucket == bucket)
+ && (class == NULL || pi->class == class))
+ {
+- pi->refcnt++;
++ refcounts_ref (&pi->refcounts, NULL);
+ p[n] = pi;
+ n++;
+ }
+ }
+ pthread_mutex_unlock (&_ports_htable_lock);
+- pthread_mutex_unlock (&_ports_lock);
+
+ if (n == 0)
+ {
+diff --git a/libports/complete-deallocate.c b/libports/complete-deallocate.c
+index 1371d92..6c9074a 100644
+--- a/libports/complete-deallocate.c
++++ b/libports/complete-deallocate.c
+@@ -29,14 +29,29 @@ _ports_complete_deallocate (struct port_info *pi)
+
+ if (pi->port_right)
+ {
++ struct references result;
++
+ pthread_mutex_lock (&_ports_htable_lock);
++
++ refcounts_references (&pi->refcounts, &result);
++ if (result.hard > 0 || result.weak > 0)
++ {
++ /* A reference was reacquired through a hash table lookup.
++ It's fine, we didn't touch anything yet. */
++ pthread_mutex_unlock (&_ports_htable_lock);
++ return;
++ }
++
+ hurd_ihash_locp_remove (&_ports_htable, pi->hentry);
+ pthread_mutex_unlock (&_ports_htable_lock);
++
+ mach_port_mod_refs (mach_task_self (), pi->port_right,
+ MACH_PORT_RIGHT_RECEIVE, -1);
+ pi->port_right = MACH_PORT_NULL;
+ }
+
++ pthread_mutex_lock (&_ports_lock);
++
+ pi->bucket->count--;
+ pi->class->count--;
+
+diff --git a/libports/create-internal.c b/libports/create-internal.c
+index e773dd6..9e3824d 100644
+--- a/libports/create-internal.c
++++ b/libports/create-internal.c
+@@ -54,8 +54,7 @@ _ports_create_port_internal (struct port_class *class,
+ }
+
+ pi->class = class;
+- pi->refcnt = 1;
+- pi->weakrefcnt = 0;
++ refcounts_init (&pi->refcounts, 1, 0);
+ pi->cancel_threshold = 0;
+ pi->mscount = 0;
+ pi->flags = 0;
+diff --git a/libports/get-right.c b/libports/get-right.c
+index 89050c6..8681f46 100644
+--- a/libports/get-right.c
++++ b/libports/get-right.c
+@@ -41,7 +41,7 @@ ports_get_right (void *port)
+ if ((pi->flags & PORT_HAS_SENDRIGHTS) == 0)
+ {
+ pi->flags |= PORT_HAS_SENDRIGHTS;
+- pi->refcnt++;
++ refcounts_ref (&pi->refcounts, NULL);
+ err = mach_port_request_notification (mach_task_self (),
+ pi->port_right,
+ MACH_NOTIFY_NO_SENDERS,
+diff --git a/libports/import-port.c b/libports/import-port.c
+index d0b2ea4..e250b0e 100644
+--- a/libports/import-port.c
++++ b/libports/import-port.c
+@@ -48,8 +48,7 @@ ports_import_port (struct port_class *class, struct port_bucket *bucket,
+ return ENOMEM;
+
+ pi->class = class;
+- pi->refcnt = 1 + !!stat.mps_srights;
+- pi->weakrefcnt = 0;
++ refcounts_init (&pi->refcounts, 1 + !!stat.mps_srights, 0);
+ pi->cancel_threshold = 0;
+ pi->mscount = stat.mps_mscount;
+ pi->flags = stat.mps_srights ? PORT_HAS_SENDRIGHTS : 0;
+diff --git a/libports/lookup-port.c b/libports/lookup-port.c
+index fbb13ef..1bf012f 100644
+--- a/libports/lookup-port.c
++++ b/libports/lookup-port.c
+@@ -28,7 +28,6 @@ ports_lookup_port (struct port_bucket *bucket,
+ {
+ struct port_info *pi;
+
+- pthread_mutex_lock (&_ports_lock);
+ pthread_mutex_lock (&_ports_htable_lock);
+
+ pi = hurd_ihash_find (&_ports_htable, port);
+@@ -38,10 +37,9 @@ ports_lookup_port (struct port_bucket *bucket,
+ pi = 0;
+
+ if (pi)
+- pi->refcnt++;
++ ports_port_ref (pi);
+
+ pthread_mutex_unlock (&_ports_htable_lock);
+- pthread_mutex_unlock (&_ports_lock);
+-
++
+ return pi;
+ }
+diff --git a/libports/port-deref-weak.c b/libports/port-deref-weak.c
+index beb4842..8432660 100644
+--- a/libports/port-deref-weak.c
++++ b/libports/port-deref-weak.c
+@@ -25,12 +25,8 @@ void
+ ports_port_deref_weak (void *portstruct)
+ {
+ struct port_info *pi = portstruct;
+-
+- pthread_mutex_lock (&_ports_lock);
+- assert (pi->weakrefcnt);
+- pi->weakrefcnt--;
+- if (pi->refcnt == 0 && pi->weakrefcnt == 0)
++ struct references result;
++ refcounts_deref_weak (&pi->refcounts, &result);
++ if (result.hard == 0 && result.weak == 0)
+ _ports_complete_deallocate (pi);
+- else
+- pthread_mutex_unlock (&_ports_lock);
+ }
+diff --git a/libports/port-deref.c b/libports/port-deref.c
+index cf9b238..dd38f55 100644
+--- a/libports/port-deref.c
++++ b/libports/port-deref.c
+@@ -25,26 +25,24 @@ void
+ ports_port_deref (void *portstruct)
+ {
+ struct port_info *pi = portstruct;
+- int trieddroppingweakrefs = 0;
+-
+- retry:
+-
+- pthread_mutex_lock (&_ports_lock);
+-
+- if (pi->refcnt == 1 && pi->weakrefcnt
+- && pi->class->dropweak_routine && !trieddroppingweakrefs)
++ struct references result;
++
++ /* If we need to call the dropweak routine, we need to hold one
++ reference while doing so. We use a weak reference for this
++ purpose, which we acquire before we release our hard reference.
++ The order is important here to prevent a race. */
++ if (pi->class->dropweak_routine)
++ refcounts_ref_weak (&pi->refcounts, NULL);
++
++ refcounts_deref (&pi->refcounts, &result);
++
++ if (pi->class->dropweak_routine)
+ {
+- pthread_mutex_unlock (&_ports_lock);
+- (*pi->class->dropweak_routine) (pi);
+- trieddroppingweakrefs = 1;
+- goto retry;
++ if (result.hard == 0 && result.weak > 1)
++ (*pi->class->dropweak_routine) (pi);
++ refcounts_deref_weak (&pi->refcounts, &result);
+ }
+-
+- assert (pi->refcnt);
+
+- pi->refcnt--;
+- if (pi->refcnt == 0 && pi->weakrefcnt == 0)
++ if (result.hard == 0 && result.weak == 0)
+ _ports_complete_deallocate (pi);
+- else
+- pthread_mutex_unlock (&_ports_lock);
+ }
+diff --git a/libports/port-ref-weak.c b/libports/port-ref-weak.c
+index c7d3c69..e4b7fc8 100644
+--- a/libports/port-ref-weak.c
++++ b/libports/port-ref-weak.c
+@@ -25,9 +25,7 @@ void
+ ports_port_ref_weak (void *portstruct)
+ {
+ struct port_info *pi = portstruct;
+-
+- pthread_mutex_lock (&_ports_lock);
+- assert (pi->refcnt || pi->weakrefcnt);
+- pi->weakrefcnt++;
+- pthread_mutex_unlock (&_ports_lock);
++ struct references result;
++ refcounts_ref_weak (&pi->refcounts, &result);
++ assert (result.hard > 0 || result.weak > 1);
+ }
+diff --git a/libports/port-ref.c b/libports/port-ref.c
+index 92b7118..761c50f 100644
+--- a/libports/port-ref.c
++++ b/libports/port-ref.c
+@@ -25,9 +25,7 @@ void
+ ports_port_ref (void *portstruct)
+ {
+ struct port_info *pi = portstruct;
+-
+- pthread_mutex_lock (&_ports_lock);
+- assert (pi->refcnt || pi->weakrefcnt);
+- pi->refcnt++;
+- pthread_mutex_unlock (&_ports_lock);
++ struct references result;
++ refcounts_ref (&pi->refcounts, &result);
++ assert (result.hard > 1 || result.weak > 0);
+ }
+diff --git a/libports/ports.h b/libports/ports.h
+index 9c5f14d..116e920 100644
+--- a/libports/ports.h
++++ b/libports/ports.h
+@@ -27,6 +27,7 @@
+ #include <hurd/ihash.h>
+ #include <mach/notify.h>
+ #include <pthread.h>
++#include <refcount.h>
+
+ /* These are global values for common flags used in the various structures.
+ Not all of these are meaningful in all flag fields. */
+@@ -39,8 +40,7 @@
+ struct port_info
+ {
+ struct port_class *class;
+- int refcnt;
+- int weakrefcnt;
++ refcounts_t refcounts;
+ mach_port_mscount_t mscount;
+ mach_msg_seqno_t cancel_threshold;
+ int flags;
+diff --git a/libports/reallocate-from-external.c b/libports/reallocate-from-external.c
+index bbcf9ba..5ee579d 100644
+--- a/libports/reallocate-from-external.c
++++ b/libports/reallocate-from-external.c
+@@ -55,7 +55,7 @@ ports_reallocate_from_external (void *portstruct, mach_port_t receive)
+ else if (((pi->flags & PORT_HAS_SENDRIGHTS) == 0) && stat.mps_srights)
+ {
+ pi->flags |= PORT_HAS_SENDRIGHTS;
+- pi->refcnt++;
++ refcounts_ref (&pi->refcounts, NULL);
+ }
+
+ pi->port_right = receive;
+diff --git a/libports/transfer-right.c b/libports/transfer-right.c
+index 5a7653d..d3ff0f4 100644
+--- a/libports/transfer-right.c
++++ b/libports/transfer-right.c
+@@ -70,7 +70,7 @@ ports_transfer_right (void *tostruct,
+ else if (((topi->flags & PORT_HAS_SENDRIGHTS) == 0) && hassendrights)
+ {
+ topi->flags |= PORT_HAS_SENDRIGHTS;
+- topi->refcnt++;
++ refcounts_ref (&topi->refcounts, NULL);
+ }
+ }
+
+diff --git a/utils/rpctrace.c b/utils/rpctrace.c
+index fc913e3..b11fea4 100644
+--- a/utils/rpctrace.c
++++ b/utils/rpctrace.c
+@@ -431,7 +431,9 @@ destroy_receiver_info (struct receiver_info *info)
+ while (send_wrapper)
+ {
+ struct sender_info *next = send_wrapper->next;
+- assert (TRACED_INFO (send_wrapper)->pi.refcnt == 1);
++ assert (
++ refcounts_hard_references (&TRACED_INFO (send_wrapper)->pi.refcounts)
++ == 1);
+ /* Reset the receive_right of the send wrapper in advance to avoid
+       * destroy_receiver_info being called when the port info is destroyed. */
+ send_wrapper->receive_right = NULL;
+@@ -848,7 +850,11 @@ rewrite_right (mach_port_t *right, mach_msg_type_name_t *type,
+ hurd_ihash_locp_remove (&traced_names, receiver_info->locp);
+
+ send_wrapper2 = get_send_wrapper (receiver_info, dest, &rr);
+- assert (TRACED_INFO (send_wrapper2)->pi.refcnt == 1);
++ assert (
++ refcounts_hard_references (
++ &TRACED_INFO (send_wrapper2)->pi.refcounts)
++ == 1);
++
+ name = TRACED_INFO (send_wrapper2)->name;
+ TRACED_INFO (send_wrapper2)->name = NULL;
+ /* send_wrapper2 isn't destroyed normally, so we need to unlink
+--
+2.0.0.rc0
+
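The <refcount.h> primitives used throughout this patch pack a hard and a weak count into one value, so each operation reports both counts back atomically in a struct references. The condensed sketch below shows how they compose in a deref path, mirroring the new ports_port_deref; struct object, its dropweak hook, and object_destroy are hypothetical stand-ins, not part of the patch.

#include <stddef.h>
#include <refcount.h>

struct object
{
  refcounts_t refcounts;
  void (*dropweak) (struct object *);  /* optional, may be NULL */
};

extern void object_destroy (struct object *);  /* hypothetical */

void
object_deref (struct object *o)
{
  struct references result;

  /* Acquire a weak reference before dropping our hard one, so the
     counts cannot both reach zero while the dropweak hook runs.  */
  if (o->dropweak)
    refcounts_ref_weak (&o->refcounts, NULL);

  refcounts_deref (&o->refcounts, &result);

  if (o->dropweak)
    {
      /* Our own weak reference accounts for one of RESULT.WEAK.  */
      if (result.hard == 0 && result.weak > 1)
        (*o->dropweak) (o);
      refcounts_deref_weak (&o->refcounts, &result);
    }

  if (result.hard == 0 && result.weak == 0)
    object_destroy (o);
}
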
diff --git a/debian/patches/0010-ext2fs-improve-enable-disable-_caching.patch b/debian/patches/0010-ext2fs-improve-enable-disable-_caching.patch
new file mode 100644
index 00000000..8cf1fabe
--- /dev/null
+++ b/debian/patches/0010-ext2fs-improve-enable-disable-_caching.patch
@@ -0,0 +1,38 @@
+From 283c322efe55cf00f25cabaa22d48078b94c526e Mon Sep 17 00:00:00 2001
+From: Justus Winter <4winter@informatik.uni-hamburg.de>
+Date: Fri, 9 May 2014 10:07:28 +0200
+Subject: [PATCH 10/15] ext2fs: improve {enable,disable}_caching
+
+* ext2fs/pager.c (enable_caching, disable_caching): Iterate over the
+pager class instead of over both pager buckets.
+---
+ ext2fs/pager.c | 6 ++----
+ 1 file changed, 2 insertions(+), 4 deletions(-)
+
+diff --git a/ext2fs/pager.c b/ext2fs/pager.c
+index 017efcc..6328f3b 100644
+--- a/ext2fs/pager.c
++++ b/ext2fs/pager.c
+@@ -1409,8 +1409,7 @@ disable_caching ()
+
+ /* Loop through the pagers and turn off caching one by one,
+ synchronously. That should cause termination of each pager. */
+- ports_bucket_iterate (disk_pager_bucket, block_cache);
+- ports_bucket_iterate (file_pager_bucket, block_cache);
++ ports_class_iterate (_pager_class, block_cache);
+ }
+
+ static void
+@@ -1438,8 +1437,7 @@ enable_caching ()
+ return 0;
+ }
+
+- ports_bucket_iterate (disk_pager_bucket, enable_cache);
+- ports_bucket_iterate (file_pager_bucket, enable_cache);
++ ports_class_iterate (_pager_class, enable_cache);
+ }
+
+ /* Tell diskfs if there are pagers exported, and if none, then
+--
+2.0.0.rc0
+
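Patch 0008 turned buckets and classes into mere filters over one global table, so iterating over the pager class visits disk and file pagers alike in a single pass, which is what makes this simplification possible. A short sketch of the callback shape follows; struct pager here is only a placeholder for the type defined in libpager's private header.

#include <errno.h>
#include <hurd/ports.h>

extern struct port_class *_pager_class;

/* Invoked once per live port of _pager_class; PORT is the
   port_info pointer.  Stands in for block_cache/enable_cache.  */
static error_t
toggle_cache (void *port)
{
  struct pager *p = port;  /* placeholder cast */
  (void) p;                /* ... adjust caching state on P ... */
  return 0;
}

void
toggle_all_pagers (void)
{
  ports_class_iterate (_pager_class, toggle_cache);
}
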
diff --git a/debian/patches/0011-fatfs-improve-enable-disable-_caching.patch b/debian/patches/0011-fatfs-improve-enable-disable-_caching.patch
new file mode 100644
index 00000000..45b3f45a
--- /dev/null
+++ b/debian/patches/0011-fatfs-improve-enable-disable-_caching.patch
@@ -0,0 +1,48 @@
+From 555a58f92cfcf75c5650ef1d96080c512a511f64 Mon Sep 17 00:00:00 2001
+From: Justus Winter <4winter@informatik.uni-hamburg.de>
+Date: Fri, 9 May 2014 10:11:45 +0200
+Subject: [PATCH 11/15] fatfs: improve {enable,disable}_caching
+
+* fatfs/pager.c (enable_caching, disable_caching): Iterate over the
+pager class instead of over both pager buckets.
+---
+ fatfs/pager.c | 9 +++++----
+ 1 file changed, 5 insertions(+), 4 deletions(-)
+
+diff --git a/fatfs/pager.c b/fatfs/pager.c
+index f855ecf..7aa5c5e 100644
+--- a/fatfs/pager.c
++++ b/fatfs/pager.c
+@@ -23,6 +23,9 @@
+ #include <hurd/store.h>
+ #include "fatfs.h"
+
++/* XXX */
++#include "../libpager/priv.h"
++
+ /* A ports bucket to hold disk pager ports. */
+ struct port_bucket *disk_pager_bucket;
+
+@@ -963,8 +966,7 @@ disable_caching ()
+
+ /* Loop through the pagers and turn off caching one by one,
+ synchronously. That should cause termination of each pager. */
+- ports_bucket_iterate (disk_pager_bucket, block_cache);
+- ports_bucket_iterate (file_pager_bucket, block_cache);
++ ports_class_iterate (_pager_class, block_cache);
+ }
+
+ static void
+@@ -992,8 +994,7 @@ enable_caching ()
+ return 0;
+ }
+
+- ports_bucket_iterate (disk_pager_bucket, enable_cache);
+- ports_bucket_iterate (file_pager_bucket, enable_cache);
++ ports_class_iterate (_pager_class, enable_cache);
+ }
+
+ /* Tell diskfs if there are pagers exported, and if none, then
+--
+2.0.0.rc0
+
diff --git a/debian/patches/0012-ext2fs-use-a-seperate-lock-to-protect-nodehash.patch b/debian/patches/0012-ext2fs-use-a-seperate-lock-to-protect-nodehash.patch
new file mode 100644
index 00000000..20e9d5f3
--- /dev/null
+++ b/debian/patches/0012-ext2fs-use-a-seperate-lock-to-protect-nodehash.patch
@@ -0,0 +1,190 @@
+From 72937c396cc7a51e5aff5f79ee60e092f54f135c Mon Sep 17 00:00:00 2001
+From: Justus Winter <4winter@informatik.uni-hamburg.de>
+Date: Tue, 13 May 2014 13:09:15 +0200
+Subject: [PATCH 12/15] ext2fs: use a separate lock to protect nodehash
+
+Previously, ext2fs used diskfs_node_refcnt_lock to serialize access to
+the nodehash.
+
+Use a separate lock to protect nodehash. Adjust the reference
+counting accordingly. Every node in the nodehash carries a light
+reference. When we are asked to give up that light reference, we
+reacquire our lock momentarily to check whether someone else
+reacquired a reference through the nodehash.
+
+* ext2fs/inode.c (nodehash_lock): New lock.
+(diskfs_cached_lookup): Use a separate lock to protect nodehash.
+Adjust the reference counting accordingly.
+(ifind): Likewise.
+(diskfs_node_iterate): Likewise.
+(diskfs_node_norefs): Move the code removing the node from nodehash...
+(diskfs_try_dropping_softrefs): ... here, where we check whether
+someone reacquired a reference, and if so hold on to our light
+reference.
+---
+ ext2fs/inode.c | 68 ++++++++++++++++++++++++++++++++++++++++++----------------
+ 1 file changed, 50 insertions(+), 18 deletions(-)
+
+diff --git a/ext2fs/inode.c b/ext2fs/inode.c
+index ed78265..aa070a2 100644
+--- a/ext2fs/inode.c
++++ b/ext2fs/inode.c
+@@ -46,8 +46,18 @@
+ #define INOHASH(ino) (((unsigned)(ino))%INOHSZ)
+ #endif
+
++/* The nodehash is a cache of nodes.
++
++ Access to nodehash and nodehash_nr_items is protected by
++ nodehash_lock.
++
++ Every node in the nodehash carries a light reference. When we are
++ asked to give up that light reference, we reacquire our lock
++ momentarily to check whether someone else reacquired a reference
++ through the nodehash. */
+ static struct node *nodehash[INOHSZ];
+ static size_t nodehash_nr_items;
++static pthread_mutex_t nodehash_lock = PTHREAD_MUTEX_INITIALIZER;
+
+ static error_t read_node (struct node *np);
+
+@@ -71,12 +81,12 @@ diskfs_cached_lookup (ino_t inum, struct node **npp)
+ struct node *np;
+ struct disknode *dn;
+
+- pthread_spin_lock (&diskfs_node_refcnt_lock);
++ pthread_mutex_lock (&nodehash_lock);
+ for (np = nodehash[INOHASH(inum)]; np; np = np->dn->hnext)
+ if (np->cache_id == inum)
+ {
+- np->references++;
+- pthread_spin_unlock (&diskfs_node_refcnt_lock);
++ diskfs_nref (np);
++ pthread_mutex_unlock (&nodehash_lock);
+ pthread_mutex_lock (&np->lock);
+ *npp = np;
+ return 0;
+@@ -86,7 +96,7 @@ diskfs_cached_lookup (ino_t inum, struct node **npp)
+ dn = malloc (sizeof (struct disknode));
+ if (! dn)
+ {
+- pthread_spin_unlock (&diskfs_node_refcnt_lock);
++ pthread_mutex_unlock (&nodehash_lock);
+ return ENOMEM;
+ }
+ dn->dirents = 0;
+@@ -107,9 +117,10 @@ diskfs_cached_lookup (ino_t inum, struct node **npp)
+ dn->hnext->dn->hprevp = &dn->hnext;
+ dn->hprevp = &nodehash[INOHASH(inum)];
+ nodehash[INOHASH(inum)] = np;
++ diskfs_nref_light (np);
+ nodehash_nr_items += 1;
+
+- pthread_spin_unlock (&diskfs_node_refcnt_lock);
++ pthread_mutex_unlock (&nodehash_lock);
+
+ /* Get the contents of NP off disk. */
+ err = read_node (np);
+@@ -140,14 +151,13 @@ ifind (ino_t inum)
+ {
+ struct node *np;
+
+- pthread_spin_lock (&diskfs_node_refcnt_lock);
++ pthread_mutex_lock (&nodehash_lock);
+ for (np = nodehash[INOHASH(inum)]; np; np = np->dn->hnext)
+ {
+ if (np->cache_id != inum)
+ continue;
+
+- assert (np->references);
+- pthread_spin_unlock (&diskfs_node_refcnt_lock);
++ pthread_mutex_unlock (&nodehash_lock);
+ return np;
+ }
+ assert (0);
+@@ -158,11 +168,6 @@ ifind (ino_t inum)
+ void
+ diskfs_node_norefs (struct node *np)
+ {
+- *np->dn->hprevp = np->dn->hnext;
+- if (np->dn->hnext)
+- np->dn->hnext->dn->hprevp = np->dn->hprevp;
+- nodehash_nr_items -= 1;
+-
+ if (np->dn->dirents)
+ free (np->dn->dirents);
+ assert (!np->dn->pager);
+@@ -180,6 +185,33 @@ diskfs_node_norefs (struct node *np)
+ void
+ diskfs_try_dropping_softrefs (struct node *np)
+ {
++ pthread_mutex_lock (&nodehash_lock);
++ if (np->dn->hnext != NULL)
++ {
++ /* Check if someone reacquired a reference through the
++ nodehash. */
++ unsigned int references;
++ pthread_spin_lock (&diskfs_node_refcnt_lock);
++ references = np->references;
++ pthread_spin_unlock (&diskfs_node_refcnt_lock);
++
++ if (references > 0)
++ {
++ /* A reference was reacquired through a hash table lookup.
++ It's fine, we didn't touch anything yet. */
++ pthread_mutex_unlock (&nodehash_lock);
++ return;
++ }
++
++ *np->dn->hprevp = np->dn->hnext;
++ if (np->dn->hnext)
++ np->dn->hnext->dn->hprevp = np->dn->hprevp;
++ np->dn->hnext = NULL;
++ nodehash_nr_items -= 1;
++ diskfs_nrele_light (np);
++ }
++ pthread_mutex_unlock (&nodehash_lock);
++
+ drop_pager_softrefs (np);
+ }
+
+@@ -556,12 +588,12 @@ diskfs_node_iterate (error_t (*fun)(struct node *))
+ size_t num_nodes;
+ struct node *node, **node_list, **p;
+
+- pthread_spin_lock (&diskfs_node_refcnt_lock);
++ pthread_mutex_lock (&nodehash_lock);
+
+ /* We must copy everything from the hash table into another data structure
+ to avoid running into any problems with the hash-table being modified
+ during processing (normally we delegate access to hash-table with
+- diskfs_node_refcnt_lock, but we can't hold this while locking the
++ nodehash_lock, but we can't hold this while locking the
+ individual node locks). */
+ num_nodes = nodehash_nr_items;
+
+@@ -570,7 +602,7 @@ diskfs_node_iterate (error_t (*fun)(struct node *))
+ node_list = malloc (num_nodes * sizeof (struct node *));
+ if (node_list == NULL)
+ {
+- pthread_spin_unlock (&diskfs_node_refcnt_lock);
++ pthread_mutex_unlock (&nodehash_lock);
+ ext2_debug ("unable to allocate temporary node table");
+ return ENOMEM;
+ }
+@@ -580,10 +612,10 @@ diskfs_node_iterate (error_t (*fun)(struct node *))
+ for (node = nodehash[n]; node; node = node->dn->hnext)
+ {
+ *p++ = node;
+- node->references++;
++ diskfs_nref (node);
+ }
+
+- pthread_spin_unlock (&diskfs_node_refcnt_lock);
++ pthread_mutex_unlock (&nodehash_lock);
+
+ p = node_list;
+ while (num_nodes-- > 0)
+--
+2.0.0.rc0
+
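Patches 0012 through 0015 instantiate this same idiom in four filesystems: the cache holds a light reference, and the softref callback re-checks the hard count under the cache lock before dropping it. The schematic below states the idiom once; node_is_cached and cache_remove are hypothetical stand-ins for each filesystem's hash-chain handling.

#include <pthread.h>
#include <hurd/diskfs.h>

static pthread_mutex_t cache_lock = PTHREAD_MUTEX_INITIALIZER;

extern int node_is_cached (struct node *np);        /* hypothetical */
extern void cache_remove (struct node *np);         /* hypothetical */
extern void drop_pager_softrefs (struct node *np);  /* per-filesystem */

void
diskfs_try_dropping_softrefs (struct node *np)
{
  pthread_mutex_lock (&cache_lock);
  if (node_is_cached (np))
    {
      unsigned int references;

      /* Before dropping the cache's light reference, re-read the
         hard count: a concurrent lookup may have handed out a new
         reference since we were called.  */
      pthread_spin_lock (&diskfs_node_refcnt_lock);
      references = np->references;
      pthread_spin_unlock (&diskfs_node_refcnt_lock);

      if (references > 0)
        {
          /* Someone won the race; keep the node cached.  */
          pthread_mutex_unlock (&cache_lock);
          return;
        }

      cache_remove (np);
      diskfs_nrele_light (np);
    }
  pthread_mutex_unlock (&cache_lock);

  drop_pager_softrefs (np);
}
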
diff --git a/debian/patches/0013-fatfs-use-a-seperate-lock-to-protect-nodehash.patch b/debian/patches/0013-fatfs-use-a-seperate-lock-to-protect-nodehash.patch
new file mode 100644
index 00000000..a7918b0c
--- /dev/null
+++ b/debian/patches/0013-fatfs-use-a-seperate-lock-to-protect-nodehash.patch
@@ -0,0 +1,222 @@
+From 664606b8d183455a6195ac954d78571af744362d Mon Sep 17 00:00:00 2001
+From: Justus Winter <4winter@informatik.uni-hamburg.de>
+Date: Tue, 13 May 2014 15:14:53 +0200
+Subject: [PATCH 13/15] fatfs: use a separate lock to protect nodehash
+
+Previously, fatfs used diskfs_node_refcnt_lock to serialize access to
+the nodehash.
+
+Use a separate lock to protect nodehash. Adjust the reference
+counting accordingly. Every node in the nodehash carries a light
+reference. When we are asked to give up that light reference, we
+reacquire our lock momentarily to check whether someone else
+reacquired a reference through the nodehash.
+
+* fatfs/inode.c (nodehash_lock): New lock.
+(diskfs_cached_lookup): Use a separate lock to protect nodehash.
+Adjust the reference counting accordingly.
+(ifind): Likewise.
+(diskfs_node_iterate): Likewise.
+(diskfs_node_norefs): Move the code removing the node from nodehash...
+(diskfs_try_dropping_softrefs): ... here, where we check whether
+someone reacquired a reference, and if so hold on to our light
+reference.
+---
+ fatfs/inode.c | 81 +++++++++++++++++++++++++++++++++++++++++------------------
+ 1 file changed, 57 insertions(+), 24 deletions(-)
+
+diff --git a/fatfs/inode.c b/fatfs/inode.c
+index ed6f3f0..8b3385b 100644
+--- a/fatfs/inode.c
++++ b/fatfs/inode.c
+@@ -44,8 +44,18 @@
+ #define INOHASH(ino) (((unsigned)(ino))%INOHSZ)
+ #endif
+
++/* The nodehash is a cache of nodes.
++
++ Access to nodehash and nodehash_nr_items is protected by
++ nodehash_lock.
++
++ Every node in the nodehash carries a light reference. When we are
++ asked to give up that light reference, we reacquire our lock
++ momentarily to check whether someone else reacquired a reference
++ through the nodehash. */
+ static struct node *nodehash[INOHSZ];
+ static size_t nodehash_nr_items;
++static pthread_mutex_t nodehash_lock = PTHREAD_MUTEX_INITIALIZER;
+
+ static error_t read_node (struct node *np, vm_address_t buf);
+
+@@ -67,12 +77,12 @@ diskfs_cached_lookup (ino64_t inum, struct node **npp)
+ struct node *np;
+ struct disknode *dn;
+
+- pthread_spin_lock (&diskfs_node_refcnt_lock);
++ pthread_mutex_lock (&nodehash_lock);
+ for (np = nodehash[INOHASH(inum)]; np; np = np->dn->hnext)
+ if (np->cache_id == inum)
+ {
+- np->references++;
+- pthread_spin_unlock (&diskfs_node_refcnt_lock);
++ diskfs_nref (np);
++ pthread_mutex_unlock (&nodehash_lock);
+ pthread_mutex_lock (&np->lock);
+ *npp = np;
+ return 0;
+@@ -82,7 +92,7 @@ diskfs_cached_lookup (ino64_t inum, struct node **npp)
+ dn = malloc (sizeof (struct disknode));
+ if (! dn)
+ {
+- pthread_spin_unlock (&diskfs_node_refcnt_lock);
++ pthread_mutex_unlock (&nodehash_lock);
+ return ENOMEM;
+ }
+ dn->pager = 0;
+@@ -107,10 +117,11 @@ diskfs_cached_lookup (ino64_t inum, struct node **npp)
+ dn->hnext->dn->hprevp = &dn->hnext;
+ dn->hprevp = &nodehash[INOHASH(inum)];
+ nodehash[INOHASH(inum)] = np;
++ diskfs_nref_light (np);
+ nodehash_nr_items += 1;
+
+- pthread_spin_unlock (&diskfs_node_refcnt_lock);
+-
++ pthread_mutex_unlock (&nodehash_lock);
++
+ /* Get the contents of NP off disk. */
+ err = read_node (np, 0);
+
+@@ -133,12 +144,12 @@ diskfs_cached_lookup_in_dirbuf (int inum, struct node **npp, vm_address_t buf)
+ struct node *np;
+ struct disknode *dn;
+
+- pthread_spin_lock (&diskfs_node_refcnt_lock);
++ pthread_mutex_lock (&nodehash_lock);
+ for (np = nodehash[INOHASH(inum)]; np; np = np->dn->hnext)
+ if (np->cache_id == inum)
+ {
+- np->references++;
+- pthread_spin_unlock (&diskfs_node_refcnt_lock);
++ diskfs_nref (np);
++ pthread_mutex_unlock (&nodehash_lock);
+ pthread_mutex_lock (&np->lock);
+ *npp = np;
+ return 0;
+@@ -148,7 +159,7 @@ diskfs_cached_lookup_in_dirbuf (int inum, struct node **npp, vm_address_t buf)
+ dn = malloc (sizeof (struct disknode));
+ if (! dn)
+ {
+- pthread_spin_unlock (&diskfs_node_refcnt_lock);
++ pthread_mutex_unlock (&nodehash_lock);
+ return ENOMEM;
+ }
+ dn->pager = 0;
+@@ -173,10 +184,11 @@ diskfs_cached_lookup_in_dirbuf (int inum, struct node **npp, vm_address_t buf)
+ dn->hnext->dn->hprevp = &dn->hnext;
+ dn->hprevp = &nodehash[INOHASH(inum)];
+ nodehash[INOHASH(inum)] = np;
++ diskfs_nref_light (np);
+ nodehash_nr_items += 1;
+
+- pthread_spin_unlock (&diskfs_node_refcnt_lock);
+-
++ pthread_mutex_unlock (&nodehash_lock);
++
+ /* Get the contents of NP off disk. */
+ err = read_node (np, buf);
+
+@@ -196,14 +208,13 @@ ifind (ino_t inum)
+ {
+ struct node *np;
+
+- pthread_spin_lock (&diskfs_node_refcnt_lock);
++ pthread_mutex_lock (&nodehash_lock);
+ for (np = nodehash[INOHASH(inum)]; np; np = np->dn->hnext)
+ {
+ if (np->cache_id != inum)
+ continue;
+
+- assert (np->references);
+- pthread_spin_unlock (&diskfs_node_refcnt_lock);
++ pthread_mutex_unlock (&nodehash_lock);
+ return np;
+ }
+ assert (0);
+@@ -216,11 +227,6 @@ diskfs_node_norefs (struct node *np)
+ {
+ struct cluster_chain *last = np->dn->first;
+
+- *np->dn->hprevp = np->dn->hnext;
+- if (np->dn->hnext)
+- np->dn->hnext->dn->hprevp = np->dn->hprevp;
+- nodehash_nr_items -= 1;
+-
+ while (last)
+ {
+ struct cluster_chain *next = last->next;
+@@ -251,6 +257,33 @@ diskfs_node_norefs (struct node *np)
+ void
+ diskfs_try_dropping_softrefs (struct node *np)
+ {
++ pthread_mutex_lock (&nodehash_lock);
++ if (np->dn->hnext != NULL)
++ {
++ /* Check if someone reacquired a reference through the
++ nodehash. */
++ unsigned int references;
++ pthread_spin_lock (&diskfs_node_refcnt_lock);
++ references = np->references;
++ pthread_spin_unlock (&diskfs_node_refcnt_lock);
++
++ if (references > 0)
++ {
++ /* A reference was reacquired through a hash table lookup.
++ It's fine, we didn't touch anything yet. */
++ pthread_mutex_unlock (&nodehash_lock);
++ return;
++ }
++
++ *np->dn->hprevp = np->dn->hnext;
++ if (np->dn->hnext)
++ np->dn->hnext->dn->hprevp = np->dn->hprevp;
++ np->dn->hnext = NULL;
++ nodehash_nr_items -= 1;
++ diskfs_nrele_light (np);
++ }
++ pthread_mutex_unlock (&nodehash_lock);
++
+ drop_pager_softrefs (np);
+ }
+
+@@ -554,12 +587,12 @@ diskfs_node_iterate (error_t (*fun)(struct node *))
+ size_t num_nodes;
+ struct node *node, **node_list, **p;
+
+- pthread_spin_lock (&diskfs_node_refcnt_lock);
++ pthread_mutex_lock (&nodehash_lock);
+
+ /* We must copy everything from the hash table into another data structure
+ to avoid running into any problems with the hash-table being modified
+ during processing (normally we delegate access to hash-table with
+- diskfs_node_refcnt_lock, but we can't hold this while locking the
++ nodehash_lock, but we can't hold this while locking the
+ individual node locks). */
+
+ num_nodes = nodehash_nr_items;
+@@ -570,10 +603,10 @@ diskfs_node_iterate (error_t (*fun)(struct node *))
+ for (node = nodehash[n]; node; node = node->dn->hnext)
+ {
+ *p++ = node;
+- node->references++;
++ diskfs_nref (node);
+ }
+
+- pthread_spin_unlock (&diskfs_node_refcnt_lock);
++ pthread_mutex_unlock (&nodehash_lock);
+
+ p = node_list;
+ while (num_nodes-- > 0)
+--
+2.0.0.rc0
+
diff --git a/debian/patches/0014-isofs-use-a-seperate-lock-to-protect-node_cache.patch b/debian/patches/0014-isofs-use-a-seperate-lock-to-protect-node_cache.patch
new file mode 100644
index 00000000..c6af3a4b
--- /dev/null
+++ b/debian/patches/0014-isofs-use-a-seperate-lock-to-protect-node_cache.patch
@@ -0,0 +1,222 @@
+From 57da36c3f7f68de55bd2c360b441879902dc14f8 Mon Sep 17 00:00:00 2001
+From: Justus Winter <4winter@informatik.uni-hamburg.de>
+Date: Tue, 13 May 2014 15:16:31 +0200
+Subject: [PATCH 14/15] isofs: use a separate lock to protect node_cache
+
+Previously, isofs used diskfs_node_refcnt_lock to serialize access to
+the node_cache.
+
+Use a separate lock to protect node_cache. Adjust the reference
+counting accordingly. Every node in the node_cache carries a light
+reference. When we are asked to give up that light reference, we
+reacquire our lock momentarily to check whether someone else
+reacquired a reference through the node_cache.
+
+* isofs/inode.c (node_cache_lock): New lock.
+(inode_cache_find): Use a separate lock to protect node_cache.
+Adjust the reference counting accordingly.
+(diskfs_cached_lookup): Likewise.
+(load_inode): Likewise.
+(cache_inode): Update comment accordingly.
+(diskfs_node_iterate): Likewise.
+(diskfs_node_norefs): Move the code removing the node from node_cache...
+(diskfs_try_dropping_softrefs): ... here, where we check whether
+someone reacquired a reference, and if so hold on to our light
+reference.
+---
+ isofs/inode.c | 72 +++++++++++++++++++++++++++++++++++++++++++----------------
+ 1 file changed, 53 insertions(+), 19 deletions(-)
+
+diff --git a/isofs/inode.c b/isofs/inode.c
+index cdc05ae..4f22086 100644
+--- a/isofs/inode.c
++++ b/isofs/inode.c
+@@ -48,9 +48,19 @@ struct node_cache
+ struct node *np; /* if live */
+ };
+
++/* The node_cache is a cache of nodes.
++
++ Access to node_cache, node_cache_size, and node_cache_alloced is
++ protected by node_cache_lock.
++
++ Every node in the node_cache carries a light reference. When we
++ are asked to give up that light reference, we reacquire our lock
++ momentarily to check whether someone else reacquired a reference
++ through the node_cache. */
+ static int node_cache_size = 0;
+ static int node_cache_alloced = 0;
+ struct node_cache *node_cache = 0;
++static pthread_mutex_t node_cache_lock = PTHREAD_MUTEX_INITIALIZER;
+
+ /* Forward */
+ static error_t read_disknode (struct node *,
+@@ -58,7 +68,7 @@ static error_t read_disknode (struct node *,
+
+
+ /* See if node with identifier ID is in the cache. If so, return it,
+- with one additional reference. diskfs_node_refcnt_lock must be held
++ with one additional reference. node_cache_lock must be held
+ on entry to the call, and will be released iff the node was found
+ in the cache. */
+ void
+@@ -71,8 +81,8 @@ inode_cache_find (off_t id, struct node **npp)
+ && node_cache[i].np)
+ {
+ *npp = node_cache[i].np;
+- (*npp)->references++;
+- pthread_spin_unlock (&diskfs_node_refcnt_lock);
++ diskfs_nref (*npp);
++ pthread_mutex_unlock (&node_cache_lock);
+ pthread_mutex_lock (&(*npp)->lock);
+ return;
+ }
+@@ -92,7 +102,7 @@ use_file_start_id (struct dirrect *record, struct rrip_lookup *rr)
+ }
+
+ /* Enter NP into the cache. The directory entry we used is DR, the
+- cached Rock-Ridge info RR. diskfs_node_refcnt_lock must be held. */
++ cached Rock-Ridge info RR. node_cache_lock must be held. */
+ void
+ cache_inode (struct node *np, struct dirrect *record,
+ struct rrip_lookup *rr)
+@@ -155,7 +165,7 @@ diskfs_cached_lookup (ino_t id, struct node **npp)
+ to avoid presenting zero cache ID's. */
+ id--;
+
+- pthread_spin_lock (&diskfs_node_refcnt_lock);
++ pthread_mutex_lock (&node_cache_lock);
+ assert (id < node_cache_size);
+
+ np = node_cache[id].np;
+@@ -174,7 +184,7 @@ diskfs_cached_lookup (ino_t id, struct node **npp)
+ dn = malloc (sizeof (struct disknode));
+ if (!dn)
+ {
+- pthread_spin_unlock (&diskfs_node_refcnt_lock);
++ pthread_mutex_unlock (&node_cache_lock);
+ release_rrip (&rr);
+ return ENOMEM;
+ }
+@@ -185,16 +195,17 @@ diskfs_cached_lookup (ino_t id, struct node **npp)
+ if (!np)
+ {
+ free (dn);
+- pthread_spin_unlock (&diskfs_node_refcnt_lock);
++ pthread_mutex_unlock (&node_cache_lock);
+ release_rrip (&rr);
+ return ENOMEM;
+ }
+ np->cache_id = id + 1; /* see above for rationale for increment */
+ pthread_mutex_lock (&np->lock);
+ c->np = np;
+- pthread_spin_unlock (&diskfs_node_refcnt_lock);
++ diskfs_nref_light (np);
++ pthread_mutex_unlock (&node_cache_lock);
+
+- err = read_disknode (np, node_cache[id].dr, &rr);
++ err = read_disknode (np, dn->dr, &rr);
+ if (!err)
+ *npp = np;
+
+@@ -204,8 +215,8 @@ diskfs_cached_lookup (ino_t id, struct node **npp)
+ }
+
+
+- np->references++;
+- pthread_spin_unlock (&diskfs_node_refcnt_lock);
++ diskfs_nref (np);
++ pthread_mutex_unlock (&node_cache_lock);
+ pthread_mutex_lock (&np->lock);
+ *npp = np;
+ return 0;
+@@ -315,7 +326,7 @@ load_inode (struct node **npp, struct dirrect *record,
+ if (rr->valid & VALID_CL)
+ record = rr->realdirent;
+
+- pthread_spin_lock (&diskfs_node_refcnt_lock);
++ pthread_mutex_lock (&node_cache_lock);
+
+ /* First check the cache */
+ if (use_file_start_id (record, rr))
+@@ -325,7 +336,7 @@ load_inode (struct node **npp, struct dirrect *record,
+
+ if (*npp)
+ {
+- pthread_spin_unlock (&diskfs_node_refcnt_lock);
++ pthread_mutex_unlock (&node_cache_lock);
+ return 0;
+ }
+
+@@ -333,7 +344,7 @@ load_inode (struct node **npp, struct dirrect *record,
+ dn = malloc (sizeof (struct disknode));
+ if (!dn)
+ {
+- pthread_spin_unlock (&diskfs_node_refcnt_lock);
++ pthread_mutex_unlock (&node_cache_lock);
+ return ENOMEM;
+ }
+ dn->fileinfo = 0;
+@@ -344,14 +355,14 @@ load_inode (struct node **npp, struct dirrect *record,
+ if (!np)
+ {
+ free (dn);
+- pthread_spin_unlock (&diskfs_node_refcnt_lock);
++ pthread_mutex_unlock (&node_cache_lock);
+ return ENOMEM;
+ }
+
+ pthread_mutex_lock (&np->lock);
+
+ cache_inode (np, record, rr);
+- pthread_spin_unlock (&diskfs_node_refcnt_lock);
++ pthread_mutex_unlock (&node_cache_lock);
+
+ err = read_disknode (np, record, rr);
+ *npp = np;
+@@ -505,9 +516,6 @@ error_t (*diskfs_read_symlink_hook) (struct node *, char *)
+ void
+ diskfs_node_norefs (struct node *np)
+ {
+- assert (node_cache[np->cache_id - 1].np == np);
+- node_cache[np->cache_id - 1].np = 0;
+-
+ if (np->dn->translator)
+ free (np->dn->translator);
+
+@@ -521,6 +529,32 @@ diskfs_node_norefs (struct node *np)
+ void
+ diskfs_try_dropping_softrefs (struct node *np)
+ {
++ pthread_mutex_lock (&node_cache_lock);
++ if (np->cache_id != 0)
++ {
++ assert (node_cache[np->cache_id - 1].np == np);
++
++ /* Check if someone reacquired a reference through the
++ node_cache. */
++ unsigned int references;
++ pthread_spin_lock (&diskfs_node_refcnt_lock);
++ references = np->references;
++ pthread_spin_unlock (&diskfs_node_refcnt_lock);
++
++ if (references > 0)
++ {
++ /* A reference was reacquired through a hash table lookup.
++ It's fine, we didn't touch anything yet. */
++ pthread_mutex_unlock (&node_cache_lock);
++ return;
++ }
++
++ node_cache[np->cache_id - 1].np = 0;
++ np->cache_id = 0;
++ diskfs_nrele_light (np);
++ }
++ pthread_mutex_unlock (&node_cache_lock);
++
+ drop_pager_softrefs (np);
+ }
+
+--
+2.0.0.rc0
+
diff --git a/debian/patches/0015-tmpfs-use-a-seperate-lock-to-protect-all_nodes.patch b/debian/patches/0015-tmpfs-use-a-seperate-lock-to-protect-all_nodes.patch
new file mode 100644
index 00000000..fafb113f
--- /dev/null
+++ b/debian/patches/0015-tmpfs-use-a-seperate-lock-to-protect-all_nodes.patch
@@ -0,0 +1,263 @@
+From 7d53b7e08efefe1d6d24bf3a80b3391a9502c6aa Mon Sep 17 00:00:00 2001
+From: Justus Winter <4winter@informatik.uni-hamburg.de>
+Date: Tue, 13 May 2014 15:35:42 +0200
+Subject: [PATCH 15/15] tmpfs: use a separate lock to protect all_nodes
+
+Previously, tmpfs used diskfs_node_refcnt_lock to serialize access to
+the all_nodes and some other global state related to memory
+consumption.
+
+Use a separate lock to protect all_nodes, and also the state related
+to memory consumption, as the latter is updated during insertion and
+removal operations. Adjust the reference counting accordingly. Every
+node in the all_nodes carries a light reference. When we are asked to
+give up that light reference, we reacquire our lock momentarily to
+check whether someone else reacquired a reference through the
+all_nodes.
+
+* tmpfs/node.c (all_nodes_lock): New lock.
+(diskfs_alloc_node): Use a separate lock to protect all_nodes.
+Adjust the reference counting accordingly.
+(diskfs_free_node): Likewise.
+(diskfs_cached_lookup): Likewise.
+(diskfs_node_iterate): Likewise.
+(diskfs_node_norefs): Move the code removing the node from all_nodes...
+(diskfs_try_dropping_softrefs): ... here, where we check whether
+someone reacquired a reference, and if so hold on to our light
+reference.
+* tmpfs/tmpfs.c (diskfs_set_statfs): Use all_nodes_lock.
+* tmpfs/tmpfs.h (all_nodes_lock): New declaration.
+(adjust_used): Use all_nodes_lock.
+---
+ tmpfs/node.c | 76 +++++++++++++++++++++++++++++++++++++++++++----------------
+ tmpfs/tmpfs.c | 4 ++--
+ tmpfs/tmpfs.h | 6 +++--
+ 3 files changed, 62 insertions(+), 24 deletions(-)
+
+diff --git a/tmpfs/node.c b/tmpfs/node.c
+index acc029a..74971ec 100644
+--- a/tmpfs/node.c
++++ b/tmpfs/node.c
+@@ -29,8 +29,18 @@ the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
+ unsigned int num_files;
+ static unsigned int gen;
+
++/* all_nodes is a cache of nodes.
++
++ Access to all_nodes and all_nodes_nr_items is protected by
++ all_nodes_lock.
++
++ Every node in the all_nodes carries a light reference. When we are
++ asked to give up that light reference, we reacquire our lock
++ momentarily to check whether someone else reacquired a reference
++   through the all_nodes.  */
+ struct node *all_nodes;
+ static size_t all_nodes_nr_items;
++pthread_mutex_t all_nodes_lock = PTHREAD_MUTEX_INITIALIZER;
+
+ error_t
+ diskfs_alloc_node (struct node *dp, mode_t mode, struct node **npp)
+@@ -40,18 +50,18 @@ diskfs_alloc_node (struct node *dp, mode_t mode, struct node **npp)
+ dn = calloc (1, sizeof *dn);
+ if (dn == 0)
+ return ENOSPC;
+- pthread_spin_lock (&diskfs_node_refcnt_lock);
++ pthread_mutex_lock (&all_nodes_lock);
+ if (round_page (tmpfs_space_used + sizeof *dn) / vm_page_size
+ > tmpfs_page_limit)
+ {
+- pthread_spin_unlock (&diskfs_node_refcnt_lock);
++ pthread_mutex_unlock (&all_nodes_lock);
+ free (dn);
+ return ENOSPC;
+ }
+ dn->gen = gen++;
+ ++num_files;
+ tmpfs_space_used += sizeof *dn;
+- pthread_spin_unlock (&diskfs_node_refcnt_lock);
++ pthread_mutex_unlock (&all_nodes_lock);
+
+ dn->type = IFTODT (mode & S_IFMT);
+ return diskfs_cached_lookup ((ino_t) (uintptr_t) dn, npp);
+@@ -75,15 +85,18 @@ diskfs_free_node (struct node *np, mode_t mode)
+ free (np->dn->u.lnk);
+ break;
+ }
++
++ pthread_mutex_lock (&all_nodes_lock);
+ *np->dn->hprevp = np->dn->hnext;
+ if (np->dn->hnext != 0)
+ np->dn->hnext->dn->hprevp = np->dn->hprevp;
+ all_nodes_nr_items -= 1;
+- free (np->dn);
+- np->dn = 0;
+-
+ --num_files;
+ tmpfs_space_used -= sizeof *np->dn;
++ pthread_mutex_unlock (&all_nodes_lock);
++
++ free (np->dn);
++ np->dn = 0;
+ }
+
+ void
+@@ -117,14 +130,6 @@ diskfs_node_norefs (struct node *np)
+ np->dn->u.chr = np->dn_stat.st_rdev;
+ break;
+ }
+-
+- /* Remove this node from the cache list rooted at `all_nodes'. */
+- *np->dn->hprevp = np->dn->hnext;
+- if (np->dn->hnext != 0)
+- np->dn->hnext->dn->hprevp = np->dn->hprevp;
+- all_nodes_nr_items -= 1;
+- np->dn->hnext = 0;
+- np->dn->hprevp = 0;
+ }
+
+ free (np);
+@@ -167,11 +172,14 @@ diskfs_cached_lookup (ino_t inum, struct node **npp)
+
+ assert (npp);
+
++ pthread_mutex_lock (&all_nodes_lock);
++
+ if (dn->hprevp != 0) /* There is already a node. */
+ {
+ np = *dn->hprevp;
+ assert (np->dn == dn);
+ assert (*dn->hprevp == np);
++ pthread_mutex_unlock (&all_nodes_lock);
+
+ diskfs_nref (np);
+ }
+@@ -183,14 +191,14 @@ diskfs_cached_lookup (ino_t inum, struct node **npp)
+ np = diskfs_make_node (dn);
+ np->cache_id = (ino_t) (uintptr_t) dn;
+
+- pthread_spin_lock (&diskfs_node_refcnt_lock);
+ dn->hnext = all_nodes;
+ if (dn->hnext)
+ dn->hnext->dn->hprevp = &dn->hnext;
+ dn->hprevp = &all_nodes;
+ all_nodes = np;
+ all_nodes_nr_items += 1;
+- pthread_spin_unlock (&diskfs_node_refcnt_lock);
++ diskfs_nref_light (np);
++ pthread_mutex_unlock (&all_nodes_lock);
+
+ st = &np->dn_stat;
+ memset (st, 0, sizeof *st);
+@@ -229,12 +237,12 @@ diskfs_node_iterate (error_t (*fun) (struct node *))
+ size_t num_nodes;
+ struct node *node, **node_list, **p;
+
+- pthread_spin_lock (&diskfs_node_refcnt_lock);
++ pthread_mutex_lock (&all_nodes_lock);
+
+ /* We must copy everything from the hash table into another data structure
+ to avoid running into any problems with the hash-table being modified
+ during processing (normally we delegate access to hash-table with
+- diskfs_node_refcnt_lock, but we can't hold this while locking the
++ all_nodes_lock, but we can't hold this while locking the
+ individual node locks). */
+
+ num_nodes = all_nodes_nr_items;
+@@ -243,10 +251,10 @@ diskfs_node_iterate (error_t (*fun) (struct node *))
+ for (node = all_nodes; node != 0; node = node->dn->hnext)
+ {
+ *p++ = node;
+- node->references++;
++ diskfs_nref (node);
+ }
+
+- pthread_spin_unlock (&diskfs_node_refcnt_lock);
++ pthread_mutex_unlock (&all_nodes_lock);
+
+ p = node_list;
+ while (num_nodes-- > 0)
+@@ -272,6 +280,34 @@ diskfs_node_iterate (error_t (*fun) (struct node *))
+ void
+ diskfs_try_dropping_softrefs (struct node *np)
+ {
++ pthread_mutex_lock (&all_nodes_lock);
++ if (np->dn->hnext != NULL)
++ {
++ /* Check if someone reacquired a reference through the
++ all_nodes. */
++ unsigned int references;
++ pthread_spin_lock (&diskfs_node_refcnt_lock);
++ references = np->references;
++ pthread_spin_unlock (&diskfs_node_refcnt_lock);
++
++ if (references > 0)
++ {
++ /* A reference was reacquired through a hash table lookup.
++ It's fine, we didn't touch anything yet. */
++ pthread_mutex_unlock (&all_nodes_lock);
++ return;
++ }
++
++ /* Remove this node from the cache list rooted at `all_nodes'. */
++ *np->dn->hprevp = np->dn->hnext;
++ if (np->dn->hnext != 0)
++ np->dn->hnext->dn->hprevp = np->dn->hprevp;
++ all_nodes_nr_items -= 1;
++ np->dn->hnext = NULL;
++ np->dn->hprevp = NULL;
++ diskfs_nrele_light (np);
++ }
++ pthread_mutex_unlock (&all_nodes_lock);
+ }
+
+ /* The user must define this function. Node NP has some light
+diff --git a/tmpfs/tmpfs.c b/tmpfs/tmpfs.c
+index a45d343..024b818 100644
+--- a/tmpfs/tmpfs.c
++++ b/tmpfs/tmpfs.c
+@@ -67,10 +67,10 @@ diskfs_set_statfs (struct statfs *st)
+ st->f_bsize = vm_page_size;
+ st->f_blocks = tmpfs_page_limit;
+
+- pthread_spin_lock (&diskfs_node_refcnt_lock);
++ pthread_mutex_lock (&all_nodes_lock);
+ st->f_files = num_files;
+ pages = round_page (tmpfs_space_used) / vm_page_size;
+- pthread_spin_unlock (&diskfs_node_refcnt_lock);
++ pthread_mutex_unlock (&all_nodes_lock);
+
+ st->f_bfree = pages < tmpfs_page_limit ? tmpfs_page_limit - pages : 0;
+ st->f_bavail = st->f_bfree;
+diff --git a/tmpfs/tmpfs.h b/tmpfs/tmpfs.h
+index b3c636d..187249e 100644
+--- a/tmpfs/tmpfs.h
++++ b/tmpfs/tmpfs.h
+@@ -24,6 +24,7 @@ the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
+ #include <sys/types.h>
+ #include <dirent.h>
+ #include <stdint.h>
++#include <pthread.h>
+
+ struct disknode
+ {
+@@ -71,15 +72,16 @@ struct tmpfs_dirent
+
+ extern unsigned int num_files;
+ extern off_t tmpfs_page_limit, tmpfs_space_used;
++extern pthread_mutex_t all_nodes_lock;
+
+ extern mach_port_t default_pager;
+
+ static inline void
+ adjust_used (off_t change)
+ {
+- pthread_spin_lock (&diskfs_node_refcnt_lock);
++ pthread_mutex_lock (&all_nodes_lock);
+ tmpfs_space_used += change;
+- pthread_spin_unlock (&diskfs_node_refcnt_lock);
++ pthread_mutex_unlock (&all_nodes_lock);
+ }
+
+ #endif
+--
+2.0.0.rc0
+
diff --git a/debian/patches/series b/debian/patches/series
index c08b0f99..5bce22f8 100644
--- a/debian/patches/series
+++ b/debian/patches/series
@@ -40,3 +40,18 @@ xkb-compat.patch
mach-defpager-protected-payload.patch
+0001-libihash-fix-type-of-max_load.patch
+0002-libihash-use-an-integer-hash-function-on-the-keys.patch
+0003-libihash-use-linear-probing-and-fast-modulo-operatio.patch
+0004-libihash-use-fast-binary-scaling-to-determine-the-lo.patch
+0005-include-add-lock-less-reference-counting-primitives.patch
+0006-libdiskfs-lock-less-reference-counting-for-peropen-o.patch
+0007-libtrivfs-lock-less-reference-counting-for-trivfs_pe.patch
+0008-libports-use-a-single-hash-table.patch
+0009-libports-lock-less-reference-counting-for-port_info-.patch
+0010-ext2fs-improve-enable-disable-_caching.patch
+0011-fatfs-improve-enable-disable-_caching.patch
+0012-ext2fs-use-a-seperate-lock-to-protect-nodehash.patch
+0013-fatfs-use-a-seperate-lock-to-protect-nodehash.patch
+0014-isofs-use-a-seperate-lock-to-protect-node_cache.patch
+0015-tmpfs-use-a-seperate-lock-to-protect-all_nodes.patch