path: root/debian/patches/nodeihash0003-libihash-prefer-performance-degradation-over-failure.patch
Diffstat (limited to 'debian/patches/nodeihash0003-libihash-prefer-performance-degradation-over-failure.patch')
-rw-r--r--  debian/patches/nodeihash0003-libihash-prefer-performance-degradation-over-failure.patch  65
1 file changed, 65 insertions(+), 0 deletions(-)
diff --git a/debian/patches/nodeihash0003-libihash-prefer-performance-degradation-over-failure.patch b/debian/patches/nodeihash0003-libihash-prefer-performance-degradation-over-failure.patch
new file mode 100644
index 00000000..d4c78df9
--- /dev/null
+++ b/debian/patches/nodeihash0003-libihash-prefer-performance-degradation-over-failure.patch
@@ -0,0 +1,65 @@
+From 920d550d22576e0bd5d09597cf8ce2c91a893713 Mon Sep 17 00:00:00 2001
+From: Justus Winter <4winter@informatik.uni-hamburg.de>
+Date: Sun, 7 Jun 2015 00:58:36 +0200
+Subject: [PATCH hurd 3/3] libihash: prefer performance degradation over
+ failure
+
+* libihash/ihash.c (hurd_ihash_add): Add the item even though we are
+above the load factor if resizing failed.
+---
+ libihash/ihash.c | 18 +++++++++++++++---
+ 1 file changed, 15 insertions(+), 3 deletions(-)
+
+diff --git a/libihash/ihash.c b/libihash/ihash.c
+index 76c695a..289fce5 100644
+--- a/libihash/ihash.c
++++ b/libihash/ihash.c
+@@ -303,18 +303,19 @@ hurd_ihash_add (hurd_ihash_t ht, hurd_ihash_key_t key, hurd_ihash_value_t item)
+ {
+ struct hurd_ihash old_ht = *ht;
+ int was_added;
++ int fatal = 0; /* bail out on allocation errors */
+ unsigned int i;
+
+ if (ht->size)
+ {
+ /* Only fill the hash table up to its maximum load factor. */
+ if (hurd_ihash_get_load (ht) <= ht->max_load)
++ add_one:
+ if (add_one (ht, key, item))
+ return 0;
+ }
+
+ /* The hash table is too small, and we have to increase it. */
+- ht->nr_items = 0;
+ if (ht->size == 0)
+ ht->size = HURD_IHASH_MIN_SIZE;
+ else
+@@ -325,11 +326,22 @@ hurd_ihash_add (hurd_ihash_t ht, hurd_ihash_key_t key, hurd_ihash_value_t item)
+
+ if (ht->items == NULL)
+ {
+- *ht = old_ht;
+- return ENOMEM;
++ if (fatal || old_ht.size == 0)
++ {
++ *ht = old_ht;
++ return ENOMEM;
++ }
++
++ /* We prefer performance degradation over failure. Therefore,
++ we add the item even though we are above the load factor. If
++ the table is full, this will fail. We set the fatal flag to
++ avoid looping. */
++ fatal = 1;
++ goto add_one;
+ }
+
+ /* We have to rehash the old entries. */
++ ht->nr_items = 0;
+ for (i = 0; i < old_ht.size; i++)
+ if (!index_empty (&old_ht, i))
+ {
+--
+2.1.4
+
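
As a rough illustration of the fallback the commit message describes (and not the libihash code itself), here is a minimal, self-contained C sketch of the same idea: when growing the table fails, keep the old table and insert above the load factor, returning ENOMEM only when the table is truly full. All names here (struct table, put_one, table_add), the fixed 3/4 load factor, and the key-0-means-empty convention are hypothetical simplifications; the real hurd_ihash_add additionally uses a fatal flag and a goto so the add_one path is retried exactly once.

    /* Hypothetical sketch, not the libihash API: an open-addressing table
       that tolerates a failed grow by inserting above its load factor.  */
    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct table
    {
      size_t size;       /* number of slots */
      size_t nr_items;   /* occupied slots */
      long *slots;       /* 0 = empty, otherwise the stored key */
    };

    /* Probe linearly for a free slot; fails only if the table is full.  */
    static int
    put_one (struct table *t, long key)
    {
      for (size_t i = 0; i < t->size; i++)
        {
          size_t idx = ((size_t) key + i) % t->size;
          if (t->slots[idx] == 0)
            {
              t->slots[idx] = key;
              t->nr_items++;
              return 1;
            }
        }
      return 0;   /* completely full */
    }

    /* Insert KEY, growing the table once it exceeds a 3/4 load factor.
       If growing fails, degrade gracefully: keep the crowded table and
       insert anyway, returning ENOMEM only when no slot is left.  */
    static int
    table_add (struct table *t, long key)
    {
      if (t->size && 4 * (t->nr_items + 1) <= 3 * t->size)
        return put_one (t, key) ? 0 : ENOMEM;

      size_t new_size = t->size ? 2 * t->size : 8;
      long *new_slots = calloc (new_size, sizeof *new_slots);
      if (new_slots == NULL)
        {
          /* Performance degradation instead of failure.  */
          if (t->size && put_one (t, key))
            return 0;
          return ENOMEM;
        }

      /* Rehash the old entries into the larger table.  */
      struct table old = *t;
      t->size = new_size;
      t->nr_items = 0;
      t->slots = new_slots;
      for (size_t i = 0; i < old.size; i++)
        if (old.slots[i] != 0)
          put_one (t, old.slots[i]);
      free (old.slots);

      return put_one (t, key) ? 0 : ENOMEM;
    }

    int
    main (void)
    {
      struct table t = { 0, 0, NULL };
      for (long k = 1; k <= 20; k++)
        if (table_add (&t, k) != 0)
          fprintf (stderr, "insert %ld failed\n", k);
      printf ("%zu items in %zu slots\n", t.nr_items, t.size);
      free (t.slots);
      return 0;
    }

In this sketch the degraded path simply falls back to the existing array, so no retry loop is possible; the patch above needs its fatal flag because it jumps back to the shared add_one label, and without the flag a second allocation failure would loop forever.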