Diffstat (limited to 'debian/patches/0010-tmpfs-use-a-seperate-lock-to-protect-all_nodes.patch')
-rw-r--r-- | debian/patches/0010-tmpfs-use-a-seperate-lock-to-protect-all_nodes.patch | 287 |
1 file changed, 0 insertions, 287 deletions
diff --git a/debian/patches/0010-tmpfs-use-a-seperate-lock-to-protect-all_nodes.patch b/debian/patches/0010-tmpfs-use-a-seperate-lock-to-protect-all_nodes.patch
deleted file mode 100644
index 1f57a86b..00000000
--- a/debian/patches/0010-tmpfs-use-a-seperate-lock-to-protect-all_nodes.patch
+++ /dev/null
@@ -1,287 +0,0 @@
-From 0ec07139195dcc490b374a59684f8ee62cfe7164 Mon Sep 17 00:00:00 2001
-From: Justus Winter <4winter@informatik.uni-hamburg.de>
-Date: Tue, 13 May 2014 15:35:42 +0200
-Subject: [PATCH 10/11] tmpfs: use a seperate lock to protect all_nodes
-
-Previously, tmpfs used diskfs_node_refcnt_lock to serialize access to
-the all_nodes and some other related global state related to memory
-consumption.
-
-Use a separate lock to protect all_nodes, and to the state related to
-memory consumption as this is updated during insertion and removal
-operations. Adjust the reference counting accordingly. Every node in
-the all_nodes carries a light reference. When we are asked to give up
-that light reference, we reacquire our lock momentarily to check
-whether someone else reacquired a reference through the all_nodes.
-
-* tmpfs/node.c (all_nodes_lock): New lock.
-(diskfs_alloc_node): Use a separate lock to protect all_nodes.
-Adjust the reference counting accordingly.
-(diskfs_free_node): Likewise.
-(diskfs_cached_lookup):Likewise.
-(diskfs_node_iterate): Likewise.
-(diskfs_node_norefs): Move the code removing the node from all_nodes...
-(diskfs_try_dropping_softrefs): ... here, where we check whether
-someone reacquired a reference, and if so hold on to our light
-reference.
-* tmpfs/tmpfs.c (diskfs_set_statfs): Use all_nodes_lock.
-* tmpfs/tmpfs.h (all_nodes_lock): New declaration.
-(adjust_used): Use all_nodes_lock.
----
- tmpfs/node.c  | 87 ++++++++++++++++++++++++++++++++++++++++++-----------------
- tmpfs/tmpfs.c |  6 ++---
- tmpfs/tmpfs.h | 11 +++++---
- 3 files changed, 72 insertions(+), 32 deletions(-)
-
-diff --git a/tmpfs/node.c b/tmpfs/node.c
-index acc029a..3c8e66a 100644
---- a/tmpfs/node.c
-+++ b/tmpfs/node.c
-@@ -29,8 +29,18 @@ the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
- unsigned int num_files;
- static unsigned int gen;
- 
-+/* all_nodes is a cache of nodes.
-+
-+   Access to all_nodes and all_nodes_nr_items is protected by
-+   all_nodes_lock.
-+
-+   Every node in the all_nodes carries a light reference. When we are
-+   asked to give up that light reference, we reacquire our lock
-+   momentarily to check whether someone else reacquired a reference
-+   through the all_nodes. */
- struct node *all_nodes;
- static size_t all_nodes_nr_items;
-+pthread_rwlock_t all_nodes_lock = PTHREAD_RWLOCK_INITIALIZER;
- 
- error_t
- diskfs_alloc_node (struct node *dp, mode_t mode, struct node **npp)
-@@ -40,18 +50,17 @@ diskfs_alloc_node (struct node *dp, mode_t mode, struct node **npp)
-   dn = calloc (1, sizeof *dn);
-   if (dn == 0)
-     return ENOSPC;
--  pthread_spin_lock (&diskfs_node_refcnt_lock);
--  if (round_page (tmpfs_space_used + sizeof *dn) / vm_page_size
-+
-+  if (round_page (get_used () + sizeof *dn) / vm_page_size
-       > tmpfs_page_limit)
-     {
--      pthread_spin_unlock (&diskfs_node_refcnt_lock);
-+      pthread_rwlock_unlock (&all_nodes_lock);
-       free (dn);
-       return ENOSPC;
-     }
-   dn->gen = gen++;
--  ++num_files;
--  tmpfs_space_used += sizeof *dn;
--  pthread_spin_unlock (&diskfs_node_refcnt_lock);
-+  __atomic_add_fetch (&num_files, 1, __ATOMIC_RELAXED);
-+  adjust_used (sizeof *dn);
- 
-   dn->type = IFTODT (mode & S_IFMT);
-   return diskfs_cached_lookup ((ino_t) (uintptr_t) dn, npp);
-@@ -75,15 +84,18 @@ diskfs_free_node (struct node *np, mode_t mode)
-       free (np->dn->u.lnk);
-       break;
-     }
-+
-+  pthread_rwlock_wrlock (&all_nodes_lock);
-   *np->dn->hprevp = np->dn->hnext;
-   if (np->dn->hnext != 0)
-     np->dn->hnext->dn->hprevp = np->dn->hprevp;
-   all_nodes_nr_items -= 1;
-+  __atomic_sub_fetch (&num_files, 1, __ATOMIC_RELAXED);
-+  adjust_used (-sizeof *np->dn);
-+  pthread_rwlock_unlock (&all_nodes_lock);
-+
-   free (np->dn);
-   np->dn = 0;
--
--  --num_files;
--  tmpfs_space_used -= sizeof *np->dn;
- }
- 
- void
-@@ -117,14 +129,6 @@ diskfs_node_norefs (struct node *np)
-           np->dn->u.chr = np->dn_stat.st_rdev;
-           break;
-         }
--
--      /* Remove this node from the cache list rooted at `all_nodes'. */
--      *np->dn->hprevp = np->dn->hnext;
--      if (np->dn->hnext != 0)
--        np->dn->hnext->dn->hprevp = np->dn->hprevp;
--      all_nodes_nr_items -= 1;
--      np->dn->hnext = 0;
--      np->dn->hprevp = 0;
-     }
- 
-   free (np);
-@@ -167,11 +171,14 @@ diskfs_cached_lookup (ino_t inum, struct node **npp)
- 
-   assert (npp);
- 
-+  pthread_rwlock_rdlock (&all_nodes_lock);
-+
-   if (dn->hprevp != 0)          /* There is already a node. */
-     {
-       np = *dn->hprevp;
-       assert (np->dn == dn);
-       assert (*dn->hprevp == np);
-+      pthread_rwlock_unlock (&all_nodes_lock);
- 
-       diskfs_nref (np);
-     }
-@@ -179,18 +186,20 @@ diskfs_cached_lookup (ino_t inum, struct node **npp)
-     /* Create the new node. */
-     {
-       struct stat *st;
-+      pthread_rwlock_unlock (&all_nodes_lock);
- 
-       np = diskfs_make_node (dn);
-       np->cache_id = (ino_t) (uintptr_t) dn;
- 
--      pthread_spin_lock (&diskfs_node_refcnt_lock);
-+      pthread_rwlock_wrlock (&all_nodes_lock);
-       dn->hnext = all_nodes;
-       if (dn->hnext)
-         dn->hnext->dn->hprevp = &dn->hnext;
-       dn->hprevp = &all_nodes;
-       all_nodes = np;
-       all_nodes_nr_items += 1;
--      pthread_spin_unlock (&diskfs_node_refcnt_lock);
-+      diskfs_nref_light (np);
-+      pthread_rwlock_unlock (&all_nodes_lock);
- 
-       st = &np->dn_stat;
-       memset (st, 0, sizeof *st);
-@@ -229,12 +238,12 @@ diskfs_node_iterate (error_t (*fun) (struct node *))
-   size_t num_nodes;
-   struct node *node, **node_list, **p;
- 
--  pthread_spin_lock (&diskfs_node_refcnt_lock);
-+  pthread_rwlock_rdlock (&all_nodes_lock);
- 
-   /* We must copy everything from the hash table into another data structure
-      to avoid running into any problems with the hash-table being modified
-      during processing (normally we delegate access to hash-table with
--     diskfs_node_refcnt_lock, but we can't hold this while locking the
-+     all_nodes_lock, but we can't hold this while locking the
-      individual node locks). */
- 
-   num_nodes = all_nodes_nr_items;
-@@ -243,10 +252,10 @@ diskfs_node_iterate (error_t (*fun) (struct node *))
-   for (node = all_nodes; node != 0; node = node->dn->hnext)
-     {
-       *p++ = node;
--      node->references++;
-+      diskfs_nref_light (node);
-     }
- 
--  pthread_spin_unlock (&diskfs_node_refcnt_lock);
-+  pthread_rwlock_unlock (&all_nodes_lock);
- 
-   p = node_list;
-   while (num_nodes-- > 0)
-@@ -258,7 +267,7 @@ diskfs_node_iterate (error_t (*fun) (struct node *))
-           err = (*fun) (node);
-           pthread_mutex_unlock (&node->lock);
-         }
--      diskfs_nrele (node);
-+      diskfs_nrele_light (node);
-     }
- 
-   return err;
-@@ -272,6 +281,34 @@ diskfs_node_iterate (error_t (*fun) (struct node *))
- void
- diskfs_try_dropping_softrefs (struct node *np)
- {
-+  pthread_rwlock_wrlock (&all_nodes_lock);
-+  if (np->dn->hnext != NULL)
-+    {
-+      /* Check if someone reacquired a reference through the
-+         all_nodes. */
-+      unsigned int references;
-+      pthread_spin_lock (&diskfs_node_refcnt_lock);
-+      references = np->references;
-+      pthread_spin_unlock (&diskfs_node_refcnt_lock);
-+
-+      if (references > 0)
-+        {
-+          /* A reference was reacquired through a hash table lookup.
-+             It's fine, we didn't touch anything yet. */
-+          pthread_rwlock_unlock (&all_nodes_lock);
-+          return;
-+        }
-+
-+      /* Remove this node from the cache list rooted at `all_nodes'. */
-+      *np->dn->hprevp = np->dn->hnext;
-+      if (np->dn->hnext != 0)
-+        np->dn->hnext->dn->hprevp = np->dn->hprevp;
-+      all_nodes_nr_items -= 1;
-+      np->dn->hnext = NULL;
-+      np->dn->hprevp = NULL;
-+      diskfs_nrele_light (np);
-+    }
-+  pthread_rwlock_unlock (&all_nodes_lock);
- }
- 
- /* The user must define this funcction. Node NP has some light
-@@ -447,7 +484,7 @@ diskfs_grow (struct node *np, off_t size, struct protid *cred)
- 
-   off_t set_size = size;
-   size = round_page (size);
--  if (round_page (tmpfs_space_used + size - np->allocsize)
-+  if (round_page (get_used () + size - np->allocsize)
-       / vm_page_size > tmpfs_page_limit)
-     return ENOSPC;
- 
-diff --git a/tmpfs/tmpfs.c b/tmpfs/tmpfs.c
-index a45d343..5337e58 100644
---- a/tmpfs/tmpfs.c
-+++ b/tmpfs/tmpfs.c
-@@ -67,10 +67,8 @@ diskfs_set_statfs (struct statfs *st)
-   st->f_bsize = vm_page_size;
-   st->f_blocks = tmpfs_page_limit;
- 
--  pthread_spin_lock (&diskfs_node_refcnt_lock);
--  st->f_files = num_files;
--  pages = round_page (tmpfs_space_used) / vm_page_size;
--  pthread_spin_unlock (&diskfs_node_refcnt_lock);
-+  st->f_files = __atomic_load_n (&num_files, __ATOMIC_RELAXED);
-+  pages = round_page (get_used ()) / vm_page_size;
- 
-   st->f_bfree = pages < tmpfs_page_limit ? tmpfs_page_limit - pages : 0;
-   st->f_bavail = st->f_bfree;
-diff --git a/tmpfs/tmpfs.h b/tmpfs/tmpfs.h
-index b3c636d..d1fbda9 100644
---- a/tmpfs/tmpfs.h
-+++ b/tmpfs/tmpfs.h
-@@ -24,6 +24,7 @@ the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
- #include <sys/types.h>
- #include <dirent.h>
- #include <stdint.h>
-+#include <pthread.h>
- 
- struct disknode
- {
-@@ -77,9 +78,13 @@ extern mach_port_t default_pager;
- static inline void
- adjust_used (off_t change)
- {
--  pthread_spin_lock (&diskfs_node_refcnt_lock);
--  tmpfs_space_used += change;
--  pthread_spin_unlock (&diskfs_node_refcnt_lock);
-+  __atomic_add_fetch (&num_files, change, __ATOMIC_RELAXED);
-+}
-+
-+static inline off_t
-+get_used (void)
-+{
-+  return __atomic_load_n (&num_files, __ATOMIC_RELAXED);
- }
- 
- #endif
--- 
-2.0.0.rc2
-
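Note on the scheme described in the deleted patch's commit message: the node cache keeps a light reference on every entry, and when diskfs_try_dropping_softrefs is asked to give that reference up, it retakes the cache lock and re-checks the hard reference count, keeping the entry if a concurrent lookup re-acquired it. The following is a minimal, self-contained C sketch of that pattern under simplified assumptions; it is not Hurd code, and all names in it (struct cache_node, cache_insert, cache_try_drop_softref, cache_lock, refcnt_lock) are invented for illustration.

/* Illustrative sketch only: a cache guarded by its own rwlock, where every
   cached entry holds a "light" reference, and dropping soft references
   re-checks the hard reference count under the cache lock before unlinking. */
#include <pthread.h>
#include <stdio.h>

struct cache_node
{
  struct cache_node *next, **prevp;   /* intrusive list, like hnext/hprevp */
  unsigned int hard_refs;             /* protected by refcnt_lock */
  unsigned int light_refs;            /* protected by refcnt_lock */
};

static struct cache_node *cache_head;   /* like all_nodes */
static size_t cache_nr_items;           /* like all_nodes_nr_items */
static pthread_rwlock_t cache_lock = PTHREAD_RWLOCK_INITIALIZER;  /* like all_nodes_lock */
static pthread_spinlock_t refcnt_lock;  /* like diskfs_node_refcnt_lock */

/* Insert NP into the cache; the cache itself keeps one light reference. */
static void
cache_insert (struct cache_node *np)
{
  pthread_rwlock_wrlock (&cache_lock);
  np->next = cache_head;
  if (np->next)
    np->next->prevp = &np->next;
  np->prevp = &cache_head;
  cache_head = np;
  cache_nr_items += 1;

  pthread_spin_lock (&refcnt_lock);
  np->light_refs += 1;                /* the light reference owned by the cache */
  pthread_spin_unlock (&refcnt_lock);
  pthread_rwlock_unlock (&cache_lock);
}

/* Called when NP has only light references left.  Re-check the hard
   reference count under cache_lock: a concurrent lookup may have taken a
   new hard reference through the cache, in which case the entry is kept.
   Returns 1 if the cache's light reference was dropped.  */
static int
cache_try_drop_softref (struct cache_node *np)
{
  int dropped = 0;

  pthread_rwlock_wrlock (&cache_lock);
  if (np->prevp != NULL)              /* still linked into the cache */
    {
      unsigned int hard;

      pthread_spin_lock (&refcnt_lock);
      hard = np->hard_refs;
      pthread_spin_unlock (&refcnt_lock);

      if (hard == 0)
        {
          /* Nobody re-acquired it; unlink and give up the light reference. */
          *np->prevp = np->next;
          if (np->next)
            np->next->prevp = np->prevp;
          np->next = NULL;
          np->prevp = NULL;
          cache_nr_items -= 1;

          pthread_spin_lock (&refcnt_lock);
          np->light_refs -= 1;
          pthread_spin_unlock (&refcnt_lock);
          dropped = 1;
        }
    }
  pthread_rwlock_unlock (&cache_lock);
  return dropped;
}

int
main (void)
{
  struct cache_node n = { 0 };

  pthread_spin_init (&refcnt_lock, PTHREAD_PROCESS_PRIVATE);
  cache_insert (&n);
  printf ("cached items: %zu\n", cache_nr_items);          /* prints 1 */
  printf ("dropped: %d, items left: %zu\n",
          cache_try_drop_softref (&n), cache_nr_items);    /* prints 1, 0 */
  return 0;
}

The re-check matters because a concurrent lookup can find the node in the cache and take a new hard reference between the request to drop soft references and the acquisition of the cache write lock; unlinking unconditionally at that point would discard an entry that is still in use.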