Diffstat (limited to 'debian/patches')
5 files changed, 1190 insertions, 0 deletions
diff --git a/debian/patches/0001-ext2fs-use-a-seperate-lock-to-protect-nodehash.patch b/debian/patches/0001-ext2fs-use-a-seperate-lock-to-protect-nodehash.patch new file mode 100644 index 00000000..9d023403 --- /dev/null +++ b/debian/patches/0001-ext2fs-use-a-seperate-lock-to-protect-nodehash.patch @@ -0,0 +1,259 @@ +From 9e33342213f1af944aab1b94a020f514eb7d8883 Mon Sep 17 00:00:00 2001 +From: Justus Winter <4winter@informatik.uni-hamburg.de> +Date: Tue, 13 May 2014 13:09:15 +0200 +Subject: [PATCH 1/4] ext2fs: use a seperate lock to protect nodehash + +Previously, ext2fs used diskfs_node_refcnt_lock to serialize access to +the nodehash. + +Use a separate lock to protect nodehash. Adjust the reference +counting accordingly. Every node in the nodehash carries a light +reference. When we are asked to give up that light reference, we +reacquire our lock momentarily to check whether someone else +reacquired a reference through the nodehash. + +* ext2fs/inode.c (nodecache_lock): New lock. +(diskfs_cached_lookup): Use a separate lock to protect nodehash. +Adjust the reference counting accordingly. +(ifind): Likewise. +(diskfs_node_iterate): Likewise. +(diskfs_node_norefs): Move the code removing the node from nodehash... +(diskfs_try_dropping_softrefs): ... here, where we check whether +someone reacquired a reference, and if so hold on to our light +reference. +--- + ext2fs/inode.c | 127 +++++++++++++++++++++++++++++++++++++++++---------------- + 1 file changed, 91 insertions(+), 36 deletions(-) + +diff --git a/ext2fs/inode.c b/ext2fs/inode.c +index ed78265..67c502a 100644 +--- a/ext2fs/inode.c ++++ b/ext2fs/inode.c +@@ -46,8 +46,19 @@ + #define INOHASH(ino) (((unsigned)(ino))%INOHSZ) + #endif + ++/* The nodehash is a cache of nodes. ++ ++ Access to nodehash and nodehash_nr_items is protected by ++ nodecache_lock. ++ ++ Every node in the nodehash carries a light reference. When we are ++ asked to give up that light reference, we reacquire our lock ++ momentarily to check whether someone else reacquired a reference ++ through the nodehash. */ + static struct node *nodehash[INOHSZ]; + static size_t nodehash_nr_items; ++/* nodecache_lock must be acquired before diskfs_node_refcnt_lock. */ ++static pthread_rwlock_t nodecache_lock = PTHREAD_RWLOCK_INITIALIZER; + + static error_t read_node (struct node *np); + +@@ -62,33 +73,37 @@ inode_init () + nodehash[n] = 0; + } + ++/* Lookup node with inode number INUM. Returns NULL if the node is ++ not found in the node cache. */ ++static struct node * ++lookup (ino_t inum) ++{ ++ struct node *np; ++ for (np = nodehash[INOHASH(inum)]; np; np = np->dn->hnext) ++ if (np->cache_id == inum) ++ return np; ++ return NULL; ++} ++ + /* Fetch inode INUM, set *NPP to the node structure; + gain one user reference and lock the node. */ + error_t + diskfs_cached_lookup (ino_t inum, struct node **npp) + { + error_t err; +- struct node *np; ++ struct node *np, *tmp; + struct disknode *dn; + +- pthread_spin_lock (&diskfs_node_refcnt_lock); +- for (np = nodehash[INOHASH(inum)]; np; np = np->dn->hnext) +- if (np->cache_id == inum) +- { +- np->references++; +- pthread_spin_unlock (&diskfs_node_refcnt_lock); +- pthread_mutex_lock (&np->lock); +- *npp = np; +- return 0; +- } ++ pthread_rwlock_rdlock (&nodecache_lock); ++ np = lookup (inum); ++ if (np) ++ goto gotit; ++ pthread_rwlock_unlock (&nodecache_lock); + + /* Format specific data for the new node. */ + dn = malloc (sizeof (struct disknode)); + if (! 
dn) +- { +- pthread_spin_unlock (&diskfs_node_refcnt_lock); +- return ENOMEM; +- } ++ return ENOMEM; + dn->dirents = 0; + dn->dir_idx = 0; + dn->pager = 0; +@@ -102,14 +117,24 @@ diskfs_cached_lookup (ino_t inum, struct node **npp) + pthread_mutex_lock (&np->lock); + + /* Put NP in NODEHASH. */ ++ pthread_rwlock_wrlock (&nodecache_lock); ++ tmp = lookup (inum); ++ if (tmp) ++ { ++ /* We lost a race. */ ++ diskfs_nput (np); ++ np = tmp; ++ goto gotit; ++ } ++ + dn->hnext = nodehash[INOHASH(inum)]; + if (dn->hnext) + dn->hnext->dn->hprevp = &dn->hnext; + dn->hprevp = &nodehash[INOHASH(inum)]; + nodehash[INOHASH(inum)] = np; ++ diskfs_nref_light (np); + nodehash_nr_items += 1; +- +- pthread_spin_unlock (&diskfs_node_refcnt_lock); ++ pthread_rwlock_unlock (&nodecache_lock); + + /* Get the contents of NP off disk. */ + err = read_node (np); +@@ -131,6 +156,13 @@ diskfs_cached_lookup (ino_t inum, struct node **npp) + *npp = np; + return 0; + } ++ ++ gotit: ++ diskfs_nref (np); ++ pthread_rwlock_unlock (&nodecache_lock); ++ pthread_mutex_lock (&np->lock); ++ *npp = np; ++ return 0; + } + + /* Lookup node INUM (which must have a reference already) and return it +@@ -140,17 +172,12 @@ ifind (ino_t inum) + { + struct node *np; + +- pthread_spin_lock (&diskfs_node_refcnt_lock); +- for (np = nodehash[INOHASH(inum)]; np; np = np->dn->hnext) +- { +- if (np->cache_id != inum) +- continue; ++ pthread_rwlock_rdlock (&nodecache_lock); ++ np = lookup (inum); ++ pthread_rwlock_unlock (&nodecache_lock); + +- assert (np->references); +- pthread_spin_unlock (&diskfs_node_refcnt_lock); +- return np; +- } +- assert (0); ++ assert (np); ++ return np; + } + + /* The last reference to a node has gone away; drop +@@ -158,11 +185,6 @@ ifind (ino_t inum) + void + diskfs_node_norefs (struct node *np) + { +- *np->dn->hprevp = np->dn->hnext; +- if (np->dn->hnext) +- np->dn->hnext->dn->hprevp = np->dn->hprevp; +- nodehash_nr_items -= 1; +- + if (np->dn->dirents) + free (np->dn->dirents); + assert (!np->dn->pager); +@@ -180,6 +202,35 @@ diskfs_node_norefs (struct node *np) + void + diskfs_try_dropping_softrefs (struct node *np) + { ++ pthread_rwlock_wrlock (&nodecache_lock); ++ if (np->dn->hnext != NULL) ++ { ++ /* Check if someone reacquired a reference through the ++ nodehash. */ ++ unsigned int references; ++ pthread_spin_lock (&diskfs_node_refcnt_lock); ++ references = np->references; ++ pthread_spin_unlock (&diskfs_node_refcnt_lock); ++ ++ /* An additional reference is acquired by libdiskfs across calls ++ to diskfs_try_dropping_softrefs. */ ++ if (references > 1) ++ { ++ /* A reference was reacquired through a hash table lookup. ++ It's fine, we didn't touch anything yet. 
*/ ++ pthread_rwlock_unlock (&nodecache_lock); ++ return; ++ } ++ ++ *np->dn->hprevp = np->dn->hnext; ++ if (np->dn->hnext) ++ np->dn->hnext->dn->hprevp = np->dn->hprevp; ++ np->dn->hnext = NULL; ++ nodehash_nr_items -= 1; ++ diskfs_nrele_light (np); ++ } ++ pthread_rwlock_unlock (&nodecache_lock); ++ + drop_pager_softrefs (np); + } + +@@ -556,12 +607,12 @@ diskfs_node_iterate (error_t (*fun)(struct node *)) + size_t num_nodes; + struct node *node, **node_list, **p; + +- pthread_spin_lock (&diskfs_node_refcnt_lock); ++ pthread_rwlock_rdlock (&nodecache_lock); + + /* We must copy everything from the hash table into another data structure + to avoid running into any problems with the hash-table being modified + during processing (normally we delegate access to hash-table with +- diskfs_node_refcnt_lock, but we can't hold this while locking the ++ nodecache_lock, but we can't hold this while locking the + individual node locks). */ + num_nodes = nodehash_nr_items; + +@@ -570,7 +621,7 @@ diskfs_node_iterate (error_t (*fun)(struct node *)) + node_list = malloc (num_nodes * sizeof (struct node *)); + if (node_list == NULL) + { +- pthread_spin_unlock (&diskfs_node_refcnt_lock); ++ pthread_rwlock_unlock (&nodecache_lock); + ext2_debug ("unable to allocate temporary node table"); + return ENOMEM; + } +@@ -580,10 +631,14 @@ diskfs_node_iterate (error_t (*fun)(struct node *)) + for (node = nodehash[n]; node; node = node->dn->hnext) + { + *p++ = node; ++ ++ /* We acquire a hard reference for node, but without using ++ diskfs_nref. We do this so that diskfs_new_hardrefs will not ++ get called. */ + node->references++; + } + +- pthread_spin_unlock (&diskfs_node_refcnt_lock); ++ pthread_rwlock_unlock (&nodecache_lock); + + p = node_list; + while (num_nodes-- > 0) +-- +2.0.0.rc2 + diff --git a/debian/patches/0002-fatfs-use-a-seperate-lock-to-protect-nodehash.patch b/debian/patches/0002-fatfs-use-a-seperate-lock-to-protect-nodehash.patch new file mode 100644 index 00000000..6c3f2b49 --- /dev/null +++ b/debian/patches/0002-fatfs-use-a-seperate-lock-to-protect-nodehash.patch @@ -0,0 +1,303 @@ +From ecca4f4c8e32ff1774892718446566c2c5c36979 Mon Sep 17 00:00:00 2001 +From: Justus Winter <4winter@informatik.uni-hamburg.de> +Date: Tue, 13 May 2014 15:14:53 +0200 +Subject: [PATCH 2/4] fatfs: use a seperate lock to protect nodehash + +Previously, fatfs used diskfs_node_refcnt_lock to serialize access to +the nodehash. + +Use a separate lock to protect nodehash. Adjust the reference +counting accordingly. Every node in the nodehash carries a light +reference. When we are asked to give up that light reference, we +reacquire our lock momentarily to check whether someone else +reacquired a reference through the nodehash. + +* fatfs/inode.c (nodecache_lock): New lock. +(diskfs_cached_lookup): Use a separate lock to protect nodehash. +Adjust the reference counting accordingly. +(ifind): Likewise. +(diskfs_node_iterate): Likewise. +(diskfs_node_norefs): Move the code removing the node from nodehash... +(diskfs_try_dropping_softrefs): ... here, where we check whether +someone reacquired a reference, and if so hold on to our light +reference. +--- + fatfs/inode.c | 146 ++++++++++++++++++++++++++++++++++++++++------------------ + 1 file changed, 101 insertions(+), 45 deletions(-) + +diff --git a/fatfs/inode.c b/fatfs/inode.c +index ed6f3f0..c3997d0 100644 +--- a/fatfs/inode.c ++++ b/fatfs/inode.c +@@ -44,8 +44,19 @@ + #define INOHASH(ino) (((unsigned)(ino))%INOHSZ) + #endif + ++/* The nodehash is a cache of nodes. 
++ ++ Access to nodehash and nodehash_nr_items is protected by ++ nodecache_lock. ++ ++ Every node in the nodehash carries a light reference. When we are ++ asked to give up that light reference, we reacquire our lock ++ momentarily to check whether someone else reacquired a reference ++ through the nodehash. */ + static struct node *nodehash[INOHSZ]; + static size_t nodehash_nr_items; ++/* nodecache_lock must be acquired before diskfs_node_refcnt_lock. */ ++static pthread_rwlock_t nodecache_lock = PTHREAD_RWLOCK_INITIALIZER; + + static error_t read_node (struct node *np, vm_address_t buf); + +@@ -58,33 +69,38 @@ inode_init () + nodehash[n] = 0; + } + ++/* Lookup node with inode number INUM. Returns NULL if the node is ++ not found in the node cache. */ ++static struct node * ++lookup (ino_t inum) ++{ ++ struct node *np; ++ for (np = nodehash[INOHASH(inum)]; np; np = np->dn->hnext) ++ if (np->cache_id == inum) ++ return np; ++ return NULL; ++} ++ + /* Fetch inode INUM, set *NPP to the node structure; gain one user + reference and lock the node. */ + error_t + diskfs_cached_lookup (ino64_t inum, struct node **npp) + { + error_t err; +- struct node *np; ++ struct node *np, *tmp; + struct disknode *dn; + +- pthread_spin_lock (&diskfs_node_refcnt_lock); +- for (np = nodehash[INOHASH(inum)]; np; np = np->dn->hnext) +- if (np->cache_id == inum) +- { +- np->references++; +- pthread_spin_unlock (&diskfs_node_refcnt_lock); +- pthread_mutex_lock (&np->lock); +- *npp = np; +- return 0; +- } ++ pthread_rwlock_rdlock (&nodecache_lock); ++ np = lookup (inum); ++ if (np) ++ goto gotit; ++ pthread_rwlock_unlock (&nodecache_lock); + + /* Format specific data for the new node. */ + dn = malloc (sizeof (struct disknode)); + if (! dn) +- { +- pthread_spin_unlock (&diskfs_node_refcnt_lock); +- return ENOMEM; +- } ++ return ENOMEM; ++ + dn->pager = 0; + dn->first = 0; + dn->last = 0; +@@ -102,15 +118,25 @@ diskfs_cached_lookup (ino64_t inum, struct node **npp) + pthread_mutex_lock (&np->lock); + + /* Put NP in NODEHASH. */ ++ pthread_rwlock_wrlock (&nodecache_lock); ++ tmp = lookup (inum); ++ if (tmp) ++ { ++ /* We lost a race. */ ++ diskfs_nput (np); ++ np = tmp; ++ goto gotit; ++ } ++ + dn->hnext = nodehash[INOHASH(inum)]; + if (dn->hnext) + dn->hnext->dn->hprevp = &dn->hnext; + dn->hprevp = &nodehash[INOHASH(inum)]; + nodehash[INOHASH(inum)] = np; ++ diskfs_nref_light (np); + nodehash_nr_items += 1; ++ pthread_rwlock_unlock (&nodecache_lock); + +- pthread_spin_unlock (&diskfs_node_refcnt_lock); +- + /* Get the contents of NP off disk. */ + err = read_node (np, 0); + +@@ -121,6 +147,13 @@ diskfs_cached_lookup (ino64_t inum, struct node **npp) + *npp = np; + return 0; + } ++ ++ gotit: ++ diskfs_nref (np); ++ pthread_rwlock_unlock (&nodecache_lock); ++ pthread_mutex_lock (&np->lock); ++ *npp = np; ++ return 0; + } + + /* Fetch inode INUM, set *NPP to the node structure; +@@ -133,24 +166,23 @@ diskfs_cached_lookup_in_dirbuf (int inum, struct node **npp, vm_address_t buf) + struct node *np; + struct disknode *dn; + +- pthread_spin_lock (&diskfs_node_refcnt_lock); ++ pthread_rwlock_rdlock (&nodecache_lock); + for (np = nodehash[INOHASH(inum)]; np; np = np->dn->hnext) + if (np->cache_id == inum) + { +- np->references++; +- pthread_spin_unlock (&diskfs_node_refcnt_lock); ++ diskfs_nref (np); ++ pthread_rwlock_unlock (&nodecache_lock); + pthread_mutex_lock (&np->lock); + *npp = np; + return 0; + } ++ pthread_rwlock_unlock (&nodecache_lock); + + /* Format specific data for the new node. 
*/ + dn = malloc (sizeof (struct disknode)); + if (! dn) +- { +- pthread_spin_unlock (&diskfs_node_refcnt_lock); +- return ENOMEM; +- } ++ return ENOMEM; ++ + dn->pager = 0; + dn->first = 0; + dn->last = 0; +@@ -168,15 +200,16 @@ diskfs_cached_lookup_in_dirbuf (int inum, struct node **npp, vm_address_t buf) + pthread_mutex_lock (&np->lock); + + /* Put NP in NODEHASH. */ ++ pthread_rwlock_wrlock (&nodecache_lock); + dn->hnext = nodehash[INOHASH(inum)]; + if (dn->hnext) + dn->hnext->dn->hprevp = &dn->hnext; + dn->hprevp = &nodehash[INOHASH(inum)]; + nodehash[INOHASH(inum)] = np; ++ diskfs_nref_light (np); + nodehash_nr_items += 1; ++ pthread_rwlock_unlock (&nodecache_lock); + +- pthread_spin_unlock (&diskfs_node_refcnt_lock); +- + /* Get the contents of NP off disk. */ + err = read_node (np, buf); + +@@ -196,17 +229,12 @@ ifind (ino_t inum) + { + struct node *np; + +- pthread_spin_lock (&diskfs_node_refcnt_lock); +- for (np = nodehash[INOHASH(inum)]; np; np = np->dn->hnext) +- { +- if (np->cache_id != inum) +- continue; ++ pthread_rwlock_rdlock (&nodecache_lock); ++ np = lookup (inum); ++ pthread_rwlock_unlock (&nodecache_lock); + +- assert (np->references); +- pthread_spin_unlock (&diskfs_node_refcnt_lock); +- return np; +- } +- assert (0); ++ assert (np); ++ return np; + } + + /* The last reference to a node has gone away; drop it from the hash +@@ -216,11 +244,6 @@ diskfs_node_norefs (struct node *np) + { + struct cluster_chain *last = np->dn->first; + +- *np->dn->hprevp = np->dn->hnext; +- if (np->dn->hnext) +- np->dn->hnext->dn->hprevp = np->dn->hprevp; +- nodehash_nr_items -= 1; +- + while (last) + { + struct cluster_chain *next = last->next; +@@ -251,6 +274,35 @@ diskfs_node_norefs (struct node *np) + void + diskfs_try_dropping_softrefs (struct node *np) + { ++ pthread_rwlock_wrlock (&nodecache_lock); ++ if (np->dn->hnext != NULL) ++ { ++ /* Check if someone reacquired a reference through the ++ nodehash. */ ++ unsigned int references; ++ pthread_spin_lock (&diskfs_node_refcnt_lock); ++ references = np->references; ++ pthread_spin_unlock (&diskfs_node_refcnt_lock); ++ ++ /* An additional reference is acquired by libdiskfs across calls ++ to diskfs_try_dropping_softrefs. */ ++ if (references > 1) ++ { ++ /* A reference was reacquired through a hash table lookup. ++ It's fine, we didn't touch anything yet. */ ++ pthread_rwlock_unlock (&nodecache_lock); ++ return; ++ } ++ ++ *np->dn->hprevp = np->dn->hnext; ++ if (np->dn->hnext) ++ np->dn->hnext->dn->hprevp = np->dn->hprevp; ++ np->dn->hnext = NULL; ++ nodehash_nr_items -= 1; ++ diskfs_nrele_light (np); ++ } ++ pthread_rwlock_unlock (&nodecache_lock); ++ + drop_pager_softrefs (np); + } + +@@ -554,12 +606,12 @@ diskfs_node_iterate (error_t (*fun)(struct node *)) + size_t num_nodes; + struct node *node, **node_list, **p; + +- pthread_spin_lock (&diskfs_node_refcnt_lock); ++ pthread_rwlock_rdlock (&nodecache_lock); + + /* We must copy everything from the hash table into another data structure + to avoid running into any problems with the hash-table being modified + during processing (normally we delegate access to hash-table with +- diskfs_node_refcnt_lock, but we can't hold this while locking the ++ nodecache_lock, but we can't hold this while locking the + individual node locks). 
*/ + + num_nodes = nodehash_nr_items; +@@ -570,10 +622,14 @@ diskfs_node_iterate (error_t (*fun)(struct node *)) + for (node = nodehash[n]; node; node = node->dn->hnext) + { + *p++ = node; +- node->references++; ++ ++ /* We acquire a hard reference for node, but without using ++ diskfs_nref. We do this so that diskfs_new_hardrefs will not ++ get called. */ ++ node->references++; + } + +- pthread_spin_unlock (&diskfs_node_refcnt_lock); ++ pthread_rwlock_unlock (&nodecache_lock); + + p = node_list; + while (num_nodes-- > 0) +-- +2.0.0.rc2 + diff --git a/debian/patches/0003-isofs-use-a-seperate-lock-to-protect-node_cache.patch b/debian/patches/0003-isofs-use-a-seperate-lock-to-protect-node_cache.patch new file mode 100644 index 00000000..cd7811b8 --- /dev/null +++ b/debian/patches/0003-isofs-use-a-seperate-lock-to-protect-node_cache.patch @@ -0,0 +1,310 @@ +From 580ddc597aa7bda6f4941e308482fedd862a43c9 Mon Sep 17 00:00:00 2001 +From: Justus Winter <4winter@informatik.uni-hamburg.de> +Date: Tue, 13 May 2014 15:16:31 +0200 +Subject: [PATCH 3/4] isofs: use a seperate lock to protect node_cache + +Previously, isofs used diskfs_node_refcnt_lock to serialize access to +the node_cache. + +Use a separate lock to protect node_cache. Adjust the reference +counting accordingly. Every node in the node_cache carries a light +reference. When we are asked to give up that light reference, we +reacquire our lock momentarily to check whether someone else +reacquired a reference through the node_cache. + +* isofs/inode.c (nodecache_lock): New lock. +(inode_cache_find): Use a separate lock to protect node_cache. +Adjust the reference counting accordingly. +(diskfs_cached_lookup): Likewise. +(load_inode): Likewise. +(cache_inode): Update comment accordingly. +(diskfs_node_iterate): Likewise. +(diskfs_node_norefs): Move the code removing the node from node_cache... +(diskfs_try_dropping_softrefs): ... here, where we check whether +someone reacquired a reference, and if so hold on to our light +reference. +--- + isofs/inode.c | 146 +++++++++++++++++++++++++++++++++++++++++----------------- + 1 file changed, 105 insertions(+), 41 deletions(-) + +diff --git a/isofs/inode.c b/isofs/inode.c +index cdc05ae..3941580 100644 +--- a/isofs/inode.c ++++ b/isofs/inode.c +@@ -48,35 +48,53 @@ struct node_cache + struct node *np; /* if live */ + }; + ++/* The node_cache is a cache of nodes. ++ ++ Access to node_cache, node_cache_size, and node_cache_alloced is ++ protected by nodecache_lock. ++ ++ Every node in the node_cache carries a light reference. When we ++ are asked to give up that light reference, we reacquire our lock ++ momentarily to check whether someone else reacquired a reference ++ through the node_cache. */ + static int node_cache_size = 0; + static int node_cache_alloced = 0; + struct node_cache *node_cache = 0; ++/* nodecache_lock must be acquired before diskfs_node_refcnt_lock. */ ++static pthread_rwlock_t nodecache_lock = PTHREAD_RWLOCK_INITIALIZER; + + /* Forward */ + static error_t read_disknode (struct node *, + struct dirrect *, struct rrip_lookup *); + + ++/* Lookup node with id ID. Returns NULL if the node is not found in ++ the node cache. */ ++static struct node * ++lookup (off_t id) ++{ ++ int i; ++ for (i = 0; i < node_cache_size; i++) ++ if (node_cache[i].id == id ++ && node_cache[i].np) ++ return node_cache[i].np; ++ return NULL; ++} ++ + /* See if node with identifier ID is in the cache. If so, return it, +- with one additional reference. 
diskfs_node_refcnt_lock must be held ++ with one additional reference. nodecache_lock must be held + on entry to the call, and will be released iff the node was found + in the cache. */ + void + inode_cache_find (off_t id, struct node **npp) + { +- int i; +- +- for (i = 0; i < node_cache_size; i++) +- if (node_cache[i].id == id +- && node_cache[i].np) +- { +- *npp = node_cache[i].np; +- (*npp)->references++; +- pthread_spin_unlock (&diskfs_node_refcnt_lock); +- pthread_mutex_lock (&(*npp)->lock); +- return; +- } +- *npp = 0; ++ *npp = lookup (id); ++ if (*npp) ++ { ++ diskfs_nref (*npp); ++ pthread_rwlock_unlock (&nodecache_lock); ++ pthread_mutex_lock (&(*npp)->lock); ++ } + } + + +@@ -92,7 +110,7 @@ use_file_start_id (struct dirrect *record, struct rrip_lookup *rr) + } + + /* Enter NP into the cache. The directory entry we used is DR, the +- cached Rock-Ridge info RR. diskfs_node_refcnt_lock must be held. */ ++ cached Rock-Ridge info RR. nodecache_lock must be held. */ + void + cache_inode (struct node *np, struct dirrect *record, + struct rrip_lookup *rr) +@@ -137,6 +155,7 @@ cache_inode (struct node *np, struct dirrect *record, + c->id = id; + c->dr = record; + c->file_start = np->dn->file_start; ++ diskfs_nref_light (np); + c->np = np; + + /* PLUS 1 so that we don't store zero cache ID's (not allowed by diskfs) */ +@@ -155,7 +174,7 @@ diskfs_cached_lookup (ino_t id, struct node **npp) + to avoid presenting zero cache ID's. */ + id--; + +- pthread_spin_lock (&diskfs_node_refcnt_lock); ++ pthread_rwlock_rdlock (&nodecache_lock); + assert (id < node_cache_size); + + np = node_cache[id].np; +@@ -166,6 +185,8 @@ diskfs_cached_lookup (ino_t id, struct node **npp) + struct rrip_lookup rr; + struct disknode *dn; + ++ pthread_rwlock_unlock (&nodecache_lock); ++ + rrip_lookup (node_cache[id].dr, &rr, 1); + + /* We should never cache the wrong directory entry */ +@@ -174,7 +195,7 @@ diskfs_cached_lookup (ino_t id, struct node **npp) + dn = malloc (sizeof (struct disknode)); + if (!dn) + { +- pthread_spin_unlock (&diskfs_node_refcnt_lock); ++ pthread_rwlock_unlock (&nodecache_lock); + release_rrip (&rr); + return ENOMEM; + } +@@ -185,16 +206,26 @@ diskfs_cached_lookup (ino_t id, struct node **npp) + if (!np) + { + free (dn); +- pthread_spin_unlock (&diskfs_node_refcnt_lock); ++ pthread_rwlock_unlock (&nodecache_lock); + release_rrip (&rr); + return ENOMEM; + } + np->cache_id = id + 1; /* see above for rationale for increment */ + pthread_mutex_lock (&np->lock); ++ ++ pthread_rwlock_wrlock (&nodecache_lock); ++ if (c->np != NULL) ++ { ++ /* We lost a race. 
*/ ++ diskfs_nput (np); ++ np = c->np; ++ goto gotit; ++ } + c->np = np; +- pthread_spin_unlock (&diskfs_node_refcnt_lock); ++ diskfs_nref_light (np); ++ pthread_rwlock_unlock (&nodecache_lock); + +- err = read_disknode (np, node_cache[id].dr, &rr); ++ err = read_disknode (np, dn->dr, &rr); + if (!err) + *npp = np; + +@@ -203,9 +234,9 @@ diskfs_cached_lookup (ino_t id, struct node **npp) + return err; + } + +- +- np->references++; +- pthread_spin_unlock (&diskfs_node_refcnt_lock); ++ gotit: ++ diskfs_nref (np); ++ pthread_rwlock_unlock (&nodecache_lock); + pthread_mutex_lock (&np->lock); + *npp = np; + return 0; +@@ -307,7 +338,8 @@ load_inode (struct node **npp, struct dirrect *record, + error_t err; + off_t file_start; + struct disknode *dn; +- struct node *np; ++ struct node *np, *tmp; ++ off_t id; + + err = calculate_file_start (record, &file_start, rr); + if (err) +@@ -315,27 +347,23 @@ load_inode (struct node **npp, struct dirrect *record, + if (rr->valid & VALID_CL) + record = rr->realdirent; + +- pthread_spin_lock (&diskfs_node_refcnt_lock); +- + /* First check the cache */ + if (use_file_start_id (record, rr)) +- inode_cache_find (file_start << store->log2_block_size, npp); ++ id = file_start << store->log2_block_size; + else +- inode_cache_find ((off_t) ((void *) record - (void *) disk_image), npp); ++ id = (off_t) ((void *) record - (void *) disk_image); + ++ pthread_rwlock_rdlock (&nodecache_lock); ++ inode_cache_find (id, npp); + if (*npp) +- { +- pthread_spin_unlock (&diskfs_node_refcnt_lock); +- return 0; +- } ++ return 0; ++ pthread_rwlock_unlock (&nodecache_lock); + + /* Create a new node */ + dn = malloc (sizeof (struct disknode)); + if (!dn) +- { +- pthread_spin_unlock (&diskfs_node_refcnt_lock); +- return ENOMEM; +- } ++ return ENOMEM; ++ + dn->fileinfo = 0; + dn->dr = record; + dn->file_start = file_start; +@@ -344,14 +372,25 @@ load_inode (struct node **npp, struct dirrect *record, + if (!np) + { + free (dn); +- pthread_spin_unlock (&diskfs_node_refcnt_lock); + return ENOMEM; + } + + pthread_mutex_lock (&np->lock); + ++ pthread_rwlock_wrlock (&nodecache_lock); ++ tmp = lookup (id); ++ if (tmp) ++ { ++ /* We lost a race. */ ++ diskfs_nput (np); ++ diskfs_nref (tmp); ++ *npp = tmp; ++ pthread_rwlock_unlock (&nodecache_lock); ++ return 0; ++ } ++ + cache_inode (np, record, rr); +- pthread_spin_unlock (&diskfs_node_refcnt_lock); ++ pthread_rwlock_unlock (&nodecache_lock); + + err = read_disknode (np, record, rr); + *npp = np; +@@ -505,9 +544,6 @@ error_t (*diskfs_read_symlink_hook) (struct node *, char *) + void + diskfs_node_norefs (struct node *np) + { +- assert (node_cache[np->cache_id - 1].np == np); +- node_cache[np->cache_id - 1].np = 0; +- + if (np->dn->translator) + free (np->dn->translator); + +@@ -521,6 +557,34 @@ diskfs_node_norefs (struct node *np) + void + diskfs_try_dropping_softrefs (struct node *np) + { ++ pthread_rwlock_wrlock (&nodecache_lock); ++ if (np->cache_id != 0) ++ { ++ assert (node_cache[np->cache_id - 1].np == np); ++ ++ /* Check if someone reacquired a reference through the ++ node_cache. */ ++ unsigned int references; ++ pthread_spin_lock (&diskfs_node_refcnt_lock); ++ references = np->references; ++ pthread_spin_unlock (&diskfs_node_refcnt_lock); ++ ++ /* An additional reference is acquired by libdiskfs across calls ++ to diskfs_try_dropping_softrefs. */ ++ if (references > 1) ++ { ++ /* A reference was reacquired through a hash table lookup. ++ It's fine, we didn't touch anything yet. 
*/ ++ pthread_rwlock_unlock (&nodecache_lock); ++ return; ++ } ++ ++ node_cache[np->cache_id - 1].np = 0; ++ np->cache_id = 0; ++ diskfs_nrele_light (np); ++ } ++ pthread_rwlock_unlock (&nodecache_lock); ++ + drop_pager_softrefs (np); + } + +-- +2.0.0.rc2 + diff --git a/debian/patches/0004-tmpfs-use-a-seperate-lock-to-protect-all_nodes.patch b/debian/patches/0004-tmpfs-use-a-seperate-lock-to-protect-all_nodes.patch new file mode 100644 index 00000000..7c1ec942 --- /dev/null +++ b/debian/patches/0004-tmpfs-use-a-seperate-lock-to-protect-all_nodes.patch @@ -0,0 +1,314 @@ +From 45a6d87522ab3d89b3410a79eed0ca12c020c609 Mon Sep 17 00:00:00 2001 +From: Justus Winter <4winter@informatik.uni-hamburg.de> +Date: Tue, 13 May 2014 15:35:42 +0200 +Subject: [PATCH 4/4] tmpfs: use a seperate lock to protect all_nodes + +Previously, tmpfs used diskfs_node_refcnt_lock to serialize access to +the all_nodes and some other related global state related to memory +consumption. + +Use a separate lock to protect all_nodes, and atomic operations to +access the state related to memory consumption. Adjust the reference +counting accordingly. Every node in the all_nodes carries a light +reference. When we are asked to give up that light reference, we +reacquire our lock momentarily to check whether someone else +reacquired a reference through the all_nodes. + +* tmpfs/tmpfs.h (num_files, tmpfs_space_used): Use atomic operations +for these variables. +(adjust_used): Use atomic operations. +(get_used): New convenience function to atomically retrieve +tmpfs_space_used. +* tmpfs/node.c (all_nodes_lock): New lock. +(diskfs_alloc_node): Use a separate lock to protect all_nodes. +Adjust the reference counting accordingly. +(diskfs_free_node): Likewise. +(diskfs_cached_lookup):Likewise. +(diskfs_node_iterate): Likewise. +(diskfs_node_norefs): Do not remove the node from all_nodes. This +actually looks like a mistake, I do not know why they did that here as +well as in diskfs_free_node. +(diskfs_try_dropping_softrefs): Check whether someone reacquired a +reference, and if so hold on to our light reference. +(diskfs_grow): Use atomic operations. +* tmpfs/tmpfs.c (diskfs_set_statfs): Likewise. +--- + tmpfs/node.c | 107 ++++++++++++++++++++++++++++++++++++++++++---------------- + tmpfs/tmpfs.c | 6 ++-- + tmpfs/tmpfs.h | 20 +++++++---- + 3 files changed, 94 insertions(+), 39 deletions(-) + +diff --git a/tmpfs/node.c b/tmpfs/node.c +index acc029a..24ad0bd 100644 +--- a/tmpfs/node.c ++++ b/tmpfs/node.c +@@ -29,8 +29,19 @@ the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */ + unsigned int num_files; + static unsigned int gen; + ++/* all_nodes is a list of all nodes. ++ ++ Access to all_nodes and all_nodes_nr_items is protected by ++ all_nodes_lock. ++ ++ Every node in all_nodes carries a light reference. When we are ++ asked to give up that light reference, we reacquire our lock ++ momentarily to check whether someone else reacquired a ++ reference. */ + struct node *all_nodes; + static size_t all_nodes_nr_items; ++/* all_nodes_lock must be acquired before diskfs_node_refcnt_lock. 
*/ ++pthread_rwlock_t all_nodes_lock = PTHREAD_RWLOCK_INITIALIZER; + + error_t + diskfs_alloc_node (struct node *dp, mode_t mode, struct node **npp) +@@ -40,18 +51,17 @@ diskfs_alloc_node (struct node *dp, mode_t mode, struct node **npp) + dn = calloc (1, sizeof *dn); + if (dn == 0) + return ENOSPC; +- pthread_spin_lock (&diskfs_node_refcnt_lock); +- if (round_page (tmpfs_space_used + sizeof *dn) / vm_page_size ++ ++ if (round_page (get_used () + sizeof *dn) / vm_page_size + > tmpfs_page_limit) + { +- pthread_spin_unlock (&diskfs_node_refcnt_lock); ++ pthread_rwlock_unlock (&all_nodes_lock); + free (dn); + return ENOSPC; + } + dn->gen = gen++; +- ++num_files; +- tmpfs_space_used += sizeof *dn; +- pthread_spin_unlock (&diskfs_node_refcnt_lock); ++ __atomic_add_fetch (&num_files, 1, __ATOMIC_RELAXED); ++ adjust_used (sizeof *dn); + + dn->type = IFTODT (mode & S_IFMT); + return diskfs_cached_lookup ((ino_t) (uintptr_t) dn, npp); +@@ -75,15 +85,19 @@ diskfs_free_node (struct node *np, mode_t mode) + free (np->dn->u.lnk); + break; + } ++ ++ pthread_rwlock_wrlock (&all_nodes_lock); + *np->dn->hprevp = np->dn->hnext; + if (np->dn->hnext != 0) + np->dn->hnext->dn->hprevp = np->dn->hprevp; + all_nodes_nr_items -= 1; ++ pthread_rwlock_unlock (&all_nodes_lock); ++ + free (np->dn); + np->dn = 0; + +- --num_files; +- tmpfs_space_used -= sizeof *np->dn; ++ __atomic_sub_fetch (&num_files, 1, __ATOMIC_RELAXED); ++ adjust_used (-sizeof *np->dn); + } + + void +@@ -117,14 +131,6 @@ diskfs_node_norefs (struct node *np) + np->dn->u.chr = np->dn_stat.st_rdev; + break; + } +- +- /* Remove this node from the cache list rooted at `all_nodes'. */ +- *np->dn->hprevp = np->dn->hnext; +- if (np->dn->hnext != 0) +- np->dn->hnext->dn->hprevp = np->dn->hprevp; +- all_nodes_nr_items -= 1; +- np->dn->hnext = 0; +- np->dn->hprevp = 0; + } + + free (np); +@@ -167,30 +173,34 @@ diskfs_cached_lookup (ino_t inum, struct node **npp) + + assert (npp); + ++ pthread_rwlock_rdlock (&all_nodes_lock); + if (dn->hprevp != 0) /* There is already a node. */ +- { +- np = *dn->hprevp; +- assert (np->dn == dn); +- assert (*dn->hprevp == np); +- +- diskfs_nref (np); +- } ++ goto gotit; + else + /* Create the new node. */ + { + struct stat *st; ++ pthread_rwlock_unlock (&all_nodes_lock); + + np = diskfs_make_node (dn); + np->cache_id = (ino_t) (uintptr_t) dn; + +- pthread_spin_lock (&diskfs_node_refcnt_lock); ++ pthread_rwlock_wrlock (&all_nodes_lock); ++ if (dn->hprevp != NULL) ++ { ++ /* We lost a race. 
*/ ++ diskfs_nrele (np); ++ goto gotit; ++ } ++ + dn->hnext = all_nodes; + if (dn->hnext) + dn->hnext->dn->hprevp = &dn->hnext; + dn->hprevp = &all_nodes; + all_nodes = np; + all_nodes_nr_items += 1; +- pthread_spin_unlock (&diskfs_node_refcnt_lock); ++ diskfs_nref_light (np); ++ pthread_rwlock_unlock (&all_nodes_lock); + + st = &np->dn_stat; + memset (st, 0, sizeof *st); +@@ -220,6 +230,16 @@ diskfs_cached_lookup (ino_t inum, struct node **npp) + pthread_mutex_lock (&np->lock); + *npp = np; + return 0; ++ ++ gotit: ++ np = *dn->hprevp; ++ assert (np->dn == dn); ++ assert (*dn->hprevp == np); ++ diskfs_nref (np); ++ pthread_rwlock_unlock (&all_nodes_lock); ++ pthread_mutex_lock (&np->lock); ++ *npp = np; ++ return 0; + } + + error_t +@@ -229,12 +249,12 @@ diskfs_node_iterate (error_t (*fun) (struct node *)) + size_t num_nodes; + struct node *node, **node_list, **p; + +- pthread_spin_lock (&diskfs_node_refcnt_lock); ++ pthread_rwlock_rdlock (&all_nodes_lock); + + /* We must copy everything from the hash table into another data structure + to avoid running into any problems with the hash-table being modified + during processing (normally we delegate access to hash-table with +- diskfs_node_refcnt_lock, but we can't hold this while locking the ++ all_nodes_lock, but we can't hold this while locking the + individual node locks). */ + + num_nodes = all_nodes_nr_items; +@@ -243,10 +263,14 @@ diskfs_node_iterate (error_t (*fun) (struct node *)) + for (node = all_nodes; node != 0; node = node->dn->hnext) + { + *p++ = node; ++ ++ /* We acquire a hard reference for node, but without using ++ diskfs_nref. We do this so that diskfs_new_hardrefs will not ++ get called. */ + node->references++; + } + +- pthread_spin_unlock (&diskfs_node_refcnt_lock); ++ pthread_rwlock_unlock (&all_nodes_lock); + + p = node_list; + while (num_nodes-- > 0) +@@ -272,6 +296,31 @@ diskfs_node_iterate (error_t (*fun) (struct node *)) + void + diskfs_try_dropping_softrefs (struct node *np) + { ++ pthread_rwlock_wrlock (&all_nodes_lock); ++ if (np->cache_id != 0) ++ { ++ /* Check if someone reacquired a reference. */ ++ unsigned int references; ++ pthread_spin_lock (&diskfs_node_refcnt_lock); ++ references = np->references; ++ pthread_spin_unlock (&diskfs_node_refcnt_lock); ++ ++ /* An additional reference is acquired by libdiskfs across calls ++ to diskfs_try_dropping_softrefs. */ ++ if (references > 1) ++ { ++ /* A reference was reacquired. It's fine, we didn't touch ++ anything yet. */ ++ pthread_rwlock_unlock (&all_nodes_lock); ++ return; ++ } ++ ++ /* Just let go of the weak reference. The node will be removed ++ from all_nodes in diskfs_free_node. */ ++ np->cache_id = 0; ++ diskfs_nrele_light (np); ++ } ++ pthread_rwlock_unlock (&all_nodes_lock); + } + + /* The user must define this funcction. 
Node NP has some light +@@ -447,7 +496,7 @@ diskfs_grow (struct node *np, off_t size, struct protid *cred) + + off_t set_size = size; + size = round_page (size); +- if (round_page (tmpfs_space_used + size - np->allocsize) ++ if (round_page (get_used () + size - np->allocsize) + / vm_page_size > tmpfs_page_limit) + return ENOSPC; + +diff --git a/tmpfs/tmpfs.c b/tmpfs/tmpfs.c +index 718c6d8..0aace25 100644 +--- a/tmpfs/tmpfs.c ++++ b/tmpfs/tmpfs.c +@@ -67,10 +67,8 @@ diskfs_set_statfs (struct statfs *st) + st->f_bsize = vm_page_size; + st->f_blocks = tmpfs_page_limit; + +- pthread_spin_lock (&diskfs_node_refcnt_lock); +- st->f_files = num_files; +- pages = round_page (tmpfs_space_used) / vm_page_size; +- pthread_spin_unlock (&diskfs_node_refcnt_lock); ++ st->f_files = __atomic_load_n (&num_files, __ATOMIC_RELAXED); ++ pages = round_page (get_used ()) / vm_page_size; + + st->f_bfree = pages < tmpfs_page_limit ? tmpfs_page_limit - pages : 0; + st->f_bavail = st->f_bfree; +diff --git a/tmpfs/tmpfs.h b/tmpfs/tmpfs.h +index b3c636d..ad47200 100644 +--- a/tmpfs/tmpfs.h ++++ b/tmpfs/tmpfs.h +@@ -69,17 +69,25 @@ struct tmpfs_dirent + char name[0]; + }; + +-extern unsigned int num_files; +-extern off_t tmpfs_page_limit, tmpfs_space_used; +- ++extern off_t tmpfs_page_limit; + extern mach_port_t default_pager; + ++/* These two must be accessed using atomic operations. */ ++extern unsigned int num_files; ++extern off_t tmpfs_space_used; ++ ++/* Convenience function to adjust tmpfs_space_used. */ + static inline void + adjust_used (off_t change) + { +- pthread_spin_lock (&diskfs_node_refcnt_lock); +- tmpfs_space_used += change; +- pthread_spin_unlock (&diskfs_node_refcnt_lock); ++ __atomic_add_fetch (&num_files, change, __ATOMIC_RELAXED); ++} ++ ++/* Convenience function to get tmpfs_space_used. */ ++static inline off_t ++get_used (void) ++{ ++ return __atomic_load_n (&num_files, __ATOMIC_RELAXED); + } + + #endif +-- +2.0.0.rc2 + diff --git a/debian/patches/series b/debian/patches/series index 2d703f80..a72a0c58 100644 --- a/debian/patches/series +++ b/debian/patches/series @@ -41,3 +41,7 @@ xkb-compat.patch mach-defpager-protected-payload.patch +0001-ext2fs-use-a-seperate-lock-to-protect-nodehash.patch +0002-fatfs-use-a-seperate-lock-to-protect-nodehash.patch +0003-isofs-use-a-seperate-lock-to-protect-node_cache.patch +0004-tmpfs-use-a-seperate-lock-to-protect-all_nodes.patch |