From 45a6d87522ab3d89b3410a79eed0ca12c020c609 Mon Sep 17 00:00:00 2001
From: Justus Winter <4winter@informatik.uni-hamburg.de>
Date: Tue, 13 May 2014 15:35:42 +0200
Subject: [PATCH 4/4] tmpfs: use a separate lock to protect all_nodes

Previously, tmpfs used diskfs_node_refcnt_lock to serialize access to
the all_nodes list and to other global state related to memory
consumption.

Use a separate lock to protect all_nodes, and atomic operations to
access the state related to memory consumption.  Adjust the reference
counting accordingly.  Every node in the all_nodes list carries a
light reference.  When we are asked to give up that light reference,
we reacquire our lock momentarily to check whether someone else
reacquired a reference through the all_nodes list.
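
In outline, the recheck works like this (a simplified sketch of the
diskfs_try_dropping_softrefs change below; the early-out for nodes
that are not in the cache and the cache_id bookkeeping are omitted):

    void
    diskfs_try_dropping_softrefs (struct node *np)
    {
      unsigned int references;

      /* Lock order: all_nodes_lock before diskfs_node_refcnt_lock.  */
      pthread_rwlock_wrlock (&all_nodes_lock);

      pthread_spin_lock (&diskfs_node_refcnt_lock);
      references = np->references;
      pthread_spin_unlock (&diskfs_node_refcnt_lock);

      /* libdiskfs holds one hard reference across this call, so a
         count above one means someone reacquired the node and we must
         keep our light reference.  */
      if (references == 1)
        diskfs_nrele_light (np);

      pthread_rwlock_unlock (&all_nodes_lock);
    }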

* tmpfs/tmpfs.h (num_files, tmpfs_space_used): Use atomic operations
for these variables.
(adjust_used): Use atomic operations.
(get_used): New convenience function to atomically retrieve
tmpfs_space_used.  Both helpers are sketched below, after this list.
* tmpfs/node.c (all_nodes_lock): New lock.
(diskfs_alloc_node): Use a separate lock to protect all_nodes.
Adjust the reference counting accordingly.
(diskfs_free_node): Likewise.
(diskfs_cached_lookup): Likewise.
(diskfs_node_iterate): Likewise.
(diskfs_node_norefs): Do not remove the node from all_nodes.  This
actually looks like a mistake; I do not know why it was done both here
and in diskfs_free_node.
(diskfs_try_dropping_softrefs): Check whether someone reacquired a
reference, and if so hold on to our light reference.
(diskfs_grow): Use atomic operations.
* tmpfs/tmpfs.c (diskfs_set_statfs): Likewise.
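
The counter helpers in tmpfs.h reduce to plain relaxed atomics.  As a
sketch (this mirrors the tmpfs.h hunk below):

    static inline void
    adjust_used (off_t change)
    {
      /* Relaxed ordering suffices; the counter feeds statistics and
         limit checks, it is not used for synchronization.  */
      __atomic_add_fetch (&tmpfs_space_used, change, __ATOMIC_RELAXED);
    }

    static inline off_t
    get_used (void)
    {
      return __atomic_load_n (&tmpfs_space_used, __ATOMIC_RELAXED);
    }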
---
 tmpfs/node.c  | 107 ++++++++++++++++++++++++++++++++++++++++++----------------
 tmpfs/tmpfs.c |   6 ++--
 tmpfs/tmpfs.h |  20 +++++++----
 3 files changed, 94 insertions(+), 39 deletions(-)

diff --git a/tmpfs/node.c b/tmpfs/node.c
index acc029a..24ad0bd 100644
--- a/tmpfs/node.c
+++ b/tmpfs/node.c
@@ -29,8 +29,19 @@ the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.  */
 unsigned int num_files;
 static unsigned int gen;
 
+/* all_nodes is a list of all nodes.
+
+   Access to all_nodes and all_nodes_nr_items is protected by
+   all_nodes_lock.
+
+   Every node in all_nodes carries a light reference.  When we are
+   asked to give up that light reference, we reacquire our lock
+   momentarily to check whether someone else reacquired a
+   reference.  */
 struct node *all_nodes;
 static size_t all_nodes_nr_items;
+/* all_nodes_lock must be acquired before diskfs_node_refcnt_lock.  */
+pthread_rwlock_t all_nodes_lock = PTHREAD_RWLOCK_INITIALIZER;
 
 error_t
 diskfs_alloc_node (struct node *dp, mode_t mode, struct node **npp)
@@ -40,18 +51,17 @@ diskfs_alloc_node (struct node *dp, mode_t mode, struct node **npp)
   dn = calloc (1, sizeof *dn);
   if (dn == 0)
     return ENOSPC;
-  pthread_spin_lock (&diskfs_node_refcnt_lock);
-  if (round_page (tmpfs_space_used + sizeof *dn) / vm_page_size
+
+  if (round_page (get_used () + sizeof *dn) / vm_page_size
       > tmpfs_page_limit)
     {
-      pthread_spin_unlock (&diskfs_node_refcnt_lock);
+      /* Note: no lock is held here; the limit check is best-effort.  */
       free (dn);
       return ENOSPC;
     }
   dn->gen = gen++;
-  ++num_files;
-  tmpfs_space_used += sizeof *dn;
-  pthread_spin_unlock (&diskfs_node_refcnt_lock);
+  __atomic_add_fetch (&num_files, 1, __ATOMIC_RELAXED);
+  adjust_used (sizeof *dn);
 
   dn->type = IFTODT (mode & S_IFMT);
   return diskfs_cached_lookup ((ino_t) (uintptr_t) dn, npp);
@@ -75,15 +85,19 @@ diskfs_free_node (struct node *np, mode_t mode)
       free (np->dn->u.lnk);
       break;
     }
+
+  pthread_rwlock_wrlock (&all_nodes_lock);
   *np->dn->hprevp = np->dn->hnext;
   if (np->dn->hnext != 0)
     np->dn->hnext->dn->hprevp = np->dn->hprevp;
   all_nodes_nr_items -= 1;
+  pthread_rwlock_unlock (&all_nodes_lock);
+
   free (np->dn);
   np->dn = 0;
 
-  --num_files;
-  tmpfs_space_used -= sizeof *np->dn;
+  __atomic_sub_fetch (&num_files, 1, __ATOMIC_RELAXED);
+  adjust_used (-sizeof *np->dn);
 }
 
 void
@@ -117,14 +131,6 @@ diskfs_node_norefs (struct node *np)
 	  np->dn->u.chr = np->dn_stat.st_rdev;
 	  break;
 	}
-
-      /* Remove this node from the cache list rooted at `all_nodes'.  */
-      *np->dn->hprevp = np->dn->hnext;
-      if (np->dn->hnext != 0)
-	np->dn->hnext->dn->hprevp = np->dn->hprevp;
-      all_nodes_nr_items -= 1;
-      np->dn->hnext = 0;
-      np->dn->hprevp = 0;
     }
 
   free (np);
@@ -167,30 +173,34 @@ diskfs_cached_lookup (ino_t inum, struct node **npp)
 
   assert (npp);
 
+  pthread_rwlock_rdlock (&all_nodes_lock);
   if (dn->hprevp != 0)		/* There is already a node.  */
-    {
-      np = *dn->hprevp;
-      assert (np->dn == dn);
-      assert (*dn->hprevp == np);
-
-      diskfs_nref (np);
-    }
+    goto gotit;
   else
     /* Create the new node.  */
     {
       struct stat *st;
+      pthread_rwlock_unlock (&all_nodes_lock);
 
       np = diskfs_make_node (dn);
       np->cache_id = (ino_t) (uintptr_t) dn;
 
-      pthread_spin_lock (&diskfs_node_refcnt_lock);
+      pthread_rwlock_wrlock (&all_nodes_lock);
+      if (dn->hprevp != NULL)
+        {
+          /* We lost a race.  */
+          diskfs_nrele (np);
+          goto gotit;
+        }
+
       dn->hnext = all_nodes;
       if (dn->hnext)
 	dn->hnext->dn->hprevp = &dn->hnext;
       dn->hprevp = &all_nodes;
       all_nodes = np;
       all_nodes_nr_items += 1;
-      pthread_spin_unlock (&diskfs_node_refcnt_lock);
+      diskfs_nref_light (np);
+      pthread_rwlock_unlock (&all_nodes_lock);
 
       st = &np->dn_stat;
       memset (st, 0, sizeof *st);
@@ -220,6 +230,16 @@ diskfs_cached_lookup (ino_t inum, struct node **npp)
   pthread_mutex_lock (&np->lock);
   *npp = np;
   return 0;
+
+ gotit:
+  np = *dn->hprevp;
+  assert (np->dn == dn);
+  assert (*dn->hprevp == np);
+  diskfs_nref (np);
+  pthread_rwlock_unlock (&all_nodes_lock);
+  pthread_mutex_lock (&np->lock);
+  *npp = np;
+  return 0;
 }
 
 error_t
@@ -229,12 +249,12 @@ diskfs_node_iterate (error_t (*fun) (struct node *))
   size_t num_nodes;
   struct node *node, **node_list, **p;
 
-  pthread_spin_lock (&diskfs_node_refcnt_lock);
+  pthread_rwlock_rdlock (&all_nodes_lock);
 
   /* We must copy everything from the hash table into another data structure
      to avoid running into any problems with the hash-table being modified
      during processing (normally we delegate access to hash-table with
-     diskfs_node_refcnt_lock, but we can't hold this while locking the
+     all_nodes_lock, but we can't hold this while locking the
      individual node locks).  */
 
   num_nodes = all_nodes_nr_items;
@@ -243,10 +263,14 @@ diskfs_node_iterate (error_t (*fun) (struct node *))
   for (node = all_nodes; node != 0; node = node->dn->hnext)
     {
       *p++ = node;
+
+      /* We acquire a hard reference for node, but without using
+	 diskfs_nref.  We do this so that diskfs_new_hardrefs will not
+	 get called.  */
       node->references++;
     }
 
-  pthread_spin_unlock (&diskfs_node_refcnt_lock);
+  pthread_rwlock_unlock (&all_nodes_lock);
 
   p = node_list;
   while (num_nodes-- > 0)
@@ -272,6 +296,31 @@ diskfs_node_iterate (error_t (*fun) (struct node *))
 void
 diskfs_try_dropping_softrefs (struct node *np)
 {
+  pthread_rwlock_wrlock (&all_nodes_lock);
+  if (np->cache_id != 0)
+    {
+      /* Check if someone reacquired a reference.  */
+      unsigned int references;
+      pthread_spin_lock (&diskfs_node_refcnt_lock);
+      references = np->references;
+      pthread_spin_unlock (&diskfs_node_refcnt_lock);
+
+      /* An additional reference is acquired by libdiskfs across calls
+	 to diskfs_try_dropping_softrefs.  */
+      if (references > 1)
+	{
+	  /* A reference was reacquired.  It's fine, we didn't touch
+	     anything yet. */
+	  pthread_rwlock_unlock (&all_nodes_lock);
+	  return;
+	}
+
+      /* Just let go of the weak reference.  The node will be removed
+	 from all_nodes in diskfs_free_node.  */
+      np->cache_id = 0;
+      diskfs_nrele_light (np);
+    }
+  pthread_rwlock_unlock (&all_nodes_lock);
 }
 
 /* The user must define this funcction.  Node NP has some light
@@ -447,7 +496,7 @@ diskfs_grow (struct node *np, off_t size, struct protid *cred)
 
   off_t set_size = size;
   size = round_page (size);
-  if (round_page (tmpfs_space_used + size - np->allocsize)
+  if (round_page (get_used () + size - np->allocsize)
       / vm_page_size > tmpfs_page_limit)
     return ENOSPC;
 
diff --git a/tmpfs/tmpfs.c b/tmpfs/tmpfs.c
index 718c6d8..0aace25 100644
--- a/tmpfs/tmpfs.c
+++ b/tmpfs/tmpfs.c
@@ -67,10 +67,8 @@ diskfs_set_statfs (struct statfs *st)
   st->f_bsize = vm_page_size;
   st->f_blocks = tmpfs_page_limit;
 
-  pthread_spin_lock (&diskfs_node_refcnt_lock);
-  st->f_files = num_files;
-  pages = round_page (tmpfs_space_used) / vm_page_size;
-  pthread_spin_unlock (&diskfs_node_refcnt_lock);
+  st->f_files = __atomic_load_n (&num_files, __ATOMIC_RELAXED);
+  pages = round_page (get_used ()) / vm_page_size;
 
   st->f_bfree = pages < tmpfs_page_limit ? tmpfs_page_limit - pages : 0;
   st->f_bavail = st->f_bfree;
diff --git a/tmpfs/tmpfs.h b/tmpfs/tmpfs.h
index b3c636d..ad47200 100644
--- a/tmpfs/tmpfs.h
+++ b/tmpfs/tmpfs.h
@@ -69,17 +69,25 @@ struct tmpfs_dirent
   char name[0];
 };
 
-extern unsigned int num_files;
-extern off_t tmpfs_page_limit, tmpfs_space_used;
-
+extern off_t tmpfs_page_limit;
 extern mach_port_t default_pager;
 
+/* These two must be accessed using atomic operations.  */
+extern unsigned int num_files;
+extern off_t tmpfs_space_used;
+
+/* Convenience function to adjust tmpfs_space_used.  */
 static inline void
 adjust_used (off_t change)
 {
-  pthread_spin_lock (&diskfs_node_refcnt_lock);
-  tmpfs_space_used += change;
-  pthread_spin_unlock (&diskfs_node_refcnt_lock);
+  __atomic_add_fetch (&tmpfs_space_used, change, __ATOMIC_RELAXED);
+}
+
+/* Convenience function to get tmpfs_space_used.  */
+static inline off_t
+get_used (void)
+{
+  return __atomic_load_n (&tmpfs_space_used, __ATOMIC_RELAXED);
 }
 
 #endif
-- 
2.0.0.rc2