1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
|
/* Directory name lookup caching
Copyright (C) 1996, 1997, 1998, 2014 Free Software Foundation, Inc.
Written by Michael I. Bushnell, p/BSG, & Miles Bader.
This file is part of the GNU Hurd.
The GNU Hurd is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation; either version 2, or (at
your option) any later version.
The GNU Hurd is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111, USA. */
#include "priv.h"
#include <assert.h>
#include <string.h>
/* The name cache is implemented using a hash table.
We use buckets of a fixed size. We approximate the
least-frequently used cache algorithm by counting the number of
lookups using saturating arithmetic in the two lowest bits of the
pointer to the name. Using this strategy we achieve a constant
worst-case lookup and insertion time. */
/* Number of buckets. Must be a power of two. */
#define CACHE_SIZE 256
/* Entries per bucket. */
#define BUCKET_SIZE 4
/* A mask for fast binary modulo. */
#define CACHE_MASK (CACHE_SIZE - 1)
/* Cache bucket with BUCKET_SIZE entries.
   The layout of the bucket is chosen so that it will be straight
   forward to use vector operations in the future.  */
struct cache_bucket
{
  /* Name of the node NODE_CACHE_ID in the directory DIR_CACHE_ID.  If
     NULL, the entry is unused.  The two low-order bits hold the
     saturating use-frequency counter, so the actual string pointer
     must be recovered with charp ().  */
  unsigned long name[BUCKET_SIZE];
  /* The key.  The full hash of (DIR_CACHE_ID, name), stored so that
     most misses can be rejected without a string comparison.  */
  unsigned long key[BUCKET_SIZE];
  /* Used to identify nodes to the fs dependent code.  */
  ino64_t dir_cache_id[BUCKET_SIZE];
  /* 0 for NODE_CACHE_ID means a `negative' entry -- recording that
     there's definitely no node with this name.  */
  ino64_t node_cache_id[BUCKET_SIZE];
};
/* The cache. */
static struct cache_bucket name_cache[CACHE_SIZE];
/* Protected by this lock. */
static pthread_mutex_t cache_lock = PTHREAD_MUTEX_INITIALIZER;
/* Given VALUE, return the char pointer.  */
static inline char *
charp (unsigned long value)
{
  /* Mask off the two low-order bits, which hold the use-frequency
     counter, to recover the stored string pointer.  */
  unsigned long ptr_bits = value & ~3UL;
  return (char *) ptr_bits;
}
/* Given VALUE, return the approximation of use frequency.  */
static inline unsigned long
frequ (unsigned long value)
{
  /* The counter lives in the two least significant bits of the
     tagged name pointer.  */
  return value % 4;
}
/* Add an entry in the Ith slot of the given bucket.  If there is a
   value there, remove it first.  */
static inline void
add_entry (struct cache_bucket *b, int i,
	   const char *name, unsigned long key,
	   ino64_t dir_cache_id, ino64_t node_cache_id)
{
  /* Discard any previous occupant of the slot; free (NULL) is a
     no-op, so an emptiness check is unnecessary.  */
  free (charp (b->name[i]));

  b->name[i] = (unsigned long) strdup (name);
  /* The two low bits are reused as the frequency counter, so the
     copied string must be at least 4-byte aligned (malloc guarantees
     this on supported platforms).  */
  assert ((b->name[i] & 3) == 0);
  if (! b->name[i])
    /* Allocation failed; leave the slot marked unused.  */
    return;

  b->key[i] = key;
  b->dir_cache_id[i] = dir_cache_id;
  b->node_cache_id[i] = node_cache_id;
}
/* Remove the entry in the Ith slot of the given bucket.  */
static inline void
remove_entry (struct cache_bucket *b, int i)
{
  /* free (NULL) is a no-op, so no validity check is needed.  */
  free (charp (b->name[i]));
  b->name[i] = 0;
}
/* Check if the entry in the Ith slot of the given bucket is
   valid.  */
static inline int
valid_entry (struct cache_bucket *b, int i)
{
  /* A slot is in use exactly when a name is recorded in it.  */
  return b->name[i] ? 1 : 0;
}
/* This is the Murmur3 hash algorithm. */
#define FORCE_INLINE inline __attribute__((always_inline))
inline uint32_t rotl32 ( uint32_t x, int8_t r )
{
return (x << r) | (x >> (32 - r));
}
#define ROTL32(x,y) rotl32(x,y)
/* Block read - if your platform needs to do endian-swapping or can
only handle aligned reads, do the conversion here. */
/* Block read - if your platform needs to do endian-swapping or can
   only handle aligned reads, do the conversion here.  I is a block
   index relative to P, and may be negative: the caller points P one
   past the last full block and indexes backwards.  */
FORCE_INLINE uint32_t getblock32 ( const uint32_t * p, int i )
{
  return p[i];
}
/* Finalization mix - force all bits of a hash block to avalanche.
   Alternating xor-shifts and multiplications by odd constants; these
   are the standard MurmurHash3 finalizer constants.  */
FORCE_INLINE uint32_t fmix32 ( uint32_t h )
{
  h ^= h >> 16;
  h *= 0x85ebca6b;
  h ^= h >> 13;
  h *= 0xc2b2ae35;
  h ^= h >> 16;
  return h;
}
/* The Murmur3 hash function.  Computes the 32-bit MurmurHash3 of the
   LEN bytes at KEY, seeded with SEED, and stores the resulting
   uint32_t through OUT.  This follows Austin Appleby's public-domain
   reference implementation.  */
void MurmurHash3_x86_32 ( const void * key, int len,
                          uint32_t seed, void * out )
{
  const uint8_t * data = (const uint8_t*)key;
  const int nblocks = len / 4;
  uint32_t h1 = seed;
  const uint32_t c1 = 0xcc9e2d51;
  const uint32_t c2 = 0x1b873593;

  /* body: mix in each full 4-byte block.  BLOCKS points one past the
     last full block; the loop indexes backwards from -nblocks to -1.  */
  const uint32_t * blocks = (const uint32_t *)(data + nblocks*4);
  for(int i = -nblocks; i; i++)
  {
    uint32_t k1 = getblock32(blocks,i);
    k1 *= c1;
    k1 = ROTL32(k1,15);
    k1 *= c2;
    h1 ^= k1;
    h1 = ROTL32(h1,13);
    h1 = h1*5+0xe6546b64;
  }

  /* tail: mix in the remaining 0-3 bytes.  The case labels fall
     through intentionally, accumulating one byte each.  */
  const uint8_t * tail = (const uint8_t*)(data + nblocks*4);
  uint32_t k1 = 0;
  switch(len & 3)
  {
  case 3: k1 ^= tail[2] << 16;	/* fallthrough */
  case 2: k1 ^= tail[1] << 8;	/* fallthrough */
  case 1: k1 ^= tail[0];
          k1 *= c1; k1 = ROTL32(k1,15); k1 *= c2; h1 ^= k1;
  };

  /* finalization: fold in the length and avalanche the final bits.  */
  h1 ^= len;
  h1 = fmix32(h1);
  *(uint32_t*)out = h1;
}
/* If there is no best candidate to replace, pick any.  We approximate
   "any" by picking the slot indicated by REPLACE, and then increment
   REPLACE so that successive evictions cycle through the bucket.  */
static int replace;
/* Lookup (DIR_CACHE_ID, NAME, KEY) in the cache.  If it is found,
   return 1 and set BUCKET and INDEX to the item.  Otherwise, return 0
   and set BUCKET and INDEX to the slot where the item should be
   inserted.  Must be called with cache_lock held.  */
static inline int
lookup (ino64_t dir_cache_id, const char *name, unsigned long key,
	struct cache_bucket **bucket, int *index)
{
  /* The bucket is selected by the low-order bits of the hash key.  */
  struct cache_bucket *b = *bucket = &name_cache[key & CACHE_MASK];
  unsigned long best = 3;
  int i;

  for (i = 0; i < BUCKET_SIZE; i++)
    {
      /* The use frequency lives in the two low bits of the tagged
	 name pointer.  An unused slot (name == 0) reads as frequency
	 0 and is therefore a prime replacement candidate.  */
      unsigned long f = frequ (b->name[i]);
      if (valid_entry (b, i)
	  && b->key[i] == key
	  && b->dir_cache_id[i] == dir_cache_id
	  && strcmp (charp (b->name[i]), name) == 0)
	{
	  /* Cache hit: bump the use counter, saturating at 3.  */
	  if (f < 3)
	    b->name[i] += 1;
	  *index = i;
	  return 1;
	}

      /* Keep track of the replacement candidate.  */
      if (f < best)
	{
	  best = f;
	  *index = i;
	}
    }

  /* If there was no entry with a lower use frequency, just replace
     any entry.  */
  if (best == 3)
    {
      *index = replace;
      replace = (replace + 1) & (BUCKET_SIZE - 1);
    }

  return 0;
}
/* Hash the directory cache_id and the name.  The two hashes are
   chained by seeding the name hash with the directory hash.  */
static inline unsigned long
hash (ino64_t dir_cache_id, const char *name)
{
  /* MurmurHash3_x86_32 writes exactly 32 bits through its OUT
     pointer, so the accumulator must be a uint32_t.  The previous
     `unsigned long h' left the high 4 bytes uninitialized on LP64
     targets (undefined behavior), making the returned keys
     nondeterministic and the key comparisons in lookup ()
     unreliable.  */
  uint32_t h;
  MurmurHash3_x86_32 (&dir_cache_id, sizeof dir_cache_id, 0, &h);
  MurmurHash3_x86_32 (name, strlen (name), h, &h);
  return h;
}
/* Node NP has just been found in DIR with NAME.  If NP is null, that
   means that this name has been confirmed as absent in the directory.  */
void
diskfs_enter_lookup_cache (struct node *dir, struct node *np, const char *name)
{
  struct cache_bucket *bucket;
  int slot = 0, hit;
  unsigned long key = hash (dir->cache_id, name);
  /* A null NP records a negative entry.  */
  ino64_t id = np ? np->cache_id : 0;

  pthread_mutex_lock (&cache_lock);
  hit = lookup (dir->cache_id, name, key, &bucket, &slot);
  if (hit)
    /* The name is already cached; just refresh the node it maps to.  */
    bucket->node_cache_id[slot] = id;
  else
    add_entry (bucket, slot, name, key, dir->cache_id, id);
  pthread_mutex_unlock (&cache_lock);
}
/* Purge all references in the cache to NP as a node inside
directory DP. */
void
diskfs_purge_lookup_cache (struct node *dp, struct node *np)
{
int i;
struct cache_bucket *b;
pthread_mutex_lock (&cache_lock);
for (b = &name_cache[0]; b < &name_cache[CACHE_SIZE]; b++)
for (i = 0; i < BUCKET_SIZE; i++)
if (valid_entry (b, i)
&& b->dir_cache_id[i] == dp->cache_id
&& b->node_cache_id[i] == np->cache_id)
remove_entry (b, i);
pthread_mutex_unlock (&cache_lock);
}
/* Scan the cache looking for NAME inside DIR.  If we have no
   information about the entry at all, then return 0.  If the entry is
   confirmed to not exist, then return -1.  Otherwise, return NP for
   the entry, with a newly allocated reference.  */
struct node *
diskfs_check_lookup_cache (struct node *dir, const char *name)
{
  unsigned long key = hash (dir->cache_id, name);
  int lookup_parent = name[0] == '.' && name[1] == '.' && name[2] == '\0';
  struct cache_bucket *bucket;
  int i, found;

  if (lookup_parent && dir == diskfs_root_node)
    /* The .. of the root is outside our file system; report a cache
       miss so the caller handles it.  */
    return NULL;

  pthread_mutex_lock (&cache_lock);
  found = lookup (dir->cache_id, name, key, &bucket, &i);
  if (! found)
    {
      pthread_mutex_unlock (&cache_lock);
      return 0;
    }

  /* Snapshot the cached id before dropping the lock; the bucket may
     be rewritten concurrently afterwards.  */
  ino64_t id = bucket->node_cache_id[i];
  pthread_mutex_unlock (&cache_lock);

  if (id == 0)
    /* A negative cache entry.  */
    return (struct node *) -1;

  if (id == dir->cache_id)
    /* The cached node is the same as DIR.  */
    {
      diskfs_nref (dir);
      return dir;
    }

  /* Just a normal entry in DIR; get the actual node.  */
  struct node *np;
  error_t err;

  if (lookup_parent)
    {
      /* Unlock DIR before fetching its parent to respect the
	 child-before-parent locking order.  */
      pthread_mutex_unlock (&dir->lock);
      err = diskfs_cached_lookup (id, &np);
      pthread_mutex_lock (&dir->lock);

      /* In the window where DIR was unlocked, we might have lost.
	 So check the cache again, and see if it's still there; if so,
	 then we win.  */
      pthread_mutex_lock (&cache_lock);
      found = lookup (dir->cache_id, name, key, &bucket, &i);
      if (! found || bucket->node_cache_id[i] != id)
	{
	  pthread_mutex_unlock (&cache_lock);
	  /* Lose.  Only release NP if the lookup actually produced
	     one: the previous code called diskfs_nput on an
	     uninitialized pointer when diskfs_cached_lookup had
	     failed.  */
	  if (! err)
	    diskfs_nput (np);
	  return 0;
	}
      pthread_mutex_unlock (&cache_lock);
    }
  else
    err = diskfs_cached_lookup (id, &np);

  return err ? 0 : np;
}
|