1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
|
/* Lock-less reference counting primitives
Copyright (C) 2014 Free Software Foundation, Inc.
Written by Justus Winter <4winter@informatik.uni-hamburg.de>
This file is part of the GNU Hurd.
The GNU Hurd is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation; either version 2, or (at
your option) any later version.
The GNU Hurd is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License
along with the GNU Hurd. If not, see <http://www.gnu.org/licenses/>. */
#ifndef _HURD_REFCOUNT_H_
#define _HURD_REFCOUNT_H_
#include <assert.h>
#include <limits.h>
#include <stdint.h>
/* Simple reference counting. */

/* An opaque type.  You must not access these values directly; use
   the refcount_* functions below instead.  */
typedef unsigned int refcount_t;
/* Set *REF to REFERENCES.  A count of zero is forbidden, as zero
   denotes a dead object.  Initialization is not atomic; publish REF
   to other threads only after this call.  */
static inline void
refcount_init (refcount_t *ref, unsigned int references)
{
  assert (references != 0 || !"references must not be zero!");
  *ref = references;
}
/* Add one reference to REF and return the new count.  Uses atomic
   operations; callers need not serialize.

   Prefer refcount_ref, which additionally detects references taken
   on an already-dead object.  */
static inline unsigned int
refcount_unsafe_ref (refcount_t *ref)
{
  /* fetch-add returns the old value; adding one yields the updated
     count, same as __atomic_add_fetch.  */
  unsigned int updated = __atomic_fetch_add (ref, 1, __ATOMIC_RELAXED) + 1;
  assert (updated != UINT_MAX || !"refcount overflowed!");
  return updated;
}
/* Add one reference to REF and return the new count.  Uses atomic
   operations; callers need not serialize.  A result of exactly one
   means the count was zero beforehand, i.e. the object had already
   been released — a use-after-free.  */
static inline unsigned int
refcount_ref (refcount_t *ref)
{
  unsigned int updated = refcount_unsafe_ref (ref);
  assert (updated != 1 || !"refcount detected use-after-free!");
  return updated;
}
/* Drop one reference from REF and return the new count.  Uses atomic
   operations; callers need not serialize.  */
static inline unsigned int
refcount_deref (refcount_t *ref)
{
  /* fetch-sub returns the old value; subtracting one yields the
     updated count, same as __atomic_sub_fetch.  A result of UINT_MAX
     means the count was already zero.  */
  unsigned int updated = __atomic_fetch_sub (ref, 1, __ATOMIC_RELAXED) - 1;
  assert (updated != UINT_MAX || !"refcount underflowed!");
  return updated;
}
/* Return the current value of REF.  Uses an atomic load; callers
   need not serialize.  */
static inline unsigned int
refcount_references (refcount_t *ref)
{
  unsigned int snapshot = __atomic_load_n (ref, __ATOMIC_RELAXED);
  return snapshot;
}
/* Reference counting with weak references. */

/* An opaque type.  You must not access these values directly; use
   the refcounts_* functions below instead.  */
typedef union _references refcounts_t;

/* Instead, the functions manipulating refcounts_t values write the
   results into this kind of objects. */
struct references {
  /* We chose the layout of this struct so that when it is used in the
     union _references, the hard reference counts occupy the least
     significant bits. We rely on this layout for atomic promotion
     and demotion of references. See refcounts_promote and
     refcounts_demote for details. */
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
  uint32_t hard;	/* Hard (strong) references.  */
  uint32_t weak;	/* Weak references.  */
#else
  uint32_t weak;	/* Weak references.  */
  uint32_t hard;	/* Hard (strong) references.  */
#endif
};
/* We use a union to convert struct reference values to uint64_t which
   we can manipulate atomically. While this behavior is not
   guaranteed by the C standard, it is supported by all major
   compilers. */
union _references {
  struct references references;	/* The two 32-bit counters.  */
  uint64_t value;		/* The same bytes, as one word for
				   the __atomic builtins.  */
};
/* Set up REF to hold HARD hard and WEAK weak references.  At least
   one of the two counts must be non-zero.  Initialization is not
   atomic; publish REF to other threads only after this call.  */
static inline void
refcounts_init (refcounts_t *ref, uint32_t hard, uint32_t weak)
{
  assert (! (hard == 0 && weak == 0)
          || !"references must not both be zero!");
  ref->references.hard = hard;
  ref->references.weak = weak;
}
/* Take one hard reference on REF.  If RESULT is non-NULL, the counts
   after the operation are stored there.  Uses atomic operations;
   callers need not serialize.

   Prefer refcounts_ref, which additionally detects references taken
   on an already-dead object.  */
static inline void
refcounts_unsafe_ref (refcounts_t *ref, struct references *result)
{
  /* Adding this to the combined word bumps only the hard count,
     which occupies the low-order bits.  */
  const union _references one_hard = { .references = { .hard = 1 } };
  union _references updated;
  updated.value =
    __atomic_add_fetch (&ref->value, one_hard.value, __ATOMIC_RELAXED);
  assert (updated.references.hard != UINT32_MAX || !"refcount overflowed!");
  if (result != NULL)
    *result = updated.references;
}
/* Take one hard reference on REF.  If RESULT is non-NULL, the counts
   after the operation are stored there.  Uses atomic operations;
   callers need not serialize.  Both counts having been zero before
   the increment indicates a use-after-free.  */
static inline void
refcounts_ref (refcounts_t *ref, struct references *result)
{
  struct references updated;
  refcounts_unsafe_ref (ref, &updated);
  assert (! (updated.hard == 1 && updated.weak == 0)
          || !"refcount detected use-after-free!");
  if (result != NULL)
    *result = updated;
}
/* Drop one hard reference from REF.  If RESULT is non-NULL, the
   counts after the operation are stored there.  Uses atomic
   operations; callers need not serialize.  */
static inline void
refcounts_deref (refcounts_t *ref, struct references *result)
{
  const union _references one_hard = { .references = { .hard = 1 } };
  union _references updated;
  updated.value =
    __atomic_sub_fetch (&ref->value, one_hard.value, __ATOMIC_RELAXED);
  /* A hard count of UINT32_MAX after the update means it was already
     zero before the decrement.  */
  assert (updated.references.hard != UINT32_MAX || !"refcount underflowed!");
  if (result != NULL)
    *result = updated.references;
}
/* Promote a weak reference to a hard reference.  If RESULT is not
   NULL, the result of the operation is written there.  This function
   uses atomic operations.  It is not required to serialize calls to
   this function.  */
static inline void
refcounts_promote (refcounts_t *ref, struct references *result)
{
  /* To promote a weak reference, we need to atomically subtract 1
     from the weak reference count, and add 1 to the hard reference
     count.

     We can subtract by 1 by adding the two's complement of 1 = ~0 to
     a fixed-width value, discarding the overflow.

     We do the same in our uint64_t value, but we have chosen the
     layout of struct references so that when it is used in the union
     _references, the weak reference counts occupy the most
     significant bits.  When we add ~0 to the weak references, the
     overflow will be discarded as unsigned arithmetic is modulo 2^n.
     So we just add a hard reference.  In combination, this is the
     desired operation.  */
  const union _references op =
    { .references = { .weak = ~0U, .hard = 1} };
  union _references r;
  r.value = __atomic_add_fetch (&ref->value, op.value, __ATOMIC_RELAXED);
  /* A hard count of UINT32_MAX after the update means it reached its
     maximum; a weak count of UINT32_MAX means it was zero before —
     there was no weak reference to promote.  */
  assert (r.references.hard != UINT32_MAX || !"refcount overflowed!");
  assert (r.references.weak != UINT32_MAX || !"refcount underflowed!");
  if (result)
    *result = r.references;
}
/* Demote a hard reference to a weak reference.  If RESULT is not
   NULL, the result of the operation is written there.  This function
   uses atomic operations.  It is not required to serialize calls to
   this function.  */
static inline void
refcounts_demote (refcounts_t *ref, struct references *result)
{
  /* To demote a hard reference, we need to atomically subtract 1 from
     the hard reference count, and add 1 to the weak reference count.

     We can subtract by 1 by adding the two's complement of 1 = ~0 to
     a fixed-width value, discarding the overflow.

     We do the same in our uint64_t value, but we have chosen the
     layout of struct references so that when it is used in the union
     _references, the hard reference counts occupy the least
     significant bits.  When we add ~0 to the hard references, it will
     overflow into the weak references.  This is the desired
     operation.  */
  const union _references op = { .references = { .hard = ~0U } };
  union _references r;
  r.value = __atomic_add_fetch (&ref->value, op.value, __ATOMIC_RELAXED);
  /* A hard count of UINT32_MAX after the update means it was zero
     before — there was no hard reference to demote; a weak count of
     UINT32_MAX means it reached its maximum.  */
  assert (r.references.hard != UINT32_MAX || !"refcount underflowed!");
  assert (r.references.weak != UINT32_MAX || !"refcount overflowed!");
  if (result)
    *result = r.references;
}
/* Take one weak reference on REF.  If RESULT is non-NULL, the counts
   after the operation are stored there.  Uses atomic operations;
   callers need not serialize.

   Prefer refcounts_ref_weak, which additionally detects references
   taken on an already-dead object.  */
static inline void
refcounts_unsafe_ref_weak (refcounts_t *ref, struct references *result)
{
  /* Adding this to the combined word bumps only the weak count,
     which occupies the high-order bits.  */
  const union _references one_weak = { .references = { .weak = 1 } };
  union _references updated;
  updated.value =
    __atomic_add_fetch (&ref->value, one_weak.value, __ATOMIC_RELAXED);
  assert (updated.references.weak != UINT32_MAX || !"refcount overflowed!");
  if (result != NULL)
    *result = updated.references;
}
/* Take one weak reference on REF.  If RESULT is non-NULL, the counts
   after the operation are stored there.  Uses atomic operations;
   callers need not serialize.  Both counts having been zero before
   the increment indicates a use-after-free.  */
static inline void
refcounts_ref_weak (refcounts_t *ref, struct references *result)
{
  struct references updated;
  refcounts_unsafe_ref_weak (ref, &updated);
  assert (! (updated.hard == 0 && updated.weak == 1)
          || !"refcount detected use-after-free!");
  if (result != NULL)
    *result = updated;
}
/* Drop one weak reference from REF.  If RESULT is non-NULL, the
   counts after the operation are stored there.  Uses atomic
   operations; callers need not serialize.  */
static inline void
refcounts_deref_weak (refcounts_t *ref, struct references *result)
{
  const union _references one_weak = { .references = { .weak = 1 } };
  union _references updated;
  updated.value =
    __atomic_sub_fetch (&ref->value, one_weak.value, __ATOMIC_RELAXED);
  /* A weak count of UINT32_MAX after the update means it was already
     zero before the decrement.  */
  assert (updated.references.weak != UINT32_MAX || !"refcount underflowed!");
  if (result != NULL)
    *result = updated.references;
}
/* Load the current reference counts of REF into *RESULT.  Uses an
   atomic load of the combined word, so the two counts are mutually
   consistent; callers need not serialize.  */
static inline void
refcounts_references (refcounts_t *ref, struct references *result)
{
  union _references snapshot;
  snapshot.value = __atomic_load_n (&ref->value, __ATOMIC_RELAXED);
  *result = snapshot.references;
}
/* Return the current hard reference count of REF.  Uses atomic
   operations; callers need not serialize.  */
static inline uint32_t
refcounts_hard_references (refcounts_t *ref)
{
  struct references snapshot;
  refcounts_references (ref, &snapshot);
  return snapshot.hard;
}
/* Return the current weak reference count of REF.  Uses atomic
   operations; callers need not serialize.  */
static inline uint32_t
refcounts_weak_references (refcounts_t *ref)
{
  struct references snapshot;
  refcounts_references (ref, &snapshot);
  return snapshot.weak;
}
#endif /* _HURD_REFCOUNT_H_ */
|