/*
* Mach Operating System
* Copyright (c) 1991,1990,1989 Carnegie Mellon University.
* Copyright (c) 1993,1994 The University of Utah and
* the Computer Systems Laboratory (CSL).
* All rights reserved.
*
* Permission to use, copy, modify and distribute this software and its
* documentation is hereby granted, provided that both the copyright
* notice and this permission notice appear in all copies of the
* software, derivative works or modified versions, and any portions
* thereof, and that both notices appear in supporting documentation.
*
* CARNEGIE MELLON, THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF
* THIS SOFTWARE IN ITS "AS IS" CONDITION, AND DISCLAIM ANY LIABILITY
* OF ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF
* THIS SOFTWARE.
*
* Carnegie Mellon requests users of this software to return to
*
* Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
* School of Computer Science
* Carnegie Mellon University
* Pittsburgh PA 15213-3890
*
* any improvements or extensions that they make and grant Carnegie Mellon
* the rights to redistribute these changes.
*/
/*
* File: ipc/ipc_space.h
* Author: Rich Draves
* Date: 1989
*
* Definitions for IPC spaces of capabilities.
*/
#ifndef _IPC_IPC_SPACE_H_
#define _IPC_IPC_SPACE_H_
#include <mach/boolean.h>
#include <mach/kern_return.h>
#include <mach/mach_types.h>
#include <machine/vm_param.h>
#include <kern/macros.h>
#include <kern/lock.h>
#include <kern/rdxtree.h>
#include <kern/slab.h>
#include <ipc/ipc_entry.h>
#include <ipc/ipc_types.h>
/*
 * Every task has a space of IPC capabilities.
 * IPC operations like send and receive use this space.
 * IPC kernel calls manipulate the space of the target task.
 *
 * Each space maps port names to entries through the radix tree
 * is_map; is_size counts the entries currently in use.  A second
 * radix tree, is_reverse_map, maps IPC objects back to the entries
 * that name them.  Up to IS_FREE_LIST_SIZE_LIMIT unused entries are
 * kept on is_free_list (while staying installed in is_map under
 * their names), so the common allocation and deallocation paths do
 * not have to update the radix tree.
 */
typedef unsigned int ipc_space_refs_t;
struct ipc_space {
	decl_simple_lock_data(,is_ref_lock_data) /* protects is_references */
	ipc_space_refs_t is_references;		/* reference count */
	decl_simple_lock_data(,is_lock_data)	/* protects the rest of the space */
boolean_t is_active; /* is the space alive? */
struct rdxtree is_map; /* a map of entries */
size_t is_size; /* number of entries */
struct rdxtree is_reverse_map; /* maps objects to entries */
ipc_entry_t is_free_list; /* a linked list of free entries */
size_t is_free_list_size; /* number of free entries */
#define IS_FREE_LIST_SIZE_LIMIT 64 /* maximum number of entries
in the free list */
};
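/*
 * Illustrative sketch (not part of the interface): a port name is
 * resolved through is_map to an entry, whose ie_object field names
 * the underlying IPC object; is_reverse_map goes the other way,
 * from object back to entry.  A bounded number of unused entries
 * stay installed in is_map on is_free_list, so the allocate and
 * deallocate paths below usually avoid radix tree updates.
 *
 *	name --(is_map)--> ipc_entry --(ie_object)--> ipc_object
 *	ipc_object --(is_reverse_map)--> ipc_entry
 */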
#define IS_NULL ((ipc_space_t) 0)
extern struct kmem_cache ipc_space_cache;
#define is_alloc() ((ipc_space_t) kmem_cache_alloc(&ipc_space_cache))
#define is_free(is) kmem_cache_free(&ipc_space_cache, (vm_offset_t) (is))
extern struct ipc_space *ipc_space_kernel;
extern struct ipc_space *ipc_space_reply;
#define is_ref_lock_init(is) simple_lock_init(&(is)->is_ref_lock_data)
#define ipc_space_reference_macro(is) \
MACRO_BEGIN \
simple_lock(&(is)->is_ref_lock_data); \
assert((is)->is_references > 0); \
(is)->is_references++; \
simple_unlock(&(is)->is_ref_lock_data); \
MACRO_END
#define ipc_space_release_macro(is) \
MACRO_BEGIN \
ipc_space_refs_t _refs; \
\
simple_lock(&(is)->is_ref_lock_data); \
assert((is)->is_references > 0); \
_refs = --(is)->is_references; \
simple_unlock(&(is)->is_ref_lock_data); \
\
if (_refs == 0) \
is_free(is); \
MACRO_END
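/*
 * Illustrative use of the reference macros (a sketch, not a rule
 * imposed by this header): a caller that keeps a space pointer
 * across a region where the space might otherwise go away takes a
 * reference first and drops it when done; the release may free
 * the space.
 *
 *	ipc_space_reference_macro(space);
 *	... use the space, possibly without holding its lock ...
 *	ipc_space_release_macro(space);
 */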
#define is_lock_init(is) simple_lock_init(&(is)->is_lock_data)
#define is_read_lock(is) simple_lock(&(is)->is_lock_data)
#define is_read_unlock(is) simple_unlock(&(is)->is_lock_data)
#define is_write_lock(is) simple_lock(&(is)->is_lock_data)
#define is_write_lock_try(is) simple_lock_try(&(is)->is_lock_data)
#define is_write_unlock(is) simple_unlock(&(is)->is_lock_data)
/* Readers and writers share a single simple lock, so downgrading
   a write lock to a read lock is a no-op. */
#define is_write_to_read_lock(is)
extern void ipc_space_reference(struct ipc_space *space);
extern void ipc_space_release(struct ipc_space *space);
#define is_reference(is) ipc_space_reference(is)
#define is_release(is) ipc_space_release(is)
kern_return_t ipc_space_create(ipc_table_size_t, ipc_space_t *);
kern_return_t ipc_space_create_special(struct ipc_space **);
void ipc_space_destroy(struct ipc_space *);
/* IPC entry lookups. */
/*
* Routine: ipc_entry_lookup
* Purpose:
* Searches for an entry, given its name.
* Conditions:
* The space must be read or write locked throughout.
* The space must be active.
*/
static inline ipc_entry_t
ipc_entry_lookup(
ipc_space_t space,
mach_port_t name)
{
ipc_entry_t entry;
assert(space->is_active);
entry = rdxtree_lookup(&space->is_map, (rdxtree_key_t) name);
if (entry != IE_NULL
&& IE_BITS_TYPE(entry->ie_bits) == MACH_PORT_TYPE_NONE)
entry = IE_NULL;
assert((entry == IE_NULL) || IE_BITS_TYPE(entry->ie_bits));
return entry;
}
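/*
 * Typical caller pattern for ipc_entry_lookup (an illustrative
 * sketch; the surrounding checks are the caller's responsibility):
 *
 *	is_read_lock(space);
 *	if (!space->is_active) {
 *		is_read_unlock(space);
 *		return KERN_INVALID_TASK;
 *	}
 *	entry = ipc_entry_lookup(space, name);
 *	if (entry == IE_NULL) {
 *		is_read_unlock(space);
 *		return KERN_INVALID_NAME;
 *	}
 *	... inspect entry->ie_bits and entry->ie_object ...
 *	is_read_unlock(space);
 */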
/*
* Routine: ipc_entry_get
* Purpose:
* Tries to allocate an entry out of the space.
* Conditions:
* The space is write-locked and active throughout.
* An object may be locked. Will not allocate memory.
* Returns:
* KERN_SUCCESS A free entry was found.
* KERN_NO_SPACE No entry allocated.
*/
static inline kern_return_t
ipc_entry_get(
ipc_space_t space,
mach_port_t *namep,
ipc_entry_t *entryp)
{
mach_port_t new_name;
ipc_entry_t free_entry;
assert(space->is_active);
/* Get entry from the free list. */
free_entry = space->is_free_list;
if (free_entry == IE_NULL)
return KERN_NO_SPACE;
space->is_free_list = free_entry->ie_next_free;
space->is_free_list_size -= 1;
/*
* Initialize the new entry. We need only
* increment the generation number and clear ie_request.
*/
{
mach_port_gen_t gen;
assert((free_entry->ie_bits &~ IE_BITS_GEN_MASK) == 0);
gen = free_entry->ie_bits + IE_BITS_GEN_ONE;
free_entry->ie_bits = gen;
free_entry->ie_request = 0;
new_name = MACH_PORT_MAKE(free_entry->ie_name, gen);
}
/*
* The new name can't be MACH_PORT_NULL because index
* is non-zero. It can't be MACH_PORT_DEAD because
* the table isn't allowed to grow big enough.
* (See comment in ipc/ipc_table.h.)
*/
assert(MACH_PORT_VALID(new_name));
assert(free_entry->ie_object == IO_NULL);
space->is_size += 1;
*namep = new_name;
*entryp = free_entry;
return KERN_SUCCESS;
}
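/*
 * Illustrative caller sketch for ipc_entry_get: it only consumes
 * the cached free list and never allocates memory, so on
 * KERN_NO_SPACE a caller typically unlocks the space and retries
 * through a slower path that may allocate (the exact fallback
 * depends on the caller).
 *
 *	is_write_lock(space);
 *	kr = ipc_entry_get(space, &name, &entry);
 *	if (kr != KERN_SUCCESS) {
 *		is_write_unlock(space);
 *		... fall back to an allocating path ...
 *	}
 */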
/*
* Routine: ipc_entry_dealloc
* Purpose:
* Deallocates an entry from a space.
* Conditions:
* The space must be write-locked throughout.
* The space must be active.
*/
static inline void
ipc_entry_dealloc(
ipc_space_t space,
mach_port_t name,
ipc_entry_t entry)
{
assert(space->is_active);
assert(entry->ie_object == IO_NULL);
assert(entry->ie_request == 0);
if (space->is_free_list_size < IS_FREE_LIST_SIZE_LIMIT) {
space->is_free_list_size += 1;
entry->ie_bits &= IE_BITS_GEN_MASK;
entry->ie_next_free = space->is_free_list;
space->is_free_list = entry;
} else {
rdxtree_remove(&space->is_map, (rdxtree_key_t) name);
ie_free(entry);
}
space->is_size -= 1;
}
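/*
 * Illustrative deallocation site (a sketch): the asserts above
 * require the caller to have cleared the object and request fields
 * before handing the entry back, all under the write lock.
 *
 *	is_write_lock(space);
 *	... drop whatever reference entry->ie_object carried ...
 *	entry->ie_object = IO_NULL;
 *	entry->ie_request = 0;
 *	ipc_entry_dealloc(space, name, entry);
 *	is_write_unlock(space);
 */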
/* Reverse lookups. */
/* Cast a pointer to a suitable key. */
#define KEY(X) \
({ \
assert((((unsigned long) (X)) & 0x07) == 0); \
((unsigned long long) \
(((unsigned long) (X) - VM_MIN_KERNEL_ADDRESS) >> 3)); \
})
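/*
 * Worked example for KEY (illustrative, with example numbers):
 * IPC objects come from the slab allocator and must be at least
 * 8-byte aligned, which is what the assert on the low three bits
 * enforces.  With VM_MIN_KERNEL_ADDRESS at, say, 0xc0000000, an
 * object at 0xc1234568 maps to
 * (0xc1234568 - 0xc0000000) >> 3 == 0x2468ad,
 * a compact key for the reverse radix tree.
 */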
/* Insert (OBJ, ENTRY) pair into the reverse mapping. SPACE must
be write-locked. */
static inline kern_return_t
ipc_reverse_insert(ipc_space_t space,
ipc_object_t obj,
ipc_entry_t entry)
{
assert(space != IS_NULL);
assert(obj != IO_NULL);
return (kern_return_t) rdxtree_insert(&space->is_reverse_map,
KEY(obj), entry);
}
/* Remove OBJ from the reverse mapping. SPACE must be
write-locked. */
static inline ipc_entry_t
ipc_reverse_remove(ipc_space_t space,
ipc_object_t obj)
{
assert(space != IS_NULL);
assert(obj != IO_NULL);
return rdxtree_remove(&space->is_reverse_map, KEY(obj));
}
/* Remove all entries from the reverse mapping. SPACE must be
write-locked. */
static inline void
ipc_reverse_remove_all(ipc_space_t space)
{
assert(space != IS_NULL);
rdxtree_remove_all(&space->is_reverse_map);
assert(space->is_reverse_map.height == 0);
assert(space->is_reverse_map.root == NULL);
}
/* Return ENTRY related to OBJ, or NULL if no such entry is found in
the reverse mapping. SPACE must be read-locked or
write-locked. */
static inline ipc_entry_t
ipc_reverse_lookup(ipc_space_t space,
ipc_object_t obj)
{
assert(space != IS_NULL);
assert(obj != IO_NULL);
return rdxtree_lookup(&space->is_reverse_map, KEY(obj));
}
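/*
 * Illustrative use of the reverse map (a sketch): when a right for
 * OBJ is copied into SPACE, looking the object up first lets an
 * existing entry, and hence the existing name, be reused instead
 * of allocating a second one.
 *
 *	is_write_lock(space);
 *	entry = ipc_reverse_lookup(space, obj);
 *	if (entry != IE_NULL)
 *		... fold the new right into this entry ...
 *	else
 *		... allocate a fresh entry, then ipc_reverse_insert() ...
 *	is_write_unlock(space);
 */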
#undef KEY
#endif /* _IPC_IPC_SPACE_H_ */