File: obj-scan-build/trans/../../trans/streamio.c
Location: line 875, column 5
Description: Value stored to 'amount' is never read
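
The flagged dead store sits at the tail of device_open_reply (listing lines 873-875, with the declaration at listing line 828): 'amount' is set to vm_page_size, rounded down to a multiple of dev_blksize, and the function then unlocks and returns without ever reading the value. The same rounding is recomputed in start_input (listing lines 933-935) right before device_read_request_inband is issued, so the copy in device_open_reply looks like a leftover. A minimal cleanup sketch, assuming the value really is not needed at open time, is simply to drop the local and its computation; only the excerpt below changes, the rest of the function stays as listed:

    kern_return_t
    device_open_reply (mach_port_t reply, int returncode, mach_port_t device)
    {
      int sizes[DEV_GET_SIZE_COUNT];
      size_t sizes_len = DEV_GET_SIZE_COUNT;
      /* 'int amount;' removed: it was only ever written, never read.  */

      /* ... body unchanged through the DEV_GET_SIZE handling ... */

      /* The former
           amount = vm_page_size;
           if (dev_blksize != 1)
             amount = amount / dev_blksize * dev_blksize;
         is dropped; start_input () derives the same block-aligned request
         size when it actually issues device_read_request_inband.  */
      pthread_mutex_unlock (&global_lock);
      return 0;
    }
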
1 | /* A translator for handling stream devices. |
2 | |
3 | Copyright (C) 2001,02 Free Software Foundation, Inc. |
4 | |
5 | Written by OKUJI Yoshinori <okuji@kuicr.kyoto-u.ac.jp> |
6 | |
7 | This program is free software; you can redistribute it and/or |
8 | modify it under the terms of the GNU General Public License as |
9 | published by the Free Software Foundation; either version 2, or (at |
10 | your option) any later version. |
11 | |
12 | This program is distributed in the hope that it will be useful, but |
13 | WITHOUT ANY WARRANTY; without even the implied warranty of |
14 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
15 | General Public License for more details. |
16 | |
17 | You should have received a copy of the GNU General Public License |
18 | along with this program; if not, write to the Free Software |
19 | Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ |
20 | |
21 | #include <string.h> |
22 | #include <assert.h> |
23 | #include <stdio.h> |
24 | #include <fcntl.h> |
25 | #include <argp.h> |
26 | #include <error.h> |
27 | |
28 | #include <mach.h> |
29 | #include <device/device.h> |
30 | #include <device/device_request.h> |
31 | |
32 | #include <hurd.h> |
33 | #include <hurd/ports.h> |
34 | #include <hurd/trivfs.h> |
35 | #include <version.h> |
36 | |
37 | /* The global lock */ |
38 | pthread_mutex_t global_lock; |
39 | |
40 | /* Wakeup when device_open is finished */ |
41 | pthread_cond_t open_alert; |
42 | |
43 | /* Wakeup for select */ |
44 | pthread_cond_t select_alert; |
45 | |
47 | /* Bucket for all our ports */ |
47 | struct port_bucket *streamdev_bucket; |
48 | |
49 | /* The buffers we use */ |
50 | struct buffer *input_buffer, *output_buffer; |
51 | |
52 | |
53 | /* Information about a buffer. */ |
54 | struct buffer |
55 | { |
56 | /* Pointer to the head of the buffer. */ |
57 | char *head; |
58 | /* Pointer to the tail of the buffer. */ |
59 | char *tail; |
60 | /* The buffer array size. */ |
61 | size_t size; |
62 | /* Wakeup when the buffer is not empty or not full. */ |
63 | pthread_cond_t *wait; |
64 | /* The buffer. */ |
65 | char buf[0]; |
66 | }; |
67 | |
68 | /* Create a new buffer structure with SIZE, returning the pointer. */ |
69 | static inline struct buffer * |
70 | create_buffer (size_t size) |
71 | { |
72 | struct buffer *new = malloc (sizeof (struct buffer) + size); |
73 | assert (new); |
74 | new->head = new->tail = new->buf; |
75 | new->size = size; |
76 | new->wait = malloc (sizeof (pthread_cond_t)); |
77 | assert (new->wait); |
78 | pthread_cond_init (new->wait, NULL); |
79 | return new; |
80 | } |
81 | |
82 | /* Return the size of B. */ |
83 | static inline size_t |
84 | buffer_size (struct buffer *b) |
85 | { |
86 | return b->tail - b->head; |
87 | } |
88 | |
89 | /* Return how many characters can be read from B. */ |
90 | static inline size_t |
91 | buffer_readable (struct buffer *b) |
92 | { |
93 | return buffer_size (b); |
94 | } |
95 | |
96 | /* Return how many characters can be written to B. */ |
97 | static inline size_t |
98 | buffer_writable (struct buffer *b) |
99 | { |
100 | return b->size - buffer_size (b); |
101 | } |
102 | |
103 | /* Flush B. */ |
104 | static inline void |
105 | clear_buffer (struct buffer *b) |
106 | { |
107 | if (b == 0) |
108 | return; |
109 | b->head = b->tail = b->buf; |
110 | pthread_cond_broadcast (b->wait); |
111 | pthread_cond_broadcast (&select_alert); |
112 | } |
113 | |
114 | /* Read up to LEN bytes from B to DATA, returning the amount actually read. */ |
115 | static inline size_t |
116 | buffer_read (struct buffer *b, void *data, size_t len) |
117 | { |
118 | size_t max = buffer_size (b); |
119 | |
120 | if (len > max) |
121 | len = max; |
122 | |
123 | memcpy (data, b->head, len); |
124 | b->head += len; |
125 | |
126 | if (b->head > b->buf + b->size / 2) |
127 | { |
128 | size_t size = buffer_size (b); |
129 | |
130 | memmove (b->buf, b->head, size); |
131 | b->head = b->buf; |
132 | b->tail = b->buf + size; |
133 | } |
134 | |
135 | pthread_cond_broadcast (b->wait); |
136 | pthread_cond_broadcast (&select_alert); |
137 | return len; |
138 | } |
139 | |
140 | /* Write LEN bytes from DATA to B, returning the amount actually written. */ |
141 | static inline size_t |
142 | buffer_write (struct buffer *b, void *data, size_t len) |
143 | { |
144 | size_t size = buffer_writable (b); |
145 | |
146 | if (len > size) |
147 | len = size; |
148 | |
149 | memcpy (b->tail, data, len); |
150 | b->tail += len; |
151 | |
152 | pthread_cond_broadcast (b->wait); |
153 | pthread_cond_broadcast (&select_alert); |
154 | return len; |
155 | } |
156 | |
157 | |
158 | /* Open a new device structure for the device NAME with MODE. If an error |
159 | occurs, the error code is returned, otherwise 0. */ |
160 | error_t dev_open (const char *name, dev_mode_t mode); |
161 | |
162 | /* Check if the device is already opened. */ |
163 | int dev_already_opened (void); |
164 | |
165 | /* Close the device. */ |
166 | void dev_close (void); |
167 | |
168 | /* Read up to AMOUNT bytes, returned in BUF and LEN. If NOWAIT is non-zero |
169 | and the buffer is empty, then returns EWOULDBLOCK. If an error occurs, |
170 | the error code is returned, otherwise 0. */ |
171 | error_t dev_read (size_t amount, void **buf, size_t *len, int nowait); |
172 | |
173 | /* Return current readable size in AMOUNT. If an error occurs, the error |
174 | code is returned, otherwise 0. */ |
175 | error_t dev_readable (size_t *amount); |
176 | |
177 | /* Write LEN bytes from BUF, returning the amount actually written |
178 | in AMOUNT. If NOWAIT is non-zero and the buffer is full, then returns |
179 | EWOULDBLOCK. If an error occurs, the error code is returned, |
180 | otherwise 0. */ |
181 | error_t dev_write (void *buf, size_t len, size_t *amount, int nowait); |
182 | |
183 | /* Try and write out any pending writes to the device. If WAIT is non-zero, |
184 | will wait for any activity to cease. */ |
185 | error_t dev_sync (int wait); |
186 | |
187 | |
188 | |
189 | static struct argp_option options[] = |
190 | { |
191 | {"rdev", 'n', "ID", 0, |
192 | "The stat rdev number for this node; may be either a" |
193 | " single integer, or of the form MAJOR,MINOR"}, |
194 | {"readonly", 'r', 0, 0, "Disallow writing"}, |
195 | {"rdonly", 0, 0, OPTION_ALIAS | OPTION_HIDDEN}, |
196 | {"ro", 0, 0, OPTION_ALIAS | OPTION_HIDDEN}, |
197 | {"writable", 'w', 0, 0, "Allow writing"}, |
198 | {"rdwr", 0, 0, OPTION_ALIAS | OPTION_HIDDEN}, |
199 | {"rw", 0, 0, OPTION_ALIAS | OPTION_HIDDEN}, |
200 | {"writeonly", 'W',0, 0, "Disallow reading"}, |
201 | {"wronly", 0, 0, OPTION_ALIAS | OPTION_HIDDEN}, |
202 | {0} |
203 | }; |
204 | |
205 | static const char args_doc[] = "DEVICE"; |
206 | static const char doc[] = "Translator for stream devices."; |
207 | |
208 | const char *argp_program_version = STANDARD_HURD_VERSION (streamio); |
209 | |
210 | |
211 | static char *stream_name; |
212 | static int rdev; |
213 | static int nperopens; |
214 | |
215 | /* Parse a single option. */ |
216 | static error_t |
217 | parse_opt (int key, char *arg, struct argp_state *state) |
218 | { |
219 | switch (key) |
220 | { |
221 | case 'r': |
222 | trivfs_allow_open = O_READ; |
223 | break; |
224 | case 'w': |
225 | trivfs_allow_open = O_RDWR; |
226 | break; |
227 | case 'W': |
228 | trivfs_allow_open = O_WRITE; |
229 | break; |
230 | |
231 | case 'n': |
232 | { |
233 | char *start = arg; |
234 | char *end; |
235 | |
236 | rdev = strtoul (start, &end, 0); |
237 | if (*end == ',') |
238 | /* MAJOR,MINOR form */ |
239 | { |
240 | start = end + 1; |
241 | rdev = (rdev << 8) + strtoul (start, &end, 0); |
242 | } |
243 | |
244 | if (end == start || *end != '\0') |
245 | { |
246 | argp_error (state, "%s: Invalid argument to --rdev", arg); |
247 | return EINVAL; |
248 | } |
249 | } |
250 | break; |
251 | |
252 | case ARGP_KEY_ARG: |
253 | stream_name = arg; |
254 | break; |
255 | |
256 | case ARGP_KEY_END: |
257 | if (stream_name == 0) |
258 | argp_usage (state); |
259 | break; |
260 | |
261 | default: |
262 | return ARGP_ERR_UNKNOWN; |
263 | } |
264 | return 0; |
265 | } |
266 | |
267 | static const struct argp argp = { options, parse_opt, args_doc, doc }; |
268 | |
269 | |
270 | int |
271 | demuxer (mach_msg_header_t *inp, mach_msg_header_t *outp) |
272 | { |
273 | extern int device_reply_server (mach_msg_header_t *, mach_msg_header_t *); |
274 | |
275 | return (trivfs_demuxer (inp, outp) |
276 | || device_reply_server (inp, outp)); |
277 | } |
278 | |
279 | int |
280 | main (int argc, char *argv[]) |
281 | { |
282 | error_t err; |
283 | mach_port_t bootstrap; |
284 | struct trivfs_control *fsys; |
285 | |
286 | argp_parse (&argp, argc, argv, 0, 0, 0); |
287 | |
288 | task_get_bootstrap_port (mach_task_self (), &bootstrap); |
289 | if (bootstrap == MACH_PORT_NULL) |
290 | error (2, 0, "Must be started as a translator"); |
291 | |
292 | streamdev_bucket = ports_create_bucket (); |
293 | |
294 | err = trivfs_startup (bootstrap, 0, |
295 | 0, streamdev_bucket, 0, streamdev_bucket, |
296 | &fsys); |
297 | if (err) |
298 | error (3, err, "trivfs_startup"); |
299 | |
300 | pthread_mutex_init (&global_lock, NULL); |
301 | |
302 | pthread_cond_init (&open_alert, NULL); |
303 | pthread_cond_init (&select_alert, NULL); |
304 | |
305 | if (trivfs_allow_open & O_READ) |
306 | input_buffer = create_buffer (256); |
307 | if (trivfs_allow_open & O_WRITE) |
308 | output_buffer = create_buffer (256); |
309 | |
310 | /* Launch */ |
311 | ports_manage_port_operations_multithread (streamdev_bucket, demuxer, |
312 | 0, 0, 0); |
313 | |
314 | return 0; |
315 | } |
316 | |
317 | |
318 | int trivfs_fstype = FSTYPE_DEV; |
319 | int trivfs_fsid = 0; |
320 | |
321 | int trivfs_support_read = 1; |
322 | int trivfs_support_write = 1; |
323 | int trivfs_support_exec = 0; |
324 | |
325 | int trivfs_allow_open = O_READ | O_WRITE; |
326 | |
327 | static error_t |
328 | open_hook (struct trivfs_control *cntl, struct iouser *user, int flags) |
329 | { |
330 | error_t err; |
331 | dev_mode_t mode; |
332 | |
333 | if (flags & O_WRITE & ~trivfs_allow_open) |
334 | return EROFS; |
335 | if (flags & O_READ & ~trivfs_allow_open) |
336 | return EIO; |
337 | |
338 | if ((flags & (O_READ|O_WRITE)) == 0) |
339 | return 0; |
340 | |
341 | /* XXX */ |
342 | if (flags & O_ASYNC) |
343 | return EOPNOTSUPP; |
344 | |
345 | pthread_mutex_lock (&global_lock); |
346 | |
347 | mode = 0; |
348 | if (flags & O_READ) |
349 | mode |= D_READ; |
350 | if (flags & O_WRITE) |
351 | mode |= D_WRITE; |
352 | |
353 | if (!dev_already_opened ()) |
354 | { |
355 | err = dev_open (stream_name, mode); |
356 | if (err) |
357 | { |
358 | pthread_mutex_unlock (&global_lock); |
359 | return err; |
360 | } |
361 | |
362 | if (!(flags & O_NONBLOCK)) |
363 | { |
364 | if (pthread_hurd_cond_wait_np (&open_alert, &global_lock)) |
365 | { |
366 | pthread_mutex_unlock (&global_lock); |
367 | return EINTR; |
368 | } |
369 | |
370 | if (!dev_already_opened ()) |
371 | { |
372 | pthread_mutex_unlock (&global_lock); |
373 | return ENODEV; |
374 | } |
375 | } |
376 | } |
377 | |
378 | pthread_mutex_unlock (&global_lock); |
379 | return 0; |
380 | } |
381 | |
382 | error_t (*trivfs_check_open_hook) (struct trivfs_control *, |
383 | struct iouser *, int) |
384 | = open_hook; |
385 | |
386 | static error_t |
387 | po_create_hook (struct trivfs_peropen *po) |
388 | { |
389 | pthread_mutex_lock (&global_lock); |
390 | nperopens++; |
391 | pthread_mutex_unlock (&global_lock); |
392 | return 0; |
393 | } |
394 | |
395 | error_t (*trivfs_peropen_create_hook) (struct trivfs_peropen *) = |
396 | po_create_hook; |
397 | |
398 | static void |
399 | po_destroy_hook (struct trivfs_peropen *po) |
400 | { |
401 | pthread_mutex_lock (&global_lock); |
402 | nperopens--; |
403 | if (!nperopens) |
404 | { |
405 | if (dev_already_opened ()) |
406 | { |
407 | clear_buffer (input_buffer); |
408 | dev_close (); |
409 | } |
410 | } |
411 | pthread_mutex_unlock (&global_lock); |
412 | } |
413 | |
414 | void (*trivfs_peropen_destroy_hook) (struct trivfs_peropen *) |
415 | = po_destroy_hook; |
416 | |
417 | void |
418 | trivfs_modify_stat (struct trivfs_protid *cred, struct stat *st) |
419 | { |
420 | st->st_blksize = vm_page_size; |
421 | st->st_size = 0; |
422 | |
423 | st->st_rdev = rdev; |
424 | st->st_mode &= ~S_IFMT; |
425 | st->st_mode |= S_IFCHR; |
426 | if ((trivfs_allow_open & O_READ) == 0) |
427 | st->st_mode &= ~(S_IRUSR | S_IRGRP | S_IROTH); |
428 | if ((trivfs_allow_open & O_WRITE) == 0) |
429 | st->st_mode &= ~(S_IWUSR | S_IWGRP | S_IWOTH); |
430 | } |
431 | |
432 | error_t |
433 | trivfs_goaway (struct trivfs_control *fsys, int flags) |
434 | { |
435 | error_t err; |
436 | int force = (flags & FSYS_GOAWAY_FORCE); |
437 | int nosync = (flags & FSYS_GOAWAY_NOSYNC); |
438 | struct port_class *root_port_class = fsys->protid_class; |
439 | |
440 | pthread_mutex_lock (&global_lock); |
441 | |
442 | if (!dev_already_opened ()) |
443 | exit (0); |
444 | |
445 | err = ports_inhibit_class_rpcs (root_port_class); |
446 | if (err == EINTR || (err && !force)) |
447 | { |
448 | pthread_mutex_unlock (&global_lock); |
449 | return err; |
450 | } |
451 | |
452 | if (force && nosync) |
453 | exit (0); |
454 | |
455 | if (!force && ports_count_class (root_port_class) > 0) |
456 | goto busy; |
457 | |
458 | if (!nosync) |
459 | dev_close (); |
460 | exit (0); |
461 | |
462 | busy: |
463 | ports_enable_class (root_port_class); |
464 | ports_resume_class_rpcs (root_port_class); |
465 | pthread_mutex_unlock (&global_lock); |
466 | |
467 | return EBUSY; |
468 | } |
469 | |
470 | |
471 | error_t |
472 | trivfs_S_io_read (struct trivfs_protid *cred, |
473 | mach_port_t reply, mach_msg_type_name_t reply_type, |
474 | char **data, mach_msg_type_number_t *data_len, |
475 | loff_t offs, mach_msg_type_number_t amount) |
476 | { |
477 | error_t err; |
478 | |
479 | if (!cred) |
480 | return EOPNOTSUPP; |
481 | |
482 | if (!(cred->po->openmodes & O_READ)) |
483 | return EBADF; |
484 | |
485 | pthread_mutex_lock (&global_lock); |
486 | err = dev_read (amount, (void **)data, data_len, cred->po->openmodes & O_NONBLOCK); |
487 | pthread_mutex_unlock (&global_lock); |
488 | return err; |
489 | } |
490 | |
491 | error_t |
492 | trivfs_S_io_readable (struct trivfs_protid *cred, |
493 | mach_port_t reply, mach_msg_type_name_t reply_type, |
494 | mach_msg_type_number_t *amount) |
495 | { |
496 | error_t err; |
497 | |
498 | if (!cred) |
499 | return EOPNOTSUPP; |
500 | |
501 | if (!(cred->po->openmodes & O_READ)) |
502 | return EBADF; |
503 | |
504 | pthread_mutex_lock (&global_lock); |
505 | err = dev_readable (amount); |
506 | pthread_mutex_unlock (&global_lock); |
507 | return err; |
508 | } |
509 | |
510 | error_t |
511 | trivfs_S_io_write (struct trivfs_protid *cred, |
512 | mach_port_t reply, mach_msg_type_name_t reply_type, |
513 | char *data, mach_msg_type_number_t data_len, |
514 | loff_t offs, mach_msg_type_number_t *amount) |
515 | { |
516 | error_t err; |
517 | |
518 | if (!cred) |
519 | return EOPNOTSUPP; |
520 | |
521 | if (!(cred->po->openmodes & O_WRITE)) |
522 | return EBADF; |
523 | |
524 | pthread_mutex_lock (&global_lock); |
525 | err = dev_write ((void *)data, data_len, amount, cred->po->openmodes & O_NONBLOCK); |
526 | pthread_mutex_unlock (&global_lock); |
527 | return err; |
528 | } |
529 | |
530 | error_t |
531 | trivfs_S_io_seek (struct trivfs_protid *cred, |
532 | mach_port_t reply, mach_msg_type_name_t reply_type, |
533 | off_t offs, int whence, off_t *new_offs) |
534 | { |
535 | if (!cred) |
536 | return EOPNOTSUPP; |
537 | else |
538 | return ESPIPE; |
539 | } |
540 | |
541 | static error_t |
542 | io_select_common (struct trivfs_protid *cred, |
543 | mach_port_t reply, mach_msg_type_name_t reply_type, |
544 | struct timespec *tsp, |
545 | int *type) |
546 | { |
547 | int available; |
548 | error_t err; |
549 | |
550 | if (!cred) |
551 | return EOPNOTSUPP; |
552 | |
553 | if (!(cred->po->openmodes & O_WRITE) && (*type & SELECT_WRITE)) |
554 | return EBADF; |
555 | |
556 | *type &= SELECT_READ | SELECT_WRITE; |
557 | |
558 | if (*type == 0) |
559 | return 0; |
560 | |
561 | available = 0; |
562 | |
563 | while (1) |
564 | { |
565 | pthread_mutex_lock (&global_lock); |
566 | if ((*type & SELECT_READ) && buffer_readable (input_buffer)) |
567 | available |= SELECT_READ; |
568 | if (output_buffer) |
569 | { |
570 | if ((*type & SELECT_WRITE) && buffer_writable (output_buffer)) |
571 | available |= SELECT_WRITE; |
572 | } |
573 | |
574 | if (available) |
575 | { |
576 | *type = available; |
577 | pthread_mutex_unlock (&global_lock); |
578 | return 0; |
579 | } |
580 | |
581 | if (cred->po->openmodes & O_NONBLOCK) |
582 | { |
583 | pthread_mutex_unlock (&global_lock); |
584 | return EWOULDBLOCK; |
585 | } |
586 | |
587 | ports_interrupt_self_on_port_death (cred, reply); |
588 | err = pthread_hurd_cond_timedwait_np (&select_alert, &global_lock, tsp); |
589 | if (err) |
590 | { |
591 | *type = 0; |
592 | pthread_mutex_unlock (&global_lock); |
593 | |
594 | if (err == ETIMEDOUT) |
595 | err = 0; |
596 | |
597 | return err; |
598 | } |
599 | } |
600 | } |
601 | |
602 | error_t |
603 | trivfs_S_io_select (struct trivfs_protid *cred, |
604 | mach_port_t reply, mach_msg_type_name_t reply_type, |
605 | int *type) |
606 | { |
607 | return io_select_common (cred, reply, reply_type, NULL, type); |
608 | } |
609 | |
610 | error_t |
611 | trivfs_S_io_select_timeout (struct trivfs_protid *cred, |
612 | mach_port_t reply, mach_msg_type_name_t reply_type, |
613 | struct timespec ts, |
614 | int *type) |
615 | { |
616 | return io_select_common (cred, reply, reply_type, &ts, type); |
617 | } |
618 | |
619 | error_t |
620 | trivfs_S_file_set_size (struct trivfs_protid *cred, |
621 | mach_port_t reply, mach_msg_type_name_t reply_type, |
622 | off_t size) |
623 | { |
624 | if (!cred) |
625 | return EOPNOTSUPP; |
626 | else if (!(cred->po->openmodes & O_WRITE)) |
627 | return EBADF; |
628 | else if (size < 0) |
629 | return EINVAL; |
630 | else |
631 | return 0; |
632 | } |
633 | |
634 | error_t |
635 | trivfs_S_io_get_openmodes (struct trivfs_protid *cred, |
636 | mach_port_t reply, mach_msg_type_name_t reply_type, |
637 | int *bits) |
638 | { |
639 | if (! cred) |
640 | return EOPNOTSUPP; |
641 | else |
642 | { |
643 | *bits = cred->po->openmodes; |
644 | return 0; |
645 | } |
646 | } |
647 | |
648 | error_t |
649 | trivfs_S_io_set_all_openmodes (struct trivfs_protid *cred, |
650 | mach_port_t reply, |
651 | mach_msg_type_name_t reply_type, |
652 | int mode) |
653 | { |
654 | if (! cred) |
655 | return EOPNOTSUPP; |
656 | else |
657 | return 0; |
658 | } |
659 | |
660 | error_t |
661 | trivfs_S_io_set_some_openmodes (struct trivfs_protid *cred, |
662 | mach_port_t reply, |
663 | mach_msg_type_name_t reply_type, |
664 | int bits) |
665 | { |
666 | if (! cred) |
667 | return EOPNOTSUPP; |
668 | else |
669 | return 0; |
670 | } |
671 | |
672 | error_t |
673 | trivfs_S_io_clear_some_openmodes (struct trivfs_protid *cred, |
674 | mach_port_t reply, |
675 | mach_msg_type_name_t reply_type, |
676 | int bits) |
677 | { |
678 | if (! cred) |
679 | return EOPNOTSUPP; |
680 | else |
681 | return 0; |
682 | } |
683 | |
684 | error_t |
685 | trivfs_S_file_sync (struct trivfs_protid *cred, |
686 | mach_port_t reply, mach_msg_type_name_t reply_type, |
687 | int wait, int omit_metadata) |
688 | { |
689 | error_t err; |
690 | |
691 | if (!cred) |
692 | return EOPNOTSUPP; |
693 | |
694 | pthread_mutex_lock (&global_lock); |
695 | err = dev_sync (wait); |
696 | pthread_mutex_unlock (&global_lock); |
697 | return err; |
698 | } |
699 | |
700 | error_t |
701 | trivfs_S_file_syncfs (struct trivfs_protid *cred, |
702 | mach_port_t reply, mach_msg_type_name_t reply_type, |
703 | int wait, int dochildren) |
704 | { |
705 | error_t err; |
706 | |
707 | if (!cred) |
708 | return EOPNOTSUPP; |
709 | |
710 | pthread_mutex_lock (&global_lock); |
711 | err = dev_sync (wait); |
712 | pthread_mutex_unlock (&global_lock); |
713 | return err; |
714 | } |
715 | |
716 | |
717 | /* This flag is set if there is an outstanding device_write. */ |
718 | static int output_pending; |
719 | |
720 | /* This flag is set if there is an outstanding device_read. */ |
721 | static int input_pending; |
722 | |
723 | /* This flag is set if there is an outstanding device_open. */ |
724 | static int open_pending; |
725 | |
726 | static char pending_output[IO_INBAND_MAX]; |
727 | static int npending_output; |
728 | |
729 | /* This flag is set if EOF is returned. */ |
730 | static int eof; |
731 | |
732 | /* The error number. */ |
733 | static error_t err; |
734 | |
735 | static struct port_class *phys_reply_class; |
736 | |
737 | /* The Mach device_t representing the stream. */ |
738 | static device_t phys_device = MACH_PORT_NULL; |
739 | |
740 | /* The ports we get replies on for device calls. */ |
741 | static mach_port_t phys_reply_writes = MACH_PORT_NULL; |
742 | static mach_port_t phys_reply = MACH_PORT_NULL; |
743 | |
744 | /* The port-info structures. */ |
745 | static struct port_info *phys_reply_writes_pi; |
746 | static struct port_info *phys_reply_pi; |
747 | |
748 | static device_t device_master; |
749 | |
750 | /* The block size and whole size of the device. */ |
751 | static size_t dev_blksize; |
752 | static size_t dev_size; |
753 | |
754 | |
755 | /* Open a new device structure for the device NAME with MODE. If an error |
756 | occurs, the error code is returned, otherwise 0. */ |
757 | /* Be careful that the global lock is already locked. */ |
758 | error_t |
759 | dev_open (const char *name, dev_mode_t mode) |
760 | { |
761 | if (open_pending || (phys_device != MACH_PORT_NULL)) |
762 | return 0; |
763 | |
764 | err = get_privileged_ports (0, &device_master); |
765 | if (err) |
766 | return err; |
767 | |
768 | phys_reply_class = ports_create_class (0, 0); |
769 | err = ports_create_port (phys_reply_class, streamdev_bucket, |
770 | sizeof (struct port_info), &phys_reply_pi); |
771 | if (err) |
772 | { |
773 | mach_port_deallocate (mach_task_self (), device_master); |
774 | return err; |
775 | } |
776 | |
777 | phys_reply = ports_get_right (phys_reply_pi); |
778 | mach_port_insert_right (mach_task_self (), phys_reply, phys_reply, |
779 | MACH_MSG_TYPE_MAKE_SEND); |
780 | |
781 | if (output_buffer) |
782 | { |
783 | err = ports_create_port (phys_reply_class, streamdev_bucket, |
784 | sizeof (struct port_info), |
785 | &phys_reply_writes_pi); |
786 | if (err) |
787 | { |
788 | mach_port_deallocate (mach_task_self (), phys_reply); |
789 | phys_reply = MACH_PORT_NULL; |
790 | ports_port_deref (phys_reply_pi); |
791 | phys_reply_pi = 0; |
792 | mach_port_deallocate (mach_task_self (), device_master); |
793 | return err; |
794 | } |
795 | |
796 | phys_reply_writes = ports_get_right (phys_reply_writes_pi); |
797 | mach_port_insert_right (mach_task_self (), phys_reply_writes, |
798 | phys_reply_writes, MACH_MSG_TYPE_MAKE_SEND); |
799 | } |
800 | |
801 | err = device_open_request (device_master, phys_reply, mode, name); |
802 | if (err) |
803 | { |
804 | mach_port_deallocate (mach_task_self (), phys_reply); |
805 | phys_reply = MACH_PORT_NULL; |
806 | ports_port_deref (phys_reply_pi); |
807 | phys_reply_pi = 0; |
808 | if (output_buffer) |
809 | { |
810 | mach_port_deallocate (mach_task_self (), phys_reply_writes); |
811 | phys_reply_writes = MACH_PORT_NULL; |
812 | ports_port_deref (phys_reply_writes_pi); |
813 | phys_reply_writes_pi = 0; |
814 | } |
815 | mach_port_deallocate (mach_task_self (), device_master); |
816 | return err; |
817 | } |
818 | |
819 | open_pending = 1; |
820 | return 0; |
821 | } |
822 | |
823 | kern_return_t |
824 | device_open_reply (mach_port_t reply, int returncode, mach_port_t device) |
825 | { |
826 | int sizes[DEV_GET_SIZE_COUNT]; |
827 | size_t sizes_len = DEV_GET_SIZE_COUNT; |
828 | int amount; |
829 | |
830 | if (reply != phys_reply) |
831 | return EOPNOTSUPP; |
832 | |
833 | pthread_mutex_lock (&global_lock); |
834 | |
835 | open_pending = 0; |
836 | pthread_cond_broadcast (&open_alert); |
837 | |
838 | if (returncode != 0) |
839 | { |
840 | dev_close (); |
841 | pthread_mutex_unlock (&global_lock); |
842 | return 0; |
843 | } |
844 | |
845 | phys_device = device; |
846 | eof = 0; |
847 | |
848 | /* Get the block size and the whole size. */ |
849 | err = device_get_status (device, DEV_GET_SIZE, sizes, &sizes_len); |
850 | if (err == D_INVALID_OPERATION) |
851 | { |
852 | /* XXX Assume that the block size is 1 and the whole size is 0. */ |
853 | dev_blksize = 1; |
854 | dev_size = 0; |
855 | err = 0; |
856 | } |
857 | else if (err == 0) |
858 | { |
859 | assert (sizes_len == DEV_GET_SIZE_COUNT); |
860 | |
861 | dev_blksize = sizes[DEV_GET_SIZE_RECORD_SIZE]; |
862 | dev_size = sizes[DEV_GET_SIZE_DEVICE_SIZE]; |
863 | |
864 | assert (dev_blksize && dev_blksize <= IO_INBAND_MAX); |
865 | } |
866 | else |
867 | { |
868 | dev_close (); |
869 | pthread_mutex_unlock (&global_lock); |
870 | return 0; |
871 | } |
872 | |
873 | amount = vm_page_size; |
874 | if (dev_blksize != 1) |
875 | amount = amount / dev_blksize * dev_blksize; |
Value stored to 'amount' is never read | |
876 | |
877 | pthread_mutex_unlock (&global_lock); |
878 | return 0; |
879 | } |
880 | |
881 | /* Check if the device is already opened. */ |
882 | /* Be careful that the global lock is already locked. */ |
883 | int |
884 | dev_already_opened (void) |
885 | { |
886 | return (phys_device != MACH_PORT_NULL); |
887 | } |
888 | |
889 | /* Close the device. */ |
890 | /* Be careful that the global lock is already locked. */ |
891 | void |
892 | dev_close (void) |
893 | { |
894 | /* Sync all pending writes. */ |
895 | dev_sync (1); |
896 | |
897 | device_close (phys_device); |
898 | mach_port_deallocate (mach_task_self (), phys_device); |
899 | phys_device = MACH_PORT_NULL; |
900 | |
901 | mach_port_deallocate (mach_task_self (), phys_reply); |
902 | phys_reply = MACH_PORT_NULL; |
903 | ports_port_deref (phys_reply_pi); |
904 | phys_reply_pi = 0; |
905 | clear_buffer (input_buffer); |
906 | input_pending = 0; |
907 | |
908 | if (output_buffer) |
909 | { |
910 | mach_port_deallocate (mach_task_self (), phys_reply_writes); |
911 | phys_reply_writes = MACH_PORT_NULL; |
912 | ports_port_deref (phys_reply_writes_pi); |
913 | phys_reply_writes_pi = 0; |
914 | clear_buffer (output_buffer); |
915 | npending_output = 0; |
916 | output_pending = 0; |
917 | } |
918 | } |
919 | |
920 | /* Be careful that the global lock is already locked. */ |
921 | static error_t |
922 | start_input (int nowait) |
923 | { |
924 | int size; |
925 | error_t err; |
926 | size_t amount; |
927 | |
928 | size = buffer_writable (input_buffer); |
929 | |
930 | if (size < dev_blksize || input_pending) |
931 | return 0; |
932 | |
933 | amount = vm_page_size; |
934 | if (dev_blksize != 1) |
935 | amount = amount / dev_blksize * dev_blksize; |
936 | |
937 | err = device_read_request_inband (phys_device, phys_reply, |
938 | nowait? D_NOWAIT : 0, |
939 | 0, amount); |
940 | if (err == D_WOULD_BLOCK) |
941 | err = 0; |
942 | if (err) |
943 | dev_close (); |
944 | else |
945 | input_pending = 1; |
946 | |
947 | return err; |
948 | } |
949 | |
950 | /* Read up to AMOUNT bytes, returned in BUF and LEN. If NOWAIT is non-zero |
951 | and the buffer is empty, then returns EWOULDBLOCK. If an error occurs, |
952 | the error code is returned, otherwise 0. */ |
953 | /* Be careful that the global lock is already locked. */ |
954 | error_t |
955 | dev_read (size_t amount, void **buf, size_t *len, int nowait) |
956 | { |
957 | size_t max, avail; |
958 | |
959 | if (err) |
960 | return err; |
961 | |
962 | while (!buffer_readable (input_buffer)) |
963 | { |
964 | err = start_input (nowait); |
965 | if (err) |
966 | return err; |
967 | |
968 | if (eof) |
969 | { |
970 | *len = 0; |
971 | return 0; |
972 | } |
973 | |
974 | if (nowait) |
975 | return EWOULDBLOCK; |
976 | |
977 | if (pthread_hurd_cond_wait_np (input_buffer->wait, &global_lock)) |
978 | return EINTR; |
979 | } |
980 | |
981 | avail = buffer_size (input_buffer); |
982 | max = (amount < avail) ? amount : avail; |
983 | if (max > *len) |
984 | vm_allocate (mach_task_self (), (vm_address_t *)buf, max, 1); |
985 | |
986 | *len = buffer_read (input_buffer, *buf, max); |
987 | assert (*len == max); |
988 | |
989 | err = start_input (nowait); |
990 | return err; |
991 | } |
992 | |
993 | error_t |
994 | device_read_reply_inband (mach_port_t reply, error_t errorcode, |
995 | char *data, u_int datalen) |
996 | { |
997 | if (reply != phys_reply) |
998 | return EOPNOTSUPP; |
999 | |
1000 | pthread_mutex_lock (&global_lock); |
1001 | |
1002 | input_pending = 0; |
1003 | err = errorcode; |
1004 | if (!err) |
1005 | { |
1006 | if (datalen == 0) |
1007 | { |
1008 | eof = 1; |
1009 | dev_close (); |
1010 | pthread_mutex_unlock (&global_lock); |
1011 | return 0; |
1012 | } |
1013 | |
1014 | while (datalen) |
1015 | { |
1016 | size_t nwritten; |
1017 | |
1018 | while (!buffer_writable (input_buffer)) |
1019 | pthread_cond_wait (input_buffer->wait, &global_lock); |
1020 | |
1021 | nwritten = buffer_write (input_buffer, data, datalen); |
1022 | data += nwritten; |
1023 | datalen -= nwritten; |
1024 | pthread_cond_broadcast (input_buffer->wait); |
1025 | pthread_cond_broadcast (&select_alert); |
1026 | } |
1027 | } |
1028 | else |
1029 | { |
1030 | dev_close (); |
1031 | pthread_mutex_unlock (&global_lock); |
1032 | return 0; |
1033 | } |
1034 | pthread_mutex_unlock (&global_lock); |
1035 | return 0; |
1036 | } |
1037 | |
1038 | /* Return current readable size in AMOUNT. If an error occurs, the error |
1039 | code is returned, otherwise 0. */ |
1040 | /* Be careful that the global lock is already locked. */ |
1041 | error_t |
1042 | dev_readable (size_t *amount) |
1043 | { |
1044 | *amount = buffer_size (input_buffer); |
1045 | return 0; |
1046 | } |
1047 | |
1048 | /* Be careful that the global lock is already locked. */ |
1049 | static error_t |
1050 | start_output (int nowait) |
1051 | { |
1052 | int size; |
1053 | |
1054 | assert (output_buffer); |
1055 | |
1056 | size = buffer_size (output_buffer); |
1057 | |
1058 | if (size < dev_blksize || output_pending) |
1059 | return 0; |
1060 | |
1061 | if (size + npending_output > IO_INBAND_MAX) |
1062 | size = IO_INBAND_MAX - npending_output; |
1063 | |
1064 | if (dev_blksize != 1) |
1065 | size = size / dev_blksize * dev_blksize; |
1066 | |
1067 | buffer_read (output_buffer, pending_output + npending_output, size); |
1068 | npending_output += size; |
1069 | |
1070 | err = device_write_request_inband (phys_device, phys_reply_writes, |
1071 | nowait? D_NOWAIT : 0, |
1072 | 0, pending_output, npending_output); |
1073 | if (err == D_WOULD_BLOCK) |
1074 | err = 0; |
1075 | if (err) |
1076 | dev_close (); |
1077 | else |
1078 | output_pending = 1; |
1079 | |
1080 | return err; |
1081 | } |
1082 | |
1083 | /* Write LEN bytes from BUF, returning the amount actually written |
1084 | in AMOUNT. If NOWAIT is non-zero and the buffer is full, then returns |
1085 | EWOULDBLOCK. If an error occurs, the error code is returned, |
1086 | otherwise 0. */ |
1087 | /* Be careful that the global lock is already locked. */ |
1088 | error_t |
1089 | dev_write (void *buf, size_t len, size_t *amount, int nowait) |
1090 | { |
1091 | if (err) |
1092 | return err; |
1093 | |
1094 | while (!buffer_writable (output_buffer)) |
1095 | { |
1096 | err = start_output (nowait); |
1097 | if (err) |
1098 | return err; |
1099 | |
1100 | if (nowait) |
1101 | return EWOULDBLOCK; |
1102 | |
1103 | if (pthread_hurd_cond_wait_np (output_buffer->wait, &global_lock)) |
1104 | return EINTR; |
1105 | } |
1106 | |
1107 | *amount = buffer_write (output_buffer, buf, len); |
1108 | err = start_output (nowait); |
1109 | |
1110 | return err; |
1111 | } |
1112 | |
1113 | error_t |
1114 | device_write_reply_inband (mach_port_t reply, error_t returncode, int amount) |
1115 | { |
1116 | if (reply != phys_reply_writes) |
1117 | return EOPNOTSUPP; |
1118 | |
1119 | pthread_mutex_lock (&global_lock); |
1120 | |
1121 | output_pending = 0; |
1122 | |
1123 | if (!returncode) |
1124 | { |
1125 | if (amount >= npending_output) |
1126 | { |
1127 | npending_output = 0; |
1128 | pthread_cond_broadcast (output_buffer->wait); |
1129 | pthread_cond_broadcast (&select_alert); |
1130 | } |
1131 | else |
1132 | { |
1133 | npending_output -= amount; |
1134 | memmove (pending_output, pending_output + amount, npending_output); |
1135 | } |
1136 | } |
1137 | else |
1138 | dev_close (); |
1139 | |
1140 | pthread_mutex_unlock (&global_lock); |
1141 | return 0; |
1142 | } |
1143 | |
1144 | /* Try and write out any pending writes to the device. If WAIT is non-zero, |
1145 | will wait for any activity to cease. */ |
1146 | /* Be careful that the global lock is already locked. */ |
1147 | error_t |
1148 | dev_sync (int wait) |
1149 | { |
1150 | if (err) |
1151 | return err; |
1152 | |
1153 | if (!output_buffer || phys_device == MACH_PORT_NULL) |
1154 | return 0; |
1155 | |
1156 | while (buffer_readable (output_buffer) >= dev_blksize) |
1157 | { |
1158 | err = start_output (! wait); |
1159 | if (err) |
1160 | return err; |
1161 | |
1162 | if (!wait) |
1163 | return 0; |
1164 | |
1165 | if (pthread_hurd_cond_wait_np (output_buffer->wait, &global_lock)) |
1166 | return EINTR; |
1167 | } |
1168 | |
1169 | /* XXX: When the size of output_buffer is non-zero and less than |
1170 | DEV_BLKSIZE, the rest will be ignored or discarded. */ |
1171 | return 0; |
1172 | } |
1173 | |
1174 | /* Unused stubs. */ |
1175 | kern_return_t |
1176 | device_read_reply (mach_port_t reply, kern_return_t returncode, |
1177 | io_buf_ptr_t data, mach_msg_type_number_t amount) |
1178 | { |
1179 | return EOPNOTSUPP; |
1180 | } |
1181 | |
1182 | kern_return_t |
1183 | device_write_reply (mach_port_t reply, kern_return_t returncode, int amount) |
1184 | { |
1185 | return EOPNOTSUPP; |
1186 | } |