File: | obj-scan-build/trans/../../trans/streamio.c |
Location: | line 878, column 5 |
Description: | Value stored to 'amount' is never read |
1 | /* A translator for handling stream devices. |
2 | |
3 | Copyright (C) 2001,02 Free Software Foundation, Inc. |
4 | |
5 | Written by OKUJI Yoshinori <okuji@kuicr.kyoto-u.ac.jp> |
6 | |
7 | This program is free software; you can redistribute it and/or |
8 | modify it under the terms of the GNU General Public License as |
9 | published by the Free Software Foundation; either version 2, or (at |
10 | your option) any later version. |
11 | |
12 | This program is distributed in the hope that it will be useful, but |
13 | WITHOUT ANY WARRANTY; without even the implied warranty of |
14 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
15 | General Public License for more details. |
16 | |
17 | You should have received a copy of the GNU General Public License |
18 | along with this program; if not, write to the Free Software |
19 | Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ |
20 | |
21 | #include <string.h> |
22 | #include <assert.h> |
23 | #include <stdio.h> |
24 | #include <fcntl.h> |
25 | #include <argp.h> |
26 | #include <error.h> |
27 | |
28 | #include <mach.h> |
29 | #include <device/device.h> |
30 | #include <device/device_request.h> |
31 | |
32 | #include <hurd.h> |
33 | #include <hurd/ports.h> |
34 | #include <hurd/trivfs.h> |
35 | #include <version.h> |
36 | |
37 | #include "libtrivfs/trivfs_fs_S.h" |
38 | #include "libtrivfs/trivfs_io_S.h" |
39 | |
40 | /* The global lock */ |
41 | pthread_mutex_t global_lock; |
42 | |
43 | /* Wakeup when device_open is finished */ |
44 | pthread_cond_t open_alert; |
45 | |
46 | /* Wakeup for select */ |
47 | pthread_cond_t select_alert; |
48 | |
49 | /* Bucket for all out ports */ |
50 | struct port_bucket *streamdev_bucket; |
51 | |
52 | /* The buffers we use */ |
53 | struct buffer *input_buffer, *output_buffer; |
54 | |
55 | |
56 | /* Information about a buffer. */ |
57 | struct buffer |
58 | { |
59 | /* Point to the head of the buffer. */ |
60 | char *head; |
61 | /* Point to the tail of the buffer. */ |
62 | char *tail; |
63 | /* The buffer array size. */ |
64 | size_t size; |
65 | /* Wakeup when the buffer is not empty or not full. */ |
66 | pthread_cond_t *wait; |
67 | /* The buffer. */ |
68 | char buf[0]; |
69 | }; |
70 | |
71 | /* Create a new buffer structure with SIZE, returning the pointer. */ |
72 | static inline struct buffer * |
73 | create_buffer (size_t size) |
74 | { |
75 | struct buffer *new = malloc (sizeof (struct buffer) + size); |
76 | assert (new); |
77 | new->head = new->tail = new->buf; |
78 | new->size = size; |
79 | new->wait = malloc (sizeof (pthread_cond_t)); |
80 | assert (new->wait); |
81 | pthread_cond_init (new->wait, NULL); |
82 | return new; |
83 | } |
84 | |
85 | /* Return the size of B. */ |
86 | static inline size_t |
87 | buffer_size (struct buffer *b) |
88 | { |
89 | return b->tail - b->head; |
90 | } |
91 | |
92 | /* Return how many characters can be read from B. */ |
93 | static inline size_t |
94 | buffer_readable (struct buffer *b) |
95 | { |
96 | return buffer_size (b); |
97 | } |
98 | |
99 | /* Return how many characters can be written to B. */ |
100 | static inline size_t |
101 | buffer_writable (struct buffer *b) |
102 | { |
103 | return b->size - buffer_size (b); |
104 | } |
105 | |
106 | /* Flush B. */ |
107 | static inline void |
108 | clear_buffer (struct buffer *b) |
109 | { |
110 | if (b == 0) |
111 | return; |
112 | b->head = b->tail = b->buf; |
113 | pthread_cond_broadcast (b->wait); |
114 | pthread_cond_broadcast (&select_alert); |
115 | } |
116 | |
117 | /* Read up to LEN bytes from B to DATA, returning the amount actually read. */ |
118 | static inline size_t |
119 | buffer_read (struct buffer *b, void *data, size_t len) |
120 | { |
121 | size_t max = buffer_size (b); |
122 | |
123 | if (len > max) |
124 | len = max; |
125 | |
126 | memcpy (data, b->head, len); |
127 | b->head += len; |
128 | |
129 | if (b->head > b->buf + b->size / 2) |
130 | { |
131 | size_t size = buffer_size (b); |
132 | |
133 | memmove (b->buf, b->head, size); |
134 | b->head = b->buf; |
135 | b->tail = b->buf + size; |
136 | } |
137 | |
138 | pthread_cond_broadcast (b->wait); |
139 | pthread_cond_broadcast (&select_alert); |
140 | return len; |
141 | } |
142 | |
143 | /* Write LEN bytes from DATA to B, returning the amount actually written. */ |
144 | static inline size_t |
145 | buffer_write (struct buffer *b, void *data, size_t len) |
146 | { |
147 | size_t size = buffer_writable (b); |
148 | |
149 | if (len > size) |
150 | len = size; |
151 | |
152 | memcpy (b->tail, data, len); |
153 | b->tail += len; |
154 | |
155 | pthread_cond_broadcast (b->wait); |
156 | pthread_cond_broadcast (&select_alert); |
157 | return len; |
158 | } |
159 | |
160 | |
161 | /* Open a new device structure for the device NAME with MODE. If an error |
162 | occurs, the error code is returned, otherwise 0. */ |
163 | error_t dev_open (const char *name, dev_mode_t mode); |
164 | |
165 | /* Check if the device is already opened. */ |
166 | int dev_already_opened (void); |
167 | |
168 | /* Close the device. */ |
169 | void dev_close (void); |
170 | |
171 | /* Read up to AMOUNT bytes, returned in BUF and LEN. If NOWAIT is non-zero |
172 | and the buffer is empty, then returns EWOULDBLOCK. If an error occurs, |
173 | the error code is returned, otherwise 0. */ |
174 | error_t dev_read (size_t amount, void **buf, size_t *len, int nowait); |
175 | |
176 | /* Return current readable size in AMOUNT. If an error occurs, the error |
177 | code is returned, otherwise 0. */ |
178 | error_t dev_readable (size_t *amount); |
179 | |
180 | /* Write LEN bytes from BUF, returning the amount actually written |
181 | in AMOUNT. If NOWAIT is non-zero and the buffer is full, then returns |
182 | EWOULDBLOCK. If an error occurs, the error code is returned, |
183 | otherwise 0. */ |
184 | error_t dev_write (void *buf, size_t len, size_t *amount, int nowait); |
185 | |
186 | /* Try and write out any pending writes to the device. If WAIT is non-zero, |
187 | will wait for any activity to cease. */ |
188 | error_t dev_sync (int wait); |
189 | |
190 | |
191 | |
192 | static struct argp_option options[] = |
193 | { |
194 | {"rdev", 'n', "ID", 0, |
195 | "The stat rdev number for this node; may be either a" |
196 | " single integer, or of the form MAJOR,MINOR"}, |
197 | {"readonly", 'r', 0, 0, "Disallow writing"}, |
198 | {"rdonly", 0, 0, OPTION_ALIAS | OPTION_HIDDEN}, |
199 | {"ro", 0, 0, OPTION_ALIAS | OPTION_HIDDEN}, |
200 | {"writable", 'w', 0, 0, "Allow writing"}, |
201 | {"rdwr", 0, 0, OPTION_ALIAS | OPTION_HIDDEN}, |
202 | {"rw", 0, 0, OPTION_ALIAS | OPTION_HIDDEN}, |
203 | {"writeonly", 'W',0, 0, "Disallow reading"}, |
204 | {"wronly", 0, 0, OPTION_ALIAS | OPTION_HIDDEN}, |
205 | {0} |
206 | }; |
207 | |
208 | static const char args_doc[] = "DEVICE"; |
209 | static const char doc[] = "Translator for stream devices."; |
210 | |
211 | const char *argp_program_version = STANDARD_HURD_VERSION (streamio); |
212 | |
213 | |
214 | static char *stream_name; |
215 | static int rdev; |
216 | static int nperopens; |
217 | |
218 | /* Parse a single option. */ |
219 | static error_t |
220 | parse_opt (int key, char *arg, struct argp_state *state) |
221 | { |
222 | switch (key) |
223 | { |
224 | case 'r': |
225 | trivfs_allow_open = O_READ; |
226 | break; |
227 | case 'w': |
228 | trivfs_allow_open = O_RDWR; |
229 | break; |
230 | case 'W': |
231 | trivfs_allow_open = O_WRITE; |
232 | break; |
233 | |
234 | case 'n': |
235 | { |
236 | char *start = arg; |
237 | char *end; |
238 | |
239 | rdev = strtoul (start, &end, 0); |
240 | if (*end == ',') |
241 | /* MAJOR,MINOR form */ |
242 | { |
243 | start = end + 1; |
244 | rdev = (rdev << 8) + strtoul (start, &end, 0); |
245 | } |
246 | |
247 | if (end == start || *end != '\0') |
248 | { |
249 | argp_error (state, "%s: Invalid argument to --rdev", arg); |
250 | return EINVAL; |
251 | } |
252 | } |
253 | break; |
254 | |
255 | case ARGP_KEY_ARG: |
256 | stream_name = arg; |
257 | break; |
258 | |
259 | case ARGP_KEY_END: |
260 | if (stream_name == 0) |
261 | argp_usage (state); |
262 | break; |
263 | |
264 | default: |
265 | return ARGP_ERR_UNKNOWN; |
266 | } |
267 | return 0; |
268 | } |
269 | |
270 | static const struct argp argp = { options, parse_opt, args_doc, doc }; |
271 | |
272 | |
273 | int |
274 | demuxer (mach_msg_header_t *inp, mach_msg_header_t *outp) |
275 | { |
276 | extern int device_reply_server (mach_msg_header_t *, mach_msg_header_t *); |
277 | |
278 | return (trivfs_demuxer (inp, outp) |
279 | || device_reply_server (inp, outp)); |
280 | } |
281 | |
282 | int |
283 | main (int argc, char *argv[]) |
284 | { |
285 | error_t err; |
286 | mach_port_t bootstrap; |
287 | struct trivfs_control *fsys; |
288 | |
289 | argp_parse (&argp, argc, argv, 0, 0, 0); |
290 | |
291 | task_get_bootstrap_port (mach_task_self (), &bootstrap); |
292 | if (bootstrap == MACH_PORT_NULL) |
293 | error (2, 0, "Must be started as a translator"); |
294 | |
295 | streamdev_bucket = ports_create_bucket (); |
296 | |
297 | err = trivfs_startup (bootstrap, 0, |
298 | 0, streamdev_bucket, 0, streamdev_bucket, |
299 | &fsys); |
300 | if (err) |
301 | error (3, err, "trivfs_startup"); |
302 | |
303 | pthread_mutex_init (&global_lock, NULL); |
304 | |
305 | pthread_cond_init (&open_alert, NULL); |
306 | pthread_cond_init (&select_alert, NULL); |
307 | |
308 | if (trivfs_allow_open & O_READ) |
309 | input_buffer = create_buffer (256); |
310 | if (trivfs_allow_open & O_WRITE) |
311 | output_buffer = create_buffer (256); |
312 | |
313 | /* Launch */ |
314 | ports_manage_port_operations_multithread (streamdev_bucket, demuxer, |
315 | 0, 0, 0); |
316 | |
317 | return 0; |
318 | } |
319 | |
320 | |
321 | int trivfs_fstype = FSTYPE_DEV; |
322 | int trivfs_fsid = 0; |
323 | |
324 | int trivfs_support_read = 1; |
325 | int trivfs_support_write = 1; |
326 | int trivfs_support_exec = 0; |
327 | |
328 | int trivfs_allow_open = O_READ | O_WRITE; |
329 | |
330 | static error_t |
331 | open_hook (struct trivfs_control *cntl, struct iouser *user, int flags) |
332 | { |
333 | error_t err; |
334 | dev_mode_t mode; |
335 | |
336 | if (flags & O_WRITE & ~trivfs_allow_open) |
337 | return EROFS; |
338 | if (flags & O_READ & ~trivfs_allow_open) |
339 | return EIO; |
340 | |
341 | if ((flags & (O_READ|O_WRITE)) == 0) |
342 | return 0; |
343 | |
344 | /* XXX */ |
345 | if (flags & O_ASYNC) |
346 | return EOPNOTSUPP; |
347 | |
348 | pthread_mutex_lock (&global_lock); |
349 | |
350 | mode = 0; |
351 | if (flags & O_READ) |
352 | mode |= D_READ; |
353 | if (flags & O_WRITE) |
354 | mode |= D_WRITE; |
355 | |
356 | if (!dev_already_opened ()) |
357 | { |
358 | err = dev_open (stream_name, mode); |
359 | if (err) |
360 | { |
361 | pthread_mutex_unlock (&global_lock); |
362 | return err; |
363 | } |
364 | |
365 | if (!(flags & O_NONBLOCK)) |
366 | { |
367 | if (pthread_hurd_cond_wait_np (&open_alert, &global_lock)) |
368 | { |
369 | pthread_mutex_unlock (&global_lock); |
370 | return EINTR; |
371 | } |
372 | |
373 | if (!dev_already_opened ()) |
374 | { |
375 | pthread_mutex_unlock (&global_lock); |
376 | return ENODEV; |
377 | } |
378 | } |
379 | } |
380 | |
381 | pthread_mutex_unlock (&global_lock); |
382 | return 0; |
383 | } |
384 | |
385 | error_t (*trivfs_check_open_hook) (struct trivfs_control *, |
386 | struct iouser *, int) |
387 | = open_hook; |
388 | |
389 | static error_t |
390 | po_create_hook (struct trivfs_peropen *po) |
391 | { |
392 | pthread_mutex_lock (&global_lock); |
393 | nperopens++; |
394 | pthread_mutex_unlock (&global_lock); |
395 | return 0; |
396 | } |
397 | |
398 | error_t (*trivfs_peropen_create_hook) (struct trivfs_peropen *) = |
399 | po_create_hook; |
400 | |
401 | static void |
402 | po_destroy_hook (struct trivfs_peropen *po) |
403 | { |
404 | pthread_mutex_lock (&global_lock); |
405 | nperopens--; |
406 | if (!nperopens) |
407 | { |
408 | if (dev_already_opened ()) |
409 | { |
410 | clear_buffer (input_buffer); |
411 | dev_close (); |
412 | } |
413 | } |
414 | pthread_mutex_unlock (&global_lock); |
415 | } |
416 | |
417 | void (*trivfs_peropen_destroy_hook) (struct trivfs_peropen *) |
418 | = po_destroy_hook; |
419 | |
420 | void |
421 | trivfs_modify_stat (struct trivfs_protid *cred, struct stat *st) |
422 | { |
423 | st->st_blksize = vm_page_size; |
424 | st->st_size = 0; |
425 | |
426 | st->st_rdev = rdev; |
427 | st->st_mode &= ~S_IFMT; |
428 | st->st_mode |= S_IFCHR; |
429 | if ((trivfs_allow_open & O_READ) == 0) |
430 | st->st_mode &= ~(S_IRUSR | S_IRGRP | S_IROTH); |
431 | if ((trivfs_allow_open & O_WRITE) == 0) |
432 | st->st_mode &= ~(S_IWUSR | S_IWGRP | S_IWOTH); |
433 | } |
434 | |
435 | error_t |
436 | trivfs_goaway (struct trivfs_control *fsys, int flags) |
437 | { |
438 | error_t err; |
439 | int force = (flags & FSYS_GOAWAY_FORCE); |
440 | int nosync = (flags & FSYS_GOAWAY_NOSYNC); |
441 | struct port_class *root_port_class = fsys->protid_class; |
442 | |
443 | pthread_mutex_lock (&global_lock); |
444 | |
445 | if (!dev_already_opened ()) |
446 | exit (0); |
447 | |
448 | err = ports_inhibit_class_rpcs (root_port_class); |
449 | if (err == EINTR || (err && !force)) |
450 | { |
451 | pthread_mutex_unlock (&global_lock); |
452 | return err; |
453 | } |
454 | |
455 | if (force && nosync) |
456 | exit (0); |
457 | |
458 | if (!force && ports_count_class (root_port_class) > 0) |
459 | goto busy; |
460 | |
461 | if (!nosync) |
462 | dev_close (); |
463 | exit (0); |
464 | |
465 | busy: |
466 | ports_enable_class (root_port_class); |
467 | ports_resume_class_rpcs (root_port_class); |
468 | pthread_mutex_unlock (&global_lock); |
469 | |
470 | return EBUSY; |
471 | } |
472 | |
473 | |
474 | error_t |
475 | trivfs_S_io_read (struct trivfs_protid *cred, |
476 | mach_port_t reply, mach_msg_type_name_t reply_type, |
477 | char **data, mach_msg_type_number_t *data_len, |
478 | loff_t offs, mach_msg_type_number_t amount) |
479 | { |
480 | error_t err; |
481 | |
482 | if (!cred) |
483 | return EOPNOTSUPP; |
484 | |
485 | if (!(cred->po->openmodes & O_READ)) |
486 | return EBADF; |
487 | |
488 | pthread_mutex_lock (&global_lock); |
489 | err = dev_read (amount, (void **)data, data_len, cred->po->openmodes & O_NONBLOCK); |
490 | pthread_mutex_unlock (&global_lock); |
491 | return err; |
492 | } |
493 | |
494 | error_t |
495 | trivfs_S_io_readable (struct trivfs_protid *cred, |
496 | mach_port_t reply, mach_msg_type_name_t reply_type, |
497 | mach_msg_type_number_t *amount) |
498 | { |
499 | error_t err; |
500 | |
501 | if (!cred) |
502 | return EOPNOTSUPP; |
503 | |
504 | if (!(cred->po->openmodes & O_READ)) |
505 | return EBADF; |
506 | |
507 | pthread_mutex_lock (&global_lock); |
508 | err = dev_readable (amount); |
509 | pthread_mutex_unlock (&global_lock); |
510 | return err; |
511 | } |
512 | |
513 | error_t |
514 | trivfs_S_io_write (struct trivfs_protid *cred, |
515 | mach_port_t reply, mach_msg_type_name_t reply_type, |
516 | char *data, mach_msg_type_number_t data_len, |
517 | loff_t offs, mach_msg_type_number_t *amount) |
518 | { |
519 | error_t err; |
520 | |
521 | if (!cred) |
522 | return EOPNOTSUPP; |
523 | |
524 | if (!(cred->po->openmodes & O_WRITE)) |
525 | return EBADF; |
526 | |
527 | pthread_mutex_lock (&global_lock); |
528 | err = dev_write ((void *)data, data_len, amount, cred->po->openmodes & O_NONBLOCK); |
529 | pthread_mutex_unlock (&global_lock); |
530 | return err; |
531 | } |
532 | |
533 | error_t |
534 | trivfs_S_io_seek (struct trivfs_protid *cred, |
535 | mach_port_t reply, mach_msg_type_name_t reply_type, |
536 | off_t offs, int whence, off_t *new_offs) |
537 | { |
538 | if (!cred) |
539 | return EOPNOTSUPP; |
540 | else |
541 | return ESPIPE; |
542 | } |
543 | |
544 | static error_t |
545 | io_select_common (struct trivfs_protid *cred, |
546 | mach_port_t reply, mach_msg_type_name_t reply_type, |
547 | struct timespec *tsp, |
548 | int *type) |
549 | { |
550 | int available; |
551 | error_t err; |
552 | |
553 | if (!cred) |
554 | return EOPNOTSUPP; |
555 | |
556 | if (!(cred->po->openmodes & O_WRITE) && (*type & SELECT_WRITE)) |
557 | return EBADF; |
558 | |
559 | *type &= SELECT_READ | SELECT_WRITE; |
560 | |
561 | if (*type == 0) |
562 | return 0; |
563 | |
564 | available = 0; |
565 | |
566 | while (1) |
567 | { |
568 | pthread_mutex_lock (&global_lock); |
569 | if ((*type & SELECT_READ) && buffer_readable (input_buffer)) |
570 | available |= SELECT_READ; |
571 | if (output_buffer) |
572 | { |
573 | if ((*type & SELECT_WRITE) && buffer_writable (output_buffer)) |
574 | available |= SELECT_WRITE; |
575 | } |
576 | |
577 | if (available) |
578 | { |
579 | *type = available; |
580 | pthread_mutex_unlock (&global_lock); |
581 | return 0; |
582 | } |
583 | |
584 | if (cred->po->openmodes & O_NONBLOCK) |
585 | { |
586 | pthread_mutex_unlock (&global_lock); |
587 | return EWOULDBLOCK; |
588 | } |
589 | |
590 | ports_interrupt_self_on_port_death (cred, reply); |
591 | err = pthread_hurd_cond_timedwait_np (&select_alert, &global_lock, tsp); |
592 | if (err) |
593 | { |
594 | *type = 0; |
595 | pthread_mutex_unlock (&global_lock); |
596 | |
597 | if (err == ETIMEDOUT) |
598 | err = 0; |
599 | |
600 | return err; |
601 | } |
602 | } |
603 | } |
604 | |
605 | error_t |
606 | trivfs_S_io_select (struct trivfs_protid *cred, |
607 | mach_port_t reply, mach_msg_type_name_t reply_type, |
608 | int *type) |
609 | { |
610 | return io_select_common (cred, reply, reply_type, NULL, type); |
611 | } |
612 | |
613 | error_t |
614 | trivfs_S_io_select_timeout (struct trivfs_protid *cred, |
615 | mach_port_t reply, mach_msg_type_name_t reply_type, |
616 | struct timespec ts, |
617 | int *type) |
618 | { |
619 | return io_select_common (cred, reply, reply_type, &ts, type); |
620 | } |
621 | |
622 | error_t |
623 | trivfs_S_file_set_size (struct trivfs_protid *cred, |
624 | mach_port_t reply, mach_msg_type_name_t reply_type, |
625 | off_t size) |
626 | { |
627 | if (!cred) |
628 | return EOPNOTSUPP; |
629 | else if (!(cred->po->openmodes & O_WRITE)) |
630 | return EBADF; |
631 | else if (size < 0) |
632 | return EINVAL; |
633 | else |
634 | return 0; |
635 | } |
636 | |
637 | error_t |
638 | trivfs_S_io_get_openmodes (struct trivfs_protid *cred, |
639 | mach_port_t reply, mach_msg_type_name_t reply_type, |
640 | int *bits) |
641 | { |
642 | if (! cred) |
643 | return EOPNOTSUPP; |
644 | else |
645 | { |
646 | *bits = cred->po->openmodes; |
647 | return 0; |
648 | } |
649 | } |
650 | |
651 | error_t |
652 | trivfs_S_io_set_all_openmodes (struct trivfs_protid *cred, |
653 | mach_port_t reply, |
654 | mach_msg_type_name_t reply_type, |
655 | int mode) |
656 | { |
657 | if (! cred) |
658 | return EOPNOTSUPP; |
659 | else |
660 | return 0; |
661 | } |
662 | |
663 | error_t |
664 | trivfs_S_io_set_some_openmodes (struct trivfs_protid *cred, |
665 | mach_port_t reply, |
666 | mach_msg_type_name_t reply_type, |
667 | int bits) |
668 | { |
669 | if (! cred) |
670 | return EOPNOTSUPP; |
671 | else |
672 | return 0; |
673 | } |
674 | |
675 | error_t |
676 | trivfs_S_io_clear_some_openmodes (struct trivfs_protid *cred, |
677 | mach_port_t reply, |
678 | mach_msg_type_name_t reply_type, |
679 | int bits) |
680 | { |
681 | if (! cred) |
682 | return EOPNOTSUPP; |
683 | else |
684 | return 0; |
685 | } |
686 | |
687 | error_t |
688 | trivfs_S_file_sync (struct trivfs_protid *cred, |
689 | mach_port_t reply, mach_msg_type_name_t reply_type, |
690 | int wait, int omit_metadata) |
691 | { |
692 | error_t err; |
693 | |
694 | if (!cred) |
695 | return EOPNOTSUPP; |
696 | |
697 | pthread_mutex_lock (&global_lock); |
698 | err = dev_sync (wait); |
699 | pthread_mutex_unlock (&global_lock); |
700 | return err; |
701 | } |
702 | |
703 | error_t |
704 | trivfs_S_file_syncfs (struct trivfs_protid *cred, |
705 | mach_port_t reply, mach_msg_type_name_t reply_type, |
706 | int wait, int dochildren) |
707 | { |
708 | error_t err; |
709 | |
710 | if (!cred) |
711 | return EOPNOTSUPP; |
712 | |
713 | pthread_mutex_lock (&global_lock); |
714 | err = dev_sync (wait); |
715 | pthread_mutex_unlock (&global_lock); |
716 | return err; |
717 | } |
718 | |
719 | |
720 | /* This flag is set if there is an outstanding device_write. */ |
721 | static int output_pending; |
722 | |
723 | /* This flag is set if there is an outstanding device_read. */ |
724 | static int input_pending; |
725 | |
726 | /* This flag is set if there is an outstanding device_open. */ |
727 | static int open_pending; |
728 | |
729 | static char pending_output[IO_INBAND_MAX]; |
730 | static int npending_output; |
731 | |
732 | /* This flag is set if EOF is returned. */ |
733 | static int eof; |
734 | |
735 | /* The error number. */ |
736 | static error_t err; |
737 | |
738 | static struct port_class *phys_reply_class; |
739 | |
740 | /* The Mach device_t representing the stream. */ |
741 | static device_t phys_device = MACH_PORT_NULL; |
742 | |
743 | /* The ports we get replies on for device calls. */ |
744 | static mach_port_t phys_reply_writes = MACH_PORT_NULL; |
745 | static mach_port_t phys_reply = MACH_PORT_NULL; |
746 | |
747 | /* The port-info structures. */ |
748 | static struct port_info *phys_reply_writes_pi; |
749 | static struct port_info *phys_reply_pi; |
750 | |
751 | static device_t device_master; |
752 | |
753 | /* The block size and whole size of the device. */ |
754 | static size_t dev_blksize; |
755 | static size_t dev_size; |
756 | |
757 | |
758 | /* Open a new device structure for the device NAME with MODE. If an error |
759 | occurs, the error code is returned, otherwise 0. */ |
760 | /* The caller must already hold the global lock. */ |
761 | error_t |
762 | dev_open (const char *name, dev_mode_t mode) |
763 | { |
764 | if (open_pending || (phys_device != MACH_PORT_NULL)) |
765 | return 0; |
766 | |
767 | err = get_privileged_ports (0, &device_master); |
768 | if (err) |
769 | return err; |
770 | |
771 | phys_reply_class = ports_create_class (0, 0); |
772 | err = ports_create_port (phys_reply_class, streamdev_bucket, |
773 | sizeof (struct port_info), &phys_reply_pi); |
774 | if (err) |
775 | { |
776 | mach_port_deallocate (mach_task_self (), device_master); |
777 | return err; |
778 | } |
779 | |
780 | phys_reply = ports_get_right (phys_reply_pi); |
781 | mach_port_insert_right (mach_task_self (), phys_reply, phys_reply, |
782 | MACH_MSG_TYPE_MAKE_SEND); |
783 | |
784 | if (output_buffer) |
785 | { |
786 | err = ports_create_port (phys_reply_class, streamdev_bucket, |
787 | sizeof (struct port_info), |
788 | &phys_reply_writes_pi); |
789 | if (err) |
790 | { |
791 | mach_port_deallocate (mach_task_self (), phys_reply); |
792 | phys_reply = MACH_PORT_NULL; |
793 | ports_port_deref (phys_reply_pi); |
794 | phys_reply_pi = 0; |
795 | mach_port_deallocate (mach_task_self (), device_master); |
796 | return err; |
797 | } |
798 | |
799 | phys_reply_writes = ports_get_right (phys_reply_writes_pi); |
800 | mach_port_insert_right (mach_task_self (), phys_reply_writes, |
801 | phys_reply_writes, MACH_MSG_TYPE_MAKE_SEND); |
802 | } |
803 | |
804 | err = device_open_request (device_master, phys_reply, mode, name); |
805 | if (err) |
806 | { |
807 | mach_port_deallocate (mach_task_self (), phys_reply); |
808 | phys_reply = MACH_PORT_NULL; |
809 | ports_port_deref (phys_reply_pi); |
810 | phys_reply_pi = 0; |
811 | if (output_buffer) |
812 | { |
813 | mach_port_deallocate (mach_task_self (), phys_reply_writes); |
814 | phys_reply_writes = MACH_PORT_NULL; |
815 | ports_port_deref (phys_reply_writes_pi); |
816 | phys_reply_writes_pi = 0; |
817 | } |
818 | mach_port_deallocate (mach_task_self (), device_master); |
819 | return err; |
820 | } |
821 | |
822 | open_pending = 1; |
823 | return 0; |
824 | } |
825 | |
826 | kern_return_t |
827 | device_open_reply (mach_port_t reply, int returncode, mach_port_t device) |
828 | { |
829 | int sizes[DEV_GET_SIZE_COUNT]; |
830 | size_t sizes_len = DEV_GET_SIZE_COUNT; |
831 | int amount; |
832 | |
833 | if (reply != phys_reply) |
834 | return EOPNOTSUPP; |
835 | |
836 | pthread_mutex_lock (&global_lock); |
837 | |
838 | open_pending = 0; |
839 | pthread_cond_broadcast (&open_alert); |
840 | |
841 | if (returncode != 0) |
842 | { |
843 | dev_close (); |
844 | pthread_mutex_unlock (&global_lock); |
845 | return 0; |
846 | } |
847 | |
848 | phys_device = device; |
849 | eof = 0; |
850 | |
851 | /* Get the block size and the whole size. */ |
852 | err = device_get_status (device, DEV_GET_SIZE, sizes, &sizes_len); |
853 | if (err == D_INVALID_OPERATION) |
854 | { |
855 | /* XXX Assume that the block size is 1 and the whole size is 0. */ |
856 | dev_blksize = 1; |
857 | dev_size = 0; |
858 | err = 0; |
859 | } |
860 | else if (err == 0) |
861 | { |
862 | assert (sizes_len == DEV_GET_SIZE_COUNT); |
863 | |
864 | dev_blksize = sizes[DEV_GET_SIZE_RECORD_SIZE]; |
865 | dev_size = sizes[DEV_GET_SIZE_DEVICE_SIZE]; |
866 | |
867 | assert (dev_blksize && dev_blksize <= IO_INBAND_MAX); |
868 | } |
869 | else |
870 | { |
871 | dev_close (); |
872 | pthread_mutex_unlock (&global_lock); |
873 | return 0; |
874 | } |
875 | |
876 | amount = vm_page_size; |
877 | if (dev_blksize != 1) |
878 | amount = amount / dev_blksize * dev_blksize; |
Value stored to 'amount' is never read | |
879 | |
880 | pthread_mutex_unlock (&global_lock); |
881 | return 0; |
882 | } |
883 | |
884 | /* Check if the device is already opened. */ |
885 | /* The caller must already hold the global lock. */ |
886 | int |
887 | dev_already_opened (void) |
888 | { |
889 | return (phys_device != MACH_PORT_NULL); |
890 | } |
891 | |
892 | /* Close the device. */ |
893 | /* The caller must already hold the global lock. */ |
894 | void |
895 | dev_close (void) |
896 | { |
897 | /* Sync all pending writes. */ |
898 | dev_sync (1); |
899 | |
900 | device_close (phys_device); |
901 | mach_port_deallocate (mach_task_self (), phys_device); |
902 | phys_device = MACH_PORT_NULL; |
903 | |
904 | mach_port_deallocate (mach_task_self (), phys_reply); |
905 | phys_reply = MACH_PORT_NULL; |
906 | ports_port_deref (phys_reply_pi); |
907 | phys_reply_pi = 0; |
908 | clear_buffer (input_buffer); |
909 | input_pending = 0; |
910 | |
911 | if (output_buffer) |
912 | { |
913 | mach_port_deallocate (mach_task_self (), phys_reply_writes); |
914 | phys_reply_writes = MACH_PORT_NULL; |
915 | ports_port_deref (phys_reply_writes_pi); |
916 | phys_reply_writes_pi = 0; |
917 | clear_buffer (output_buffer); |
918 | npending_output = 0; |
919 | output_pending = 0; |
920 | } |
921 | } |
922 | |
923 | /* The caller must already hold the global lock. */ |
924 | static error_t |
925 | start_input (int nowait) |
926 | { |
927 | int size; |
928 | error_t err; |
929 | size_t amount; |
930 | |
931 | size = buffer_writable (input_buffer); |
932 | |
933 | if (size < dev_blksize || input_pending) |
934 | return 0; |
935 | |
936 | amount = vm_page_size; |
937 | if (dev_blksize != 1) |
938 | amount = amount / dev_blksize * dev_blksize; |
939 | |
940 | err = device_read_request_inband (phys_device, phys_reply, |
941 | nowait? D_NOWAIT : 0, |
942 | 0, amount); |
943 | if (err == D_WOULD_BLOCK) |
944 | err = 0; |
945 | if (err) |
946 | dev_close (); |
947 | else |
948 | input_pending = 1; |
949 | |
950 | return err; |
951 | } |
952 | |
953 | /* Read up to AMOUNT bytes, returned in BUF and LEN. If NOWAIT is non-zero |
954 | and the buffer is empty, then returns EWOULDBLOCK. If an error occurs, |
955 | the error code is returned, otherwise 0. */ |
956 | /* The caller must already hold the global lock. */ |
957 | error_t |
958 | dev_read (size_t amount, void **buf, size_t *len, int nowait) |
959 | { |
960 | size_t max, avail; |
961 | |
962 | if (err) |
963 | return err; |
964 | |
965 | while (!buffer_readable (input_buffer)) |
966 | { |
967 | err = start_input (nowait); |
968 | if (err) |
969 | return err; |
970 | |
971 | if (eof) |
972 | { |
973 | *len = 0; |
974 | return 0; |
975 | } |
976 | |
977 | if (nowait) |
978 | return EWOULDBLOCK; |
979 | |
980 | if (pthread_hurd_cond_wait_np (input_buffer->wait, &global_lock)) |
981 | return EINTR; |
982 | } |
983 | |
984 | avail = buffer_size (input_buffer); |
985 | max = (amount < avail) ? amount : avail; |
986 | if (max > *len) |
987 | vm_allocate (mach_task_self (), (vm_address_t *)buf, max, 1); |
988 | |
989 | *len = buffer_read (input_buffer, *buf, max); |
990 | assert (*len == max); |
991 | |
992 | err = start_input (nowait); |
993 | return err; |
994 | } |
995 | |
996 | error_t |
997 | device_read_reply_inband (mach_port_t reply, error_t errorcode, |
998 | char *data, u_int datalen) |
999 | { |
1000 | if (reply != phys_reply) |
1001 | return EOPNOTSUPP; |
1002 | |
1003 | pthread_mutex_lock (&global_lock); |
1004 | |
1005 | input_pending = 0; |
1006 | err = errorcode; |
1007 | if (!err) |
1008 | { |
1009 | if (datalen == 0) |
1010 | { |
1011 | eof = 1; |
1012 | dev_close (); |
1013 | pthread_mutex_unlock (&global_lock); |
1014 | return 0; |
1015 | } |
1016 | |
1017 | while (datalen) |
1018 | { |
1019 | size_t nwritten; |
1020 | |
1021 | while (!buffer_writable (input_buffer)) |
1022 | pthread_cond_wait (input_buffer->wait, &global_lock); |
1023 | |
1024 | nwritten = buffer_write (input_buffer, data, datalen); |
1025 | data += nwritten; |
1026 | datalen -= nwritten; |
1027 | pthread_cond_broadcast (input_buffer->wait); |
1028 | pthread_cond_broadcast (&select_alert); |
1029 | } |
1030 | } |
1031 | else |
1032 | { |
1033 | dev_close (); |
1034 | pthread_mutex_unlock (&global_lock); |
1035 | return 0; |
1036 | } |
1037 | pthread_mutex_unlock (&global_lock); |
1038 | return 0; |
1039 | } |
1040 | |
1041 | /* Return current readable size in AMOUNT. If an error occurs, the error |
1042 | code is returned, otherwise 0. */ |
1043 | /* The caller must already hold the global lock. */ |
1044 | error_t |
1045 | dev_readable (size_t *amount) |
1046 | { |
1047 | *amount = buffer_size (input_buffer); |
1048 | return 0; |
1049 | } |
1050 | |
1051 | /* The caller must already hold the global lock. */ |
1052 | static error_t |
1053 | start_output (int nowait) |
1054 | { |
1055 | int size; |
1056 | |
1057 | assert (output_buffer); |
1058 | |
1059 | size = buffer_size (output_buffer); |
1060 | |
1061 | if (size < dev_blksize || output_pending) |
1062 | return 0; |
1063 | |
1064 | if (size + npending_output > IO_INBAND_MAX) |
1065 | size = IO_INBAND_MAX - npending_output; |
1066 | |
1067 | if (dev_blksize != 1) |
1068 | size = size / dev_blksize * dev_blksize; |
1069 | |
1070 | buffer_read (output_buffer, pending_output + npending_output, size); |
1071 | npending_output += size; |
1072 | |
1073 | err = device_write_request_inband (phys_device, phys_reply_writes, |
1074 | nowait? D_NOWAIT : 0, |
1075 | 0, pending_output, npending_output); |
1076 | if (err == D_WOULD_BLOCK) |
1077 | err = 0; |
1078 | if (err) |
1079 | dev_close (); |
1080 | else |
1081 | output_pending = 1; |
1082 | |
1083 | return err; |
1084 | } |
1085 | |
1086 | /* Write LEN bytes from BUF, returning the amount actually written |
1087 | in AMOUNT. If NOWAIT is non-zero and the buffer is full, then returns |
1088 | EWOULDBLOCK. If an error occurs, the error code is returned, |
1089 | otherwise 0. */ |
1090 | /* The caller must already hold the global lock. */ |
1091 | error_t |
1092 | dev_write (void *buf, size_t len, size_t *amount, int nowait) |
1093 | { |
1094 | if (err) |
1095 | return err; |
1096 | |
1097 | while (!buffer_writable (output_buffer)) |
1098 | { |
1099 | err = start_output (nowait); |
1100 | if (err) |
1101 | return err; |
1102 | |
1103 | if (nowait) |
1104 | return EWOULDBLOCK; |
1105 | |
1106 | if (pthread_hurd_cond_wait_np (output_buffer->wait, &global_lock)) |
1107 | return EINTR; |
1108 | } |
1109 | |
1110 | *amount = buffer_write (output_buffer, buf, len); |
1111 | err = start_output (nowait); |
1112 | |
1113 | return err; |
1114 | } |
1115 | |
1116 | error_t |
1117 | device_write_reply_inband (mach_port_t reply, error_t returncode, int amount) |
1118 | { |
1119 | if (reply != phys_reply_writes) |
1120 | return EOPNOTSUPP; |
1121 | |
1122 | pthread_mutex_lock (&global_lock); |
1123 | |
1124 | output_pending = 0; |
1125 | |
1126 | if (!returncode) |
1127 | { |
1128 | if (amount >= npending_output) |
1129 | { |
1130 | npending_output = 0; |
1131 | pthread_cond_broadcast (output_buffer->wait); |
1132 | pthread_cond_broadcast (&select_alert); |
1133 | } |
1134 | else |
1135 | { |
1136 | npending_output -= amount; |
1137 | memmove (pending_output, pending_output + amount, npending_output); |
1138 | } |
1139 | } |
1140 | else |
1141 | dev_close (); |
1142 | |
1143 | pthread_mutex_unlock (&global_lock); |
1144 | return 0; |
1145 | } |
1146 | |
1147 | /* Try and write out any pending writes to the device. If WAIT is non-zero, |
1148 | will wait for any activity to cease. */ |
1149 | /* The caller must already hold the global lock. */ |
1150 | error_t |
1151 | dev_sync (int wait) |
1152 | { |
1153 | if (err) |
1154 | return err; |
1155 | |
1156 | if (!output_buffer || phys_device == MACH_PORT_NULL) |
1157 | return 0; |
1158 | |
1159 | while (buffer_readable (output_buffer) >= dev_blksize) |
1160 | { |
1161 | err = start_output (! wait); |
1162 | if (err) |
1163 | return err; |
1164 | |
1165 | if (!wait) |
1166 | return 0; |
1167 | |
1168 | if (pthread_hurd_cond_wait_np (output_buffer->wait, &global_lock)) |
1169 | return EINTR; |
1170 | } |
1171 | |
1172 | /* XXX: When the size of output_buffer is non-zero and less than |
1173 | DEV_BLKSIZE, the rest will be ignored or discarded. */ |
1174 | return 0; |
1175 | } |
1176 | |
1177 | /* Unused stubs. */ |
1178 | kern_return_t |
1179 | device_read_reply (mach_port_t reply, kern_return_t returncode, |
1180 | io_buf_ptr_t data, mach_msg_type_number_t amount) |
1181 | { |
1182 | return EOPNOTSUPP; |
1183 | } |
1184 | |
1185 | kern_return_t |
1186 | device_write_reply (mach_port_t reply, kern_return_t returncode, int amount) |
1187 | { |
1188 | return EOPNOTSUPP; |
1189 | } |
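
The dead store reported above sits at the tail of device_open_reply (): `amount' is set to vm_page_size and then rounded down to a multiple of dev_blksize (lines 876-878), but nothing reads the result. The same rounding is performed again in start_input () (lines 936-938), where the value actually feeds device_read_request_inband (). One minimal way to silence the warning, shown here only as a sketch and not as a change taken from the Hurd sources, is to drop the unused local and its two assignments:

    kern_return_t
    device_open_reply (mach_port_t reply, int returncode, mach_port_t device)
    {
      int sizes[DEV_GET_SIZE_COUNT];
      size_t sizes_len = DEV_GET_SIZE_COUNT;
      /* `int amount;' removed: the value stored to it was never read.  */

      /* ... body unchanged: record phys_device, query DEV_GET_SIZE ... */

      /* The vm_page_size / dev_blksize rounding that used to sit here
         (lines 876-878) is redundant; start_input () recomputes the same
         block-aligned amount immediately before it issues
         device_read_request_inband ().  */
      pthread_mutex_unlock (&global_lock);
      return 0;
    }

If the rounding was instead intended to prime the first read after the open completes, the natural place for that logic is start_input (), which already owns it.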