/* Socket I/O operations

   Copyright (C) 1995 Free Software Foundation, Inc.

   Written by Miles Bader

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2, or (at
   your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */

#include "sock.h"
#include "pipe.h"
#include "connq.h"

/* Read data from an IO object.  If OFFSET is -1, read from the object
   maintained file pointer.  If the object is not seekable, OFFSET is
   ignored.  The amount desired to be read is in AMOUNT.  */
error_t
S_io_read (struct sock_user *user,
	   char **data, mach_msg_type_number_t *data_len,
	   off_t offset, mach_msg_type_number_t amount)
{
  error_t err;
  struct pipe *pipe;

  if (!user)
    return EOPNOTSUPP;

  err = sock_aquire_read_pipe (user->sock, &pipe);
  if (!err)
    {
      /* FIX: the original read `sock->flags', but no variable SOCK is
	 in scope here; the socket is USER->SOCK.  */
      err = pipe_read (pipe, user->sock->flags & SOCK_NONBLOCK, NULL,
		       data, data_len, amount, NULL, NULL, NULL, NULL);
      pipe_release (pipe);
    }

  return err;
}

/* Write data to an IO object.  If OFFSET is -1, write at the object
   maintained file pointer.  If the object is not seekable, OFFSET is
   ignored.  The amount successfully written is returned in AMOUNT.  A
   given user should not have more than one outstanding io_write on an
   object at a time; servers implement congestion control by delaying
   responses to io_write.  Servers may drop data (returning ENOBUFS)
   if they receive more than one write when not prepared for it.  */
error_t
S_io_write (struct sock_user *user,
	    char *data, mach_msg_type_number_t data_len,
	    off_t offset, mach_msg_type_number_t *amount)
{
  error_t err;
  struct pipe *pipe;

  if (!user)
    return EOPNOTSUPP;

  err = sock_aquire_write_pipe (user->sock, &pipe);
  if (!err)
    {
      err = pipe_write (pipe, data, data_len, amount);
      /* FIX: release the pipe only when the acquire succeeded; the
	 original called pipe_release unconditionally, dropping a
	 reference it never obtained on the error path.  */
      pipe_release (pipe);
    }

  return err;
}

/* Cause a pending request on this object to immediately return.  The
   exact semantics are dependent on the specific object.  */
error_t
S_interrupt_operation (struct sock_user *user)
{
  struct pipe *pipe;

  if (!user)
    return EOPNOTSUPP;

  /* Interrupt pending reads on this socket.  We don't bother with
     writes since they never block.  */
  if (sock_aquire_read_pipe (user->sock, &pipe) == 0)
    {
      /* Indicate to currently waiting threads that they've been
	 interrupted.  */
      pipe->interrupt_seq_num++;
      pipe_kick (pipe);
      pipe_release (pipe);
    }

  return 0;
}

/* Tell how much data can be read from the object without blocking for
   a "long time" (this should be the same meaning of "long time" used
   by the nonblocking flag).  */
error_t
S_io_readable (struct sock_user *user, mach_msg_type_number_t *amount)
{
  error_t err;
  struct pipe *pipe;

  if (!user)
    return EOPNOTSUPP;

  err = sock_aquire_read_pipe (user->sock, &pipe);
  if (!err)
    {
      /* Use the PIPE we hold a reference to, rather than re-fetching
	 USER->SOCK->READ_PIPE (which the original did, bypassing the
	 reference just acquired).  */
      *amount = pipe_readable (pipe, 1);
      pipe_release (pipe);
    }

  return err;
}

/* Change current read/write offset.  Sockets are never seekable, so
   this always returns ESPIPE for a valid user.  */
error_t
S_io_seek (struct sock_user *user,
	   off_t offset, int whence, off_t *new_offset)
{
  return user ? ESPIPE : EOPNOTSUPP;
}

/* Return a new port with the same semantics as the existing port.
*/ error_t S_io_duplicate (struct sock_user *user, mach_port_t *new_port, mach_msg_type_name_t *new_port_type) { struct sock *sock; struct sock_user *new_user; if (!user) return EOPNOTSUPP; sock = user->sock; mutex_lock (&sock->lock); sock->refs++; mutex_unlock (&sock->lock); new_user = port_allocate_port (sock_user_bucket, sizeof (struct sock_user), sock_user_class); new_user->sock = sock; *new_port = ports_get_right (new_user); *new_port_type = MACH_MSG_TYPE_MAKE_SEND; return 0; } /* SELECT_TYPE is the bitwise OR of SELECT_READ, SELECT_WRITE, and SELECT_URG. Block until one of the indicated types of i/o can be done "quickly", and return the types that are then available. ID_TAG is returned as passed; it is just for the convenience of the user in matching up reply messages with specific requests sent. */ error_t S_io_select (struct sock_user *user, int *select_type, int *id_tag) { error_t err = 0; struct sock *sock; if (!user) return EOPNOTSUPP; *select_type |= ~SELECT_URG; /* We never return these. */ sock = user->sock; mutex_lock (&sock->lock); if (sock->connq) /* Sock is used for accepting connections, not I/O. For these, you can only select for reading, which will block until a connection request comes along. */ { mutex_unlock (&sock->lock); if (*select_type & SELECT_WRITE) /* Meaningless for a non-i/o socket. */ return EBADF; if (*select_type & SELECT_READ) /* Wait for a connect. Passing in NULL for REQ means that the request won't be dequeued. */ return connq_listen (sock->connq, sock->flags & SOCK_NONBLOCK, NULL, NULL); } else /* Sock is a normal read/write socket. */ { if ((*select_type & SELECT_WRITE) && !sock->write_pipe) { mutex_unlock (&sock->lock); return EBADF; } /* Otherwise, pipes are always writable... */ if (*select_type & SELECT_READ) { struct pipe *pipe = sock->read_pipe; pipe_aquire (pipe); /* We unlock SOCK here, as it's not subsequently used, and we might go to sleep waiting for readable data. 
*/ mutex_unlock (&sock->lock); if (!pipe) return EBADF; if (! pipe_is_readable (pipe, 1)) /* Nothing to read on PIPE yet... */ if (*select_type & ~SELECT_READ) /* But there's other stuff to report, so return that. */ *select_type &= ~SELECT_READ; else /* The user only cares about reading, so wait until something is readable. */ err = pipe_wait (pipe, 0, 1); pipe_release (pipe); } else mutex_unlock (&sock->lock); } return err; } /* Return the current status of the object. Not all the fields of the io_statuf_t are meaningful for all objects; however, the access and modify times, the optimal IO size, and the fs type are meaningful for all objects. */ error_t S_io_stat (struct sock_user *user, struct stat *st) { struct sock *sock; void copy_time (time_value_t from, time_t *to_sec, unsigned long *to_usec) { *to_sec = from.seconds; *to_usec = from.microseconds; } if (!user) return EOPNOTSUPP; sock = user->sock; bzero (st, sizeof (struct stat)); st->st_fstype = FSTYPE_SOCKET; st->st_fsid = getpid (); st->st_ino = sock->id; st->st_blksize = vm_page_size * 8; mutex_lock (&sock->lock); /* Make sure the pipes don't go away... */ if (sock->read_pipe) copy_time (&sock->read_pipe->read_time, &st->st_atime, &st->atime_usec); if (sock->write_pipe) copy_time (&sock->read_pipe->write_time, &st->st_mtime, &st->mtime_usec); copy_time (&sock->change_time, &st->st_ctime, &st->ctime_usec); return 0; } error_t S_io_get_openmodes (struct sock_user *user, int *bits) { unsigned flags; if (!user) return EOPNOTSUPP; flags = user->sock->flags; *bits = (flags & SOCK_NONBLOCK ? O_NONBLOCK : 0) | (flags & SOCK_SHUTDOWN_READ ? 0 : O_READ) | (flags & SOCK_SHUTDOWN_WRITE ? 
0 : O_WRITE); return 0; } error_t S_io_set_all_openmodes (struct sock_user *user, int bits) { if (!user) return EOPNOTSUPP; mutex_lock (&user->sock->lock); if (bits & SOCK_NONBLOCK) user->sock->flags |= SOCK_NONBLOCK; else user->sock->flags &= ~SOCK_NONBLOCK; mutex_unlock (user->sock->lock); return 0; } error_t S_io_set_some_openmodes (struct sock_user *user, int bits) { if (!user) return EOPNOTSUPP; mutex_lock (&user->sock->lock); if (bits & SOCK_NONBLOCK) user->sock->flags |= SOCK_NONBLOCK; mutex_unlock (user->sock->lock); return 0; } error_t S_io_clear_some_openmodes (struct sock_user *user, int bits) { if (!user) return EOPNOTSUPP; mutex_lock (&user->sock->lock); if (bits & SOCK_NONBLOCK) user->sock->flags &= ~SOCK_NONBLOCK; mutex_unlock (user->sock->lock); return 0; } /* Stubs for currently unsupported rpcs. */ error_t S_io_async(struct sock_user *user, mach_port_t notify_port, mach_port_t *async_id_port, mach_msg_type_name_t *async_id_port_type) { return EOPNOTSUPP; } error_t S_io_mod_owner(struct sock_user *user, pid_t owner) { return EOPNOTSUPP; } error_t S_io_get_owner(struct sock_user *user, pid_t *owner) { return EOPNOTSUPP; } error_t S_io_get_icky_async_id (struct sock_user *user, mach_port_t *icky_async_id_port, mach_msg_type_name_t *icky_async_id_port_type) { return EOPNOTSUPP; } error_t S_io_map (struct sock_user *user, mach_port_t *memobj_rd, mach_msg_type_name_t *memobj_rd_type, mach_port_t *memobj_wt, mach_msg_type_name_t *memobj_wt_type) { return EOPNOTSUPP; } error_t S_io_map_cntl (struct sock_user *user, mach_port_t *mem, mach_msg_type_name_t *mem_type) { return EOPNOTSUPP; } error_t S_io_get_conch (struct sock_user *user) { return EOPNOTSUPP; } error_t S_io_release_conch (struct sock_user *user) { return EOPNOTSUPP; } error_t S_io_eofnotify (struct sock_user *user) { return EOPNOTSUPP; } error_t S_io_prenotify (struct sock_user *user, vm_offset_t start, vm_offset_t end) { return EOPNOTSUPP; } error_t S_io_postnotify (struct sock_user *user, 
vm_offset_t start, vm_offset_t end) { return EOPNOTSUPP; } error_t S_io_readsleep (struct sock_user *user) { return EOPNOTSUPP; } error_t S_io_readnotify (struct sock_user *user) { return EOPNOTSUPP; } error_t S_io_sigio (struct sock_user *user) { return EOPNOTSUPP; } error_t S_io_server_version (struct sock_user *user, char *name, int *maj, int *min, int *edit) { return EOPNOTSUPP; }