summaryrefslogtreecommitdiff
path: root/pflocal/io.c
diff options
context:
space:
mode:
authorMiles Bader <miles@gnu.org>1995-07-01 23:31:27 +0000
committerMiles Bader <miles@gnu.org>1995-07-01 23:31:27 +0000
commit1cfe679b50e77b061b6561d2025488a748f616bb (patch)
treec4907cf2b12ca84e037f9386c0e7ddbaebe305c7 /pflocal/io.c
parentc11bc452717c1e4fd3e50cf0029cf997e332e5f0 (diff)
Formerly io.c.~2~
Diffstat (limited to 'pflocal/io.c')
-rw-r--r--pflocal/io.c173
1 files changed, 71 insertions, 102 deletions
diff --git a/pflocal/io.c b/pflocal/io.c
index 5e5a6a6a..278f6e30 100644
--- a/pflocal/io.c
+++ b/pflocal/io.c
@@ -24,20 +24,22 @@
maintained file pointer. If the object is not seekable, offset is
ignored. The amount desired to be read is in amount. */
error_t
-S_io_read (struct socket_user *user,
+S_io_read (struct sock_user *user,
char **data, mach_msg_type_number_t *data_len,
off_t offset, mach_msg_type_number_t amount)
{
+ error_t err = 0;
+ unsigned readable;
struct pipe *pipe;
if (!user)
return EOPNOTSUPP;
- pipe = socket_aquire_read_pipe (user->socket);
+ pipe = sock_aquire_read_pipe (user->sock);
if (pipe == NULL)
return EBADF;
- while (! pipe_readable (pipe) && pipe->writer)
+ while ((readable = pipe_readable (pipe)) == 0 && pipe->writer)
{
unsigned seq_num = pipe->interrupt_seq_num;
condition_wait (&pipe->pending_reads, &pipe->lock);
@@ -48,35 +50,28 @@ S_io_read (struct socket_user *user,
}
}
- if (amount > pipe->end - pipe->start)
- amount = pipe->end - pipe->start;
-
- if (amount > 0)
- {
- if (*datalen < amount)
- vm_allocate (mach_task_self (), (vm_address_t *)data, amount, 1);
- *datalen = amount;
- bcopy (pipe->start, *data, amount);
- pipe->start += amount;
- timestamp (&pipe->read_time);
- }
+ if (readable)
+ err = pipe_read (pipe, data, data_len, amount);
+ if (readable && !err)
+ timestamp (&pipe->read_time);
pipe_release (pipe);
- return 0;
+ return err;
}
/* Cause a pending request on this object to immediately return. The
exact semantics are dependent on the specific object. */
error_t
-S_interrupt_operation (struct socket_user *user)
+S_interrupt_operation (struct sock_user *user)
{
struct pipe *pipe;
if (!user)
return EOPNOTSUPP;
- /* Interrupt pending reads on this socket. */
- pipe = socket_aquire_read_pipe (user->socket);
+ /* Interrupt pending reads on this socket. We don't bother with writes
+ since they never block. */
+ pipe = sock_aquire_read_pipe (user->sock);
if (pipe != NULL)
{
/* Indicate to currently waiting threads they've been interrupted. */
@@ -94,13 +89,13 @@ S_interrupt_operation (struct socket_user *user)
return 0;
}
-S_io_get_openmodes (struct socket_user *user, int *bits)
+S_io_get_openmodes (struct sock_user *user, int *bits)
{
if (!user)
return EOPNOTSUPP;
*bits =
- (user->socket->read_pipe ? O_READ : 0)
- | (user->socket->write_pipe ? O_WRITE : 0);
+ (user->sock->read_pipe ? O_READ : 0)
+ | (user->sock->write_pipe ? O_WRITE : 0);
return 0;
}
@@ -108,26 +103,26 @@ S_io_get_openmodes (struct socket_user *user, int *bits)
   a "long time" (this should be the same meaning of "long time" used
   by the nonblocking flag). */
error_t
-S_io_readable (struct socket_user *user, mach_msg_type_number_t *amount)
+S_io_readable (struct sock_user *user, mach_msg_type_number_t *amount)
{
error_t err = 0;
if (!user)
return EOPNOTSUPP;
- mutex_lock (&user->socket->lock);
- if (user->socket->read_pipe)
- *amount = user->socket->read_pipe->end - user->socket->read_pipe->start;
+ mutex_lock (&user->sock->lock);
+ if (user->sock->read_pipe)
+ *amount = pipe_readable (user->sock->read_pipe);
else
err = EBADF;
- mutex_unlock (&user->socket->lock);
+ mutex_unlock (&user->sock->lock);
return err;
}
/* Change current read/write offset */
error_t
-S_io_seek (struct socket_user *user,
+S_io_seek (struct sock_user *user,
off_t offset, int whence, off_t *new_offset)
{
return user ? ESPIPE : EOPNOTSUPP;
@@ -135,25 +130,25 @@ S_io_seek (struct socket_user *user,
/* Return a new port with the same semantics as the existing port. */
error_t
-S_io_duplicate (struct socket_user *user,
+S_io_duplicate (struct sock_user *user,
mach_port_t *new_port, mach_msg_type_name_t *new_port_type)
{
- struct socket *socket;
- struct socket_user *new_user;
+ struct sock *sock;
+ struct sock_user *new_user;
if (!user)
return EOPNOTSUPP;
- socket = user->socket;
- mutex_lock (&socket->lock);
- socket->refs++;
- mutex_unlock (&socket->lock);
+ sock = user->sock;
+ mutex_lock (&sock->lock);
+ sock->refs++;
+ mutex_unlock (&sock->lock);
new_user =
- port_allocate_port (socket_user_bucket,
- sizeof (struct socket_user),
- socket_user_class);
- new_user->socket = socket;
+ port_allocate_port (sock_user_bucket,
+ sizeof (struct sock_user),
+ sock_user_class);
+ new_user->sock = sock;
*new_port = ports_get_right (new_user);
*new_port_type = MACH_MSG_TYPE_MAKE_SEND;
@@ -168,64 +163,38 @@ S_io_duplicate (struct socket_user *user,
responses to io_write. Servers may drop data (returning ENOBUFS)
   if they receive more than one write when not prepared for it. */
error_t
-S_io_write (struct socket_user *user,
+S_io_write (struct sock_user *user,
char *data, mach_msg_type_number_t data_len,
off_t offset, mach_msg_type_number_t *amount)
{
+ error_t err = 0;
struct pipe *pipe;
if (!user)
return EOPNOTSUPP;
- pipe = socket_aquire_write_pipe (user->socket);
+ pipe = sock_aquire_write_pipe (user->sock);
if (pipe == NULL)
return EBADF;
if (pipe->reader == NULL)
+ err = EPIPE;
+ if (!err)
+ err = pipe_write(pipe, data, data_len, amount);
+ if (!err)
{
- pipe_release (pipe);
- return EPIPE;
- }
-
- if (pipe->buffer + pipe->alloced - pipe->end < data_len)
- /* Not enough room in the buffer for the additional data, so grow it. */
- {
- int pipe_amount = pipe->end - pipe->start;
-
- pipe->alloced = pipe_amount + data_len;
-
- if (pipe->start != pipe->buffer)
- /* There is free space at the front of the buffer. Get rid of it. */
+ timestamp (&pipe->write_time);
+
+ /* And wakeup anyone that might be interested in it. */
+ condition_signal (&pipe->pending_reads, &pipe->lock);
+ mutex_lock (&pipe->lock); /* Get back the lock on PIPE. */
+
+ /* Only wakeup selects if there's still data available. */
+ if (pipe_readable (pipe))
{
- char *new_buffer = malloc (pipe->alloced);
- bcopy (pipe->start, new_buffer, pipe_amount);
- free (pipe->buffer);
- pipe->buffer = new_buffer;
+ condition_signal (&pipe->pending_selects, &pipe->lock);
+ mutex_lock (&pipe->lock); /* Get back the lock on PIPE. */
}
- else
- pipe->buffer = realloc (pipe->buffer, pipe->alloced);
-
- /* Now the data is guaranteed to start at the beginning of the buffer. */
- pipe->start = pipe->buffer;
- pipe->end = pipe->start + pipe_amount;
- }
-
- /* Add the new data. */
- assert (pipe->buffer + pipe->alloced - pipe->end >= data_len);
- bcopy (data, pipe->end, data_len);
- pipe->end += data_len;
- *amount = data_len;
- timestamp (&pipe->read_time);
-
- /* And wakeup anyone that might be interested in it. */
- condition_signal (&pipe->pending_reads, &pipe->lock);
- mutex_lock (&pipe->lock); /* Get back the lock on PIPE. */
-
- /* Only wakeup selects if there's still data available. */
- if (pipe->start < start->end)
- {
- condition_signal (&pipe->pending_selects, &pipe->lock);
- mutex_lock (&pipe->lock); /* Get back the lock on PIPE. */
}
pipe_release (pipe);
@@ -238,34 +207,34 @@ S_io_write (struct socket_user *user,
is just for the convenience of the user in matching up reply messages with
specific requests sent. */
error_t
-S_io_select (struct socket_user *user, int *select_type, int *id_tag)
+S_io_select (struct sock_user *user, int *select_type, int *id_tag)
{
- struct socket *socket;
+ struct sock *sock;
if (!user)
return EOPNOTSUPP;
- socket = user->socket;
- mutex_lock (&socket->lock);
+ sock = user->sock;
+ mutex_lock (&sock->lock);
*select_type |= ~SELECT_URG;
- if ((*select_type & SELECT_WRITE) && !socket->write_pipe)
+ if ((*select_type & SELECT_WRITE) && !sock->write_pipe)
{
- mutex_unlock (&socket->lock);
+ mutex_unlock (&sock->lock);
return EBADF;
}
/* Otherwise, pipes are always writable... */
if (*select_type & SELECT_READ)
{
- struct pipe *pipe = socket->read_pipe;
+ struct pipe *pipe = sock->read_pipe;
if (pipe)
pipe_aquire (pipe);
- /* We unlock SOCKET here, as it's not subsequently used, and we might
+ /* We unlock SOCK here, as it's not subsequently used, and we might
go to sleep waiting for readable data. */
- mutex_unlock (&socket->lock);
+ mutex_unlock (&sock->lock);
if (!pipe)
return EBADF;
@@ -292,7 +261,7 @@ S_io_select (struct socket_user *user, int *select_type, int *id_tag)
pipe_release (pipe);
}
else
- mutex_unlock (&socket->lock);
+ mutex_unlock (&sock->lock);
return 0;
}
@@ -302,9 +271,9 @@ S_io_select (struct socket_user *user, int *select_type, int *id_tag)
modify times, the optimal IO size, and the fs type are meaningful
for all objects. */
error_t
-S_io_stat (struct socket_user *user, struct stat *st)
+S_io_stat (struct sock_user *user, struct stat *st)
{
- struct socket *socket;
+ struct sock *sock;
void copy_time (time_value_t from, time_t *to_sec, unsigned long *to_usec)
{
*to_sec = from.seconds;
@@ -314,23 +283,23 @@ S_io_stat (struct socket_user *user, struct stat *st)
if (!user)
return EOPNOTSUPP;
- socket = user->socket;
+ sock = user->sock;
bzero (st, sizeof (struct stat));
st->st_fstype = FSTYPE_SOCKET;
st->st_fsid = getpid ();
- st->st_ino = socket->id;
+ st->st_ino = sock->id;
st->st_blksize = vm_page_size * 8;
- mutex_lock (&socket->lock); /* Make sure the pipes don't go away... */
+ mutex_lock (&sock->lock); /* Make sure the pipes don't go away... */
- if (socket->read_pipe)
- copy_time (&socket->read_pipe->read_time, &st->st_atime, &st->atime_usec);
- if (socket->write_pipe)
- copy_time (&socket->read_pipe->write_time, &st->st_mtime, &st->mtime_usec);
- copy_time (&socket->change_time, &st->st_ctime, &st->ctime_usec);
+ if (sock->read_pipe)
+ copy_time (&sock->read_pipe->read_time, &st->st_atime, &st->atime_usec);
+ if (sock->write_pipe)
+ copy_time (&sock->read_pipe->write_time, &st->st_mtime, &st->mtime_usec);
+ copy_time (&sock->change_time, &st->st_ctime, &st->ctime_usec);
return 0;
}