author     Michael I. Bushnell <mib@gnu.org>    1995-12-20 01:49:18 +0000
committer  Michael I. Bushnell <mib@gnu.org>    1995-12-20 01:49:18 +0000
commit     0dacb8a6d3b47c4910772349b48e357735633e88 (patch)
tree       2cd09adce58a5c0067c4e718e92cd938a36771bd
parent     d6f441166420f08076aca71e705738b05f14a2e2 (diff)
(trivfs_S_io_read): After a block on the input queue, don't go back
and check the input queue if there is a signal in progress; wait for
the signal to complete first.
(send_signal): Release global_lock around signal RPC. Call
report_sig_start and report_sig_end around signal RPC.
(call_asyncs): Likewise.
(report_sig_start, report_sig_end): New functions.
(sigs_in_progress, input_sig_wait, input_sig_wakeup): New variables.
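
The reader-side rule described above (after blocking for input, do not touch the input queue while a signal is still being delivered) can be modelled outside the Hurd. Below is a minimal sketch using POSIX threads rather than the cthreads mutex/condition primitives the patch uses; wait-for-input is simulated with a hypothetical input_arrived condition, and the EINTR handling that hurd_condition_wait provides is omitted.

    #include <pthread.h>

    static pthread_mutex_t global_lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t input_arrived = PTHREAD_COND_INITIALIZER;
    static pthread_cond_t input_sig_wait = PTHREAD_COND_INITIALIZER;
    static int input_available;       /* stands in for qsize (inputq) */
    static int sigs_in_progress;      /* signals currently being delivered */
    static int input_sig_wakeup;      /* a reader is waiting for them to end */

    /* Reader side: block until input arrives, then make sure no signal is
       still in flight before consuming the queue.  */
    static int
    read_input (void)
    {
      pthread_mutex_lock (&global_lock);

      while (!input_available)
        {
          /* Block for input; this is where the race window opens.  */
          pthread_cond_wait (&input_arrived, &global_lock);

          /* We blocked, so a signal that started in the meantime may still
             be in progress; wait for it to finish before reading.  */
          while (sigs_in_progress)
            {
              input_sig_wakeup++;
              pthread_cond_wait (&input_sig_wait, &global_lock);
            }
        }

      input_available = 0;            /* "read" the queued input */
      pthread_mutex_unlock (&global_lock);
      return 0;
    }

A reader that never blocked needs no such check: a signal that was already in progress when it arrived would simply have completed before or after the read, which is indistinguishable from normal interleaving.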
-rw-r--r--   term/users.c   47
1 file changed, 47 insertions, 0 deletions
diff --git a/term/users.c b/term/users.c
index 43646e3c..1c8cf4b8 100644
--- a/term/users.c
+++ b/term/users.c
@@ -63,6 +63,10 @@ struct winsize window_size;
 
 static void call_asyncs (void);
 
+static int sigs_in_progress;
+static struct condition input_sig_wait = CONDITION_INITIALIZER;
+static int input_sig_wakeup;
+
 /* Attach this on the hook of any protid that is a ctty.  */
 struct protid_hook
 {
@@ -500,6 +504,28 @@ trivfs_S_io_read (struct trivfs_protid *cred,
 	      mutex_unlock (&global_lock);
 	      return EINTR;
 	    }
+
+	  /* If a signal is being delivered, and we got woken up by
+	     arriving input, then there's a possible race; we have to not
+	     read from the queue as long as the signal is in progress.
+	     Now, you might think that we should not read from the queue
+	     when a signal is in progress even if we didn't block, but
+	     that's not so.  It's specifically that we have to get
+	     *interrupted* by signals in progress (when the signallee is
+	     this thread and wants to interrupt us) that is the race we
+	     are avoiding.  A reader who gets in after the signal begins,
+	     while the signal is in progress, is harmless, because this
+	     case is indiscernible from one where the reader gets in after
+	     the signal has completed.  */
+	  if (sigs_in_progress)
+	    {
+	      input_sig_wakeup++;
+	      if (hurd_condition_wait (&input_sig_wait, &global_lock))
+		{
+		  mutex_unlock (&global_lock);
+		  return EINTR;
+		}
+	    }
 	}
 
       avail = qsize (inputq);
@@ -1729,6 +1755,23 @@ trivfs_S_io_map (struct trivfs_protid *cred,
   return EOPNOTSUPP;
 }
 
+static void
+report_sig_start ()
+{
+  sigs_in_progress++;
+}
+
+static void
+report_sig_end ()
+{
+  sigs_in_progress--;
+  if ((sigs_in_progress == 0) && input_sig_wakeup)
+    {
+      input_sig_wakeup = 0;
+      condition_broadcast (&input_sig_wait);
+    }
+}
+
 /* Call all the scheduled async I/O handlers */
 static void
 call_asyncs ()
@@ -1744,9 +1787,11 @@ call_asyncs ()
 
   if ((termflags & ICKY_ASYNC) && !(termflags & NO_OWNER))
     {
+      report_sig_start ();
       mutex_unlock (&global_lock);
       hurd_sig_post (foreground_id, SIGIO, async_icky_id);
       mutex_lock (&global_lock);
+      report_sig_end ();
     }
 
   for (ar = async_requests, prevp = &async_requests;
@@ -1778,9 +1823,11 @@ send_signal (int signo)
       right = ports_get_right (cttyid);
       mach_port_insert_right (mach_task_self (), right, right,
			      MACH_MSG_TYPE_MAKE_SEND);
+      report_sig_start ();
      mutex_unlock (&global_lock);
      hurd_sig_post (foreground_id, signo, right);
      mutex_lock (&global_lock);
+      report_sig_end ();
      mach_port_deallocate (mach_task_self (), right);
    }
 }
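
On the sender side, the pattern the diff adds around each hurd_sig_post call is: bump a counter under the lock, drop the lock for the (possibly blocking) signal RPC, retake the lock, then decrement the counter and wake any readers that queued up behind the signal. A rough equivalent in POSIX threads, with a hypothetical deliver_signal_rpc() standing in for hurd_sig_post, might look like this:

    #include <pthread.h>

    static pthread_mutex_t global_lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t input_sig_wait = PTHREAD_COND_INITIALIZER;
    static int sigs_in_progress;
    static int input_sig_wakeup;

    /* Hypothetical stand-in for the hurd_sig_post RPC; it may block, which
       is why the caller drops global_lock around it.  */
    static void
    deliver_signal_rpc (int signo)
    {
      (void) signo;
    }

    static void
    report_sig_start (void)   /* called with global_lock held */
    {
      sigs_in_progress++;
    }

    static void
    report_sig_end (void)     /* called with global_lock held */
    {
      sigs_in_progress--;
      if (sigs_in_progress == 0 && input_sig_wakeup)
        {
          /* The last in-flight signal is done; release any readers that
             woke up on input while it was pending.  */
          input_sig_wakeup = 0;
          pthread_cond_broadcast (&input_sig_wait);
        }
    }

    static void
    send_signal (int signo)
    {
      pthread_mutex_lock (&global_lock);
      report_sig_start ();
      pthread_mutex_unlock (&global_lock);  /* don't hold the lock across the RPC */
      deliver_signal_rpc (signo);
      pthread_mutex_lock (&global_lock);
      report_sig_end ();
      pthread_mutex_unlock (&global_lock);
    }

A counter rather than a boolean is used because both call_asyncs and send_signal can have signal RPCs outstanding at the same time; readers are released only when the last one completes.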