Diffstat (limited to 'libports')
-rw-r--r--  libports/bucket-iterate.c      2
-rw-r--r--  libports/manage-multithread.c  7
-rw-r--r--  libports/manage-one-thread.c   2
3 files changed, 9 insertions, 2 deletions
diff --git a/libports/bucket-iterate.c b/libports/bucket-iterate.c
index d376e6f8..e439cb19 100644
--- a/libports/bucket-iterate.c
+++ b/libports/bucket-iterate.c
@@ -31,7 +31,7 @@ _ports_bucket_class_iterate (struct port_bucket *bucket,
error_t (*fun)(void *))
{
/* This is obscenely ineffecient. ihash and ports need to cooperate
- more closely to do it effeciently. */
+ more closely to do it efficiently. */
struct item
{
struct item *next;
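
The comment corrected above refers to how this function iterates a bucket: judging from the struct item list it begins to declare, the ports are first copied into a temporary list and the callback is only applied afterwards, so it may add or remove ports without upsetting the walk. Below is a minimal sketch of that snapshot-then-apply pattern; the element_t type, the table_walk helper, and the error handling are hypothetical stand-ins, not the real libports or ihash interfaces.

/* Snapshot-then-apply iteration sketch (hypothetical names): collect
   every element into a private list first, then run FUN on the copy.  */

#include <stdlib.h>

typedef int error_t;
typedef void *element_t;

struct item
{
  struct item *next;
  element_t p;
};

/* Hypothetical walker over whatever table holds the elements; calls
   VISIT once per element while the table is locked.  */
extern void table_walk (void *table,
                        void (*visit) (element_t, void *arg), void *arg);

static void
collect (element_t e, void *arg)
{
  struct item **list = arg;
  struct item *i = malloc (sizeof *i);
  if (i != NULL)
    {
      i->p = e;
      i->next = *list;
      *list = i;
    }
}

error_t
iterate_snapshot (void *table, error_t (*fun) (void *))
{
  struct item *list = NULL, *i, *next;
  error_t err = 0;

  table_walk (table, collect, &list);  /* take the snapshot */

  for (i = list; i != NULL; i = next)  /* apply FUN outside the walk */
    {
      next = i->next;
      if (err == 0)
        err = (*fun) (i->p);
      free (i);
    }
  return err;
}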
diff --git a/libports/manage-multithread.c b/libports/manage-multithread.c
index 6ec1e49f..82fa2ac6 100644
--- a/libports/manage-multithread.c
+++ b/libports/manage-multithread.c
@@ -60,6 +60,7 @@ ports_manage_port_operations_multithread (struct port_bucket *bucket,
assert (nreqthreads);
nreqthreads--;
if (nreqthreads == 0)
+ /* No thread would be listening for requests, spawn one. */
spawn = 1;
spin_unlock (&lock);
@@ -150,6 +151,12 @@ ports_manage_port_operations_multithread (struct port_bucket *bucket,
else
{
spin_lock (&lock);
+ if (nreqthreads == 1)
+ {
+ /* No other thread is listening for requests, continue. */
+ spin_unlock (&lock);
+ goto startover;
+ }
nreqthreads--;
totalthreads--;
spin_unlock (&lock);
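
The two hunks above keep the count of listening threads from reaching zero: the first notes that a new worker is spawned when the last listener picks up a request, and the second stops a timing-out thread from exiting while it is the only listener left. A compilable sketch of that guard follows, using a pthread mutex in place of the Hurd's spin lock and a hypothetical receive_one_request() stand-in for the timed mach_msg receive; the counters mirror the diff, but the surrounding loop is simplified.

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int nreqthreads;   /* threads currently waiting for a request */
static int totalthreads;  /* worker threads alive in total */

/* Hypothetical: blocks for a while; returns false when the receive
   timed out without delivering any work.  */
extern bool receive_one_request (void);

static void *
worker (void *arg)
{
  (void) arg;
  for (;;)
    {
    startover:
      if (receive_one_request ())
        continue;               /* handled a request, wait for the next */

      /* Timed out: consider exiting, but never let the listener count
         drop to zero, or nobody would be waiting for the next request.  */
      pthread_mutex_lock (&lock);
      if (nreqthreads == 1)
        {
          pthread_mutex_unlock (&lock);
          goto startover;       /* sole listener left: keep listening */
        }
      nreqthreads--;
      totalthreads--;
      pthread_mutex_unlock (&lock);
      return NULL;
    }
}

The spawn side from the first hunk (creating a new worker when the last listener starts handling a request) is omitted here for brevity.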
diff --git a/libports/manage-one-thread.c b/libports/manage-one-thread.c
index 57b8a9a9..4ea740b2 100644
--- a/libports/manage-one-thread.c
+++ b/libports/manage-one-thread.c
@@ -69,7 +69,7 @@ ports_manage_port_operations_one_thread (struct port_bucket *bucket,
}
else
{
- /* No need to check cancel threshhold here, because
+ /* No need to check cancel threshold here, because
in a single threaded server the cancel is always
handled in order. */
status = demuxer (inp, outheadp);