author     Michael I. Bushnell <mib@gnu.org>  1992-10-06 18:31:16 +0000
committer  Michael I. Bushnell <mib@gnu.org>  1992-10-06 18:31:16 +0000
commit     da328cb2c579c5b60725cbba903eff753061e17a
tree       c8f6634ad0298e628fa96bbd94f58dd504ef5d55
parent     964ab87456cf79f0f5787796c3e8917502d9f9d8
Initial revision
-rw-r--r--  libthreads/cprocs.c              1122
-rw-r--r--  libthreads/cthread_internals.h    198
-rw-r--r--  libthreads/cthreads.c             451
-rw-r--r--  libthreads/cthreads.h             556
-rw-r--r--  libthreads/i386/csw.S             139
-rw-r--r--  libthreads/i386/thread.c          114
-rw-r--r--  libthreads/malloc.c               349
-rw-r--r--  libthreads/mig_support.c          157
-rw-r--r--  libthreads/stack.c                382
9 files changed, 3468 insertions, 0 deletions
diff --git a/libthreads/cprocs.c b/libthreads/cprocs.c
new file mode 100644
index 00000000..e721abd2
--- /dev/null
+++ b/libthreads/cprocs.c
@@ -0,0 +1,1122 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * HISTORY
+ * $Log: cprocs.c,v $
+ * Revision 2.15 92/03/06 14:09:31 rpd
+ * Replaced swtch_pri with yield.
+ * [92/03/06 rpd]
+ *
+ * Revision 2.14 91/08/28 11:19:16 jsb
+ * Fixed the loop in cproc_fork_child that frees cprocs.
+ * [91/08/23 rpd]
+ *
+ * Revision 2.13 91/07/31 18:33:04 dbg
+ * Fix some more bad types. Ints are NOT pointers.
+ *
+ * Fix argument type mismatch in cproc_create.
+ * [91/07/30 17:32:59 dbg]
+ *
+ * Revision 2.12 91/05/14 17:56:11 mrt
+ * Correcting copyright
+ *
+ * Revision 2.11 91/02/14 14:19:26 mrt
+ * Added new Mach copyright
+ * [91/02/13 12:40:50 mrt]
+ *
+ * Revision 2.10 90/11/05 14:36:41 rpd
+ * Added cproc_fork_{prepare,parent,child}.
+ * [90/11/02 rwd]
+ *
+ * Fix for positive stack growth.
+ * [90/11/01 rwd]
+ *
+ * Add spin_lock_t.
+ * [90/10/31 rwd]
+ *
+ * Revision 2.9 90/10/12 13:07:12 rpd
+ * Fix type
+ * [90/10/10 15:09:59 rwd]
+ *
+ * Comment code.
+ * [90/10/02 rwd]
+ *
+ * Revision 2.8 90/09/09 14:34:44 rpd
+ * Remove special mutex. Remove thread_calls and debug_mutex
+ * [90/08/24 rwd]
+ * Fix up old call to cthread_msg_busy to new format.
+ * [90/08/22 rwd]
+ *
+ * Revision 2.7 90/08/06 15:09:17 rwd
+ * Fixed arguments to cthread_mach_msg.
+ * [90/06/26 rwd]
+ * Add additional STATISTICS.
+ * [90/06/07 rwd]
+ *
+ * Attempt to reduce number of times a cthread is released to a
+ * msg_receive by adding min/max instead of single number to
+ * cthread_msg calls.
+ * [90/06/06 rwd]
+ *
+ * Revision 2.6 90/06/02 15:13:36 rpd
+ * Converted to new IPC.
+ * [90/03/20 20:46:16 rpd]
+ *
+ * Revision 2.5 90/05/29 18:40:11 rwd
+ * Don't incr special field until the mutex grab is successful.
+ * [90/05/09 rwd]
+ *
+ * Revision 2.4 90/03/14 21:12:02 rwd
+ * Added WAIT_DEBUG code for deadlock debugging.
+ * [90/03/01 rwd]
+ * Insert cprocs in cproc_list as allocated.
+ * [90/03/01 10:20:16 rwd]
+ *
+ * Revision 2.3 90/01/19 14:36:57 rwd
+ * Make cthread_msg_busy only release new thread if this is still
+ * busy. Ie don't release two on back to back calls.
+ * [90/01/11 rwd]
+ * Add THREAD_CALL code. Add CPROC_ARUN state.
+ * [90/01/03 rwd]
+ * Add new cthread_msg_rpc call
+ * [89/12/20 rwd]
+ * Change cproc_self pointer to top of stack. Now need to change
+ * the stack of the first thread.
+ * [89/12/12 rwd]
+ *
+ * Revision 2.2 89/12/08 19:53:13 rwd
+ * Added CPROC_CONDWAIT state to deal with lock held
+ * across mutex_unlock problem.
+ * [89/11/29 rwd]
+ * Changed mutexes to not hand off. MUTEX_EXTRA conditional is
+ * now obsolete.
+ * [89/11/27 rwd]
+ *
+ * Add MUTEX_EXTRA code for extra kernel threads to serve special
+ * mutexes in time of need.
+ * [89/11/25 rwd]
+ * Add MUTEX_SPECIAL and DEBUG_MUTEX code
+ * [89/11/24 rwd]
+ * Changed mutex_lock to mutex_lock_solid. Mutex_lock is now a
+ * macro which tries the spin_lock before making a subroutine call.
+ * Mutex_unlock is now a macro with mutex_unlock_solid for worst case.
+ * [89/11/13 rwd]
+ *
+ * Rewrite most to merge coroutine and thread implementation.
+ * New routines are cthread_set_kernel_limit, cthread_kernel_limit,
+ * cthread_wire, cthread_unwire, and cthread_receive.
+ * [89/10/23 rwd]
+ *
+ * Revision 2.1 89/08/03 17:07:10 rwd
+ * Created.
+ *
+ * 11-Apr-89 David Golub (dbg) at Carnegie-Mellon University
+ * Made condition_yield loop break if swtch_pri returns TRUE (in
+ * case we fix it).
+ *
+ * 31-Mar-89 David Golub (dbg) at Carnegie-Mellon University
+ * Change cond_signal, cond_broadcast, and cproc_continue so that
+ * the condition's spin lock is not held while continuing the
+ * process.
+ *
+ * 16-Jan-89 David Golub (dbg) at Carnegie-Mellon University
+ * Changes for stand-alone library to run on pure kernel:
+ * . made IPC_WAIT standard, as calls that are used if IPC_WAIT == 0
+ * vanished a year ago.
+ * . Removed (as much as possible) references to stdio or other U*X
+ * features.
+ *
+ *
+ * 01-Apr-88 Eric Cooper (ecc) at Carnegie Mellon University
+ * Changed condition_clear(c) to acquire c->lock,
+ * to serialize after any threads still doing condition_signal(c).
+ * Suggested by Dan Julin.
+ *
+ * 19-Feb-88 Eric Cooper (ecc) at Carnegie Mellon University
+ * Extended the inline scripts to handle spin_unlock() and mutex_unlock().
+ *
+ * 28-Jan-88 David Golub (dbg) at Carnegie Mellon University
+ * Removed thread_data argument from thread_create
+ * and converted to new thread_set_state call.
+ *
+ * 01-Dec-87 Eric Cooper (ecc) at Carnegie Mellon University
+ * Added inline expansion for cthread_sp() function.
+ *
+ * 21-Aug-87 Eric Cooper (ecc) at Carnegie Mellon University
+ * Fixed uninitialized reply_port in cproc_alloc() (found by rds).
+ *
+ * 14-Aug-87 Eric Cooper (ecc) at Carnegie Mellon University
+ * Tried using return value of swtch() to guide condition_wait().
+ * Performance was worse than using a hybrid spin/yield/block
+ * scheme, so the version using swtch() was commented out.
+ * Disabled IPC_WAIT in released version.
+ *
+ * 13-Aug-87 Eric Cooper (ecc) at Carnegie Mellon University
+ * Added IPC_WAIT option.
+ * If defined, thread synchronization (condition_wait() and
+ * cproc_continue()) are implemented using msg_receive() and
+ * msg_send() instead of thread_suspend() and thread_resume().
+ *
+ * 11-Aug-87 Eric Cooper (ecc) at Carnegie Mellon University
+ * Moved thread reply port to cproc structure in cthread_internals.h,
+ * because mig calls are made while cproc is idle (no cthread structure).
+ * Changed cproc_switch() and cproc_start (COROUTINE implementation)
+ * to use address of saved context, rather than address of enclosing cproc,
+ * to eliminate dependency on cproc layout.
+ */
+/*
+ * File: cprocs.c
+ * Author: Eric Cooper, Carnegie Mellon University
+ * Date: Aug, 1987
+ *
+ * Implementation of cprocs (lightweight processes)
+ * and primitive synchronization operations.
+ */
+
+
+#include <cthreads.h>
+#include "cthread_internals.h"
+#include <mach/message.h>
+
+/*
+ * C Threads imports:
+ */
+extern void alloc_stack();
+extern void cproc_switch(); /* cproc context switch */
+extern void cproc_start_wait(); /* cproc idle thread */
+extern vm_offset_t cproc_stack_base(); /* return start of stack */
+extern vm_offset_t stack_init();
+
+/*
+ * Port_entry's are used by cthread_mach_msg to store information
+ * about each port/port_set for which it is managing threads
+ */
+
+typedef struct port_entry {
+ struct port_entry *next; /* next port_entry */
+ mach_port_t port; /* which port/port_set */
+ struct cthread_queue queue; /* queue of runnable threads for
+ this port/port_set */
+ int min; /* minimum number of kernel threads
+ to be used by this port/port_set */
+ int max; /* maximum number of kernel threads
+ to be used by this port/port_set */
+ int held; /* actual number of kernel threads
+					   currently in use */
+ spin_lock_t lock; /* lock governing all above fields */
+} *port_entry_t;
+
+#define PORT_ENTRY_NULL ((port_entry_t) 0)
+
+/* Available to outside for statistics */
+
+int cthread_wait_stack_size = 8192; /* stack size for idle threads */
+int cthread_max_kernel_threads = 0; /* max kernel threads */
+int cthread_kernel_threads = 0; /* current kernel threads */
+private spin_lock_t n_kern_lock = SPIN_LOCK_INITIALIZER;
+ /* lock for 2 above */
+#ifdef STATISTICS
+int cthread_ready = 0; /* currently runnable */
+int cthread_running = 1; /* currently running */
+int cthread_waiting = 0; /* currently waiting */
+int cthread_wired = 0; /* currently wired */
+private spin_lock_t wired_lock = SPIN_LOCK_INITIALIZER;
+ /* lock for above */
+int cthread_wait_stacks = 0; /* total cthread waiting stacks */
+int cthread_waiters = 0;		/* total waiters */
+int cthread_wakeup = 0; /* total times woken when starting to
+ block */
+int cthread_blocked = 0; /* total blocked */
+int cthread_rnone = 0; /* total times no cthread available
+ to meet minimum for port_entry */
+int cthread_yields = 0; /* total cthread_yields */
+int cthread_none = 0; /* total idle wakeups w/o runnable */
+int cthread_switches = 0; /* total number of cproc_switches */
+int cthread_no_mutex = 0; /* total number times woken to get
+ mutex and couldn't */
+private spin_lock_t mutex_count_lock = SPIN_LOCK_INITIALIZER;
+ /* lock for above */
+#endif STATISTICS
+
+cproc_t cproc_list = NO_CPROC; /* list of all cprocs */
+private spin_lock_t cproc_list_lock = SPIN_LOCK_INITIALIZER;
+ /* lock for above */
+private int cprocs_started = FALSE; /* initialized? */
+private struct cthread_queue ready = QUEUE_INITIALIZER;
+ /* ready queue */
+private int ready_count = 0; /* number of ready threads on ready
+ queue - number of messages sent */
+private spin_lock_t ready_lock = SPIN_LOCK_INITIALIZER;
+ /* lock for 2 above */
+private mach_port_t wait_port = MACH_PORT_NULL;
+ /* port on which idle threads wait */
+private int wait_count = 0; /* number of waiters - messages pending
+ to wake them */
+private struct cthread_queue waiters = QUEUE_INITIALIZER;
+ /* queue of cthreads to run as idle */
+private spin_lock_t waiters_lock = SPIN_LOCK_INITIALIZER;
+ /* lock for 2 above */
+private port_entry_t port_list = PORT_ENTRY_NULL;
+ /* master list of port_entries */
+private spin_lock_t port_lock = SPIN_LOCK_INITIALIZER;
+ /* lock for above queue */
+private mach_msg_header_t wakeup_msg; /* prebuilt message used by idle
+ threads */
+
+/*
+ * Return current value for max kernel threads
+ * Note: 0 means no limit
+ */
+
+cthread_kernel_limit()
+{
+ return cthread_max_kernel_threads;
+}
+
+/*
+ * Set max number of kernel threads
+ * Note: This will not currently terminate existing threads
+ * over maximum.
+ */
+
+cthread_set_kernel_limit(n)
+ int n;
+{
+ cthread_max_kernel_threads = n;
+}
+
+/*
+ * Wire a cthread to its current kernel thread
+ */
+
+void cthread_wire()
+{
+ register cproc_t p = cproc_self();
+ kern_return_t r;
+
+ /*
+ * A wired thread has a port associated with it for all
+ * of its wait/block cases. We also prebuild a wakeup
+ * message.
+ */
+
+ if (p->wired == MACH_PORT_NULL) {
+ MACH_CALL(mach_port_allocate(mach_task_self(),
+ MACH_PORT_RIGHT_RECEIVE,
+ &p->wired), r);
+ MACH_CALL(mach_port_insert_right(mach_task_self(),
+ p->wired, p->wired,
+ MACH_MSG_TYPE_MAKE_SEND), r);
+ p->msg.msgh_bits = MACH_MSGH_BITS(MACH_MSG_TYPE_COPY_SEND, 0);
+ p->msg.msgh_size = 0; /* initialized in call */
+ p->msg.msgh_remote_port = p->wired;
+ p->msg.msgh_local_port = MACH_PORT_NULL;
+ p->msg.msgh_kind = MACH_MSGH_KIND_NORMAL;
+ p->msg.msgh_id = 0;
+#ifdef STATISTICS
+ spin_lock(&wired_lock);
+ cthread_wired++;
+ spin_unlock(&wired_lock);
+#endif STATISTICS
+ }
+}
+
+/*
+ * Unwire a cthread. Deallocate its wait port.
+ */
+
+void cthread_unwire()
+{
+ register cproc_t p = cproc_self();
+ kern_return_t r;
+
+ if (p->wired != MACH_PORT_NULL) {
+ MACH_CALL(mach_port_mod_refs(mach_task_self(), p->wired,
+ MACH_PORT_RIGHT_SEND, -1), r);
+ MACH_CALL(mach_port_mod_refs(mach_task_self(), p->wired,
+ MACH_PORT_RIGHT_RECEIVE, -1), r);
+ p->wired = MACH_PORT_NULL;
+#ifdef STATISTICS
+ spin_lock(&wired_lock);
+ cthread_wired--;
+ spin_unlock(&wired_lock);
+#endif STATISTICS
+ }
+}
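+
+/*
+ * Illustrative sketch (hypothetical function, not part of this
+ * library): a region that must keep running on one kernel thread
+ * can be bracketed with cthread_wire()/cthread_unwire().
+ */
+void
+do_wired_work()
+{
+	cthread_wire();			/* pin to the current kernel thread */
+	/* ... work that must not migrate between kernel threads ... */
+	cthread_unwire();		/* deallocate wait port, unpin */
+}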
+
+private cproc_t
+cproc_alloc()
+{
+ register cproc_t p = (cproc_t) malloc(sizeof(struct cproc));
+
+ p->incarnation = NO_CTHREAD;
+ p->reply_port = MACH_PORT_NULL;
+
+ spin_lock_init(&p->lock);
+ p->wired = MACH_PORT_NULL;
+ p->state = CPROC_RUNNING;
+ p->busy = 0;
+ spin_lock(&cproc_list_lock);
+ p->list = cproc_list;
+ cproc_list = p;
+ spin_unlock(&cproc_list_lock);
+
+ return p;
+}
+
+/*
+ * Called by cthread_init to set up initial data structures.
+ */
+
+vm_offset_t
+cproc_init()
+{
+ kern_return_t r;
+
+ cproc_t p = cproc_alloc();
+
+ cthread_kernel_threads = 1;
+
+ MACH_CALL(mach_port_allocate(mach_task_self(),
+ MACH_PORT_RIGHT_RECEIVE,
+ &wait_port), r);
+ MACH_CALL(mach_port_insert_right(mach_task_self(),
+ wait_port, wait_port,
+ MACH_MSG_TYPE_MAKE_SEND), r);
+
+ wakeup_msg.msgh_bits = MACH_MSGH_BITS(MACH_MSG_TYPE_COPY_SEND, 0);
+ wakeup_msg.msgh_size = 0; /* initialized in call */
+ wakeup_msg.msgh_remote_port = wait_port;
+ wakeup_msg.msgh_local_port = MACH_PORT_NULL;
+ wakeup_msg.msgh_kind = MACH_MSGH_KIND_NORMAL;
+ wakeup_msg.msgh_id = 0;
+
+ cprocs_started = TRUE;
+
+
+ /*
+ * We pass back the new stack which should be switched to
+ * by crt0. This guarantees correct size and alignment.
+ */
+ return (stack_init(p));
+}
+
+/*
+ * Insert cproc on ready queue. Make sure it is ready for queue by
+ * synching on its lock. Just send message to wired cproc.
+ */
+
+private int cproc_ready(p, preq)
+ register cproc_t p;
+ register int preq;
+{
+ register cproc_t s=cproc_self();
+ kern_return_t r;
+
+ if (p->wired != MACH_PORT_NULL) {
+ r = mach_msg(&p->msg, MACH_SEND_MSG,
+ sizeof p->msg, 0, MACH_PORT_NULL,
+ MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
+#ifdef CHECK_STATUS
+ if (r != MACH_MSG_SUCCESS) {
+ mach_error("mach_msg", r);
+ exit(1);
+ }
+#endif CHECK_STATUS
+ return TRUE;
+ }
+ spin_lock(&p->lock); /* is it ready to be queued? It
+ can appear on a queue before
+ being switched from. This lock
+ is released by cproc_switch as
+ its last operation. */
+ if (p->state & CPROC_SWITCHING) {
+ /*
+ * We caught it early on. Just set to RUNNING
+ * and we will save a lot of time.
+ */
+ p->state = (p->state & ~CPROC_SWITCHING) | CPROC_RUNNING;
+ spin_unlock(&p->lock);
+ return TRUE;
+ }
+ spin_unlock(&p->lock);
+
+ spin_lock(&ready_lock);
+
+ if (preq) {
+ cthread_queue_preq(&ready, p);
+ } else {
+ cthread_queue_enq(&ready, p);
+ }
+#ifdef STATISTICS
+ cthread_ready++;
+#endif STATISTICS
+ ready_count++;
+
+ if ((s->state & CPROC_CONDWAIT) && !(s->wired)) {
+ /*
+ * This is an optimization. Don't bother waking anyone to grab
+ * this guy off the ready queue since my thread will block
+ * momentarily for the condition wait.
+ */
+
+ spin_unlock(&ready_lock);
+ return TRUE;
+ }
+
+ if ((ready_count > 0) && wait_count) {
+ wait_count--;
+ ready_count--;
+ spin_unlock(&ready_lock);
+ r = mach_msg(&wakeup_msg, MACH_SEND_MSG,
+ sizeof wakeup_msg, 0, MACH_PORT_NULL,
+ MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
+#ifdef CHECK_STATUS
+ if (r != MACH_MSG_SUCCESS) {
+ mach_error("mach_msg", r);
+ exit(1);
+ }
+#endif CHECK_STATUS
+ return TRUE;
+ }
+ spin_unlock(&ready_lock);
+ return FALSE;
+}
+
+/*
+ * This is only run on a partial "waiting" stack and called from
+ * cproc_start_wait
+ */
+
+void
+cproc_waiting(p)
+ register cproc_t p;
+{
+ mach_msg_header_t msg;
+ register cproc_t new;
+ kern_return_t r;
+
+#ifdef STATISTICS
+ spin_lock(&ready_lock);
+ cthread_waiting++;
+ cthread_waiters++;
+ spin_unlock(&ready_lock);
+#endif STATISTICS
+ for (;;) {
+ MACH_CALL(mach_msg(&msg, MACH_RCV_MSG,
+ 0, sizeof msg, wait_port,
+ MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL), r);
+ spin_lock(&ready_lock);
+ cthread_queue_deq(&ready, cproc_t, new);
+ if (new != NO_CPROC) break;
+ wait_count++;
+ ready_count++;
+#ifdef STATISTICS
+ cthread_none++;
+#endif STATISTICS
+ spin_unlock(&ready_lock);
+ }
+#ifdef STATISTICS
+ cthread_ready--;
+ cthread_running++;
+ cthread_waiting--;
+#endif STATISTICS
+ spin_unlock(&ready_lock);
+ spin_lock(&new->lock);
+ new->state = CPROC_RUNNING;
+ spin_unlock(&new->lock);
+ spin_lock(&waiters_lock);
+ cthread_queue_enq(&waiters, p);
+ spin_lock(&p->lock);
+ spin_unlock(&waiters_lock);
+ cproc_switch(&p->context,&new->context,&p->lock);
+}
+
+/*
+ * Get a waiter with stack
+ *
+ */
+
+cproc_t
+cproc_waiter()
+{
+ register cproc_t waiter;
+
+ spin_lock(&waiters_lock);
+ cthread_queue_deq(&waiters, cproc_t, waiter);
+ spin_unlock(&waiters_lock);
+ if (waiter == NO_CPROC) {
+ vm_address_t base;
+ kern_return_t r;
+#ifdef STATISTICS
+ spin_lock(&waiters_lock);
+ cthread_wait_stacks++;
+ spin_unlock(&waiters_lock);
+#endif STATISTICS
+ waiter = cproc_alloc();
+ MACH_CALL(vm_allocate(mach_task_self(), &base,
+ cthread_wait_stack_size, TRUE), r);
+ waiter->stack_base = base;
+ waiter->stack_size = cthread_wait_stack_size;
+ }
+ return (waiter);
+}
+
+
+/*
+ * Current cproc is blocked so switch to any ready cprocs, or, if
+ * none, go into the wait state.
+ *
+ * You must hold cproc_self()->lock when called.
+ */
+
+cproc_block()
+{
+ register cproc_t waiter, new, p = cproc_self();
+ register int extra;
+
+ if (p->wired != MACH_PORT_NULL) {
+ mach_msg_header_t msg;
+ kern_return_t r;
+
+ spin_unlock(&p->lock);
+ MACH_CALL(mach_msg(&msg, MACH_RCV_MSG,
+ 0, sizeof msg, p->wired,
+ MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL), r);
+ return;
+ }
+ p->state = CPROC_SWITCHING;
+ spin_unlock(&p->lock);
+ spin_lock(&ready_lock);
+#ifdef STATISTICS
+ cthread_blocked++;
+#endif STATISTICS
+ cthread_queue_deq(&ready, cproc_t, new);
+ if (new) {
+#ifdef STATISTICS
+ cthread_ready--;
+ cthread_switches++;
+#endif STATISTICS
+ ready_count--;
+ spin_unlock(&ready_lock);
+ spin_lock(&p->lock);
+ if (p->state == CPROC_RUNNING) { /* have we been saved */
+ spin_unlock(&p->lock);
+#ifdef STATISTICS
+ spin_lock(&ready_lock);
+ cthread_wakeup++;
+ cthread_switches--;
+ spin_unlock(&ready_lock);
+#endif STATISTICS
+			cproc_ready(new, 1);	/* requeue at head where it was */
+ } else {
+ p->state = CPROC_BLOCKED;
+			spin_lock(&new->lock);	/* in case still switching */
+ new->state = CPROC_RUNNING;
+ spin_unlock(&new->lock);
+ cproc_switch(&p->context,&new->context,&p->lock);
+ }
+ } else {
+ wait_count++;
+#ifdef STATISTICS
+ cthread_running--;
+#endif STATISTICS
+ spin_unlock(&ready_lock);
+ waiter = cproc_waiter();
+ spin_lock(&p->lock);
+ if (p->state == CPROC_RUNNING) { /* we have been saved */
+ spin_unlock(&p->lock);
+ spin_lock(&ready_lock);
+ wait_count--;
+#ifdef STATISTICS
+ cthread_running++;
+ cthread_wakeup++;
+#endif STATISTICS
+ spin_unlock(&ready_lock);
+ spin_lock(&waiters_lock);
+ cthread_queue_preq(&waiters, waiter);
+ spin_unlock(&waiters_lock);
+ } else {
+ p->state = CPROC_BLOCKED;
+ spin_lock(&waiter->lock); /* in case still switching */
+ spin_unlock(&waiter->lock);
+ cproc_start_wait(&p->context, waiter,
+ cproc_stack_base(waiter, sizeof(ur_cthread_t *)),
+ &p->lock);
+ }
+ }
+}
+
+/*
+ * Implement C threads using MACH threads.
+ */
+cproc_t
+cproc_create()
+{
+ register cproc_t child = cproc_alloc();
+ register kern_return_t r;
+ extern void cproc_setup();
+ extern void cproc_prepare();
+ extern void cthread_body();
+ thread_t n;
+
+ alloc_stack(child);
+ spin_lock(&n_kern_lock);
+ if (cthread_max_kernel_threads == 0 ||
+ cthread_kernel_threads < cthread_max_kernel_threads) {
+ cthread_kernel_threads++;
+ spin_unlock(&n_kern_lock);
+ MACH_CALL(thread_create(mach_task_self(), &n), r);
+ cproc_setup(child, n, cthread_body); /* machine dependent */
+ MACH_CALL(thread_resume(n), r);
+#ifdef STATISTICS
+ spin_lock(&ready_lock);
+ cthread_running++;
+ spin_unlock(&ready_lock);
+#endif STATISTICS
+ } else {
+ spin_unlock(&n_kern_lock);
+ child->state = CPROC_BLOCKED;
+ cproc_prepare(child, &child->context,
+ cproc_stack_base(child, 0));
+ cproc_ready(child,0);
+ }
+ return child;
+}
+
+void
+condition_wait(c, m)
+ register condition_t c;
+ mutex_t m;
+{
+ register cproc_t p = cproc_self();
+
+ p->state = CPROC_CONDWAIT | CPROC_SWITCHING;
+
+ spin_lock(&c->lock);
+ cthread_queue_enq(&c->queue, p);
+ spin_unlock(&c->lock);
+#ifdef WAIT_DEBUG
+ p->waiting_for = (char *)c;
+#endif WAIT_DEBUG
+
+ mutex_unlock(m);
+
+ spin_lock(&p->lock);
+ if (p->state & CPROC_SWITCHING) {
+ cproc_block();
+ } else {
+ p->state = CPROC_RUNNING;
+ spin_unlock(&p->lock);
+ }
+
+
+#ifdef WAIT_DEBUG
+ p->waiting_for = (char *)0;
+#endif WAIT_DEBUG
+
+ /*
+ * Re-acquire the mutex and return.
+ */
+ mutex_lock(m);
+}
+
+void
+cond_signal(c)
+ register condition_t c;
+{
+ register cproc_t p;
+
+ spin_lock(&c->lock);
+ cthread_queue_deq(&c->queue, cproc_t, p);
+ spin_unlock(&c->lock);
+ if (p != NO_CPROC) {
+ cproc_ready(p,0);
+ }
+}
+
+void
+cond_broadcast(c)
+ register condition_t c;
+{
+ register cproc_t p;
+ struct cthread_queue blocked_queue;
+
+ cthread_queue_init(&blocked_queue);
+
+ spin_lock(&c->lock);
+ for (;;) {
+ register int old_state;
+
+ cthread_queue_deq(&c->queue, cproc_t, p);
+ if (p == NO_CPROC)
+ break;
+ cthread_queue_enq(&blocked_queue, p);
+ }
+ spin_unlock(&c->lock);
+
+ for(;;) {
+ cthread_queue_deq(&blocked_queue, cproc_t, p);
+ if (p == NO_CPROC)
+ break;
+ cproc_ready(p,0);
+ }
+}
+
+void
+cthread_yield()
+{
+ register cproc_t new, p = cproc_self();
+
+ if (p->wired != MACH_PORT_NULL) {
+ yield();
+ return;
+ }
+ spin_lock(&ready_lock);
+#ifdef STATISTICS
+ cthread_yields++;
+#endif STATISTICS
+ cthread_queue_deq(&ready, cproc_t, new);
+ if (new) {
+ cthread_queue_enq(&ready, p);
+ spin_lock(&p->lock);
+ p->state = CPROC_BLOCKED;
+ spin_unlock(&ready_lock);
+ spin_lock(&new->lock);
+ new->state = CPROC_RUNNING;
+ spin_unlock(&new->lock);
+ cproc_switch(&p->context,&new->context,&p->lock);
+ } else {
+ spin_unlock(&ready_lock);
+ yield();
+ }
+}
+
+/*
+ * Mutex objects.
+ */
+
+void
+mutex_lock_solid(m)
+ register mutex_t m;
+{
+ register cproc_t p = cproc_self();
+ register int queued;
+ register int tried = 0;
+
+#ifdef WAIT_DEBUG
+ p->waiting_for = (char *)m;
+#endif WAIT_DEBUG
+ while (1) {
+ spin_lock(&m->lock);
+ if (cthread_queue_head(&m->queue, cproc_t) == NO_CPROC) {
+ cthread_queue_enq(&m->queue, p);
+ queued = 1;
+ } else {
+ queued = 0;
+ }
+ if (spin_try_lock(&m->held)) {
+ if (queued) cthread_queue_deq(&m->queue, cproc_t, p);
+ spin_unlock(&m->lock);
+#ifdef WAIT_DEBUG
+ p->waiting_for = (char *)0;
+#endif WAIT_DEBUG
+ return;
+ } else {
+ if (!queued) cthread_queue_enq(&m->queue, p);
+ spin_lock(&p->lock);
+ spin_unlock(&m->lock);
+ cproc_block();
+ if (spin_try_lock(&m->held)) {
+#ifdef WAIT_DEBUG
+ p->waiting_for = (char *)0;
+#endif WAIT_DEBUG
+ return;
+ }
+#ifdef STATISTICS
+ spin_lock(&mutex_count_lock);
+ cthread_no_mutex++;
+ spin_unlock(&mutex_count_lock);
+#endif STATISTICS
+ }
+ }
+}
+
+void
+mutex_unlock_solid(m)
+ register mutex_t m;
+{
+ register cproc_t new;
+
+ if (!spin_try_lock(&m->held))
+ return;
+ spin_lock(&m->lock);
+ cthread_queue_deq(&m->queue, cproc_t, new);
+ spin_unlock(&m->held);
+ spin_unlock(&m->lock);
+ if (new) {
+ cproc_ready(new,0);
+ }
+}
+
+/*
+ * Use instead of mach_msg in a multi-threaded server so as not
+ * to tie up excessive kernel threads. This uses a simple linked list for
+ * ports since there should never be more than a few.
+ */
+
+/*
+ * A cthread holds a reference to a port_entry even after it receives a
+ * message. This reference is not released until the thread does a
+ * cthread_msg_busy. This allows the fast case of a single mach_msg
+ * call to occur as often as is possible.
+ */
+
+private port_entry_t get_port_entry(port, min, max)
+ mach_port_t port;
+{
+ register port_entry_t i;
+
+ spin_lock(&port_lock);
+ for(i=port_list;i!=PORT_ENTRY_NULL;i=i->next)
+ if (i->port == port) {
+ spin_unlock(&port_lock);
+ return i;
+ }
+ i = (port_entry_t)malloc(sizeof(struct port_entry));
+ cthread_queue_init(&i->queue);
+ i->port = port;
+ i->next = port_list;
+ port_list = i;
+ i->min = min;
+ i->max = max;
+ i->held = 0;
+ spin_lock_init(&i->lock);
+ spin_unlock(&port_lock);
+ return i;
+}
+
+cthread_msg_busy(port, min, max)
+ mach_port_t port;
+{
+ register port_entry_t port_entry;
+ register cproc_t new, p = cproc_self();
+
+ if (p->busy) {
+ port_entry = get_port_entry(port, min, max);
+ spin_lock(&port_entry->lock);
+ p->busy = 0;
+ if (port_entry->held <= port_entry->min) {
+ cthread_queue_deq(&port_entry->queue, cproc_t, new);
+ if (new != NO_CPROC){
+ spin_unlock(&port_entry->lock);
+ cproc_ready(new,0);
+ } else {
+ port_entry->held--;
+ spin_unlock(&port_entry->lock);
+#ifdef STATISTICS
+ spin_lock(&port_lock);
+ cthread_rnone++;
+ spin_unlock(&port_lock);
+#endif STATISTICS
+ }
+ } else {
+ port_entry->held--;
+ spin_unlock(&port_entry->lock);
+ }
+ }
+
+}
+
+cthread_msg_active(port, min, max)
+mach_port_t port;
+{
+ register cproc_t p = cproc_self();
+ register port_entry_t port_entry;
+
+ if (!p->busy) {
+ port_entry = get_port_entry(port, min, max);
+ if (port_entry == 0) return;
+ spin_lock(&port_entry->lock);
+ if (port_entry->held < port_entry->max) {
+ port_entry->held++;
+ p->busy = (int)port_entry;
+ }
+ spin_unlock(&port_entry->lock);
+ }
+}
+
+mach_msg_return_t
+cthread_mach_msg(header, option,
+ send_size, rcv_size, rcv_name,
+ timeout, notify, min, max)
+ register mach_msg_header_t *header;
+ register mach_msg_option_t option;
+ mach_msg_size_t send_size;
+ mach_msg_size_t rcv_size;
+ register mach_port_t rcv_name;
+ mach_msg_timeout_t timeout;
+ mach_port_t notify;
+ int min, max;
+{
+ register port_entry_t port_entry;
+ register cproc_t p = cproc_self();
+ register int sent=0;
+ mach_msg_return_t r;
+ port_entry_t op = (port_entry_t)p->busy;
+
+ port_entry = get_port_entry(rcv_name, min, max);
+
+ if (op && (port_entry_t)op != port_entry)
+ cthread_msg_busy(op->port, op->min, op->max);
+ spin_lock(&port_entry->lock);
+ if (!(port_entry == (port_entry_t)p->busy)) {
+ if (port_entry->held >= max) {
+ if (option & MACH_SEND_MSG) {
+ spin_unlock(&port_entry->lock);
+ r = mach_msg(header, option &~ MACH_RCV_MSG,
+ send_size, 0, MACH_PORT_NULL,
+ timeout, notify);
+ if (r != MACH_MSG_SUCCESS) return r;
+ spin_lock(&port_entry->lock);
+ sent=1;
+ }
+ if (port_entry->held >= max) {
+ spin_lock(&p->lock);
+ cthread_queue_preq(&port_entry->queue, p);
+ spin_unlock(&port_entry->lock);
+#ifdef WAIT_DEBUG
+ p->waiting_for = (char *)port_entry;
+#endif WAIT_DEBUG
+ cproc_block();
+ } else {
+ port_entry->held++;
+ spin_unlock(&port_entry->lock);
+ }
+ } else {
+ port_entry->held++;
+ spin_unlock(&port_entry->lock);
+ }
+ } else {
+ spin_unlock(&port_entry->lock);
+ }
+#ifdef WAIT_DEBUG
+ p->waiting_for = (char *)0;
+#endif WAIT_DEBUG
+ p->busy = (int)port_entry;
+ if ((option & MACH_SEND_MSG) && !sent) {
+ r = mach_msg(header, option,
+ send_size, rcv_size, rcv_name,
+ timeout, notify);
+ } else {
+ r = mach_msg(header, option &~ MACH_SEND_MSG,
+ 0, rcv_size, rcv_name,
+ timeout, notify);
+ }
+ return r;
+}
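+
+/*
+ * Illustrative sketch (hypothetical server; the names server_loop
+ * and server_port are not part of this library): a typical
+ * multi-threaded server receive loop built on cthread_mach_msg,
+ * asking that between 2 and 5 kernel threads serve the port.
+ */
+void
+server_loop(server_port)
+	mach_port_t server_port;
+{
+	struct {
+		mach_msg_header_t header;
+		char body[8192];
+	} msg;
+	mach_msg_return_t mr;
+
+	for (;;) {
+		mr = cthread_mach_msg(&msg.header, MACH_RCV_MSG,
+				      0, sizeof msg, server_port,
+				      MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL,
+				      2, 5);	/* min, max kernel threads */
+		if (mr != MACH_MSG_SUCCESS)
+			break;
+		/* ... dispatch the request and send any reply ... */
+	}
+}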
+
+cproc_fork_prepare()
+{
+ register cproc_t p = cproc_self();
+
+ vm_inherit(mach_task_self(),p->stack_base, p->stack_size, VM_INHERIT_COPY);
+ spin_lock(&port_lock);
+ spin_lock(&cproc_list_lock);
+}
+
+cproc_fork_parent()
+{
+ register cproc_t p = cproc_self();
+
+ spin_unlock(&cproc_list_lock);
+ spin_unlock(&port_lock);
+ vm_inherit(mach_task_self(),p->stack_base, p->stack_size, VM_INHERIT_NONE);
+}
+
+cproc_fork_child()
+{
+ register cproc_t l,p = cproc_self();
+ cproc_t m;
+ register port_entry_t pe;
+ port_entry_t pet;
+ kern_return_t r;
+
+
+ vm_inherit(mach_task_self(),p->stack_base, p->stack_size, VM_INHERIT_NONE);
+ spin_lock_init(&n_kern_lock);
+ cthread_kernel_threads=0;
+#ifdef STATISTICS
+ cthread_ready = 0;
+ cthread_running = 1;
+ cthread_waiting = 0;
+ cthread_wired = 0;
+ spin_lock_init(&wired_lock);
+ cthread_wait_stacks = 0;
+ cthread_waiters = 0;
+ cthread_wakeup = 0;
+ cthread_blocked = 0;
+ cthread_rnone = 0;
+ cthread_yields = 0;
+ cthread_none = 0;
+ cthread_switches = 0;
+ cthread_no_mutex = 0;
+ spin_lock_init(&mutex_count_lock);
+#endif STATISTICS
+
+	for (l = cproc_list; l != NO_CPROC; l = m) {
+		m = l->list;	/* master list is linked through 'list', not 'next' */
+		if (l != p)
+			free(l);
+	}
+
+	cproc_list = p;
+	p->list = NO_CPROC;
+ spin_lock_init(&cproc_list_lock);
+ cprocs_started = FALSE;
+ cthread_queue_init(&ready);
+ ready_count = 0;
+ spin_lock_init(&ready_lock);
+
+ MACH_CALL(mach_port_allocate(mach_task_self(),
+ MACH_PORT_RIGHT_RECEIVE,
+ &wait_port), r);
+ MACH_CALL(mach_port_insert_right(mach_task_self(),
+ wait_port, wait_port,
+ MACH_MSG_TYPE_MAKE_SEND), r);
+ wakeup_msg.msgh_remote_port = wait_port;
+ wait_count = 0;
+ cthread_queue_init(&waiters);
+ spin_lock_init(&waiters_lock);
+ for(pe=port_list;pe!=PORT_ENTRY_NULL;pe=pet) {
+ pet = pe->next;
+ free(pe);
+ }
+ port_list = PORT_ENTRY_NULL;
+ spin_lock_init(&port_lock);
+
+ if (p->wired) cthread_wire();
+}
diff --git a/libthreads/cthread_internals.h b/libthreads/cthread_internals.h
new file mode 100644
index 00000000..ed8ce445
--- /dev/null
+++ b/libthreads/cthread_internals.h
@@ -0,0 +1,198 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * HISTORY
+ * $Log: cthread_internals.h,v $
+ * Revision 2.14 92/08/03 18:03:56 jfriedl
+ * Made state element of struct cproc volatile.
+ * [92/08/02 jfriedl]
+ *
+ * Revision 2.13 92/03/06 14:09:24 rpd
+ * Added yield, defined using thread_switch.
+ * [92/03/06 rpd]
+ *
+ * Revision 2.12 92/03/01 00:40:23 rpd
+ * Removed exit declaration. It conflicted with the real thing.
+ * [92/02/29 rpd]
+ *
+ * Revision 2.11 91/08/28 11:19:23 jsb
+ * Fixed MACH_CALL to allow multi-line expressions.
+ * [91/08/23 rpd]
+ *
+ * Revision 2.10 91/07/31 18:33:33 dbg
+ * Protect against redefinition of ASSERT.
+ * [91/07/30 17:33:21 dbg]
+ *
+ * Revision 2.9 91/05/14 17:56:24 mrt
+ * Correcting copyright
+ *
+ * Revision 2.8 91/02/14 14:19:42 mrt
+ * Added new Mach copyright
+ * [91/02/13 12:41:02 mrt]
+ *
+ * Revision 2.7 90/11/05 14:36:55 rpd
+ * Added spin_lock_t.
+ * [90/10/31 rwd]
+ *
+ * Revision 2.6 90/09/09 14:34:51 rpd
+ * Remove special field.
+ * [90/08/24 rwd]
+ *
+ * Revision 2.5 90/06/02 15:13:44 rpd
+ * Converted to new IPC.
+ * [90/03/20 20:52:47 rpd]
+ *
+ * Revision 2.4 90/03/14 21:12:11 rwd
+ * Added waiting_for field for debugging deadlocks.
+ * [90/03/01 rwd]
+ * Added list field to keep a master list of all cprocs.
+ * [90/03/01 rwd]
+ *
+ * Revision 2.3 90/01/19 14:37:08 rwd
+ * Keep track of real thread for use in thread_* substitutes.
+ * Add CPROC_ARUN for about to run and CPROC_HOLD to avoid holding
+ * spin_locks over system calls.
+ * [90/01/03 rwd]
+ * Add busy field to be used by cthread_msg calls to make sure we
+ * have the right number of blocked kernel threads.
+ * [89/12/21 rwd]
+ *
+ * Revision 2.2 89/12/08 19:53:28 rwd
+ * Added CPROC_CONDWAIT state
+ * [89/11/28 rwd]
+ * Added on_special field.
+ * [89/11/26 rwd]
+ * Removed MSGOPT conditionals
+ * [89/11/25 rwd]
+ * Removed old debugging code. Add wired port/flag. Add state
+ * for small state machine.
+ * [89/10/30 rwd]
+ * Added CPDEBUG code
+ * [89/10/26 rwd]
+ * Change TRACE to {x;} else.
+ * [89/10/24 rwd]
+ * Rewrote to work for limited number of kernel threads. This is
+ * basically a merge of coroutine and thread. Added
+ * cthread_receive call for use by servers.
+ * [89/10/23 rwd]
+ *
+ */
+/*
+ * cthread_internals.h
+ *
+ *
+ * Private definitions for the C Threads implementation.
+ *
+ * The cproc structure is used for different implementations
+ * of the basic schedulable units that execute cthreads.
+ *
+ */
+
+
+#include "options.h"
+#include <mach/port.h>
+#include <mach/message.h>
+#include <mach/thread_switch.h>
+
+#if !defined(__STDC__) && !defined(volatile)
+# ifdef __GNUC__
+# define volatile __volatile__
+# else
+# define volatile /* you lose */
+# endif
+#endif
+
+/*
+ * Low-level thread implementation.
+ * This structure must agree with struct ur_cthread in cthreads.h
+ */
+typedef struct cproc {
+ struct cproc *next; /* for lock, condition, and ready queues */
+ cthread_t incarnation; /* for cthread_self() */
+
+ struct cproc *list; /* for master cproc list */
+#ifdef WAIT_DEBUG
+ volatile char *waiting_for; /* address of mutex/cond waiting for */
+#endif WAIT_DEBUG
+
+ mach_port_t reply_port; /* for mig_get_reply_port() */
+
+ int context;
+ spin_lock_t lock;
+ volatile int state; /* current state */
+#define CPROC_RUNNING 0
+#define CPROC_SWITCHING 1
+#define CPROC_BLOCKED 2
+#define CPROC_CONDWAIT 4
+
+ mach_port_t wired; /* is cthread wired to kernel thread */
+ int busy; /* used with cthread_msg calls */
+
+ mach_msg_header_t msg;
+
+ unsigned int stack_base;
+ unsigned int stack_size;
+} *cproc_t;
+
+#define NO_CPROC ((cproc_t) 0)
+#define cproc_self() ((cproc_t) ur_cthread_self())
+
+/*
+ * C Threads imports:
+ */
+extern char *malloc();
+
+/*
+ * Mach imports:
+ */
+extern void mach_error();
+
+/*
+ * Macro for MACH kernel calls.
+ */
+#ifdef CHECK_STATUS
+#define MACH_CALL(expr, ret) \
+ if (((ret) = (expr)) != KERN_SUCCESS) { \
+ quit(1, "error in %s at %d: %s\n", __FILE__, __LINE__, \
+ mach_error_string(ret)); \
+ } else
+#else CHECK_STATUS
+#define MACH_CALL(expr, ret) (ret) = (expr)
+#endif CHECK_STATUS
+
+#define private static
+#ifndef ASSERT
+#define ASSERT(x)
+#endif
+#define TRACE(x)
+
+/*
+ * What we do to yield the processor:
+ * (This depresses the thread's priority for up to 10ms.)
+ */
+
+#define yield() \
+ (void) thread_switch(MACH_PORT_NULL, SWITCH_OPTION_DEPRESS, 10)
diff --git a/libthreads/cthreads.c b/libthreads/cthreads.c
new file mode 100644
index 00000000..1964c335
--- /dev/null
+++ b/libthreads/cthreads.c
@@ -0,0 +1,451 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * HISTORY
+ * $Log: cthreads.c,v $
+ * Revision 2.11 92/07/20 13:33:37 cmaeda
+ * In cthread_init, do machine dependent initialization if it's defined.
+ * [92/05/11 14:41:08 cmaeda]
+ *
+ * Revision 2.10 91/08/28 11:19:26 jsb
+ * Fixed mig_init initialization in cthread_fork_child.
+ * [91/08/23 rpd]
+ *
+ * Revision 2.9 91/07/31 18:34:23 dbg
+ * Fix bad self-pointer reference.
+ *
+ * Don't declare _setjmp and _longjmp; they are included by
+ * cthreads.h.
+ * [91/07/30 17:33:50 dbg]
+ *
+ * Revision 2.8 91/05/14 17:56:31 mrt
+ * Correcting copyright
+ *
+ * Revision 2.7 91/02/14 14:19:47 mrt
+ * Added new Mach copyright
+ * [91/02/13 12:41:07 mrt]
+ *
+ * Revision 2.6 90/11/05 14:37:03 rpd
+ * Added cthread_fork_{prepare,parent,child}.
+ * [90/11/02 rwd]
+ *
+ * Add spin_lock_t.
+ * [90/10/31 rwd]
+ *
+ * Revision 2.5 90/08/07 14:30:58 rpd
+ * Removed RCS keyword nonsense.
+ *
+ * Revision 2.4 90/06/02 15:13:49 rpd
+ * Converted to new IPC.
+ * [90/03/20 20:56:44 rpd]
+ *
+ * Revision 2.3 90/01/19 14:37:12 rwd
+ * Make cthread_init return pointer to new stack.
+ * [89/12/18 19:17:45 rwd]
+ *
+ * Revision 2.2 89/12/08 19:53:37 rwd
+ * Change cproc and cthread counters to globals with better names.
+ * [89/11/02 rwd]
+ *
+ * Revision 2.1 89/08/03 17:09:34 rwd
+ * Created.
+ *
+ *
+ * 31-Dec-87 Eric Cooper (ecc) at Carnegie Mellon University
+ * Changed cthread_exit() logic for the case of the main thread,
+ * to fix thread and stack memory leak found by Camelot group.
+ *
+ * 21-Aug-87 Eric Cooper (ecc) at Carnegie Mellon University
+ * Added consistency check in beginning of cthread_body().
+ *
+ * 11-Aug-87 Eric Cooper (ecc) at Carnegie Mellon University
+ * Removed cthread_port() and cthread_set_port().
+ * Removed port deallocation from cthread_free().
+ * Minor changes to cthread_body(), cthread_exit(), and cthread_done().
+ *
+ * 10-Aug-87 Eric Cooper (ecc) at Carnegie Mellon University
+ * Changed call to mig_init() in cthread_init() to pass 1 as argument.
+ *
+ * 31-Jul-87 Eric Cooper (ecc) at Carnegie Mellon University
+ * Added call to mig_init() from cthread_init().
+ */
+/*
+ * File: cthreads.c
+ * Author: Eric Cooper, Carnegie Mellon University
+ * Date: July, 1987
+ *
+ * Implementation of fork, join, exit, etc.
+ */
+
+#include <cthreads.h>
+#include "cthread_internals.h"
+
+/*
+ * C Threads imports:
+ */
+extern void cproc_create();
+extern vm_offset_t cproc_init();
+extern void mig_init();
+
+/*
+ * Mach imports:
+ */
+
+/*
+ * C library imports:
+ */
+
+/*
+ * Thread status bits.
+ */
+#define T_MAIN 0x1
+#define T_RETURNED 0x2
+#define T_DETACHED 0x4
+
+#ifdef DEBUG
+int cthread_debug = FALSE;
+#endif DEBUG
+
+private struct cthread_queue cthreads = QUEUE_INITIALIZER;
+private struct mutex cthread_lock = MUTEX_INITIALIZER;
+private struct condition cthread_needed = CONDITION_INITIALIZER;
+private struct condition cthread_idle = CONDITION_INITIALIZER;
+int cthread_cprocs = 0;
+int cthread_cthreads = 0;
+int cthread_max_cprocs = 0;
+
+private cthread_t free_cthreads = NO_CTHREAD; /* free list */
+private spin_lock_t free_lock = SPIN_LOCK_INITIALIZER; /* unlocked */
+
+private struct cthread initial_cthread = { 0 };
+
+private cthread_t
+cthread_alloc(func, arg)
+ cthread_fn_t func;
+ any_t arg;
+{
+ register cthread_t t = NO_CTHREAD;
+
+ if (free_cthreads != NO_CTHREAD) {
+ /*
+ * Don't try for the lock unless
+ * the list is likely to be nonempty.
+ * We can't be sure, though, until we lock it.
+ */
+ spin_lock(&free_lock);
+ t = free_cthreads;
+ if (t != NO_CTHREAD)
+ free_cthreads = t->next;
+ spin_unlock(&free_lock);
+ }
+ if (t == NO_CTHREAD) {
+ /*
+ * The free list was empty.
+ * We may have only found this out after
+ * locking it, which is why this isn't an
+ * "else" branch of the previous statement.
+ */
+ t = (cthread_t) malloc(sizeof(struct cthread));
+ }
+ *t = initial_cthread;
+ t->func = func;
+ t->arg = arg;
+ return t;
+}
+
+private void
+cthread_free(t)
+ register cthread_t t;
+{
+ spin_lock(&free_lock);
+ t->next = free_cthreads;
+ free_cthreads = t;
+ spin_unlock(&free_lock);
+}
+
+int
+cthread_init()
+{
+ static int cthreads_started = FALSE;
+ register cproc_t p;
+ register cthread_t t;
+ vm_offset_t stack;
+
+ if (cthreads_started)
+ return 0;
+ stack = cproc_init();
+ cthread_cprocs = 1;
+ t = cthread_alloc((cthread_fn_t) 0, (any_t) 0);
+
+#ifdef cthread_md_init
+ cthread_md_init();
+#endif
+
+ cthread_cthreads = 1;
+ t->state |= T_MAIN;
+ cthread_set_name(t, "main");
+
+ /* cproc_self() doesn't work yet, because
+ we haven't yet switched to the new stack. */
+
+ p = *(cproc_t *)&ur_cthread_ptr(stack);
+ p->incarnation = t;
+ mig_init(p); /* enable multi-threaded mig interfaces */
+
+ cthreads_started = TRUE;
+ return stack;
+}
+
+/*
+ * Used for automatic initialization by crt0.
+ * Cast needed since too many C compilers choke on the type void (*)().
+ */
+int (*_cthread_init_routine)() = (int (*)()) cthread_init;
+
+/*
+ * Procedure invoked at the base of each cthread.
+ */
+void
+cthread_body(self)
+ cproc_t self;
+{
+ register cthread_t t;
+
+ ASSERT(cproc_self() == self);
+ TRACE(printf("[idle] cthread_body(%x)\n", self));
+ mutex_lock(&cthread_lock);
+ for (;;) {
+ /*
+ * Dequeue a thread invocation request.
+ */
+ cthread_queue_deq(&cthreads, cthread_t, t);
+ if (t != NO_CTHREAD) {
+ /*
+ * We have a thread to execute.
+ */
+ mutex_unlock(&cthread_lock);
+ cthread_assoc(self, t); /* assume thread's identity */
+ if (_setjmp(t->catch) == 0) { /* catch for cthread_exit() */
+ /*
+ * Execute the fork request.
+ */
+ t->result = (*(t->func))(t->arg);
+ }
+ /*
+ * Return result from thread.
+ */
+ TRACE(printf("[%s] done()\n", cthread_name(t)));
+ mutex_lock(&t->lock);
+ if (t->state & T_DETACHED) {
+ mutex_unlock(&t->lock);
+ cthread_free(t);
+ } else {
+ t->state |= T_RETURNED;
+ mutex_unlock(&t->lock);
+ condition_signal(&t->done);
+ }
+ cthread_assoc(self, NO_CTHREAD);
+ mutex_lock(&cthread_lock);
+ cthread_cthreads -= 1;
+ } else {
+ /*
+ * Queue is empty.
+ * Signal that we're idle in case the main thread
+ * is waiting to exit, then wait for reincarnation.
+ */
+ condition_signal(&cthread_idle);
+ condition_wait(&cthread_needed, &cthread_lock);
+ }
+ }
+}
+
+cthread_t
+cthread_fork(func, arg)
+ cthread_fn_t func;
+ any_t arg;
+{
+ register cthread_t t;
+
+ TRACE(printf("[%s] fork()\n", cthread_name(cthread_self())));
+ mutex_lock(&cthread_lock);
+ t = cthread_alloc(func, arg);
+ cthread_queue_enq(&cthreads, t);
+ if (++cthread_cthreads > cthread_cprocs && (cthread_max_cprocs == 0 || cthread_cprocs < cthread_max_cprocs)) {
+ cthread_cprocs += 1;
+ cproc_create();
+ }
+ mutex_unlock(&cthread_lock);
+ condition_signal(&cthread_needed);
+ return t;
+}
+
+void
+cthread_detach(t)
+ cthread_t t;
+{
+ TRACE(printf("[%s] detach(%s)\n", cthread_name(cthread_self()), cthread_name(t)));
+ mutex_lock(&t->lock);
+ if (t->state & T_RETURNED) {
+ mutex_unlock(&t->lock);
+ cthread_free(t);
+ } else {
+ t->state |= T_DETACHED;
+ mutex_unlock(&t->lock);
+ }
+}
+
+any_t
+cthread_join(t)
+ cthread_t t;
+{
+ any_t result;
+
+ TRACE(printf("[%s] join(%s)\n", cthread_name(cthread_self()), cthread_name(t)));
+ mutex_lock(&t->lock);
+ ASSERT(! (t->state & T_DETACHED));
+ while (! (t->state & T_RETURNED))
+ condition_wait(&t->done, &t->lock);
+ result = t->result;
+ mutex_unlock(&t->lock);
+ cthread_free(t);
+ return result;
+}
+
+void
+cthread_exit(result)
+ any_t result;
+{
+ register cthread_t t = cthread_self();
+
+ TRACE(printf("[%s] exit()\n", cthread_name(t)));
+ t->result = result;
+ if (t->state & T_MAIN) {
+ mutex_lock(&cthread_lock);
+ while (cthread_cthreads > 1)
+ condition_wait(&cthread_idle, &cthread_lock);
+ mutex_unlock(&cthread_lock);
+ exit((int) result);
+ } else {
+ _longjmp(t->catch, TRUE);
+ }
+}
+
+/*
+ * Used for automatic finalization by crt0. Cast needed since too many C
+ * compilers choke on the type void (*)().
+ */
+int (*_cthread_exit_routine)() = (int (*)()) cthread_exit;
+
+void
+cthread_set_name(t, name)
+ cthread_t t;
+ char *name;
+{
+ t->name = name;
+}
+
+char *
+cthread_name(t)
+ cthread_t t;
+{
+ return (t == NO_CTHREAD
+ ? "idle"
+ : (t->name == 0 ? "?" : t->name));
+}
+
+int
+cthread_limit()
+{
+ return cthread_max_cprocs;
+}
+
+void
+cthread_set_limit(n)
+ int n;
+{
+ cthread_max_cprocs = n;
+}
+
+int
+cthread_count()
+{
+ return cthread_cthreads;
+}
+
+cthread_fork_prepare()
+{
+ spin_lock(&free_lock);
+ mutex_lock(&cthread_lock);
+ malloc_fork_prepare();
+ cproc_fork_prepare();
+}
+
+cthread_fork_parent()
+{
+ cproc_fork_parent();
+ malloc_fork_parent();
+ mutex_unlock(&cthread_lock);
+ spin_unlock(&free_lock);
+}
+
+cthread_fork_child()
+{
+ cthread_t t;
+ cproc_t p;
+
+ cproc_fork_child();
+ malloc_fork_child();
+ mutex_unlock(&cthread_lock);
+ spin_unlock(&free_lock);
+ condition_init(&cthread_needed);
+ condition_init(&cthread_idle);
+
+ cthread_max_cprocs = 0;
+
+ stack_fork_child();
+
+ while (TRUE) { /* Free cthread runnable list */
+ cthread_queue_deq(&cthreads, cthread_t, t);
+ if (t == NO_CTHREAD) break;
+ free((char *) t);
+ }
+
+ while (free_cthreads != NO_CTHREAD) { /* Free cthread free list */
+ t = free_cthreads;
+ free_cthreads = free_cthreads->next;
+ free((char *) t);
+ }
+
+ cthread_cprocs = 1;
+ t = cthread_self();
+ cthread_cthreads = 1;
+ t->state |= T_MAIN;
+ cthread_set_name(t, "main");
+
+ p = cproc_self();
+ p->incarnation = t;
+ mig_init(p); /* enable multi-threaded mig interfaces */
+}
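+
+/*
+ * Illustrative sketch (hypothetical function): how a program that
+ * forks would use the three handlers above around fork().
+ */
+int
+fork_with_cthreads()
+{
+	int pid;
+
+	cthread_fork_prepare();		/* take package locks, mark stack */
+	pid = fork();
+	if (pid == 0)
+		cthread_fork_child();	/* rebuild threads state in child */
+	else
+		cthread_fork_parent();	/* just release the locks */
+	return pid;
+}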
diff --git a/libthreads/cthreads.h b/libthreads/cthreads.h
new file mode 100644
index 00000000..d5fbd401
--- /dev/null
+++ b/libthreads/cthreads.h
@@ -0,0 +1,556 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * HISTORY
+ * $Log: cthreads.h,v $
+ * Revision 2.12 92/05/22 18:38:36 jfriedl
+ * From Mike Kupfer <kupfer@sprite.Berkeley.EDU>:
+ * Add declaration for cthread_wire().
+ * Merge in Jonathan Chew's changes for thread-local data.
+ * Use MACRO_BEGIN and MACRO_END.
+ *
+ * Revision 1.8 91/03/25 14:14:49 jjc
+ * For compatibility with cthread_data:
+ * 1) Added private_data field to cthread structure
+ * for use by POSIX thread specific data routines.
+ * 2) Conditionalized old data field used by cthread_data
+ * under CTHREAD_DATA for binary compatibility.
+ * 3) Changed macros, cthread_set_data and cthread_data,
+ * into routines which use the POSIX routines for
+ * source compatibility.
+ * Also, conditionalized under CTHREAD_DATA.
+ * [91/03/18 jjc]
+ * Added support for multiplexing the thread specific global
+ * variable, cthread_data, using the POSIX threads interface
+ * for thread private data.
+ * [91/03/14 jjc]
+ *
+ * Revision 2.11 91/08/03 18:20:15 jsb
+ * Removed the infamous line 122.
+ * [91/08/01 22:40:24 jsb]
+ *
+ * Revision 2.10 91/07/31 18:35:42 dbg
+ * Fix the standard-C conditional: it's __STDC__.
+ *
+ * Allow for macro-redefinition of cthread_sp, spin_try_lock,
+ * spin_unlock (from machine/cthreads.h).
+ * [91/07/30 17:34:28 dbg]
+ *
+ * Revision 2.9 91/05/14 17:56:42 mrt
+ * Correcting copyright
+ *
+ * Revision 2.8 91/02/14 14:19:52 mrt
+ * Added new Mach copyright
+ * [91/02/13 12:41:15 mrt]
+ *
+ * Revision 2.7 90/11/05 14:37:12 rpd
+ * Include machine/cthreads.h. Added spin_lock_t.
+ * [90/10/31 rwd]
+ *
+ * Revision 2.6 90/10/12 13:07:24 rpd
+ * Change to allow for positive stack growth.
+ * [90/10/10 rwd]
+ *
+ * Revision 2.5 90/09/09 14:34:56 rpd
+ * Remove mutex_special and debug_mutex.
+ * [90/08/24 rwd]
+ *
+ * Revision 2.4 90/08/07 14:31:14 rpd
+ * Removed RCS keyword nonsense.
+ *
+ * Revision 2.3 90/01/19 14:37:18 rwd
+ * Add back pointer to cthread structure.
+ * [90/01/03 rwd]
+ * Change definition of cthread_init and change ur_cthread_self macro
+ * to reflect movement of self pointer on stack.
+ * [89/12/18 19:18:34 rwd]
+ *
+ * Revision 2.2 89/12/08 19:53:49 rwd
+ * Change spin_try_lock to int.
+ * [89/11/30 rwd]
+ * Changed mutex macros to deal with special mutexs
+ * [89/11/26 rwd]
+ * Make mutex_{set,clear}_special routines instead of macros.
+ * [89/11/25 rwd]
+ * Added mutex_special to specify a need to context switch on this
+ * mutex.
+ * [89/11/21 rwd]
+ *
+ * Made mutex_lock a macro trying to grab the spin_lock first.
+ * [89/11/13 rwd]
+ * Removed conditionals. Mutexes are more like conditions now.
+ * Changed for limited kernel thread version.
+ * [89/10/23 rwd]
+ *
+ * Revision 2.1 89/08/03 17:09:40 rwd
+ * Created.
+ *
+ *
+ * 28-Oct-88 Eric Cooper (ecc) at Carnegie Mellon University
+ * Implemented spin_lock() as test and test-and-set logic
+ * (using mutex_try_lock()) in sync.c. Changed ((char *) 0)
+ * to 0, at Mike Jones's suggestion, and turned on ANSI-style
+ * declarations in either C++ or _STDC_.
+ *
+ * 29-Sep-88 Eric Cooper (ecc) at Carnegie Mellon University
+ * Changed NULL to ((char *) 0) to avoid dependency on <stdio.h>,
+ * at Alessandro Forin's suggestion.
+ *
+ * 08-Sep-88 Alessandro Forin (af) at Carnegie Mellon University
+ * Changed queue_t to cthread_queue_t and string_t to char *
+ * to avoid conflicts.
+ *
+ * 01-Apr-88 Eric Cooper (ecc) at Carnegie Mellon University
+ * Changed compound statement macros to use the
+ * do { ... } while (0) trick, so that they work
+ * in all statement contexts.
+ *
+ * 19-Feb-88 Eric Cooper (ecc) at Carnegie Mellon University
+ * Made spin_unlock() and mutex_unlock() into procedure calls
+ * rather than macros, so that even smart compilers can't reorder
+ * the clearing of the lock. Suggested by Jeff Eppinger.
+ * Removed the now empty <machine>/cthreads.h.
+ *
+ * 01-Dec-87 Eric Cooper (ecc) at Carnegie Mellon University
+ * Changed cthread_self() to mask the current SP to find
+ * the self pointer stored at the base of the stack.
+ *
+ * 22-Jul-87 Eric Cooper (ecc) at Carnegie Mellon University
+ * Fixed bugs in mutex_set_name and condition_set_name
+ * due to bad choice of macro formal parameter name.
+ *
+ * 21-Jul-87 Eric Cooper (ecc) at Carnegie Mellon University
+ * Moved #include <machine/cthreads.h> to avoid referring
+ * to types before they are declared (required by C++).
+ *
+ * 9-Jul-87 Michael Jones (mbj) at Carnegie Mellon University
+ * Added conditional type declarations for C++.
+ * Added _cthread_init_routine and _cthread_exit_routine variables
+ * for automatic initialization and finalization by crt0.
+ */
+/*
+ * File: cthreads.h
+ * Author: Eric Cooper, Carnegie Mellon University
+ * Date: Jul, 1987
+ *
+ * Definitions for the C Threads package.
+ *
+ */
+
+
+#ifndef _CTHREADS_
+#define _CTHREADS_ 1
+
+#include <machine/cthreads.h>
+
+#if c_plusplus || __STDC__
+
+#ifndef C_ARG_DECLS
+#define C_ARG_DECLS(arglist) arglist
+#endif /* not C_ARG_DECLS */
+
+typedef void *any_t;
+
+#else /* not (c_plusplus || __STDC__) */
+
+#ifndef C_ARG_DECLS
+#define C_ARG_DECLS(arglist) ()
+#endif /* not C_ARG_DECLS */
+
+typedef char *any_t;
+
+#endif /* not (c_plusplus || __STDC__) */
+
+#include <mach/mach.h>
+#include <mach/machine/vm_param.h>
+
+#ifndef TRUE
+#define TRUE 1
+#define FALSE 0
+#endif /* TRUE */
+
+#ifndef MACRO_BEGIN
+
+#ifdef lint
+int NEVER;
+#else lint
+#define NEVER 0
+#endif lint
+
+#define MACRO_BEGIN do {
+#define MACRO_END } while (NEVER)
+
+#endif MACRO_BEGIN
+
+/*
+ * C Threads package initialization.
+ */
+
+extern int cthread_init();
+extern any_t calloc C_ARG_DECLS((unsigned n, unsigned size));
+
+/*
+ * Queues.
+ */
+typedef struct cthread_queue {
+ struct cthread_queue_item *head;
+ struct cthread_queue_item *tail;
+} *cthread_queue_t;
+
+typedef struct cthread_queue_item {
+ struct cthread_queue_item *next;
+} *cthread_queue_item_t;
+
+#define NO_QUEUE_ITEM ((cthread_queue_item_t) 0)
+
+#define QUEUE_INITIALIZER { NO_QUEUE_ITEM, NO_QUEUE_ITEM }
+
+#define cthread_queue_alloc() ((cthread_queue_t) calloc(1, sizeof(struct cthread_queue)))
+#define cthread_queue_init(q) ((q)->head = (q)->tail = 0)
+#define cthread_queue_free(q) free((any_t) (q))
+
+#define cthread_queue_enq(q, x) \
+ MACRO_BEGIN \
+ (x)->next = 0; \
+ if ((q)->tail == 0) \
+ (q)->head = (cthread_queue_item_t) (x); \
+ else \
+ (q)->tail->next = (cthread_queue_item_t) (x); \
+ (q)->tail = (cthread_queue_item_t) (x); \
+ MACRO_END
+
+#define cthread_queue_preq(q, x) \
+ MACRO_BEGIN \
+ if ((q)->tail == 0) \
+ (q)->tail = (cthread_queue_item_t) (x); \
+ ((cthread_queue_item_t) (x))->next = (q)->head; \
+ (q)->head = (cthread_queue_item_t) (x); \
+ MACRO_END
+
+#define cthread_queue_head(q, t) ((t) ((q)->head))
+
+#define cthread_queue_deq(q, t, x) \
+ MACRO_BEGIN \
+ if (((x) = (t) ((q)->head)) != 0 && \
+ ((q)->head = (cthread_queue_item_t) ((x)->next)) == 0) \
+ (q)->tail = 0; \
+ MACRO_END
+
+#define cthread_queue_map(q, t, f) \
+ MACRO_BEGIN \
+ register cthread_queue_item_t x, next; \
+ for (x = (cthread_queue_item_t) ((q)->head); x != 0; x = next) { \
+ next = x->next; \
+ (*(f))((t) x); \
+ } \
+ MACRO_END
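+
+/*
+ * Illustrative sketch (disabled; struct request is hypothetical):
+ * any structure whose first member is the link pointer can live on
+ * a cthread_queue.
+ */
+#if 0
+struct request {
+	struct request *next;		/* queue link, must come first */
+	int opcode;
+};
+
+static struct cthread_queue pending = QUEUE_INITIALIZER;
+
+static void
+queue_example(r)
+	struct request *r;
+{
+	struct request *head;
+
+	cthread_queue_enq(&pending, r);		/* append at tail */
+	cthread_queue_deq(&pending, struct request *, head);
+						/* head = front, or 0 if empty */
+}
+#endif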
+
+/*
+ * Spin locks.
+ */
+extern void
+spin_lock_solid C_ARG_DECLS((spin_lock_t *p));
+
+#ifndef spin_unlock
+extern void
+spin_unlock C_ARG_DECLS((spin_lock_t *p));
+#endif
+
+#ifndef spin_try_lock
+extern int
+spin_try_lock C_ARG_DECLS((spin_lock_t *p));
+#endif
+
+#define spin_lock(p) if (!spin_try_lock(p)) spin_lock_solid(p); else
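+
+/*
+ * Illustrative sketch (disabled; names are hypothetical): a spin
+ * lock guarding a shared counter.  Hold spin locks only across
+ * short, non-blocking regions.
+ */
+#if 0
+static spin_lock_t count_lock = SPIN_LOCK_INITIALIZER;
+static int count = 0;
+
+static void
+bump()
+{
+	spin_lock(&count_lock);		/* falls back to spin_lock_solid */
+	count++;
+	spin_unlock(&count_lock);
+}
+#endif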
+
+/*
+ * Mutex objects.
+ */
+typedef struct mutex {
+ spin_lock_t lock;
+ char *name;
+ struct cthread_queue queue;
+ spin_lock_t held;
+} *mutex_t;
+
+#define MUTEX_INITIALIZER { SPIN_LOCK_INITIALIZER, 0, QUEUE_INITIALIZER, SPIN_LOCK_INITIALIZER}
+
+#define mutex_alloc() ((mutex_t) calloc(1, sizeof(struct mutex)))
+#define mutex_init(m) \
+ MACRO_BEGIN \
+ spin_lock_init(&(m)->lock); \
+ cthread_queue_init(&(m)->queue); \
+ spin_lock_init(&(m)->held); \
+ MACRO_END
+#define mutex_set_name(m, x) ((m)->name = (x))
+#define mutex_name(m) ((m)->name != 0 ? (m)->name : "?")
+#define mutex_clear(m) /* nop */???
+#define mutex_free(m) free((any_t) (m))
+
+extern void
+mutex_lock_solid C_ARG_DECLS((mutex_t m)); /* blocking */
+
+extern void
+mutex_unlock_solid C_ARG_DECLS((mutex_t m));
+
+#define mutex_try_lock(m) spin_try_lock(&(m)->held)
+#define mutex_lock(m) \
+ MACRO_BEGIN \
+ if (!spin_try_lock(&(m)->held)) { \
+ mutex_lock_solid(m); \
+ } \
+ MACRO_END
+#define mutex_unlock(m) \
+ MACRO_BEGIN \
+ if (spin_unlock(&(m)->held), \
+ cthread_queue_head(&(m)->queue, int) != 0) { \
+ mutex_unlock_solid(m); \
+ } \
+ MACRO_END
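+
+/*
+ * Illustrative sketch (disabled; names are hypothetical): the fast
+ * path of mutex_lock is one spin_try_lock on m->held;
+ * mutex_lock_solid runs only under contention.
+ */
+#if 0
+static struct mutex balance_lock = MUTEX_INITIALIZER;
+static int balance = 0;
+
+static void
+deposit(amount)
+	int amount;
+{
+	mutex_lock(&balance_lock);	/* blocks the cproc if contended */
+	balance += amount;
+	mutex_unlock(&balance_lock);
+}
+#endif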
+
+/*
+ * Condition variables.
+ */
+typedef struct condition {
+ spin_lock_t lock;
+ struct cthread_queue queue;
+ char *name;
+} *condition_t;
+
+#define CONDITION_INITIALIZER { SPIN_LOCK_INITIALIZER, QUEUE_INITIALIZER, 0 }
+
+#define condition_alloc() ((condition_t) calloc(1, sizeof(struct condition)))
+#define condition_init(c) \
+ MACRO_BEGIN \
+ spin_lock_init(&(c)->lock); \
+ cthread_queue_init(&(c)->queue); \
+ MACRO_END
+#define condition_set_name(c, x) ((c)->name = (x))
+#define condition_name(c) ((c)->name != 0 ? (c)->name : "?")
+#define condition_clear(c) \
+ MACRO_BEGIN \
+ condition_broadcast(c); \
+ spin_lock(&(c)->lock); \
+ MACRO_END
+#define condition_free(c) \
+ MACRO_BEGIN \
+ condition_clear(c); \
+ free((any_t) (c)); \
+ MACRO_END
+
+#define condition_signal(c) \
+ MACRO_BEGIN \
+ if ((c)->queue.head) { \
+ cond_signal(c); \
+ } \
+ MACRO_END
+
+#define condition_broadcast(c) \
+ MACRO_BEGIN \
+ if ((c)->queue.head) { \
+ cond_broadcast(c); \
+ } \
+ MACRO_END
+
+extern void
+cond_signal C_ARG_DECLS((condition_t c));
+
+extern void
+cond_broadcast C_ARG_DECLS((condition_t c));
+
+extern void
+condition_wait C_ARG_DECLS((condition_t c, mutex_t m));
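+
+/*
+ * Illustrative sketch (not part of the original header): the usual
+ * condition-variable idiom.  The predicate is re-tested in a loop,
+ * since another thread may run between the wakeup and the re-lock.
+ */
+#if 0
+static struct mutex lock = MUTEX_INITIALIZER;
+static struct condition nonempty = CONDITION_INITIALIZER;
+static int count;
+
+void
+consume()
+{
+	mutex_lock(&lock);
+	while (count == 0)
+		condition_wait(&nonempty, &lock);
+	count -= 1;
+	mutex_unlock(&lock);
+}
+
+void
+produce()
+{
+	mutex_lock(&lock);
+	count += 1;
+	condition_signal(&nonempty);
+	mutex_unlock(&lock);
+}
+#endif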
+
+/*
+ * Threads.
+ */
+
+typedef any_t (*cthread_fn_t) C_ARG_DECLS((any_t arg));
+
+#include <setjmp.h>
+
+typedef struct cthread {
+ struct cthread *next;
+ struct mutex lock;
+ struct condition done;
+ int state;
+ jmp_buf catch;
+ cthread_fn_t func;
+ any_t arg;
+ any_t result;
+ char *name;
+#ifdef CTHREAD_DATA
+ any_t data;
+#endif /* CTHREAD_DATA */
+ any_t private_data;
+ struct ur_cthread *ur;
+} *cthread_t;
+
+#define NO_CTHREAD ((cthread_t) 0)
+
+extern cthread_t
+cthread_fork C_ARG_DECLS((cthread_fn_t func, any_t arg));
+
+extern void
+cthread_detach C_ARG_DECLS((cthread_t t));
+
+extern any_t
+cthread_join C_ARG_DECLS((cthread_t t));
+
+extern void
+cthread_yield();
+
+extern void
+cthread_exit C_ARG_DECLS((any_t result));
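+
+/*
+ * Illustrative sketch (not part of the original header): forking a
+ * thread and joining its result with the routines above.
+ */
+#if 0
+any_t
+worker(arg)
+	any_t arg;
+{
+	return (any_t) (2 * (int) arg);
+}
+
+int
+run_worker()
+{
+	cthread_t t = cthread_fork(worker, (any_t) 21);
+
+	return (int) cthread_join(t);	/* 42 */
+}
+#endif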
+
+/*
+ * This structure must agree with struct cproc in cthread_internals.h
+ */
+typedef struct ur_cthread {
+ struct ur_cthread *next;
+ cthread_t incarnation;
+} *ur_cthread_t;
+
+#ifndef cthread_sp
+extern int
+cthread_sp();
+#endif
+
+extern int cthread_stack_mask;
+
+#ifdef STACK_GROWTH_UP
+#define ur_cthread_ptr(sp) \
+ (* (ur_cthread_t *) ((sp) & cthread_stack_mask))
+#else /* STACK_GROWTH_UP */
+#define ur_cthread_ptr(sp) \
+ (* (ur_cthread_t *) ( ((sp) | cthread_stack_mask) + 1 \
+ - sizeof(ur_cthread_t *)) )
+#endif /* STACK_GROWTH_UP */
+
+#define ur_cthread_self() (ur_cthread_ptr(cthread_sp()))
+
+#define cthread_assoc(id, t) ((((ur_cthread_t) (id))->incarnation = (t)), \
+ ((t) ? ((t)->ur = (ur_cthread_t)(id)) : 0))
+#define cthread_self() (ur_cthread_self()->incarnation)
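+
+/*
+ * Worked example (not part of the original header): with downward
+ * growth and a 64Kb stack, cthread_stack_mask is 0xffff.  For
+ * sp == 0x2345678 on a 32-bit machine,
+ * ((sp | 0xffff) + 1) - sizeof(ur_cthread_t *) == 0x234fffc: the
+ * last word of the segment [0x2340000, 0x2350000), where the self
+ * pointer is stored when the stack is set up.
+ */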
+
+extern void
+cthread_set_name C_ARG_DECLS((cthread_t t, char *name));
+
+extern char *
+cthread_name C_ARG_DECLS((cthread_t t));
+
+extern int
+cthread_count();
+
+extern void
+cthread_set_limit C_ARG_DECLS((int n));
+
+extern int
+cthread_limit();
+
+extern void
+cthread_wire C_ARG_DECLS((void));
+
+#ifdef CTHREAD_DATA
+/*
+ * Set or get thread specific "global" variable
+ *
+ * The thread given must be the calling thread (i.e. thread_self).
+ * XXX This is for compatibility with the old cthread_data. XXX
+ */
+extern int
+cthread_set_data C_ARG_DECLS((cthread_t t, any_t x));
+
+extern any_t
+cthread_data C_ARG_DECLS((cthread_t t));
+#endif /* CTHREAD_DATA */
+
+/*
+ * Support for POSIX thread specific data
+ *
+ * Multiplexes a thread specific "global" variable
+ * into many thread specific "global" variables.
+ */
+#define CTHREAD_DATA_VALUE_NULL (any_t)0
+#define CTHREAD_KEY_INVALID (cthread_key_t)-1
+
+typedef int cthread_key_t;
+
+/*
+ * Create key to private data visible to all threads in task.
+ * Different threads may use same key, but the values bound to the key are
+ * maintained on a thread specific basis.
+ */
+extern int
+cthread_keycreate C_ARG_DECLS((cthread_key_t *key));
+
+/*
+ * Get value currently bound to key for calling thread
+ */
+extern int
+cthread_getspecific C_ARG_DECLS((cthread_key_t key, any_t *value));
+
+/*
+ * Bind value to given key for calling thread
+ */
+extern int
+cthread_setspecific C_ARG_DECLS((cthread_key_t key, any_t value));
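+
+/*
+ * Illustrative sketch (not part of the original header): per-thread
+ * data via the key routines above.  Key creation is left
+ * unsynchronized here for brevity.
+ */
+#if 0
+static cthread_key_t key = CTHREAD_KEY_INVALID;
+
+void
+remember(x)
+	any_t x;
+{
+	if (key == CTHREAD_KEY_INVALID)
+		(void) cthread_keycreate(&key);
+	(void) cthread_setspecific(key, x);
+}
+
+any_t
+recall()
+{
+	any_t x = CTHREAD_DATA_VALUE_NULL;
+
+	(void) cthread_getspecific(key, &x);
+	return x;
+}
+#endif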
+
+/*
+ * Debugging support.
+ */
+#ifdef DEBUG
+
+#ifndef ASSERT
+/*
+ * Assertion macro, similar to <assert.h>.  Note that the expression
+ * text "p" reaches the message only under a traditional (pre-ANSI)
+ * preprocessor, which substitutes macro parameters inside string
+ * literals; an ANSI preprocessor prints a literal "p" here.
+ */
+#include <stdio.h>
+#define ASSERT(p) \
+ MACRO_BEGIN \
+ if (!(p)) { \
+ fprintf(stderr, \
+ "File %s, line %d: assertion p failed.\n", \
+ __FILE__, __LINE__); \
+ abort(); \
+ } \
+ MACRO_END
+
+#endif /* ASSERT */
+
+#define SHOULDNT_HAPPEN 0
+
+extern int cthread_debug;
+
+#else /* DEBUG */
+
+#ifndef ASSERT
+#define ASSERT(p)
+#endif /* ASSERT */
+
+#endif /* DEBUG */
+
+#endif /* _CTHREADS_ */
diff --git a/libthreads/i386/csw.S b/libthreads/i386/csw.S
new file mode 100644
index 00000000..efc739f0
--- /dev/null
+++ b/libthreads/i386/csw.S
@@ -0,0 +1,139 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * HISTORY
+ * $Log: csw.s,v $
+ * Revision 2.7 91/07/31 18:36:32 dbg
+ * Fix for ANSI C preprocessor.
+ * [91/07/30 17:35:16 dbg]
+ *
+ * Revision 2.6 91/05/14 17:56:56 mrt
+ * Correcting copyright
+ *
+ * Revision 2.5 91/05/08 13:35:49 dbg
+ * Unlock lock with a locked instruction (xchg).
+ * [91/03/20 dbg]
+ *
+ * Revision 2.4 91/02/14 14:20:02 mrt
+ * Changed to new Mach copyright
+ * [91/02/13 12:15:27 mrt]
+ *
+ * Revision 2.3 91/01/08 16:46:20 rpd
+ * Don't use Times - horta doesn't like it for some reason.
+ * [91/01/06 rpd]
+ *
+ * Revision 2.2 90/05/03 15:54:37 dbg
+ * Created.
+ * [90/02/05 dbg]
+ *
+ */
+#include <i386/asm.h>
+
+/*
+ * Suspend the current thread and resume the next one.
+ *
+ * void cproc_switch(int *cur, int *next, int *lock)
+ */
+ENTRY(cproc_switch)
+ pushl %ebp / save ebp
+ movl %esp,%ebp / set frame pointer to get arguments
+ pushl %ebx / save ebx
+ pushl %esi / esi
+ pushl %edi / edi
+ movl B_ARG0,%eax / get cur
+ movl %esp,(%eax) / save current esp
+ movl B_ARG2,%edx / get address of lock before switching
+ / stacks
+ movl B_ARG1,%eax / get next
+ movl (%eax),%esp / get new stack pointer
+ xorl %eax,%eax / unlock
+ xchgl %eax,(%edx) / the lock - now old thread can run
+
+ popl %edi / restore di
+ popl %esi / si
+ popl %ebx / bx
+ popl %ebp / and bp (don`t use "leave" - bp
+ / still points to old stack)
+ ret
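+
+/*
+ * C-level sketch of the above (not part of the original file):
+ *
+ *	*cur = sp;	save old esp into the old thread's context
+ *	sp = *next;	adopt the next thread's saved esp
+ *	*lock = 0;	release the switch lock (via locked xchg)
+ *
+ * with ebx/esi/edi/ebp saved before the swap and restored after it,
+ * so the ret returns on the new thread's stack.
+ */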
+
+/*
+ * Create a new stack frame for a 'waiting' thread,
+ * save current thread's frame, and switch to waiting thread.
+ *
+ * void cproc_start_wait(int *cur,
+ * cproc_t child,
+ * int stackp,
+ * int *lock)
+ */
+ENTRY(cproc_start_wait)
+ pushl %ebp / save ebp
+ movl %esp,%ebp / set frame pointer
+ pushl %ebx / save ebx
+ pushl %esi / esi
+ pushl %edi / edi
+ movl B_ARG0,%eax / get cur
+ movl %esp,(%eax) / save current esp
+ movl B_ARG1,%eax / get child thread
+ movl B_ARG3,%edx / point to lock before switching stack
+ movl B_ARG2,%esp / get new stack
+ pushl %eax / push child thread as argument
+ movl $0,%ebp / (clear frame pointer)
+ xorl %eax,%eax / unlock
+ xchgl %eax,(%edx) / the lock - now old thread can run
+ call _cproc_waiting / call cproc_waiting
+ /*NOTREACHED*/
+
+/*
+ * Set up a thread's stack so that when cproc_switch switches to
+ * it, it will start up as if it called
+ * cproc_body(child)
+ *
+ * void cproc_prepare(cproc_t child, int *context, int stack)
+ */
+ENTRY(cproc_prepare)
+ pushl %ebp / save ebp
+ movl %esp,%ebp / set frame pointer
+ movl B_ARG2,%edx / get child`s stack
+ subl $28,%edx
+ / make room for context:
+ / 0 saved edi ()
+ / 4 saved esi ()
+ / 8 saved ebx ()
+ / 12 saved ebp ()
+ / 16 return PC from cproc_switch
+ / 20 return PC from cthread_body
+ / 24 argument to cthread_body
+ movl $0,12(%edx) / clear frame pointer
+ movl $_cthread_body,16(%edx)
+ / resume at cthread_body
+ movl $0,20(%edx) / fake return address from cthread_body
+ movl B_ARG0,%ecx / get child thread pointer
+ movl %ecx,24(%edx) / set as argument to cthread_body
+ movl B_ARG1,%ecx / get pointer to context
+ movl %edx,(%ecx) / save context
+ leave
+ ret
+
diff --git a/libthreads/i386/thread.c b/libthreads/i386/thread.c
new file mode 100644
index 00000000..155146b2
--- /dev/null
+++ b/libthreads/i386/thread.c
@@ -0,0 +1,114 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * HISTORY
+ * $Log: thread.c,v $
+ * Revision 2.6 91/07/31 18:37:07 dbg
+ * Undefine cthread_sp macro around function definition.
+ * [91/07/30 17:36:23 dbg]
+ *
+ * Revision 2.5 91/05/14 17:57:27 mrt
+ * Correcting copyright
+ *
+ * Revision 2.4 91/02/14 14:20:21 mrt
+ * Changed to new Mach copyright
+ * [91/02/13 12:20:10 mrt]
+ *
+ * Revision 2.3 90/06/02 15:13:53 rpd
+ * Added definition of cthread_sp.
+ * [90/06/02 rpd]
+ *
+ * Revision 2.2 90/05/03 15:55:03 dbg
+ * Created (from 68020 version).
+ * [90/02/05 dbg]
+ *
+ */
+/*
+ * i386/thread.c
+ *
+ */
+
+#ifndef lint
+static char rcs_id[] = "$Header: cvs-sans-libpthread/hurd/libthreads/i386/thread.c,v 1.1 1992/10/06 18:31:16 mib Exp $";
+#endif /* not lint */
+
+
+#include <cthreads.h>
+#include "cthread_internals.h"
+
+
+#include <mach/mach.h>
+
+/*
+ * C library imports:
+ */
+extern bzero();
+
+/*
+ * Set up the initial state of a MACH thread
+ * so that it will invoke cthread_body(child)
+ * when it is resumed.
+ */
+void
+cproc_setup(child, thread, routine)
+ register cproc_t child;
+ int thread;
+ int routine;
+{
+ register int *top = (int *) (child->stack_base + child->stack_size);
+ struct i386_thread_state state;
+ register struct i386_thread_state *ts = &state;
+ kern_return_t r;
+ unsigned int count;
+
+ /*
+ * Set up i386 call frame and registers.
+ * Read registers first to get correct segment values.
+ */
+ count = i386_THREAD_STATE_COUNT;
+	MACH_CALL(thread_get_state(thread, i386_THREAD_STATE,
+				   (thread_state_t) &state, &count), r);
+
+ ts->eip = routine;
+ *--top = (int) child; /* argument to function */
+ *--top = 0; /* fake return address */
+ ts->uesp = (int) top; /* set stack pointer */
+ ts->ebp = 0; /* clear frame pointer */
+
+	MACH_CALL(thread_set_state(thread, i386_THREAD_STATE,
+				   (thread_state_t) &state,
+				   i386_THREAD_STATE_COUNT), r);
+}
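+
+/*
+ * Illustrative sketch (not part of the original file): how a caller
+ * might pair cproc_setup with the Mach kernel-thread primitives.
+ * "spawn" is hypothetical and error handling is elided.
+ */
+#if 0
+void
+spawn(child)
+	cproc_t child;
+{
+	thread_t t;
+	extern void cthread_body();
+
+	(void) thread_create(mach_task_self(), &t);
+	cproc_setup(child, (int) t, (int) cthread_body);
+	(void) thread_resume(t);
+}
+#endif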
+
+#ifdef cthread_sp
+#undef cthread_sp
+#endif
+
+int
+cthread_sp()
+{
+ int x;
+
+ return (int) &x;
+}
+
diff --git a/libthreads/malloc.c b/libthreads/malloc.c
new file mode 100644
index 00000000..b6a31c80
--- /dev/null
+++ b/libthreads/malloc.c
@@ -0,0 +1,349 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * HISTORY
+ * $Log: malloc.c,v $
+ * Revision 2.7 91/05/14 17:57:34 mrt
+ * Correcting copyright
+ *
+ * Revision 2.6 91/02/14 14:20:26 mrt
+ * Added new Mach copyright
+ * [91/02/13 12:41:21 mrt]
+ *
+ * Revision 2.5 90/11/05 14:37:33 rpd
+ * Added malloc_fork* code.
+ * [90/11/02 rwd]
+ *
+ * Add spin_lock_t.
+ * [90/10/31 rwd]
+ *
+ * Revision 2.4 90/08/07 14:31:28 rpd
+ * Removed RCS keyword nonsense.
+ *
+ * Revision 2.3 90/06/02 15:14:00 rpd
+ * Converted to new IPC.
+ * [90/03/20 20:56:57 rpd]
+ *
+ * Revision 2.2 89/12/08 19:53:59 rwd
+ * Removed conditionals.
+ * [89/10/23 rwd]
+ *
+ * Revision 2.1 89/08/03 17:09:46 rwd
+ * Created.
+ *
+ *
+ * 13-Sep-88 Eric Cooper (ecc) at Carnegie Mellon University
+ * Changed realloc() to copy min(old size, new size) bytes.
+ * Bug found by Mike Kupfer at Olivetti.
+ */
+/*
+ * File: malloc.c
+ * Author: Eric Cooper, Carnegie Mellon University
+ * Date: July, 1988
+ *
+ * Memory allocator for use with multiple threads.
+ */
+
+
+#include <cthreads.h>
+#include "cthread_internals.h"
+
+/*
+ * C library imports:
+ */
+extern bcopy();
+
+/*
+ * Structure of memory block header.
+ * When free, next points to next block on free list.
+ * When allocated, fl points to free list.
+ * Size of header is 4 bytes, so minimum usable block size is 8 bytes.
+ */
+typedef union header {
+ union header *next;
+ struct free_list *fl;
+} *header_t;
+
+#define MIN_SIZE 8 /* minimum block size */
+
+typedef struct free_list {
+ spin_lock_t lock; /* spin lock for mutual exclusion */
+ header_t head; /* head of free list for this size */
+#ifdef DEBUG
+ int in_use; /* # mallocs - # frees */
+#endif /* DEBUG */
+} *free_list_t;
+
+/*
+ * Free list with index i contains blocks of size 2^(i+3) including header.
+ * Smallest block size is 8, with 4 bytes available to user.
+ * Size argument to malloc is a signed integer for sanity checking,
+ * so largest block size is 2^31.
+ */
+#define NBUCKETS 29
+
+static struct free_list malloc_free_list[NBUCKETS];
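+
+/*
+ * Worked example (not part of the original file): malloc(100) needs
+ * 100 + 4 header bytes, so the bucket search settles on n = 128,
+ * i.e. bucket i = 4 (2^(4+3) == 128), leaving 124 bytes for the
+ * caller.
+ */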
+
+static void
+more_memory(size, fl)
+ int size;
+ register free_list_t fl;
+{
+ register int amount;
+ register int n;
+ vm_address_t where;
+ register header_t h;
+ kern_return_t r;
+
+ if (size <= vm_page_size) {
+ amount = vm_page_size;
+ n = vm_page_size / size;
+ /*
+ * We lose vm_page_size - n*size bytes here.
+ */
+ } else {
+ amount = size;
+ n = 1;
+ }
+ MACH_CALL(vm_allocate(mach_task_self(), &where, (vm_size_t) amount, TRUE), r);
+ h = (header_t) where;
+ do {
+ h->next = fl->head;
+ fl->head = h;
+ h = (header_t) ((char *) h + size);
+ } while (--n != 0);
+}
+
+char *
+malloc(size)
+ register unsigned int size;
+{
+ register int i, n;
+ register free_list_t fl;
+ register header_t h;
+
+ if ((int) size <= 0) /* sanity check */
+ return 0;
+ size += sizeof(union header);
+ /*
+ * Find smallest power-of-two block size
+ * big enough to hold requested size plus header.
+ */
+ i = 0;
+ n = MIN_SIZE;
+ while (n < size) {
+ i += 1;
+ n <<= 1;
+ }
+ ASSERT(i < NBUCKETS);
+ fl = &malloc_free_list[i];
+ spin_lock(&fl->lock);
+ h = fl->head;
+ if (h == 0) {
+ /*
+ * Free list is empty;
+ * allocate more blocks.
+ */
+ more_memory(n, fl);
+ h = fl->head;
+ if (h == 0) {
+ /*
+ * Allocation failed.
+ */
+ spin_unlock(&fl->lock);
+ return 0;
+ }
+ }
+ /*
+ * Pop block from free list.
+ */
+ fl->head = h->next;
+#ifdef DEBUG
+ fl->in_use += 1;
+#endif /* DEBUG */
+ spin_unlock(&fl->lock);
+ /*
+ * Store free list pointer in block header
+ * so we can figure out where it goes
+ * at free() time.
+ */
+ h->fl = fl;
+ /*
+ * Return pointer past the block header.
+ */
+ return ((char *) h) + sizeof(union header);
+}
+
+free(base)
+ char *base;
+{
+ register header_t h;
+ register free_list_t fl;
+ register int i;
+
+ if (base == 0)
+ return;
+ /*
+ * Find free list for block.
+ */
+ h = (header_t) (base - sizeof(union header));
+ fl = h->fl;
+ i = fl - malloc_free_list;
+ /*
+ * Sanity checks.
+ */
+ if (i < 0 || i >= NBUCKETS) {
+ ASSERT(0 <= i && i < NBUCKETS);
+ return;
+ }
+ if (fl != &malloc_free_list[i]) {
+ ASSERT(fl == &malloc_free_list[i]);
+ return;
+ }
+ /*
+ * Push block on free list.
+ */
+ spin_lock(&fl->lock);
+ h->next = fl->head;
+ fl->head = h;
+#ifdef DEBUG
+ fl->in_use -= 1;
+#endif /* DEBUG */
+ spin_unlock(&fl->lock);
+ return;
+}
+
+char *
+realloc(old_base, new_size)
+ char *old_base;
+ unsigned int new_size;
+{
+ register header_t h;
+ register free_list_t fl;
+ register int i;
+ unsigned int old_size;
+ char *new_base;
+
+ if (old_base == 0)
+ return 0;
+ /*
+ * Find size of old block.
+ */
+ h = (header_t) (old_base - sizeof(union header));
+ fl = h->fl;
+ i = fl - malloc_free_list;
+ /*
+ * Sanity checks.
+ */
+ if (i < 0 || i >= NBUCKETS) {
+ ASSERT(0 <= i && i < NBUCKETS);
+ return 0;
+ }
+ if (fl != &malloc_free_list[i]) {
+ ASSERT(fl == &malloc_free_list[i]);
+ return 0;
+ }
+ /*
+ * Free list with index i contains blocks of size 2^(i+3) including header.
+ */
+ old_size = (1 << (i+3)) - sizeof(union header);
+ /*
+ * Allocate new block, copy old bytes, and free old block.
+ */
+ new_base = malloc(new_size);
+ if (new_base != 0)
+ bcopy(old_base, new_base, (int) (old_size < new_size ? old_size : new_size));
+ free(old_base);
+ return new_base;
+}
+
+#ifdef DEBUG
+void
+print_malloc_free_list()
+{
+ register int i, size;
+ register free_list_t fl;
+ register int n;
+ register header_t h;
+ int total_used = 0;
+ int total_free = 0;
+
+ fprintf(stderr, " Size In Use Free Total\n");
+ for (i = 0, size = MIN_SIZE, fl = malloc_free_list;
+ i < NBUCKETS;
+ i += 1, size <<= 1, fl += 1) {
+ spin_lock(&fl->lock);
+ if (fl->in_use != 0 || fl->head != 0) {
+ total_used += fl->in_use * size;
+ for (n = 0, h = fl->head; h != 0; h = h->next, n += 1)
+ ;
+ total_free += n * size;
+ fprintf(stderr, "%10d %10d %10d %10d\n",
+ size, fl->in_use, n, fl->in_use + n);
+ }
+ spin_unlock(&fl->lock);
+ }
+ fprintf(stderr, " all sizes %10d %10d %10d\n",
+ total_used, total_free, total_used + total_free);
+}
+#endif /* DEBUG */
+
+void malloc_fork_prepare()
+/*
+ * Prepare the malloc module for a fork by ensuring that no thread is in a
+ * malloc critical section.
+ */
+{
+ register int i;
+
+ for (i = 0; i < NBUCKETS; i++) {
+ spin_lock(&malloc_free_list[i].lock);
+ }
+}
+
+void malloc_fork_parent()
+/*
+ * Called in the parent process after a fork() to resume normal operation.
+ */
+{
+ register int i;
+
+ for (i = NBUCKETS-1; i >= 0; i--) {
+ spin_unlock(&malloc_free_list[i].lock);
+ }
+}
+
+void malloc_fork_child()
+/*
+ * Called in the child process after a fork() to resume normal operation.
+ */
+{
+ register int i;
+
+ for (i = NBUCKETS-1; i >= 0; i--) {
+ spin_unlock(&malloc_free_list[i].lock);
+ }
+}
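+
+/*
+ * Illustrative sketch (not part of the original file): the fork hooks
+ * above are meant to bracket fork() so that no free-list lock is held
+ * across it.  "locked_fork" is hypothetical.
+ */
+#if 0
+int
+locked_fork()
+{
+	int pid;
+
+	malloc_fork_prepare();
+	pid = fork();
+	if (pid == 0)
+		malloc_fork_child();
+	else
+		malloc_fork_parent();
+	return pid;
+}
+#endif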
diff --git a/libthreads/mig_support.c b/libthreads/mig_support.c
new file mode 100644
index 00000000..e9c834fb
--- /dev/null
+++ b/libthreads/mig_support.c
@@ -0,0 +1,157 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * HISTORY
+ * $Log: mig_support.c,v $
+ * Revision 2.6 91/05/14 17:57:41 mrt
+ * Correcting copyright
+ *
+ * Revision 2.5 91/02/14 14:20:30 mrt
+ * Added new Mach copyright
+ * [91/02/13 12:41:26 mrt]
+ *
+ * Revision 2.4 90/08/07 14:31:41 rpd
+ * Removed RCS keyword nonsense.
+ *
+ * Revision 2.3 90/08/07 14:27:48 rpd
+ * When we recycle the global reply port by giving it to the first
+ * cthread, clear the global reply port. This will take care of
+ * someone accidentally calling this twice.
+ * [90/08/07 rwd]
+ *
+ * Revision 2.2 90/06/02 15:14:04 rpd
+ * Converted to new IPC.
+ * [90/03/20 20:56:50 rpd]
+ *
+ * Revision 2.1 89/08/03 17:09:50 rwd
+ * Created.
+ *
+ * 18-Jan-89 David Golub (dbg) at Carnegie-Mellon University
+ * Replaced task_data() by thread_reply().
+ *
+ *
+ * 27-Aug-87 Eric Cooper (ecc) at Carnegie Mellon University
+ * Changed mig_support.c to avoid deadlock that can occur
+ * if tracing is turned on during calls to mig_get_reply_port().
+ *
+ * 10-Aug-87 Eric Cooper (ecc) at Carnegie Mellon University
+ * Changed mig_support.c to use MACH_CALL.
+ * Changed "is_init" to "multithreaded" and reversed its sense.
+ *
+ * 30-Jul-87 Mary Thompson (mrt) at Carnegie Mellon University
+ * Created.
+ */
+/*
+ * File: mig_support.c
+ * Author: Mary R. Thompson, Carnegie Mellon University
+ * Date: July, 1987
+ *
+ * Routines to set and deallocate the mig reply port for the current thread.
+ * Called from mig-generated interfaces.
+ *
+ */
+
+
+#include <mach/mach.h>
+#include <cthreads.h>
+#include "cthread_internals.h"
+
+private boolean_t multithreaded = FALSE;
+/* use a global reply port before becoming multi-threaded */
+private mach_port_t mig_reply_port = MACH_PORT_NULL;
+
+/*
+ * Called by mach_init with 0 before cthread_init is
+ * called and again with initial cproc at end of cthread_init.
+ */
+void
+mig_init(initial)
+ register cproc_t initial;
+{
+ if (initial == NO_CPROC) {
+ /* called from mach_init before cthread_init,
+ possibly after a fork. clear global reply port. */
+
+ multithreaded = FALSE;
+ mig_reply_port = MACH_PORT_NULL;
+ } else {
+ /* recycle global reply port as this cthread's reply port */
+
+ multithreaded = TRUE;
+ initial->reply_port = mig_reply_port;
+ mig_reply_port = MACH_PORT_NULL;
+ }
+}
+
+/*
+ * Called by mig interface code whenever a reply port is needed.
+ */
+mach_port_t
+mig_get_reply_port()
+{
+ register mach_port_t reply_port;
+
+ if (multithreaded) {
+ register cproc_t self;
+
+ self = cproc_self();
+ ASSERT(self != NO_CPROC);
+
+ if ((reply_port = self->reply_port) == MACH_PORT_NULL)
+ self->reply_port = reply_port = mach_reply_port();
+ } else {
+ if ((reply_port = mig_reply_port) == MACH_PORT_NULL)
+ mig_reply_port = reply_port = mach_reply_port();
+ }
+
+ return reply_port;
+}
+
+/*
+ * Called by mig interface code after a timeout on the reply port.
+ * May also be called by user.
+ */
+void
+mig_dealloc_reply_port()
+{
+ register mach_port_t reply_port;
+
+ if (multithreaded) {
+ register cproc_t self;
+
+ self = cproc_self();
+ ASSERT(self != NO_CPROC);
+
+ reply_port = self->reply_port;
+ self->reply_port = MACH_PORT_NULL;
+ } else {
+ reply_port = mig_reply_port;
+ mig_reply_port = MACH_PORT_NULL;
+ }
+
+ (void) mach_port_mod_refs(mach_task_self(), reply_port,
+ MACH_PORT_RIGHT_RECEIVE, -1);
+}
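+
+/*
+ * Illustrative sketch (not part of the original file): the pairing a
+ * mig stub relies on.  The cached port is reused across calls and
+ * destroyed only after a failure, so a stale reply cannot arrive on
+ * a recycled port.  "rpc_failed" is a hypothetical status flag.
+ */
+#if 0
+void
+rpc_example()
+{
+	mach_port_t port = mig_get_reply_port();
+	boolean_t rpc_failed = FALSE;
+
+	/* ... mach_msg() send/receive using port, setting rpc_failed ... */
+
+	if (rpc_failed)
+		mig_dealloc_reply_port();
+}
+#endif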
diff --git a/libthreads/stack.c b/libthreads/stack.c
new file mode 100644
index 00000000..2764470f
--- /dev/null
+++ b/libthreads/stack.c
@@ -0,0 +1,382 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * HISTORY
+ * $Log: stack.c,v $
+ * Revision 2.13 92/01/14 16:48:54 rpd
+ * Fixed addr_range_check to deallocate the object port from vm_region.
+ * [92/01/14 rpd]
+ *
+ * Revision 2.12 92/01/03 20:37:10 dbg
+ * Export cthread_stack_size, and use it if non-zero instead of
+ * probing the stack. Fix error in deallocating unused initial
+ * stack (STACK_GROWTH_UP case).
+ * [91/08/28 dbg]
+ *
+ * Revision 2.11 91/07/31 18:39:34 dbg
+ * Fix some bad stack references (stack direction).
+ * [91/07/30 17:36:50 dbg]
+ *
+ * Revision 2.10 91/05/14 17:58:49 mrt
+ * Correcting copyright
+ *
+ * Revision 2.9 91/02/14 14:21:08 mrt
+ * Added new Mach copyright
+ * [91/02/13 12:41:35 mrt]
+ *
+ * Revision 2.8 90/11/05 18:10:46 rpd
+ * Added cproc_stack_base. Add stack_fork_child().
+ * [90/11/01 rwd]
+ *
+ * Revision 2.7 90/11/05 14:37:51 rpd
+ * Fixed addr_range_check for new vm_region semantics.
+ * [90/11/02 rpd]
+ *
+ * Revision 2.6 90/10/12 13:07:34 rpd
+ * Deal with positively growing stacks.
+ * [90/10/10 rwd]
+ * Deal with initial user stacks that are not perfectly aligned.
+ * [90/09/26 11:51:46 rwd]
+ *
+ * Leave extra stack page around in case it is needed before we
+ * switch stacks.
+ * [90/09/25 rwd]
+ *
+ * Revision 2.5 90/08/07 14:31:46 rpd
+ * Removed RCS keyword nonsense.
+ *
+ * Revision 2.4 90/06/02 15:14:18 rpd
+ * Moved cthread_sp to machine-dependent files.
+ * [90/04/24 rpd]
+ * Converted to new IPC.
+ * [90/03/20 20:56:35 rpd]
+ *
+ * Revision 2.3 90/01/19 14:37:34 rwd
+ * Move self pointer to top of stack
+ * [89/12/12 rwd]
+ *
+ * Revision 2.2 89/12/08 19:49:52 rwd
+ * Back out change from af.
+ * [89/12/08 rwd]
+ *
+ * Revision 2.1.1.3 89/12/06 12:54:17 rwd
+ * Gap fix from af
+ * [89/12/06 rwd]
+ *
+ * Revision 2.1.1.2 89/11/21 15:01:40 rwd
+ * Add RED_ZONE ifdef.
+ * [89/11/20 rwd]
+ *
+ * Revision 2.1.1.1 89/10/24 13:00:44 rwd
+ * Remove conditionals.
+ * [89/10/23 rwd]
+ *
+ * Revision 2.1 89/08/03 17:10:05 rwd
+ * Created.
+ *
+ * 18-Jan-89 David Golub (dbg) at Carnegie-Mellon University
+ * Altered for stand-alone use:
+ * use vm_region to probe for the bottom of the initial thread's
+ * stack.
+ *
+ *
+ * 01-Dec-87 Eric Cooper (ecc) at Carnegie Mellon University
+ * Changed cthread stack allocation to use aligned stacks
+ * and store self pointer at base of stack.
+ * Added inline expansion for cthread_sp() function.
+ */
+/*
+ * File: stack.c
+ * Author: Eric Cooper, Carnegie Mellon University
+ * Date: Dec, 1987
+ *
+ * C Thread stack allocation.
+ *
+ */
+
+#include <cthreads.h>
+#include "cthread_internals.h"
+
+#define BYTES_TO_PAGES(b) (((b) + vm_page_size - 1) / vm_page_size)
+
+int cthread_stack_mask;
+vm_size_t cthread_stack_size;
+private vm_address_t next_stack_base;
+
+vm_offset_t cproc_stack_base(); /* forward */
+
+/*
+ * Set up a stack segment for a thread.
+ * Segment has a red zone (invalid page)
+ * for early detection of stack overflow.
+ * The cproc_self pointer is stored at the top.
+ *
+ * --------- (high address)
+ * | self |
+ * | ... |
+ * | |
+ * | stack |
+ * | |
+ * | ... |
+ * | |
+ * ---------
+ * | |
+ * |invalid|
+ * | |
+ * --------- (stack base)
+ * --------- (low address)
+ *
+ * or the reverse, if the stack grows up.
+ */
+
+private void
+setup_stack(p, base)
+ register cproc_t p;
+ register vm_address_t base;
+{
+ register kern_return_t r;
+
+ p->stack_base = base;
+ /*
+ * Stack size is segment size minus size of self pointer
+ */
+ p->stack_size = cthread_stack_size;
+ /*
+ * Protect red zone.
+ */
+#ifdef RED_ZONE
+ MACH_CALL(vm_protect(mach_task_self(), base + vm_page_size, vm_page_size, FALSE, VM_PROT_NONE), r);
+#endif /* RED_ZONE */
+ /*
+ * Store self pointer.
+ */
+ *(cproc_t *)&ur_cthread_ptr(base) = p;
+}
+
+vm_offset_t
+addr_range_check(start_addr, end_addr, desired_protection)
+ vm_offset_t start_addr, end_addr;
+ vm_prot_t desired_protection;
+{
+ register vm_offset_t addr;
+
+ addr = start_addr;
+ while (addr < end_addr) {
+ vm_offset_t r_addr;
+ vm_size_t r_size;
+ vm_prot_t r_protection,
+ r_max_protection;
+ vm_inherit_t r_inheritance;
+ boolean_t r_is_shared;
+ memory_object_name_t r_object_name;
+ vm_offset_t r_offset;
+ kern_return_t kr;
+
+ r_addr = addr;
+ kr = vm_region(mach_task_self(), &r_addr, &r_size,
+ &r_protection, &r_max_protection, &r_inheritance,
+ &r_is_shared, &r_object_name, &r_offset);
+ if ((kr == KERN_SUCCESS) && MACH_PORT_VALID(r_object_name))
+ (void) mach_port_deallocate(mach_task_self(), r_object_name);
+
+ if ((kr != KERN_SUCCESS) ||
+ (r_addr > addr) ||
+ ((r_protection & desired_protection) != desired_protection))
+ return (0);
+ addr = r_addr + r_size;
+ }
+ return (addr);
+}
+
+/*
+ * Probe for bottom and top of stack.
+ * Assume:
+ * 1. stack grows DOWN
+ * 2. There is an unallocated region below the stack.
+ */
+void
+probe_stack(stack_bottom, stack_top)
+ vm_offset_t *stack_bottom;
+ vm_offset_t *stack_top;
+{
+ /*
+ * Since vm_region returns the region starting at
+ * or ABOVE the given address, we cannot use it
+ * directly to search downwards. However, we
+ * also want a size that is the closest power of
+ * 2 to the stack size (so we can mask off the stack
+ * address and get the stack base). So we probe
+ * in increasing powers of 2 until we find a gap
+ * in the stack.
+ */
+ vm_offset_t start_addr, end_addr;
+ vm_offset_t last_start_addr, last_end_addr;
+ vm_size_t stack_size;
+
+ /*
+ * Start with a page
+ */
+ start_addr = cthread_sp() & ~(vm_page_size - 1);
+ end_addr = start_addr + vm_page_size;
+
+ stack_size = vm_page_size;
+
+ /*
+ * Increase the tentative stack size, by doubling each
+ * time, until we have exceeded the stack (some of the
+ * range is not valid).
+ */
+ do {
+ /*
+ * Save last addresses
+ */
+ last_start_addr = start_addr;
+ last_end_addr = end_addr;
+
+ /*
+ * Double the stack size
+ */
+ stack_size <<= 1;
+ start_addr = end_addr - stack_size;
+
+ /*
+ * Check that the entire range exists and is writable
+ */
+ } while (end_addr = (addr_range_check(start_addr,
+ end_addr,
+ VM_PROT_READ|VM_PROT_WRITE)));
+ /*
+ * Back off to previous power of 2.
+ */
+ *stack_bottom = last_start_addr;
+ *stack_top = last_end_addr;
+}
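+
+/*
+ * Worked example (not part of the original file): with 4Kb pages and
+ * sp == 0x7ffff123, the first probe covers [0x7ffff000, 0x80000000).
+ * The candidate range then doubles: 8Kb, 16Kb, ... until
+ * addr_range_check finds part of it unallocated or unwritable; the
+ * last range that checked out completely is reported as the stack's
+ * bottom and top.
+ */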
+
+vm_offset_t
+stack_init(p)
+ cproc_t p;
+{
+ vm_offset_t stack_bottom,
+ stack_top,
+ start;
+ vm_size_t size;
+ kern_return_t r;
+
+ void alloc_stack();
+
+ /*
+ * Probe for bottom and top of stack, as a power-of-2 size.
+ */
+ probe_stack(&stack_bottom, &stack_top);
+
+ /*
+ * Use the stack size found for the Cthread stack size,
+ * if not already specified.
+ */
+ if (cthread_stack_size == 0)
+ cthread_stack_size = stack_top - stack_bottom;
+#ifdef STACK_GROWTH_UP
+ cthread_stack_mask = ~(cthread_stack_size - 1);
+#else /* STACK_GROWTH_UP */
+	cthread_stack_mask = cthread_stack_size - 1;
+#endif /* STACK_GROWTH_UP */
+
+ /*
+ * Guess at first available region for stack.
+ */
+ next_stack_base = 0;
+
+ /*
+ * Set up stack for main thread.
+ */
+ alloc_stack(p);
+
+ /*
+ * Delete rest of old stack.
+ */
+
+#ifdef STACK_GROWTH_UP
+ start = (cthread_sp() | (vm_page_size - 1)) + 1 + vm_page_size;
+ size = stack_top - start;
+#else /* STACK_GROWTH_UP */
+	start = stack_bottom;
+	size = (cthread_sp() & ~(vm_page_size - 1)) - stack_bottom -
+				vm_page_size;
+#endif /* STACK_GROWTH_UP */
+ MACH_CALL(vm_deallocate(mach_task_self(),start,size),r);
+
+ /*
+ * Return new stack; it gets passed back to the caller
+ * of cthread_init who must switch to it.
+ */
+ return cproc_stack_base(p, sizeof(ur_cthread_t *));
+}
+
+/*
+ * Allocate a stack segment for a thread.
+ * Stacks are never deallocated.
+ *
+ * The variable next_stack_base is used to align stacks.
+ * It may be updated by several threads in parallel,
+ * but mutual exclusion is unnecessary: at worst,
+ * the vm_allocate will fail and the thread will try again.
+ */
+
+void
+alloc_stack(p)
+ cproc_t p;
+{
+	vm_address_t base;
+
+ for (base = next_stack_base;
+ vm_allocate(mach_task_self(), &base, cthread_stack_size, FALSE) != KERN_SUCCESS;
+ base += cthread_stack_size)
+ ;
+ next_stack_base = base + cthread_stack_size;
+ setup_stack(p, base);
+}
+
+vm_offset_t
+cproc_stack_base(cproc, offset)
+ register cproc_t cproc;
+ register int offset;
+{
+#ifdef STACK_GROWTH_UP
+ return (cproc->stack_base + offset);
+#else /* STACK_GROWTH_UP */
+	return (cproc->stack_base + cproc->stack_size - offset);
+#endif /* STACK_GROWTH_UP */
+}
+
+void stack_fork_child()
+/*
+ * Called in the child after a fork(). Resets stack data structures to
+ * coincide with the reality that we now have a single cproc and cthread.
+ */
+{
+ next_stack_base = 0;
+}