summaryrefslogtreecommitdiff
path: root/libthreads
diff options
context:
space:
mode:
Diffstat (limited to 'libthreads')
-rw-r--r--libthreads/ChangeLog368
-rw-r--r--libthreads/GNUmakefile.old38
-rw-r--r--libthreads/Makefile52
-rw-r--r--libthreads/Makefile.CMU93
-rw-r--r--libthreads/Makefile.GNU34
-rw-r--r--libthreads/Makefile.GNU234
-rw-r--r--libthreads/call.c81
-rw-r--r--libthreads/cancel-cond.c116
-rw-r--r--libthreads/cprocs.c1246
-rw-r--r--libthreads/cthread_data.c188
-rw-r--r--libthreads/cthread_internals.h209
-rw-r--r--libthreads/cthreads.c481
-rw-r--r--libthreads/cthreads.h639
-rw-r--r--libthreads/i386/csw.S185
-rw-r--r--libthreads/i386/cthread_inline.awk86
-rw-r--r--libthreads/i386/cthreads.h89
-rw-r--r--libthreads/i386/lock.s70
-rw-r--r--libthreads/i386/thread.c123
-rw-r--r--libthreads/mig_support.c194
-rw-r--r--libthreads/options.h90
-rw-r--r--libthreads/rwlock.c2
-rw-r--r--libthreads/rwlock.h111
-rw-r--r--libthreads/stack.c423
-rw-r--r--libthreads/sync.c83
24 files changed, 5035 insertions, 0 deletions
diff --git a/libthreads/ChangeLog b/libthreads/ChangeLog
new file mode 100644
index 00000000..db4d3f86
--- /dev/null
+++ b/libthreads/ChangeLog
@@ -0,0 +1,368 @@
+2000-01-10 Mark Kettenis <kettenis@gnu.org>
+
+ * cprocs.c: Include <assert.h>
+
+2000-01-09 Roland McGrath <roland@baalperazim.frob.com>
+
+ * cprocs.c (cproc_alloc): Initialize P->wired and P->msg here (code
+ from cthread_wire).
+ (cthread_wire): Reduce to just an assert, cthreads always wired.
+ (chtread_unwire): Abort if called.
+
+1999-06-13 Roland McGrath <roland@baalperazim.frob.com>
+
+ * cthreads.h (MACRO_BEGIN, MACRO_END): #undef before unconditionally
+ redefining. Use GCC extension for statement expression with value 0.
+
+1999-05-29 Roland McGrath <roland@baalperazim.frob.com>
+
+ * cthreads.h (mutex_clear): Change again, to call mutex_init.
+
+ * cthreads.h (mutex_clear): Change from syntax error to no-op (with
+ warning avoidance).
+
+1998-11-12 Mark Kettenis <kettenis@phys.uva.nl>
+
+ * cthreads.c (cthread_init): Move cthread_alloc call before
+ cproc_init call, since cthread_alloc uses malloc, and malloc won't
+ work between initializing the new stack and switching over to it.
+
+1998-07-20 Roland McGrath <roland@baalperazim.frob.com>
+
+ * i386/csw.S (cproc_prepare): Take address of cthread_body as third
+ arg, so we don't have to deal with PIC magic to find its address
+ without producing a text reloc.
+ * cprocs.c (cproc_create): Pass &cthread_body to cproc_prepare.
+
+Tue Jun 9 13:50:09 1998 Thomas Bushnell, n/BSG <tb@mit.edu>
+
+ * cthreads.c (cthread_fork_prepare): Don't call
+ malloc_fork_prepare since we are no longer providing our own
+ malloc in this library.
+ (cthread_fork_parent): Likewise, for malloc_fork_parent.
+ (cthread_fork_child): Likewise, for malloc_fork_child.
+
+Wed Aug 20 15:39:44 1997 Thomas Bushnell, n/BSG <thomas@gnu.ai.mit.edu>
+
+ * cthreads.c (cthread_body): Wire self before calling user work
+ function. This way all cthreads will be wired, which the ports
+ library (and hurd_thread_cancel, etc.) depend on.
+
+Fri Aug 8 13:21:17 1997 Thomas Bushnell, n/BSG <thomas@gnu.ai.mit.edu>
+
+ * Makefile (SRCS): Delete malloc.c; libc now has a thread safe
+ malloc.
+ (malloc.c): Delete file.
+
+Mon Jun 9 21:18:46 1997 Thomas Bushnell, n/BSG <thomas@gnu.ai.mit.edu>
+
+ * cthreads.c (cthread_fork): Delete debugging oddity that crept
+ into source.
+
+Thu Apr 3 20:29:27 1997 Thomas Bushnell, n/BSG <thomas@gnu.ai.mit.edu>
+
+ * i386/csw.S: Define __ELF__ too.
+
+Wed Feb 19 23:29:55 1997 Miles Bader <miles@gnu.ai.mit.edu>
+
+ * stack.c (__hurd_threadvar_stack_mask,
+ __hurd_threadvar_stack_offset, __hurd_threadvar_max): Make extern.
+
+Tue Feb 18 16:28:36 1997 Miles Bader <miles@gnu.ai.mit.edu>
+
+ * i386/thread.c (cproc_setup): Correctly leave space at top of
+ stack for GNU per-thread variables.
+
+Mon Nov 18 16:36:56 1996 Thomas Bushnell, n/BSG <thomas@gnu.ai.mit.edu>
+
+ * cprocs.c (cproc_create): Cast CHILD in assignment.
+
+Mon Oct 21 22:05:48 1996 Thomas Bushnell, n/BSG <thomas@gnu.ai.mit.edu>
+
+ * i386/csw.S (CALL_MCOUNT): New macro.
+ (cproc_swtich, cproc_start_wait, cproc_prepare): Use CALL_MCOUNT.
+
+ * rwlock.h: Change extern inline protection to use new canonical
+ method.
+ * rwlock.c: Rewritten.
+
+Wed Sep 4 09:06:09 1996 Thomas Bushnell, n/BSG <thomas@gnu.ai.mit.edu>
+
+ * Makefile (VPATH): Look in $(asm_syntax) instead of old
+ $(machine); Hurd configure now folds equivalent CPU types into
+ asm_syntax.
+
+Thu Aug 29 12:50:37 1996 Thomas Bushnell, n/BSG <thomas@gnu.ai.mit.edu>
+
+ * i386/csw.S: Include <mach/i386/asm.h> instead of <i386/asm.h>.
+ Define ELF before inclusion.
+
+Sat Jul 20 15:47:24 1996 Michael I. Bushnell, p/BSG <mib@gnu.ai.mit.edu>
+
+ * Makefile (lndist-i386-files): Look for I386SRCS and I386HDRS in
+ $(srcdir).
+
+Thu Jun 6 07:29:31 1996 Miles Bader <miles@gnu.ai.mit.edu>
+
+ * malloc.c (more_memory): Use assert_perror instead of MACH_CALL.
+ "cthread_internals.h": Include removed.
+
+Thu May 9 11:13:42 1996 Michael I. Bushnell, p/BSG <mib@gnu.ai.mit.edu>
+
+ * Makefile (installhdrs, installhdrsubdir): Install headers using
+ the generic technique, now that installhdrsubdir is available.
+
+ * rwlock.h: If _RWLOCK_DEFINE_FUNCTIONS is defined, then clear
+ _EXTERN_INLINE, but *after* header files have been included.
+ * rwlock.c (_RWLOCK_DEFINE_FUNCTIONS): New macro; use in place of
+ clearing _EXTERN_INLINE, which conflicts with machine-sp.h.
+
+Sat May 4 05:33:57 1996 Roland McGrath <roland@delasyd.gnu.ai.mit.edu>
+
+ * cthreads.h [lint] (NEVER): Spurious global variable removed.
+ [!lint] (NEVER): Useless macro removed.
+
+ * Makefile (SRCS): Add rwlock.c.
+ (LCLHDRS): Add rwlock.h.
+ (install): Depend on $(includedir)/rwlock.h.
+ ($(includedir)/%.h: %.h): New rule.
+ ($(includedir)/cthreads.h): Target removed, obviated by that rule.
+ * rwlock.h: Moved to libthreads from libshouldbeinlibc.
+ (_EXTERN_INLINE): New macro.
+ Use it for all the inline defns.
+ * rwlock.c: New file.
+
+Thu Apr 11 17:55:24 1996 Michael I. Bushnell, p/BSG <mib@gnu.ai.mit.edu>
+
+ * Makefile (CFLAGS): Turn off -Wall.
+
+ * Makefile (VPATH): Fix up for new configure reality.
+
+Thu Mar 7 15:52:38 1996 Miles Bader <miles@gnu.ai.mit.edu>
+
+ * malloc.c (realloc): Use LOG2_MIN_SIZE.
+ (LOG2_MIN_SIZE): New macro.
+
+ * malloc.c (realloc): Don't bother allocating a new block if the
+ new size request fits in the old one and doesn't waste any space.
+ Only free the old block if we successfully got a new one.
+
+Wed Mar 6 18:05:57 1996 Miles Bader <miles@gnu.ai.mit.edu>
+
+ * malloc.c [MCHECK] (struct header): New type.
+ (union header): Only define if !MCHECK.
+ (HEADER_SIZE, HEADER_NEXT, HEADER_FREE, HEADER_CHECK): New macros.
+ [MCHECK] (MIN_SIZE): Add correct definition for this case.
+ (more_memory, malloc, free, realloc): Use above macros, and add
+ appropiate checks & frobs in MCHECK case.
+
+Wed Jan 31 20:05:57 1996 Roland McGrath <roland@churchy.gnu.ai.mit.edu>
+
+ * cancel-cond.c: Add assert to check for signal bug.
+
+Wed Jan 24 13:38:11 1996 Roland McGrath <roland@churchy.gnu.ai.mit.edu>
+
+ * cthreads.h: Use prototypes for functions of zero args.
+
+Sun Dec 10 08:41:36 1995 Roland McGrath <roland@churchy.gnu.ai.mit.edu>
+
+ * stack.c (addr_range_check, probe_stack): Functions #if 0'd out.
+ (stack_init): Don't call probe_stack or frob old stack at all.
+ Default cthread_stack_size to 16 pages if it is zero.
+
+Wed Dec 6 14:48:37 1995 Michael I. Bushnell, p/BSG <mib@gnu.ai.mit.edu>
+
+ * cprocs.c (condition_unimplies): Take address of (*impp)->next in
+ assignment to IMPP on loop step instruction.
+
+Wed Oct 4 16:22:27 1995 Roland McGrath <roland@churchy.gnu.ai.mit.edu>
+
+ * i386/csw.S (JUMPTARGET): New macro, versions for [PIC] and not.
+ Use it in place of EXT.
+ * Makefile (csw_pic.o): Bogus braindead target from hell removed.
+ Bushnell will be shot.
+
+Fri Sep 22 13:51:22 1995 Roland McGrath <roland@churchy.gnu.ai.mit.edu>
+
+ * cprocs.c: Include hurd/threadvar.h.
+
+Sat Sep 16 13:42:02 1995 Roland McGrath <roland@churchy.gnu.ai.mit.edu>
+
+ * Makefile (CPPFLAGS): Variable removed.
+ (lndist-i386-files): Use $(top_srcdir) in place of $(srcdir).
+
+Wed Sep 13 15:49:17 1995 Michael I. Bushnell, p/BSG <mib@duality.gnu.ai.mit.edu>
+
+ * cthreads.h (CONDITION_INITIALIZER): Provide initial zero for
+ IMPLICATIONS member.
+ (condition_init): Bother initializing NAME and IMPLICATIONS members.
+
+Wed Aug 30 11:10:27 1995 Michael I. Bushnell, p/BSG <mib@duality.gnu.ai.mit.edu>
+
+ * cthreads.h (condition_implies, condition_unimplies): New
+ functions.
+ (struct condition): New member `implications'.
+ (cond_imp): New structure.
+ (cond_signal): Return int now.
+ (condition_broadcast): Always call cond_broadcast if this
+ condition has implications.
+ (condition_signal): Always call cond_signal if this condition has
+ implications.
+ * cprocs.c (cond_signal): If this condition has implications,
+ see if one of them needs to be signalled when we have no waiters.
+ (cond_broadcast): Signal the implications list too.
+ (condition_implies, condition_unimplies): New functions.
+
+ * cthreads.h (hurd_condition_wait): Provide declaration.
+
+Tue Aug 29 10:48:59 1995 Michael I. Bushnell, p/BSG <mib@duality.gnu.ai.mit.edu>
+
+ * cthread_internals.h (cproc_block): Provide decl.
+
+Sat Aug 26 14:08:15 1995 Miles Bader <miles@churchy.gnu.ai.mit.edu>
+
+ * cancel-cond.c (hurd_condition_wait_cancel): Name changed to
+ `hurd_condition_wait'.
+
+Tue Aug 22 19:26:38 1995 Miles Bader <miles@churchy.gnu.ai.mit.edu>
+
+ * Makefile (SRCS): Add a backslash.
+
+Mon Aug 21 12:52:38 1995 Roland McGrath <roland@churchy.gnu.ai.mit.edu>
+
+ * Makefile (SRCS): Add cancel-cond.c.
+ * cancel-cond.c: New file.
+
+Thu Jul 6 13:39:25 1995 Michael I Bushnell <mib@duality.gnu.ai.mit.edu>
+
+ * Makefile (install): Don't *always* install cthreads.h; do it
+ only if it's new.
+
+ * Makefile: Removed dependencies that are now automatically
+ generated.
+
+ * Makefile (csw_pic.o): Provide slightly cheating rule.
+
+Fri May 12 14:25:35 1995 Roland McGrath <roland@churchy.gnu.ai.mit.edu>
+
+ * i386/csw.S: Use EXT macro instead of explicit underscores.
+ * i386/asm.h: File removed; it is installed by libc.
+ * Makefile (I386HDRS): Variable removed.
+ ($(OBJS)): Don't depend on i386/asm.h.
+
+ * Makefile (CPPFLAGS): Use $(srcdir) instead of $(hurdsource).
+
+Wed Apr 12 14:33:06 1995 Michael I Bushnell <mib@duality.gnu.ai.mit.edu>
+
+ * Makefile (lndist, lndist-i386-files): Change $(hurdsource) to
+ $(srcdir).
+ ($(hurdsource)/hurd-snap/$(dir)/i386): Likewise.
+
+Tue Apr 4 17:04:45 1995 Roland McGrath <roland@churchy.gnu.ai.mit.edu>
+
+ * Makefile (CPPFLAGS): Define.
+ ($(OBJS) rule): Fix typo in target.
+ (install-cthreads.h): Use $(INSTALL_DATA).
+
+ * cthreads.h (mutex_lock, mutex_unlock): Use __ names for *_solid.
+ * cprocs.c (mutex_lock_solid, mutex_unlock_solid): Renamed to __*.
+ (_cthread_mutex_lock_routine, _cthread_mutex_unlock_routine): Variables
+ removed.
+
+Fri Jul 22 10:47:51 1994 Michael I Bushnell <mib@geech.gnu.ai.mit.edu>
+
+ * Makefile: Rewritten in accord with new scheme.
+
+Tue Jul 19 12:22:01 1994 Michael I Bushnell (mib@churchy.gnu.ai.mit.edu)
+
+ * Makefile (generic-sources): Restored malloc.c.
+ (malloc-sources, malloc-objects): Deleted variables.
+ (DIST_FILES): Deleted $(malloc-sources)
+ (all): Deleted libmalloc.a.
+ (libmalloc.a): Delted target.
+ ($(hurdinst)/lib/libmalloc.a): Deleted target.
+ (install): Deleted $(hurdinst)/lib/libmalloc.a.
+ (clean): Deleted libmalloc.a.
+
+Tue Jul 5 14:17:28 1994 Michael I Bushnell (mib@churchy.gnu.ai.mit.edu)
+
+ * Makefile (SRCS, TAGSHDRS): New variables.
+ (TAGS): Deleted local definition; now it's in Makeconf.
+
+Thu May 19 00:54:54 1994 Roland McGrath (roland@churchy.gnu.ai.mit.edu)
+
+ * cprocs.c (_cthread_mutex_lock_routine,
+ _cthread_mutex_unlock_routine): New variables.
+
+Thu May 5 19:33:49 1994 Michael I Bushnell (mib@geech.gnu.ai.mit.edu)
+
+ * Makefile: (generic-sources): Removed malloc.c.
+ (malloc-sources, malloc-objects): new variables.
+ (DIST_FILES): added $(malloc-sources).
+ (libmalloc.a, $(hurdinst)/lib/libmalloc.a): New targets.
+ ($(hurdinst)/lib/malloc.o): Deleted target.
+ (all): added libmalloc.a.
+ (install): Changed $(hurdinst)/lib/malloc.o to
+ $(hurdinst)/lib/libmalloc.a
+ (clean): Added libmaloc.a.
+
+Thu May 5 04:30:34 1994 Roland McGrath (roland@churchy.gnu.ai.mit.edu)
+
+ * cthreads.c (cthread_init): Pass STACK instead of P to mig_init.
+
+ * cprocs.c (cproc_create): Pass normal offset value to
+ cproc_stack_base. Explicitly store CHILD ptr at base of its stack.
+
+ * stack.c (stack_init): Set __hurd_threadvar_stack_mask to find
+ the lowest address of the stack segment.
+ [STACK_GROWTH_UP]: Set __hurd_threadvar_stack_offset to
+ sizeof(ur_cthread_t*).
+ [! STACK_GROWTH_UP]: Set __hurd_threadvar_stack_offset to the size
+ of the stack minus space for the cproc_self ptr and thread variables.
+
+ * malloc.c (malloc, free, realloc): Change declarations to
+ standard types, so as not to conflict with the declarations in
+ <stdlib.h>.
+
+ * cthread_internals.h: #if 0 out declaration of malloc.
+ (struct cproc): #if 0 out `reply_port' member; libc implements that.
+ * cprocs.c (cproc_alloc): #if 0 out initialization of P->reply_port.
+
+ * Makefile (generic-sources): Omit sync.c. libc implements that.
+
+ * cprocs.c (cproc_block): Add __hurd_threadvar_max * sizeof (long
+ int) to 2nd arg to cproc_stack_base.
+
+ * stack.c: Include <hurd/threadvar.h>.
+ (__hurd_threadvar_stack_mask, __hurd_threadvar_stack_offset,
+ __hurd_threadvar_max): Define variables (uninitialized).
+ (stack_init): Set __hurd_threadvar_stack_mask to cthread_stack_mask.
+ Set __hurd_threadvar_stack_offset to point past the cproc_self ptr.
+ Add __hurd_threadvar_max * sizeof (long int) to 2nd arg to
+ cproc_stack_base.
+
+ * cthreads.h: #if 0 include of <machine/cthreads.h>.
+ Instead, include <machine-sp.h>.
+ (cthread_sp): Define using __thread_stack_pointer.
+ #if 0 out spin_lock definitions. Just include <spin-lock.h> instead.
+ (struct mutex): Move `held' member to be first in the structure.
+
+Wed May 4 14:55:15 1994 Michael I Bushnell (mib@churchy.gnu.ai.mit.edu)
+
+ * i386/thread.c: Changed inclusions because we aren't using
+ -I flags the way CMU's makefiles do.
+
+ * i386/csw.S: Convert comment character to /* */ pairs.
+
+ * Renamed csw.s to csw.S so that GCC knows to run cpp on it.
+ * Makefile (machine-sources): Change csw.s to csw.S.
+
+Wed May 4 07:11:46 1994 Roland McGrath (roland@churchy.gnu.ai.mit.edu)
+
+ * Makefile (GEN_SRCS): Renamed generic-sources.
+ (I386_SRCS): Replaced with machine-sources, omitting directory.
+ (SRCS): Renamed sources. Include $(machine-sources) and prepend
+ $(machine)/ directory name.
+ (headers): Define variable.
+ (OBJS): Renamed objects.
+ (VPATH): Define to $(machine).
diff --git a/libthreads/GNUmakefile.old b/libthreads/GNUmakefile.old
new file mode 100644
index 00000000..cc0f6de8
--- /dev/null
+++ b/libthreads/GNUmakefile.old
@@ -0,0 +1,38 @@
+CPPFLAGS = -nostdinc -I. -I/home/gd3/hurdinst/include
+CFLAGS = -g -O
+CPP=/usr1/gnu/DIST/lib/gcc-lib/i386-compaq-mach/2.4.5/cpp
+AS = as
+AR = ar
+RANLIB = ranlib
+CC = gcc
+
+VPATH=.:i386
+
+OBJS = cprocs.o cthreads.o malloc.o \
+ mig_support.o stack.o sync.o \
+ thread.o lock.o csw.o cthread_data.o
+
+all: libthreads.a
+
+install: all
+ cp libthreads.a /home/gd3/hurdinst/lib/libthreads.a
+ ranlib /home/gd3/hurdinst/lib/libthreads.a
+ cp cthreads.h /home/gd3/hurdinst/include/cthreads.h
+ cp i386/cthreads.h /home/gd3/hurdinst/include/i386/cthreads.h
+
+clean:
+ rm -f lib*.a Makedep* a.out core errs \
+ *.d *.s *.S *.o *.BAK *.CKP */*.BAK */*.CKP
+
+libthreads.a: $(OBJS)
+ rm -f $@
+ $(AR) crv $@ $(OBJS)
+ $(RANLIB) $@
+
+%.o: %.s
+ $(CPP) $(CPPFLAGS) $< > $*.as
+ $(AS) -o $@ $*.as
+ rm -f $*.as
+
+TAGS: *.c *.h
+ etags *.c *.h
diff --git a/libthreads/Makefile b/libthreads/Makefile
new file mode 100644
index 00000000..5b2bc62a
--- /dev/null
+++ b/libthreads/Makefile
@@ -0,0 +1,52 @@
+#
+# Copyright (C) 1994, 1995, 1996, 1997 Free Software Foundation, Inc.
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License as
+# published by the Free Software Foundation; either version 2, or (at
+# your option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+
+dir := libthreads
+makemode := library
+
+# In GNU mig_support.c, sync.c and machine/lock.s are omitted; that work is
+# all done in libc.
+SRCS := call.c cprocs.c cthread_data.c cthreads.c stack.c \
+ cancel-cond.c rwlock.c
+I386SRCS := i386/csw.S i386/thread.c
+
+# In GNU machine/cthreads.h is omitted; that work is done in libc headers.
+LCLHDRS := cthread_internals.h options.h cthreads.h rwlock.h
+
+OBJS = $(addsuffix .o,$(basename $(notdir $(SRCS) $(I386SRCS))))
+
+OTHERTAGS = $(I386SRCS) $(I386HDRS)
+
+libname = libthreads
+installhdrs = cthreads.h rwlock.h
+installhdrsubdir = .
+
+VPATH += $(srcdir)/$(asm_syntax)
+
+include ../Makeconf
+
+# The threads library was written by CMU. If you've ever experienced
+# what that means, you'll understand this line.
+CFLAGS := $(filter-out -Wall,$(CFLAGS))
+
+lndist: lndist-i386-files
+
+lndist-i386-files: $(top_srcdir)/hurd-snap/$(dir)/i386
+ ln $(addprefix $(srcdir)/,$(I386SRCS) $(I386HDRS)) $<
+
+$(top_srcdir)/hurd-snap/$(dir)/i386:
+ mkdir $@
diff --git a/libthreads/Makefile.CMU b/libthreads/Makefile.CMU
new file mode 100644
index 00000000..5c6cf544
--- /dev/null
+++ b/libthreads/Makefile.CMU
@@ -0,0 +1,93 @@
+#
+# Mach Operating System
+# Copyright (c) 1991,1990,1989 Carnegie Mellon University
+# All Rights Reserved.
+#
+# Permission to use, copy, modify and distribute this software and its
+# documentation is hereby granted, provided that both the copyright
+# notice and this permission notice appear in all copies of the
+# software, derivative works or modified versions, and any portions
+# thereof, and that both notices appear in supporting documentation.
+#
+# CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+# CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+# ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+#
+# Carnegie Mellon requests users of this software to return to
+#
+# Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+# School of Computer Science
+# Carnegie Mellon University
+# Pittsburgh PA 15213-3890
+#
+# any improvements or extensions that they make and grant Carnegie Mellon
+# the rights to redistribute these changes.
+#
+#
+# HISTORY
+# $Log: Makefile,v $
+# Revision 2.21 92/05/22 18:38:31 jfriedl
+# From Mike Kupfer <kupfer@sprite.Berkeley.EDU>:
+# Enable CTHREAD_DATA for the kernel bootstrap program.
+#
+# Revision 2.20 92/03/05 22:46:34 rpd
+# Changed to use double-colon rules for top-level targets.
+# [92/02/28 rpd]
+#
+# Revision 2.19 92/01/23 15:22:53 rpd
+# Revised for new Makefile organization.
+# [92/01/16 rpd]
+#
+# Revision 1.4 91/09/09 15:55:51 kupfer
+# MK63 merge.
+#
+# Revision 1.2 91/08/23 19:12:32 kupfer
+# Put back the changes for the Posix thread local data support.
+#
+
+# Define CTHREAD_DATA to enable source compatibility with the old
+# "cthread_data" interface.
+DEFS = -DCTHREAD_DATA
+
+include ${MAKETOP}Makefile-common
+
+# find machine-dependent files in machine subdirectory
+
+VPATH = .:${cpu}
+
+all :: libthreads.a
+
+install :: ${INSTALLDIR}/lib/libthreads.a
+
+release :: ${TRELEASEDIR}/lib/libthreads.a
+
+clean ::
+ ${RM} ${INSTALLDIR}/lib/libthreads.a
+
+# installation rules
+
+${INSTALLDIR}/lib/libthreads.a : libthreads.a
+ ${RM} $@
+ ${CP} $? $@
+ ${CHMOD_LIB} $@
+
+# release rules
+
+${TRELEASEDIR}/lib/libthreads.a : ${FRELEASEDIR}/lib/libthreads.a
+ ${RM} $@
+ ${CP} $? $@
+
+# build rules
+
+OBJS = cprocs.o cthreads.o malloc.o \
+ mig_support.o stack.o sync.o \
+ thread.o lock.o csw.o cthread_data.o
+
+libthreads.a : ${OBJS}
+ ${RM} $@
+ ${AR} cq $@ ${OBJS}
+ ${RANLIB} $@
+
+# For lint, do ``lint -I. -un *.c mips/*.c''
+
+-include Makedep
diff --git a/libthreads/Makefile.GNU b/libthreads/Makefile.GNU
new file mode 100644
index 00000000..bff8ed15
--- /dev/null
+++ b/libthreads/Makefile.GNU
@@ -0,0 +1,34 @@
+CPPFLAGS = -nostdinc -I. -I/usr1/gnu/DIST/include
+CFLAGS = -g -O
+CPP = /usr1/gnu/DIST/lib/gcc-cpp
+AS = as
+AR = ar
+RANLIB = ranlib
+CC = gcc
+
+VPATH=.:i386
+
+OBJS = cprocs.o cthreads.o malloc.o \
+ mig_support.o stack.o sync.o \
+ thread.o lock.o csw.o
+
+all: libthreads.a
+
+install: all
+ cp libthreads.a /usr1/gnu/DIST/lib/libthreads.a
+ ranlib /usr1/gnu/DIST/lib/libthreads.a
+
+clean:
+ rm -f lib*.a Makedep* a.out core errs \
+ *.d *.s *.S *.o *.BAK *.CKP */*.BAK */*.CKP
+
+libthreads.a: $(OBJS)
+ rm -f $@
+ $(AR) crv $@ $(OBJS)
+ $(RANLIB) $@
+
+%.o: %.s
+ $(CPP) $(CPPFLAGS) $< > $*.as
+ $(AS) -o $@ $*.as
+ rm -f $*.as
+
diff --git a/libthreads/Makefile.GNU2 b/libthreads/Makefile.GNU2
new file mode 100644
index 00000000..7dead299
--- /dev/null
+++ b/libthreads/Makefile.GNU2
@@ -0,0 +1,34 @@
+CPPFLAGS = -nostdinc -I. -I/usr1/gnu/DIST/include -I/usr1/gnu/DIST/hurd/include
+CFLAGS = -g -O
+CPP = /usr1/gnu/DIST/lib/gcc-cpp
+AS = as
+AR = ar
+RANLIB = ranlib
+CC = gcc
+
+VPATH=.:i386
+
+OBJS = cprocs.o cthreads.o malloc.o \
+ mig_support.o stack.o sync.o \
+ thread.o lock.o csw.o cthread_data.o
+
+all: libthreads.a
+
+install: all
+ cp libthreads.a /usr1/gnu/DIST/lib/libthreads.a
+ ranlib /usr1/gnu/DIST/lib/libthreads.a
+
+clean:
+ rm -f lib*.a Makedep* a.out core errs \
+ *.d *.s *.S *.o *.BAK *.CKP */*.BAK */*.CKP
+
+libthreads.a: $(OBJS)
+ rm -f $@
+ $(AR) crv $@ $(OBJS)
+ $(RANLIB) $@
+
+%.o: %.s
+ $(CPP) $(CPPFLAGS) $< > $*.as
+ $(AS) -o $@ $*.as
+ rm -f $*.as
+
diff --git a/libthreads/call.c b/libthreads/call.c
new file mode 100644
index 00000000..26efc8fc
--- /dev/null
+++ b/libthreads/call.c
@@ -0,0 +1,81 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * HISTORY
+ * $Log: call.c,v $
+ * Revision 2.4 91/05/14 17:56:00 mrt
+ * Correcting copyright
+ *
+ * Revision 2.3 91/02/14 14:19:20 mrt
+ * Added new Mach copyright
+ * [91/02/13 12:40:44 mrt]
+ *
+ * Revision 2.2 90/01/19 14:36:50 rwd
+ * Created. Routines to replace thread_* and cthread_call_on.
+ * [90/01/03 rwd]
+ *
+ */
+
+#include <cthreads.h>
+#include "cthread_internals.h"
+
+#ifdef THREAD_CALLS
+kern_return_t cthread_get_state(thread)
+cthread_t thread;
+{
+ cproc_t p = thread->ur;
+}
+
+kern_return_t cthread_set_state(thread)
+cthread_t thread;
+{
+ cproc_t p = thread->ur;
+}
+
+kern_return_t cthread_abort(thread)
+cthread_t thread;
+{
+ cproc_t p = thread->ur;
+}
+
+kern_return_t cthread_resume(thread)
+cthread_t thread;
+{
+ cproc_t p = thread->ur;
+}
+
+kern_return_t cthread_suspend(thread)
+cthread_t thread;
+{
+ cproc_t p = thread->ur;
+}
+
+kern_return_t cthread_call_on(thread)
+cthread_t thread;
+{
+ cproc_t p = thread->ur;
+}
+#endif THREAD_CALLS
diff --git a/libthreads/cancel-cond.c b/libthreads/cancel-cond.c
new file mode 100644
index 00000000..b7780d03
--- /dev/null
+++ b/libthreads/cancel-cond.c
@@ -0,0 +1,116 @@
+/* Modified condition_wait that checks for cancellation.
+Copyright (C) 1995, 1996 Free Software Foundation, Inc.
+This file is part of the GNU C Library.
+
+The GNU C Library is free software; you can redistribute it and/or
+modify it under the terms of the GNU Library General Public License as
+published by the Free Software Foundation; either version 2 of the
+License, or (at your option) any later version.
+
+The GNU C Library is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+Library General Public License for more details.
+
+You should have received a copy of the GNU Library General Public
+License along with the GNU C Library; see the file COPYING.LIB. If
+not, write to the Free Software Foundation, Inc., 675 Mass Ave,
+Cambridge, MA 02139, USA. */
+
+#include <hurd/signal.h>
+#include <cthreads.h>
+#include "cthread_internals.h"
+#include <assert.h>
+
+/* Just like condition_wait, but cancellable. Returns true if cancelled. */
+int
+hurd_condition_wait (condition_t c, mutex_t m)
+{
+ /* This function will be called by hurd_thread_cancel while we are blocked
+ in the condition_wait. We wake up all threads blocked on C,
+ so our thread will wake up and notice the cancellation flag. */
+ void cancel_me (void)
+ {
+ condition_broadcast (c);
+ }
+ struct hurd_sigstate *ss = _hurd_self_sigstate ();
+ cproc_t p = cproc_self ();
+ int cancel;
+
+ assert (ss->intr_port == MACH_PORT_NULL); /* Sanity check for signal bugs. */
+
+ p->state = CPROC_CONDWAIT | CPROC_SWITCHING;
+
+ /* Atomically enqueue our cproc on the condition variable's queue of
+ waiters, and mark our sigstate to indicate that `cancel_me' must be
+ called to wake us up. We must hold the sigstate lock while acquiring
+ the condition variable's lock and tweaking it, so that
+ hurd_thread_cancel can never suspend us and then deadlock in
+ condition_broadcast waiting for the condition variable's lock. */
+
+ spin_lock (&ss->lock);
+ spin_lock (&c->lock);
+ cancel = ss->cancel;
+ if (cancel)
+ /* We were cancelled before doing anything. Don't block at all. */
+ ss->cancel = 0;
+ else
+ {
+ /* Put us on the queue so that condition_broadcast will know to wake
+ us up. */
+ cthread_queue_enq (&c->queue, p);
+ /* Tell hurd_thread_cancel how to unblock us. */
+ ss->cancel_hook = &cancel_me;
+ }
+ spin_unlock (&c->lock);
+ spin_unlock (&ss->lock);
+
+ if (cancel)
+ {
+ /* Cancelled on entry. Just leave the mutex locked. */
+ m = NULL;
+ p->state = CPROC_RUNNING;
+ }
+ else
+ {
+ /* Now unlock the mutex and block until woken. */
+
+#ifdef WAIT_DEBUG
+ p->waiting_for = (char *)c;
+#endif WAIT_DEBUG
+
+ mutex_unlock (m);
+
+ spin_lock (&p->lock);
+ if (p->state & CPROC_SWITCHING)
+ cproc_block ();
+ else
+ {
+ /* We were woken up someplace before reacquiring P->lock.
+ We can just continue on. */
+ p->state = CPROC_RUNNING;
+ spin_unlock(&p->lock);
+ }
+
+#ifdef WAIT_DEBUG
+ p->waiting_for = (char *)0;
+#endif WAIT_DEBUG
+ }
+
+ spin_lock (&ss->lock);
+ /* Clear the hook, now that we are done blocking. */
+ ss->cancel_hook = NULL;
+ /* Check the cancellation flag; we might have unblocked due to
+ cancellation rather than a normal condition_signal or
+ condition_broadcast (or we might have just happened to get cancelled
+ right after waking up). */
+ cancel |= ss->cancel;
+ ss->cancel = 0;
+ spin_unlock (&ss->lock);
+
+ if (m)
+ /* Reacquire the mutex and return. */
+ mutex_lock (m);
+
+ return cancel;
+}
diff --git a/libthreads/cprocs.c b/libthreads/cprocs.c
new file mode 100644
index 00000000..fe7615cf
--- /dev/null
+++ b/libthreads/cprocs.c
@@ -0,0 +1,1246 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * HISTORY
+ * $Log: cprocs.c,v $
+ * Revision 1.11 2000/01/09 23:00:18 roland
+ * 2000-01-09 Roland McGrath <roland@baalperazim.frob.com>
+ *
+ * * cprocs.c (cproc_alloc): Initialize P->wired and P->msg here (code
+ * from cthread_wire).
+ * (cthread_wire): Reduce to just an assert, cthreads always wired.
+ * (chtread_unwire): Abort if called.
+ *
+ * Revision 1.10 1998/07/20 06:59:14 roland
+ * 1998-07-20 Roland McGrath <roland@baalperazim.frob.com>
+ *
+ * * i386/csw.S (cproc_prepare): Take address of cthread_body as third
+ * arg, so we don't have to deal with PIC magic to find its address
+ * without producing a text reloc.
+ * * cprocs.c (cproc_create): Pass &cthread_body to cproc_prepare.
+ *
+ * Revision 1.9 1996/11/18 23:54:51 thomas
+ * Mon Nov 18 16:36:56 1996 Thomas Bushnell, n/BSG <thomas@gnu.ai.mit.edu>
+ *
+ * * cprocs.c (cproc_create): Cast CHILD in assignment.
+ *
+ * Revision 1.8 1995/12/06 19:48:34 mib
+ * (condition_unimplies): Take address of (*impp)->next in assignment to
+ * IMPP on loop step instruction.
+ *
+ * Revision 1.7 1995/09/22 17:51:10 roland
+ * Include hurd/threadvar.h.
+ *
+ * Revision 1.6 1995/08/30 15:57:47 mib
+ * Repair typos.
+ *
+ * Revision 1.5 1995/08/30 15:50:53 mib
+ * (cond_signal): If this condition has implications, see if one of them
+ * needs to be signalled when we have no waiters.
+ * (cond_broadcast): Signal the implications list too.
+ * (condition_implies, condition_unimplies): New functions.
+ *
+ * Revision 1.4 1995/04/04 21:04:29 roland
+ * (mutex_lock_solid, mutex_unlock_solid): Renamed to __*.
+ * (_cthread_mutex_lock_routine, _cthread_mutex_unlock_routine): Variables
+ * removed.
+ *
+ * Revision 1.3 1994/05/19 04:55:30 roland
+ * entered into RCS
+ *
+ * Revision 2.15 92/03/06 14:09:31 rpd
+ * Replaced swtch_pri with yield.
+ * [92/03/06 rpd]
+ *
+ * Revision 2.14 91/08/28 11:19:16 jsb
+ * Fixed the loop in cproc_fork_child that frees cprocs.
+ * [91/08/23 rpd]
+ *
+ * Revision 2.13 91/07/31 18:33:04 dbg
+ * Fix some more bad types. Ints are NOT pointers.
+ *
+ * Fix argument type mismatch in cproc_create.
+ * [91/07/30 17:32:59 dbg]
+ *
+ * Revision 2.12 91/05/14 17:56:11 mrt
+ * Correcting copyright
+ *
+ * Revision 2.11 91/02/14 14:19:26 mrt
+ * Added new Mach copyright
+ * [91/02/13 12:40:50 mrt]
+ *
+ * Revision 2.10 90/11/05 14:36:41 rpd
+ * Added cproc_fork_{prepare,parent,child}.
+ * [90/11/02 rwd]
+ *
+ * Fix for positive stack growth.
+ * [90/11/01 rwd]
+ *
+ * Add spin_lock_t.
+ * [90/10/31 rwd]
+ *
+ * Revision 2.9 90/10/12 13:07:12 rpd
+ * Fix type
+ * [90/10/10 15:09:59 rwd]
+ *
+ * Comment code.
+ * [90/10/02 rwd]
+ *
+ * Revision 2.8 90/09/09 14:34:44 rpd
+ * Remove special mutex. Remove thread_calls and debug_mutex
+ * [90/08/24 rwd]
+ * Fix up old call to cthread_msg_busy to new format.
+ * [90/08/22 rwd]
+ *
+ * Revision 2.7 90/08/06 15:09:17 rwd
+ * Fixed arguments to cthread_mach_msg.
+ * [90/06/26 rwd]
+ * Add additional STATISTICS.
+ * [90/06/07 rwd]
+ *
+ * Attempt to reduce number of times a cthread is released to to a
+ * msg_receive by adding min/max instead of single number to
+ * cthread_msg calls.
+ * [90/06/06 rwd]
+ *
+ * Revision 2.6 90/06/02 15:13:36 rpd
+ * Converted to new IPC.
+ * [90/03/20 20:46:16 rpd]
+ *
+ * Revision 2.5 90/05/29 18:40:11 rwd
+ * Don't incr special field until the mutex grab is successful.
+ * [90/05/09 rwd]
+ *
+ * Revision 2.4 90/03/14 21:12:02 rwd
+ * Added WAIT_DEBUG code for deadlock debugging.
+ * [90/03/01 rwd]
+ * Insert cprocs in cproc_list as allocated.
+ * [90/03/01 10:20:16 rwd]
+ *
+ * Revision 2.3 90/01/19 14:36:57 rwd
+ * Make cthread_msg_busy only release new thread if this is still
+ * busy. Ie don't release two on back to back calls.
+ * [90/01/11 rwd]
+ * Add THREAD_CALL code. Add CPROC_ARUN state.
+ * [90/01/03 rwd]
+ * Add new cthread_msg_rpc call
+ * [89/12/20 rwd]
+ * Change cproc_self pointer to top of stack. Now need to change
+ * the stack of the first thread.
+ * [89/12/12 rwd]
+ *
+ * Revision 2.2 89/12/08 19:53:13 rwd
+ * Added CPROC_CONDWAIT state to deal with lock held
+ * across mutex_unlock problem.
+ * [89/11/29 rwd]
+ * Changed mutexes to not hand off. MUTEX_EXTRA conditional is
+ * now obsolete.
+ * [89/11/27 rwd]
+ *
+ * Add MUTEX_EXTRA code for extra kernel threads to serve special
+ * mutexes in time of need.
+ * [89/11/25 rwd]
+ * Add MUTEX_SPECIAL and DEBUG_MUTEX code
+ * [89/11/24 rwd]
+ * Changed mutex_lock to mutex_lock_solid. Mutex_lock is now a
+ * macro which tries the spin_lock before making a subroutine call.
+ * Mutex_unlock is now a macro with mutex_unlock_solid for worst case.
+ * [89/11/13 rwd]
+ *
+ * Rewrite most to merge coroutine and thread implementation.
+ * New routines are cthread_set_kernel_limit, cthread_kernel_limit,
+ * cthread_wire, cthread_unwire, and cthread_receive.
+ * [89/10/23 rwd]
+ *
+ * Revision 2.1 89/08/03 17:07:10 rwd
+ * Created.
+ *
+ * 11-Apr-89 David Golub (dbg) at Carnegie-Mellon University
+ * Made condition_yield loop break if swtch_pri returns TRUE (in
+ * case we fix it).
+ *
+ * 31-Mar-89 David Golub (dbg) at Carnegie-Mellon University
+ * Change cond_signal, cond_broadcast, and cproc_continue so that
+ * the condition's spin lock is not held while continuing the
+ * process.
+ *
+ * 16-Jan-89 David Golub (dbg) at Carnegie-Mellon University
+ * Changes for stand-alone library to run on pure kernel:
+ * . made IPC_WAIT standard, as calls that are used if IPC_WAIT == 0
+ * vanished a year ago.
+ * . Removed (as much as possible) references to stdio or other U*X
+ * features.
+ *
+ *
+ * 01-Apr-88 Eric Cooper (ecc) at Carnegie Mellon University
+ * Changed condition_clear(c) to acquire c->lock,
+ * to serialize after any threads still doing condition_signal(c).
+ * Suggested by Dan Julin.
+ *
+ * 19-Feb-88 Eric Cooper (ecc) at Carnegie Mellon University
+ * Extended the inline scripts to handle spin_unlock() and mutex_unlock().
+ *
+ * 28-Jan-88 David Golub (dbg) at Carnegie Mellon University
+ * Removed thread_data argument from thread_create
+ * and converted to new thread_set_state call.
+ *
+ * 01-Dec-87 Eric Cooper (ecc) at Carnegie Mellon University
+ * Added inline expansion for cthread_sp() function.
+ *
+ * 21-Aug-87 Eric Cooper (ecc) at Carnegie Mellon University
+ * Fixed uninitialized reply_port in cproc_alloc() (found by rds).
+ *
+ * 14-Aug-87 Eric Cooper (ecc) at Carnegie Mellon University
+ * Tried using return value of swtch() to guide condition_wait().
+ * Performance was worse than using a hybrid spin/yield/block
+ * scheme, so the version using swtch() was commented out.
+ * Disabled IPC_WAIT in released version.
+ *
+ * 13-Aug-87 Eric Cooper (ecc) at Carnegie Mellon University
+ * Added IPC_WAIT option.
+ * If defined, thread synchronization (condition_wait() and
+ * cproc_continue()) are implemented using msg_receive() and
+ * msg_send() instead of thread_suspend() and thread_resume().
+ *
+ * 11-Aug-87 Eric Cooper (ecc) at Carnegie Mellon University
+ * Moved thread reply port to cproc structure in cthread_internals.h,
+ * because mig calls are made while cproc is idle (no cthread structure).
+ * Changed cproc_switch() and cproc_start (COROUTINE implementation)
+ * to use address of saved context, rather than address of enclosing cproc,
+ * to eliminate dependency on cproc layout.
+ */
+/*
+ * File: cprocs.c
+ * Author: Eric Cooper, Carnegie Mellon University
+ * Date: Aug, 1987
+ *
+ * Implementation of cprocs (lightweight processes)
+ * and primitive synchronization operations.
+ */
+
+
+#include <cthreads.h>
+#include "cthread_internals.h"
+#include <mach/message.h>
+#include <hurd/threadvar.h> /* GNU */
+#include <assert.h>
+
+/*
+ * C Threads imports:
+ */
+extern void alloc_stack();
+extern void cproc_switch(); /* cproc context switch */
+extern void cproc_start_wait(); /* cproc idle thread */
+extern vm_offset_t cproc_stack_base(); /* return start of stack */
+extern vm_offset_t stack_init();
+
+/*
+ * Port_entry's are used by cthread_mach_msg to store information
+ * about each port/port_set for which it is managing threads
+ */
+
+typedef struct port_entry {
+ struct port_entry *next; /* next port_entry */
+ mach_port_t port; /* which port/port_set */
+ struct cthread_queue queue; /* queue of runnable threads for
+ this port/port_set */
+ int min; /* minimum number of kernel threads
+ to be used by this port/port_set */
+ int max; /* maximum number of kernel threads
+ to be used by this port/port_set */
+ int held; /* actual number of kernel threads
+				   currently in use */
+ spin_lock_t lock; /* lock governing all above fields */
+} *port_entry_t;
+
+#define PORT_ENTRY_NULL ((port_entry_t) 0)
+
+/* Available to outside for statistics */
+
+int cthread_wait_stack_size = 8192; /* stack size for idle threads */
+int cthread_max_kernel_threads = 0; /* max kernel threads */
+int cthread_kernel_threads = 0; /* current kernel threads */
+private spin_lock_t n_kern_lock = SPIN_LOCK_INITIALIZER;
+ /* lock for 2 above */
+#ifdef STATISTICS
+int cthread_ready = 0; /* currently runnable */
+int cthread_running = 1; /* currently running */
+int cthread_waiting = 0; /* currently waiting */
+int cthread_wired = 0; /* currently wired */
+private spin_lock_t wired_lock = SPIN_LOCK_INITIALIZER;
+ /* lock for above */
+int cthread_wait_stacks = 0; /* total cthread waiting stacks */
+int cthread_waiters = 0;		/* total of waiters */
+int cthread_wakeup = 0; /* total times woken when starting to
+ block */
+int cthread_blocked = 0; /* total blocked */
+int cthread_rnone = 0; /* total times no cthread available
+ to meet minimum for port_entry */
+int cthread_yields = 0; /* total cthread_yields */
+int cthread_none = 0; /* total idle wakeups w/o runnable */
+int cthread_switches = 0; /* total number of cproc_switches */
+int cthread_no_mutex = 0; /* total number times woken to get
+ mutex and couldn't */
+private spin_lock_t mutex_count_lock = SPIN_LOCK_INITIALIZER;
+ /* lock for above */
+#endif STATISTICS
+
+cproc_t cproc_list = NO_CPROC; /* list of all cprocs */
+private cproc_list_lock = SPIN_LOCK_INITIALIZER;
+ /* lock for above */
+private int cprocs_started = FALSE; /* initialized? */
+private struct cthread_queue ready = QUEUE_INITIALIZER;
+ /* ready queue */
+private int ready_count = 0; /* number of ready threads on ready
+ queue - number of messages sent */
+private spin_lock_t ready_lock = SPIN_LOCK_INITIALIZER;
+ /* lock for 2 above */
+private mach_port_t wait_port = MACH_PORT_NULL;
+ /* port on which idle threads wait */
+private int wait_count = 0; /* number of waiters - messages pending
+ to wake them */
+private struct cthread_queue waiters = QUEUE_INITIALIZER;
+ /* queue of cthreads to run as idle */
+private spin_lock_t waiters_lock = SPIN_LOCK_INITIALIZER;
+ /* lock for 2 above */
+private port_entry_t port_list = PORT_ENTRY_NULL;
+ /* master list of port_entries */
+private spin_lock_t port_lock = SPIN_LOCK_INITIALIZER;
+ /* lock for above queue */
+private mach_msg_header_t wakeup_msg; /* prebuilt message used by idle
+ threads */
+
+/*
+ * Return current value for max kernel threads
+ * Note: 0 means no limit
+ */
+
+cthread_kernel_limit()
+{
+ return cthread_max_kernel_threads;
+}
+
+/*
+ * Set max number of kernel threads
+ * Note: This will not currently terminate existing threads
+ * over maximum.
+ */
+
+cthread_set_kernel_limit(n)
+ int n;
+{
+ cthread_max_kernel_threads = n;
+}
+
+/*
+ * Wire a cthread to its current kernel thread
+ */
+
+void cthread_wire()
+{
+ register cproc_t p = cproc_self();
+ kern_return_t r;
+
+ /* In GNU, we wire all threads on creation (in cproc_alloc). */
+ assert (p->wired != MACH_PORT_NULL);
+}
+
+/*
+ * Unwire a cthread. Deallocate its wait port.
+ */
+
+void cthread_unwire()
+{
+ register cproc_t p = cproc_self();
+
+ /* This is bad juju in GNU, where all cthreads must be wired. */
+ abort();
+#if 0
+ if (p->wired != MACH_PORT_NULL) {
+ MACH_CALL(mach_port_mod_refs(mach_task_self(), p->wired,
+ MACH_PORT_RIGHT_SEND, -1), r);
+ MACH_CALL(mach_port_mod_refs(mach_task_self(), p->wired,
+ MACH_PORT_RIGHT_RECEIVE, -1), r);
+ p->wired = MACH_PORT_NULL;
+#ifdef STATISTICS
+ spin_lock(&wired_lock);
+ cthread_wired--;
+ spin_unlock(&wired_lock);
+#endif STATISTICS
+ }
+#endif
+}
+
+private cproc_t
+cproc_alloc()
+{
+ register cproc_t p = (cproc_t) malloc(sizeof(struct cproc));
+ kern_return_t r;
+
+ p->incarnation = NO_CTHREAD;
+#if 0
+ /* This member is not used in GNU. */
+ p->reply_port = MACH_PORT_NULL;
+#endif
+
+ spin_lock_init(&p->lock);
+ p->state = CPROC_RUNNING;
+ p->busy = 0;
+
+ /*
+ * In GNU, every cthread must be wired. So we just
+ * initialize P->wired on creation.
+ *
+ * A wired thread has a port associated with it for all
+ * of its wait/block cases. We also prebuild a wakeup
+ * message.
+ */
+
+ MACH_CALL(mach_port_allocate(mach_task_self(),
+ MACH_PORT_RIGHT_RECEIVE,
+ &p->wired), r);
+ MACH_CALL(mach_port_insert_right(mach_task_self(),
+ p->wired, p->wired,
+ MACH_MSG_TYPE_MAKE_SEND), r);
+ p->msg.msgh_bits = MACH_MSGH_BITS(MACH_MSG_TYPE_COPY_SEND, 0);
+ p->msg.msgh_size = 0; /* initialized in call */
+ p->msg.msgh_remote_port = p->wired;
+ p->msg.msgh_local_port = MACH_PORT_NULL;
+ p->msg.msgh_kind = MACH_MSGH_KIND_NORMAL;
+ p->msg.msgh_id = 0;
+
+ spin_lock(&cproc_list_lock);
+ p->list = cproc_list;
+ cproc_list = p;
+ spin_unlock(&cproc_list_lock);
+
+ return p;
+}
+
+/*
+ * Called by cthread_init to set up initial data structures.
+ */
+
+vm_offset_t
+cproc_init()
+{
+ kern_return_t r;
+
+ cproc_t p = cproc_alloc();
+
+ cthread_kernel_threads = 1;
+
+ MACH_CALL(mach_port_allocate(mach_task_self(),
+ MACH_PORT_RIGHT_RECEIVE,
+ &wait_port), r);
+ MACH_CALL(mach_port_insert_right(mach_task_self(),
+ wait_port, wait_port,
+ MACH_MSG_TYPE_MAKE_SEND), r);
+
+ wakeup_msg.msgh_bits = MACH_MSGH_BITS(MACH_MSG_TYPE_COPY_SEND, 0);
+ wakeup_msg.msgh_size = 0; /* initialized in call */
+ wakeup_msg.msgh_remote_port = wait_port;
+ wakeup_msg.msgh_local_port = MACH_PORT_NULL;
+ wakeup_msg.msgh_kind = MACH_MSGH_KIND_NORMAL;
+ wakeup_msg.msgh_id = 0;
+
+ cprocs_started = TRUE;
+
+
+ /*
+ * We pass back the new stack which should be switched to
+	 * by crt0. This guarantees correct size and alignment.
+ */
+ return (stack_init(p));
+}
+
+/*
+ * Insert cproc on ready queue. Make sure it is ready for queue by
+ * synching on its lock. Just send message to wired cproc.
+ */
+
+private int cproc_ready(p, preq)
+ register cproc_t p;
+ register int preq;
+{
+ register cproc_t s=cproc_self();
+ kern_return_t r;
+
+ if (p->wired != MACH_PORT_NULL) {
+ r = mach_msg(&p->msg, MACH_SEND_MSG,
+ sizeof p->msg, 0, MACH_PORT_NULL,
+ MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
+#ifdef CHECK_STATUS
+ if (r != MACH_MSG_SUCCESS) {
+ mach_error("mach_msg", r);
+ exit(1);
+ }
+#endif CHECK_STATUS
+ return TRUE;
+ }
+ spin_lock(&p->lock); /* is it ready to be queued? It
+ can appear on a queue before
+ being switched from. This lock
+ is released by cproc_switch as
+ its last operation. */
+ if (p->state & CPROC_SWITCHING) {
+ /*
+ * We caught it early on. Just set to RUNNING
+ * and we will save a lot of time.
+ */
+ p->state = (p->state & ~CPROC_SWITCHING) | CPROC_RUNNING;
+ spin_unlock(&p->lock);
+ return TRUE;
+ }
+ spin_unlock(&p->lock);
+
+ spin_lock(&ready_lock);
+
+ if (preq) {
+ cthread_queue_preq(&ready, p);
+ } else {
+ cthread_queue_enq(&ready, p);
+ }
+#ifdef STATISTICS
+ cthread_ready++;
+#endif STATISTICS
+ ready_count++;
+
+ if ((s->state & CPROC_CONDWAIT) && !(s->wired)) {
+ /*
+		 * This is an optimization. Don't bother waking anyone to grab
+ * this guy off the ready queue since my thread will block
+ * momentarily for the condition wait.
+ */
+
+ spin_unlock(&ready_lock);
+ return TRUE;
+ }
+
+ if ((ready_count > 0) && wait_count) {
+ wait_count--;
+ ready_count--;
+ spin_unlock(&ready_lock);
+ r = mach_msg(&wakeup_msg, MACH_SEND_MSG,
+ sizeof wakeup_msg, 0, MACH_PORT_NULL,
+ MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
+#ifdef CHECK_STATUS
+ if (r != MACH_MSG_SUCCESS) {
+ mach_error("mach_msg", r);
+ exit(1);
+ }
+#endif CHECK_STATUS
+ return TRUE;
+ }
+ spin_unlock(&ready_lock);
+ return FALSE;
+}
+
+/*
+ * This is only run on a partial "waiting" stack and called from
+ * cproc_start_wait
+ */
+
+void
+cproc_waiting(p)
+ register cproc_t p;
+{
+ mach_msg_header_t msg;
+ register cproc_t new;
+ kern_return_t r;
+
+#ifdef STATISTICS
+ spin_lock(&ready_lock);
+ cthread_waiting++;
+ cthread_waiters++;
+ spin_unlock(&ready_lock);
+#endif STATISTICS
+ for (;;) {
+ MACH_CALL(mach_msg(&msg, MACH_RCV_MSG,
+ 0, sizeof msg, wait_port,
+ MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL), r);
+ spin_lock(&ready_lock);
+ cthread_queue_deq(&ready, cproc_t, new);
+ if (new != NO_CPROC) break;
+ wait_count++;
+ ready_count++;
+#ifdef STATISTICS
+ cthread_none++;
+#endif STATISTICS
+ spin_unlock(&ready_lock);
+ }
+#ifdef STATISTICS
+ cthread_ready--;
+ cthread_running++;
+ cthread_waiting--;
+#endif STATISTICS
+ spin_unlock(&ready_lock);
+ spin_lock(&new->lock);
+ new->state = CPROC_RUNNING;
+ spin_unlock(&new->lock);
+ spin_lock(&waiters_lock);
+ cthread_queue_enq(&waiters, p);
+ spin_lock(&p->lock);
+ spin_unlock(&waiters_lock);
+ cproc_switch(&p->context,&new->context,&p->lock);
+}
+
+/*
+ * Get a waiter with stack
+ *
+ */
+
+cproc_t
+cproc_waiter()
+{
+ register cproc_t waiter;
+
+ spin_lock(&waiters_lock);
+ cthread_queue_deq(&waiters, cproc_t, waiter);
+ spin_unlock(&waiters_lock);
+ if (waiter == NO_CPROC) {
+ vm_address_t base;
+ kern_return_t r;
+#ifdef STATISTICS
+ spin_lock(&waiters_lock);
+ cthread_wait_stacks++;
+ spin_unlock(&waiters_lock);
+#endif STATISTICS
+ waiter = cproc_alloc();
+ MACH_CALL(vm_allocate(mach_task_self(), &base,
+ cthread_wait_stack_size, TRUE), r);
+ waiter->stack_base = base;
+ waiter->stack_size = cthread_wait_stack_size;
+ }
+ return (waiter);
+}
+
+
+/*
+ * Current cproc is blocked so switch to any ready cprocs, or, if
+ * none, go into the wait state.
+ *
+ * You must hold cproc_self()->lock when called.
+ */
+
+cproc_block()
+{
+ extern unsigned int __hurd_threadvar_max; /* GNU */
+ register cproc_t waiter, new, p = cproc_self();
+ register int extra;
+
+ if (p->wired != MACH_PORT_NULL) {
+ mach_msg_header_t msg;
+ kern_return_t r;
+
+ spin_unlock(&p->lock);
+ MACH_CALL(mach_msg(&msg, MACH_RCV_MSG,
+ 0, sizeof msg, p->wired,
+ MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL), r);
+ return;
+ }
+ p->state = CPROC_SWITCHING;
+ spin_unlock(&p->lock);
+ spin_lock(&ready_lock);
+#ifdef STATISTICS
+ cthread_blocked++;
+#endif STATISTICS
+ cthread_queue_deq(&ready, cproc_t, new);
+ if (new) {
+#ifdef STATISTICS
+ cthread_ready--;
+ cthread_switches++;
+#endif STATISTICS
+ ready_count--;
+ spin_unlock(&ready_lock);
+ spin_lock(&p->lock);
+ if (p->state == CPROC_RUNNING) { /* have we been saved */
+ spin_unlock(&p->lock);
+#ifdef STATISTICS
+ spin_lock(&ready_lock);
+ cthread_wakeup++;
+ cthread_switches--;
+ spin_unlock(&ready_lock);
+#endif STATISTICS
+			cproc_ready(new, 1);	/* requeue at head where it was */
+ } else {
+ p->state = CPROC_BLOCKED;
+			spin_lock(&new->lock);	/* in case still switching */
+ new->state = CPROC_RUNNING;
+ spin_unlock(&new->lock);
+ cproc_switch(&p->context,&new->context,&p->lock);
+ }
+ } else {
+ wait_count++;
+#ifdef STATISTICS
+ cthread_running--;
+#endif STATISTICS
+ spin_unlock(&ready_lock);
+ waiter = cproc_waiter();
+ spin_lock(&p->lock);
+ if (p->state == CPROC_RUNNING) { /* we have been saved */
+ spin_unlock(&p->lock);
+ spin_lock(&ready_lock);
+ wait_count--;
+#ifdef STATISTICS
+ cthread_running++;
+ cthread_wakeup++;
+#endif STATISTICS
+ spin_unlock(&ready_lock);
+ spin_lock(&waiters_lock);
+ cthread_queue_preq(&waiters, waiter);
+ spin_unlock(&waiters_lock);
+ } else {
+ p->state = CPROC_BLOCKED;
+ spin_lock(&waiter->lock); /* in case still switching */
+ spin_unlock(&waiter->lock);
+ cproc_start_wait
+ (&p->context, waiter,
+ cproc_stack_base(waiter,
+ sizeof(ur_cthread_t *) +
+ /* Account for GNU per-thread
+ variables. */
+ __hurd_threadvar_max *
+ sizeof (long int)),
+ &p->lock);
+ }
+ }
+}
+
+/*
+ * Implement C threads using MACH threads.
+ */
+cproc_t
+cproc_create()
+{
+ register cproc_t child = cproc_alloc();
+ register kern_return_t r;
+ extern void cproc_setup();
+ extern void cproc_prepare();
+ extern void cthread_body();
+ thread_t n;
+
+ alloc_stack(child);
+ spin_lock(&n_kern_lock);
+ if (cthread_max_kernel_threads == 0 ||
+ cthread_kernel_threads < cthread_max_kernel_threads) {
+ cthread_kernel_threads++;
+ spin_unlock(&n_kern_lock);
+ MACH_CALL(thread_create(mach_task_self(), &n), r);
+ cproc_setup(child, n, cthread_body); /* machine dependent */
+ MACH_CALL(thread_resume(n), r);
+#ifdef STATISTICS
+ spin_lock(&ready_lock);
+ cthread_running++;
+ spin_unlock(&ready_lock);
+#endif STATISTICS
+ } else {
+ vm_offset_t stack;
+ spin_unlock(&n_kern_lock);
+ child->state = CPROC_BLOCKED;
+ /* The original CMU code does the excessively clever
+ optimization of putting CHILD at the base of the stack
+ and setting up to be the argument to cthread_body in the
+ same place (by passing zero as the second arg to
+ cproc_stack_base here).. This doesn't fly for GNU,
+ because we need some more space allocated at the base of
+ the stack, after the cproc_self pointer (where CHILD is
+ stored). */
+ stack = cproc_stack_base(child,
+ sizeof(ur_cthread_t *) +
+ /* Account for GNU per-thread
+ variables. */
+ __hurd_threadvar_max *
+ sizeof (long int));
+ cproc_prepare(child, &child->context, stack, &cthread_body);
+ /* Set up the cproc_self ptr at the base of CHILD's stack. */
+ ur_cthread_ptr(stack) = (ur_cthread_t) child;
+ cproc_ready(child,0);
+ }
+ return child;
+}
+
+void
+condition_wait(c, m)
+ register condition_t c;
+ mutex_t m;
+{
+ register cproc_t p = cproc_self();
+
+ p->state = CPROC_CONDWAIT | CPROC_SWITCHING;
+
+ spin_lock(&c->lock);
+ cthread_queue_enq(&c->queue, p);
+ spin_unlock(&c->lock);
+#ifdef WAIT_DEBUG
+ p->waiting_for = (char *)c;
+#endif WAIT_DEBUG
+
+ mutex_unlock(m);
+
+ spin_lock(&p->lock);
+ if (p->state & CPROC_SWITCHING) {
+ cproc_block();
+ } else {
+ p->state = CPROC_RUNNING;
+ spin_unlock(&p->lock);
+ }
+
+
+#ifdef WAIT_DEBUG
+ p->waiting_for = (char *)0;
+#endif WAIT_DEBUG
+
+ /*
+ * Re-acquire the mutex and return.
+ */
+ mutex_lock(m);
+}
+
+/* Declare that IMPLICATOR should consider IMPLICATAND's waiter queue
+ to be an extension of its own queue. It is an error for either
+ condition to be deallocated as long as the implication persists. */
+void
+condition_implies (condition_t implicator, condition_t implicatand)
+{
+ struct cond_imp *imp;
+
+ imp = malloc (sizeof (struct cond_imp));
+ imp->implicatand = implicatand;
+ imp->next = implicator->implications;
+ implicator->implications = imp;
+}
+
+/* Declare that the implication relationship from IMPLICATOR to
+ IMPLICATAND should cease. */
+void
+condition_unimplies (condition_t implicator, condition_t implicatand)
+{
+ struct cond_imp **impp;
+
+ for (impp = &implicator->implications; *impp; impp = &(*impp)->next)
+ {
+ if ((*impp)->implicatand == implicatand)
+ {
+ struct cond_imp *tmp = *impp;
+ *impp = (*impp)->next;
+ free (tmp);
+ return;
+ }
+ }
+}
+
+/* Signal one waiter on C. If there were no waiters at all, return
+ 0, else return 1. */
+int
+cond_signal(c)
+ register condition_t c;
+{
+ register cproc_t p;
+ struct cond_imp *imp;
+
+ spin_lock(&c->lock);
+ cthread_queue_deq(&c->queue, cproc_t, p);
+ spin_unlock(&c->lock);
+ if (p != NO_CPROC) {
+ cproc_ready(p,0);
+ return 1;
+ }
+ else {
+ for (imp = c->implications; imp; imp = imp->next)
+ if (cond_signal (imp->implicatand))
+ return 1;
+ }
+ return 0;
+}
+
+void
+cond_broadcast(c)
+ register condition_t c;
+{
+ register cproc_t p;
+ struct cthread_queue blocked_queue;
+ struct cond_imp *imp;
+
+ cthread_queue_init(&blocked_queue);
+
+ spin_lock(&c->lock);
+ for (;;) {
+ register int old_state;
+
+ cthread_queue_deq(&c->queue, cproc_t, p);
+ if (p == NO_CPROC)
+ break;
+ cthread_queue_enq(&blocked_queue, p);
+ }
+ spin_unlock(&c->lock);
+
+ for(;;) {
+ cthread_queue_deq(&blocked_queue, cproc_t, p);
+ if (p == NO_CPROC)
+ break;
+ cproc_ready(p,0);
+ }
+
+ for (imp = c->implications; imp; imp = imp->next)
+ condition_broadcast (imp->implicatand);
+}
+
+void
+cthread_yield()
+{
+ register cproc_t new, p = cproc_self();
+
+ if (p->wired != MACH_PORT_NULL) {
+ yield();
+ return;
+ }
+ spin_lock(&ready_lock);
+#ifdef STATISTICS
+ cthread_yields++;
+#endif STATISTICS
+ cthread_queue_deq(&ready, cproc_t, new);
+ if (new) {
+ cthread_queue_enq(&ready, p);
+ spin_lock(&p->lock);
+ p->state = CPROC_BLOCKED;
+ spin_unlock(&ready_lock);
+ spin_lock(&new->lock);
+ new->state = CPROC_RUNNING;
+ spin_unlock(&new->lock);
+ cproc_switch(&p->context,&new->context,&p->lock);
+ } else {
+ spin_unlock(&ready_lock);
+ yield();
+ }
+}
+
+/*
+ * Mutex objects.
+ */
+
+void
+__mutex_lock_solid(void *ptr)
+{
+ register mutex_t m = ptr;
+ register cproc_t p = cproc_self();
+ register int queued;
+ register int tried = 0;
+
+#ifdef WAIT_DEBUG
+ p->waiting_for = (char *)m;
+#endif WAIT_DEBUG
+ while (1) {
+ spin_lock(&m->lock);
+ if (cthread_queue_head(&m->queue, cproc_t) == NO_CPROC) {
+ cthread_queue_enq(&m->queue, p);
+ queued = 1;
+ } else {
+ queued = 0;
+ }
+ if (spin_try_lock(&m->held)) {
+ if (queued) cthread_queue_deq(&m->queue, cproc_t, p);
+ spin_unlock(&m->lock);
+#ifdef WAIT_DEBUG
+ p->waiting_for = (char *)0;
+#endif WAIT_DEBUG
+ return;
+ } else {
+ if (!queued) cthread_queue_enq(&m->queue, p);
+ spin_lock(&p->lock);
+ spin_unlock(&m->lock);
+ cproc_block();
+ if (spin_try_lock(&m->held)) {
+#ifdef WAIT_DEBUG
+ p->waiting_for = (char *)0;
+#endif WAIT_DEBUG
+ return;
+ }
+#ifdef STATISTICS
+ spin_lock(&mutex_count_lock);
+ cthread_no_mutex++;
+ spin_unlock(&mutex_count_lock);
+#endif STATISTICS
+ }
+ }
+}
+
+void
+__mutex_unlock_solid(void *ptr)
+{
+ register mutex_t m = ptr;
+ register cproc_t new;
+
+ if (!spin_try_lock(&m->held))
+ return;
+ spin_lock(&m->lock);
+ cthread_queue_deq(&m->queue, cproc_t, new);
+ spin_unlock(&m->held);
+ spin_unlock(&m->lock);
+ if (new) {
+ cproc_ready(new,0);
+ }
+}
+
+
+/*
+ * Use instead of mach_msg in a multi-threaded server so as not
+ * to tie up excessive kernel threads. This uses a simple linked list for
+ * ports since this should never be more than a few.
+ */
+
+/*
+ * A cthread holds a reference to a port_entry even after it receives a
+ * message. This reference is not released until the thread does a
+ * cthread_msg_busy. This allows the fast case of a single mach_msg
+ * call to occur as often as is possible.
+ */
+
+private port_entry_t get_port_entry(port, min, max)
+ mach_port_t port;
+{
+ register port_entry_t i;
+
+ spin_lock(&port_lock);
+ for(i=port_list;i!=PORT_ENTRY_NULL;i=i->next)
+ if (i->port == port) {
+ spin_unlock(&port_lock);
+ return i;
+ }
+ i = (port_entry_t)malloc(sizeof(struct port_entry));
+ cthread_queue_init(&i->queue);
+ i->port = port;
+ i->next = port_list;
+ port_list = i;
+ i->min = min;
+ i->max = max;
+ i->held = 0;
+ spin_lock_init(&i->lock);
+ spin_unlock(&port_lock);
+ return i;
+}
+
+cthread_msg_busy(port, min, max)
+ mach_port_t port;
+{
+ register port_entry_t port_entry;
+ register cproc_t new, p = cproc_self();
+
+ if (p->busy) {
+ port_entry = get_port_entry(port, min, max);
+ spin_lock(&port_entry->lock);
+ p->busy = 0;
+ if (port_entry->held <= port_entry->min) {
+ cthread_queue_deq(&port_entry->queue, cproc_t, new);
+ if (new != NO_CPROC){
+ spin_unlock(&port_entry->lock);
+ cproc_ready(new,0);
+ } else {
+ port_entry->held--;
+ spin_unlock(&port_entry->lock);
+#ifdef STATISTICS
+ spin_lock(&port_lock);
+ cthread_rnone++;
+ spin_unlock(&port_lock);
+#endif STATISTICS
+ }
+ } else {
+ port_entry->held--;
+ spin_unlock(&port_entry->lock);
+ }
+ }
+
+}
+
+cthread_msg_active(port, min, max)
+mach_port_t port;
+{
+ register cproc_t p = cproc_self();
+ register port_entry_t port_entry;
+
+ if (!p->busy) {
+ port_entry = get_port_entry(port, min, max);
+ if (port_entry == 0) return;
+ spin_lock(&port_entry->lock);
+ if (port_entry->held < port_entry->max) {
+ port_entry->held++;
+ p->busy = (int)port_entry;
+ }
+ spin_unlock(&port_entry->lock);
+ }
+}
+
+mach_msg_return_t
+cthread_mach_msg(header, option,
+ send_size, rcv_size, rcv_name,
+ timeout, notify, min, max)
+ register mach_msg_header_t *header;
+ register mach_msg_option_t option;
+ mach_msg_size_t send_size;
+ mach_msg_size_t rcv_size;
+ register mach_port_t rcv_name;
+ mach_msg_timeout_t timeout;
+ mach_port_t notify;
+ int min, max;
+{
+ register port_entry_t port_entry;
+ register cproc_t p = cproc_self();
+ register int sent=0;
+ mach_msg_return_t r;
+ port_entry_t op = (port_entry_t)p->busy;
+
+ port_entry = get_port_entry(rcv_name, min, max);
+
+ if (op && (port_entry_t)op != port_entry)
+ cthread_msg_busy(op->port, op->min, op->max);
+ spin_lock(&port_entry->lock);
+ if (!(port_entry == (port_entry_t)p->busy)) {
+ if (port_entry->held >= max) {
+ if (option & MACH_SEND_MSG) {
+ spin_unlock(&port_entry->lock);
+ r = mach_msg(header, option &~ MACH_RCV_MSG,
+ send_size, 0, MACH_PORT_NULL,
+ timeout, notify);
+ if (r != MACH_MSG_SUCCESS) return r;
+ spin_lock(&port_entry->lock);
+ sent=1;
+ }
+ if (port_entry->held >= max) {
+ spin_lock(&p->lock);
+ cthread_queue_preq(&port_entry->queue, p);
+ spin_unlock(&port_entry->lock);
+#ifdef WAIT_DEBUG
+ p->waiting_for = (char *)port_entry;
+#endif WAIT_DEBUG
+ cproc_block();
+ } else {
+ port_entry->held++;
+ spin_unlock(&port_entry->lock);
+ }
+ } else {
+ port_entry->held++;
+ spin_unlock(&port_entry->lock);
+ }
+ } else {
+ spin_unlock(&port_entry->lock);
+ }
+#ifdef WAIT_DEBUG
+ p->waiting_for = (char *)0;
+#endif WAIT_DEBUG
+ p->busy = (int)port_entry;
+ if ((option & MACH_SEND_MSG) && !sent) {
+ r = mach_msg(header, option,
+ send_size, rcv_size, rcv_name,
+ timeout, notify);
+ } else {
+ r = mach_msg(header, option &~ MACH_SEND_MSG,
+ 0, rcv_size, rcv_name,
+ timeout, notify);
+ }
+ return r;
+}
+
+cproc_fork_prepare()
+{
+ register cproc_t p = cproc_self();
+
+ vm_inherit(mach_task_self(),p->stack_base, p->stack_size, VM_INHERIT_COPY);
+ spin_lock(&port_lock);
+ spin_lock(&cproc_list_lock);
+}
+
+cproc_fork_parent()
+{
+ register cproc_t p = cproc_self();
+
+ spin_unlock(&cproc_list_lock);
+ spin_unlock(&port_lock);
+ vm_inherit(mach_task_self(),p->stack_base, p->stack_size, VM_INHERIT_NONE);
+}
+
+cproc_fork_child()
+{
+ register cproc_t l,p = cproc_self();
+ cproc_t m;
+ register port_entry_t pe;
+ port_entry_t pet;
+ kern_return_t r;
+
+
+ vm_inherit(mach_task_self(),p->stack_base, p->stack_size, VM_INHERIT_NONE);
+ spin_lock_init(&n_kern_lock);
+ cthread_kernel_threads=0;
+#ifdef STATISTICS
+ cthread_ready = 0;
+ cthread_running = 1;
+ cthread_waiting = 0;
+ cthread_wired = 0;
+ spin_lock_init(&wired_lock);
+ cthread_wait_stacks = 0;
+ cthread_waiters = 0;
+ cthread_wakeup = 0;
+ cthread_blocked = 0;
+ cthread_rnone = 0;
+ cthread_yields = 0;
+ cthread_none = 0;
+ cthread_switches = 0;
+ cthread_no_mutex = 0;
+ spin_lock_init(&mutex_count_lock);
+#endif STATISTICS
+
+ for(l=cproc_list;l!=NO_CPROC;l=m) {
+ m=l->next;
+ if (l!=p)
+ free(l);
+ }
+
+ cproc_list = p;
+ p->next = NO_CPROC;
+ spin_lock_init(&cproc_list_lock);
+ cprocs_started = FALSE;
+ cthread_queue_init(&ready);
+ ready_count = 0;
+ spin_lock_init(&ready_lock);
+
+ MACH_CALL(mach_port_allocate(mach_task_self(),
+ MACH_PORT_RIGHT_RECEIVE,
+ &wait_port), r);
+ MACH_CALL(mach_port_insert_right(mach_task_self(),
+ wait_port, wait_port,
+ MACH_MSG_TYPE_MAKE_SEND), r);
+ wakeup_msg.msgh_remote_port = wait_port;
+ wait_count = 0;
+ cthread_queue_init(&waiters);
+ spin_lock_init(&waiters_lock);
+ for(pe=port_list;pe!=PORT_ENTRY_NULL;pe=pet) {
+ pet = pe->next;
+ free(pe);
+ }
+ port_list = PORT_ENTRY_NULL;
+ spin_lock_init(&port_lock);
+
+ if (p->wired) cthread_wire();
+}
diff --git a/libthreads/cthread_data.c b/libthreads/cthread_data.c
new file mode 100644
index 00000000..0814130d
--- /dev/null
+++ b/libthreads/cthread_data.c
@@ -0,0 +1,188 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991 Carnegie-Mellon University
+ * All rights reserved. The CMU software License Agreement specifies
+ * the terms and conditions for use and redistribution.
+ */
+/*
+ * HISTORY
+ * $Log: cthread_data.c,v $
+ * Revision 2.2 92/05/23 11:35:17 jfriedl
+ * Snarfed from multi-server sources at CMU.
+ * No stdio (for use with single-server).
+ *
+ *
+ * Revision 2.2 91/03/25 14:14:45 jjc
+ * For compatibility with cthread_data:
+ * 1) Added routines, cthread_data and cthread_set_data,
+ * which use the new routines in here.
+ * 2) Defined CTHREAD_KEY_RESERVED as the key used to
+ * access cthread_data.
+ * 3) Changed the first free key from CTHREAD_KEY_NULL
+ * to CTHREAD_KEY_FIRST.
+ * [91/03/18 jjc]
+ * Made simple implementation from POSIX threads specification for
+ * thread specific data.
+ * [91/03/07 jjc]
+ *
+ */
+#include <cthreads.h>
+
+
+#ifdef CTHREAD_DATA
+#define CTHREAD_KEY_FIRST (cthread_key_t)1 /* first free key */
+#else /* CTHREAD_DATA */
+#define CTHREAD_KEY_FIRST CTHREAD_KEY_NULL /* first free key */
+#endif /* CTHREAD_DATA */
+#define CTHREAD_KEY_MAX (cthread_key_t)8 /* max. no. of keys */
+#define CTHREAD_KEY_NULL (cthread_key_t)0
+
+#ifdef CTHREAD_DATA
+/*
+ * Key reserved for cthread_data
+ */
+#define CTHREAD_KEY_RESERVED CTHREAD_KEY_NULL
+#endif /* CTHREAD_DATA */
+
+
+/* lock protecting key creation */
+struct mutex cthread_data_lock = MUTEX_INITIALIZER;
+
+/* next free key */
+cthread_key_t cthread_key = CTHREAD_KEY_FIRST;
+
+
+/*
+ * Create key to private data visible to all threads in task.
+ * Different threads may use same key, but the values bound to the key are
+ * maintained on a thread specific basis.
+ * Returns 0 if successful and returns -1 otherwise.
+ */
+/*
+ * Create key to private data visible to all threads in task.
+ * Different threads may use same key, but the values bound to the key are
+ * maintained on a thread specific basis.
+ * Returns 0 if successful and returns -1 otherwise.
+ */
+cthread_keycreate(key)
+cthread_key_t *key;
+{
+	int ret;
+
+	/* Test and bump the next-free-key counter under the lock.
+	   Checking cthread_key before acquiring the mutex (as the old
+	   code did) races with concurrent creators and could hand out
+	   keys beyond CTHREAD_KEY_MAX.  */
+	mutex_lock((mutex_t)&cthread_data_lock);
+	if (cthread_key >= CTHREAD_KEY_FIRST && cthread_key < CTHREAD_KEY_MAX) {
+		*key = cthread_key++;
+		ret = 0;
+	}
+	else {	/* out of keys */
+		*key = CTHREAD_KEY_INVALID;
+		ret = -1;
+	}
+	mutex_unlock((mutex_t)&cthread_data_lock);
+	return(ret);
+}
+
+
+/*
+ * Get private data associated with given key
+ * Returns 0 if successful and returns -1 if the key is invalid.
+ * If the calling thread doesn't have a value for the given key,
+ * the value returned is CTHREAD_DATA_VALUE_NULL.
+ */
+/*
+ * Look up the calling thread's value for KEY and store it in *VALUE.
+ * A thread that never called cthread_setspecific has no table; in
+ * that case *VALUE is left as CTHREAD_DATA_VALUE_NULL.
+ * Returns 0 on success, -1 when KEY was never handed out.
+ */
+cthread_getspecific(key, value)
+cthread_key_t key;
+any_t *value;
+{
+	register any_t *table;
+
+	*value = CTHREAD_DATA_VALUE_NULL;
+	if (key < CTHREAD_KEY_NULL || key >= cthread_key)
+		return(-1);
+
+	table = (any_t *)(cthread_self()->private_data);
+	if (table != (any_t *)0)
+		*value = table[key];
+	return(0);
+}
+
+
+/*
+ * Set private data associated with given key
+ * Returns 0 if successful and returns -1 otherwise.
+ */
+/*
+ * Set private data associated with given key
+ * Returns 0 if successful and returns -1 otherwise.
+ */
+cthread_setspecific(key, value)
+cthread_key_t key;
+any_t value;
+{
+	register int i;
+	register cthread_t self;
+	register any_t *thread_data;
+
+	/* Reject keys never handed out by cthread_keycreate.  */
+	if (key < CTHREAD_KEY_NULL || key >= cthread_key)
+		return(-1);
+
+	self = cthread_self();
+	thread_data = (any_t *)(self->private_data);
+	if (thread_data != (any_t *)0)
+		thread_data[key] = value;
+	else {
+		/*
+		 * Allocate and initialize thread data table,
+		 * point cthread_data at it, and then set the
+		 * data for the given key with the given value.
+		 * The table is per-thread, so no locking is needed.
+		 */
+		thread_data = (any_t *)malloc(CTHREAD_KEY_MAX * sizeof(any_t));
+		if (thread_data == (any_t *)0) {
+			printf("cthread_setspecific: malloc failed\n");
+			return(-1);
+		}
+		self->private_data = (any_t)thread_data;
+
+		for (i = 0; i < CTHREAD_KEY_MAX; i++)
+			thread_data[i] = CTHREAD_DATA_VALUE_NULL;
+
+		thread_data[key] = value;
+	}
+	return(0);
+}
+
+
+#ifdef CTHREAD_DATA
+/*
+ * Set thread specific "global" variable,
+ * using new POSIX routines.
+ * Crash and burn if the thread given isn't the calling thread.
+ * XXX For compatibility with old cthread_set_data() XXX
+ */
+/*
+ * Set thread specific "global" variable,
+ * using new POSIX routines.
+ * Crash and burn if the thread given isn't the calling thread.
+ * XXX For compatibility with old cthread_set_data() XXX
+ * Returns what cthread_setspecific returns, or -1 if T is not the
+ * calling thread.
+ */
+cthread_set_data(t, x)
+cthread_t t;
+any_t x;
+{
+	register cthread_t self;
+
+	self = cthread_self();
+	if (t == self)
+		return(cthread_setspecific(CTHREAD_KEY_RESERVED, x));
+	else {
+		ASSERT(t == self);
+		/* ASSERT is a no-op unless defined elsewhere; without this
+		   return the function fell off the end (UB for callers that
+		   use the result).  */
+		return(-1);
+	}
+}
+
+
+/*
+ * Get thread specific "global" variable,
+ * using new POSIX routines.
+ * Crash and burn if the thread given isn't the calling thread.
+ * XXX For compatibility with old cthread_data() XXX
+ */
+/*
+ * Get thread specific "global" variable,
+ * using new POSIX routines.
+ * Crash and burn if the thread given isn't the calling thread.
+ * XXX For compatibility with old cthread_data() XXX
+ * Returns CTHREAD_DATA_VALUE_NULL if T is not the calling thread.
+ */
+any_t
+cthread_data(t)
+cthread_t t;
+{
+	register cthread_t self;
+	any_t value;
+
+	self = cthread_self();
+	if (t == self) {
+		(void)cthread_getspecific(CTHREAD_KEY_RESERVED, &value);
+		return(value);
+	}
+	else {
+		ASSERT(t == self);
+		/* ASSERT is a no-op unless defined elsewhere; without this
+		   return the function fell off the end (UB for callers that
+		   use the result).  */
+		return(CTHREAD_DATA_VALUE_NULL);
+	}
+}
+#endif /* CTHREAD_DATA */
diff --git a/libthreads/cthread_internals.h b/libthreads/cthread_internals.h
new file mode 100644
index 00000000..c3aa8050
--- /dev/null
+++ b/libthreads/cthread_internals.h
@@ -0,0 +1,209 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * HISTORY
+ * $Log: cthread_internals.h,v $
+ * Revision 1.2 1994/05/05 10:58:01 roland
+ * entered into RCS
+ *
+ * Revision 2.14 92/08/03 18:03:56 jfriedl
+ * Made state element of struct cproc volatile.
+ * [92/08/02 jfriedl]
+ *
+ * Revision 2.13 92/03/06 14:09:24 rpd
+ * Added yield, defined using thread_switch.
+ * [92/03/06 rpd]
+ *
+ * Revision 2.12 92/03/01 00:40:23 rpd
+ * Removed exit declaration. It conflicted with the real thing.
+ * [92/02/29 rpd]
+ *
+ * Revision 2.11 91/08/28 11:19:23 jsb
+ * Fixed MACH_CALL to allow multi-line expressions.
+ * [91/08/23 rpd]
+ *
+ * Revision 2.10 91/07/31 18:33:33 dbg
+ * Protect against redefinition of ASSERT.
+ * [91/07/30 17:33:21 dbg]
+ *
+ * Revision 2.9 91/05/14 17:56:24 mrt
+ * Correcting copyright
+ *
+ * Revision 2.8 91/02/14 14:19:42 mrt
+ * Added new Mach copyright
+ * [91/02/13 12:41:02 mrt]
+ *
+ * Revision 2.7 90/11/05 14:36:55 rpd
+ * Added spin_lock_t.
+ * [90/10/31 rwd]
+ *
+ * Revision 2.6 90/09/09 14:34:51 rpd
+ * Remove special field.
+ * [90/08/24 rwd]
+ *
+ * Revision 2.5 90/06/02 15:13:44 rpd
+ * Converted to new IPC.
+ * [90/03/20 20:52:47 rpd]
+ *
+ * Revision 2.4 90/03/14 21:12:11 rwd
+ * Added waiting_for field for debugging deadlocks.
+ * [90/03/01 rwd]
+ * Added list field to keep a master list of all cprocs.
+ * [90/03/01 rwd]
+ *
+ * Revision 2.3 90/01/19 14:37:08 rwd
+ * Keep track of real thread for use in thread_* substitutes.
+ * Add CPROC_ARUN for about to run and CPROC_HOLD to avoid holding
+ * spin_locks over system calls.
+ * [90/01/03 rwd]
+ * Add busy field to be used by cthread_msg calls to make sure we
+ * have the right number of blocked kernel threads.
+ * [89/12/21 rwd]
+ *
+ * Revision 2.2 89/12/08 19:53:28 rwd
+ * Added CPROC_CONDWAIT state
+ * [89/11/28 rwd]
+ * Added on_special field.
+ * [89/11/26 rwd]
+ * Removed MSGOPT conditionals
+ * [89/11/25 rwd]
+ * Removed old debugging code. Add wired port/flag. Add state
+ * for small state machine.
+ * [89/10/30 rwd]
+ * Added CPDEBUG code
+ * [89/10/26 rwd]
+ * Change TRACE to {x;} else.
+ * [89/10/24 rwd]
+ * Rewrote to work for limited number of kernel threads. This is
+ * basically a merge of coroutine and thread. Added
+ * cthread_receivce call for use by servers.
+ * [89/10/23 rwd]
+ *
+ */
+/*
+ * cthread_internals.h
+ *
+ *
+ * Private definitions for the C Threads implementation.
+ *
+ * The cproc structure is used for different implementations
+ * of the basic schedulable units that execute cthreads.
+ *
+ */
+
+
+#include "options.h"
+#include <mach/port.h>
+#include <mach/message.h>
+#include <mach/thread_switch.h>
+
+#if !defined(__STDC__) && !defined(volatile)
+# ifdef __GNUC__
+# define volatile __volatile__
+# else
+# define volatile /* you lose */
+# endif
+#endif
+
+/*
+ * Low-level thread implementation.
+ * This structure must agree with struct ur_cthread in cthreads.h
+ */
+typedef struct cproc {
+	struct cproc *next;		/* for lock, condition, and ready queues */
+	cthread_t incarnation;		/* for cthread_self() */
+
+	struct cproc *list;		/* for master cproc list */
+#ifdef WAIT_DEBUG
+	volatile char *waiting_for;	/* address of mutex/cond waiting for */
+#endif /* WAIT_DEBUG */
+
+#if 0
+	/* This is not needed in GNU; libc handles it. */
+	mach_port_t reply_port;		/* for mig_get_reply_port() */
+#endif
+
+	int context;
+	spin_lock_t lock;
+	volatile int state;	/* current state */
+#define CPROC_RUNNING	0
+#define CPROC_SWITCHING 1
+#define CPROC_BLOCKED	2
+#define CPROC_CONDWAIT	4
+
+	mach_port_t wired;		/* is cthread wired to kernel thread */
+	int busy;			/* used with cthread_msg calls */
+
+	mach_msg_header_t msg;
+
+	unsigned int stack_base;
+	unsigned int stack_size;
+} *cproc_t;
+
+#define NO_CPROC ((cproc_t) 0)
+#define cproc_self() ((cproc_t) ur_cthread_self())
+
+int cproc_block ();
+
+#if 0
+/* This declaration conflicts with <stdlib.h> in GNU. */
+/*
+ * C Threads imports:
+ */
+extern char *malloc();
+#endif
+
+/*
+ * Mach imports:
+ */
+extern void mach_error();
+
+/*
+ * Macro for MACH kernel calls.  With CHECK_STATUS defined, any result
+ * other than KERN_SUCCESS aborts with a diagnostic; otherwise the
+ * status is simply stored into RET for the caller to inspect.
+ */
+#ifdef CHECK_STATUS
+#define MACH_CALL(expr, ret) \
+	if (((ret) = (expr)) != KERN_SUCCESS) { \
+		quit(1, "error in %s at %d: %s\n", __FILE__, __LINE__, \
+		     mach_error_string(ret)); \
+	} else
+#else /* CHECK_STATUS */
+#define MACH_CALL(expr, ret) (ret) = (expr)
+#endif /* CHECK_STATUS */
+
+#define private static
+#ifndef ASSERT
+#define ASSERT(x)
+#endif
+#define TRACE(x)
+
+/*
+ * What we do to yield the processor:
+ * (This depresses the thread's priority for up to 10ms.)
+ */
+
+#define yield() \
+ (void) thread_switch(MACH_PORT_NULL, SWITCH_OPTION_DEPRESS, 10)
diff --git a/libthreads/cthreads.c b/libthreads/cthreads.c
new file mode 100644
index 00000000..9a3341da
--- /dev/null
+++ b/libthreads/cthreads.c
@@ -0,0 +1,481 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * HISTORY
+ * $Log: cthreads.c,v $
+ * Revision 1.8 1998/06/10 19:38:01 tb
+ * Tue Jun 9 13:50:09 1998 Thomas Bushnell, n/BSG <tb@mit.edu>
+ *
+ * * cthreads.c (cthread_fork_prepare): Don't call
+ * malloc_fork_prepare since we are no longer providing our own
+ * malloc in this library.
+ * (cthread_fork_parent): Likewise, for malloc_fork_parent.
+ * (cthread_fork_child): Likewize, for malloc_fork_child.
+ *
+ * Revision 1.7 1997/08/20 19:41:20 thomas
+ * Wed Aug 20 15:39:44 1997 Thomas Bushnell, n/BSG <thomas@gnu.ai.mit.edu>
+ *
+ * * cthreads.c (cthread_body): Wire self before calling user work
+ * function. This way all cthreads will be wired, which the ports
+ * library (and hurd_thread_cancel, etc.) depend on.
+ *
+ * Revision 1.6 1997/06/10 01:22:19 thomas
+ * Mon Jun 9 21:18:46 1997 Thomas Bushnell, n/BSG <thomas@gnu.ai.mit.edu>
+ *
+ * * cthreads.c (cthread_fork): Delete debugging oddity that crept
+ * into source.
+ *
+ * Revision 1.5 1997/04/04 01:30:35 thomas
+ * *** empty log message ***
+ *
+ * Revision 1.4 1994/05/05 18:13:57 roland
+ * entered into RCS
+ *
+ * Revision 2.11 92/07/20 13:33:37 cmaeda
+ * In cthread_init, do machine dependent initialization if it's defined.
+ * [92/05/11 14:41:08 cmaeda]
+ *
+ * Revision 2.10 91/08/28 11:19:26 jsb
+ * Fixed mig_init initialization in cthread_fork_child.
+ * [91/08/23 rpd]
+ *
+ * Revision 2.9 91/07/31 18:34:23 dbg
+ * Fix bad self-pointer reference.
+ *
+ * Don't declare _setjmp and _longjmp; they are included by
+ * cthreads.h.
+ * [91/07/30 17:33:50 dbg]
+ *
+ * Revision 2.8 91/05/14 17:56:31 mrt
+ * Correcting copyright
+ *
+ * Revision 2.7 91/02/14 14:19:47 mrt
+ * Added new Mach copyright
+ * [91/02/13 12:41:07 mrt]
+ *
+ * Revision 2.6 90/11/05 14:37:03 rpd
+ * Added cthread_fork_{prepare,parent,child}.
+ * [90/11/02 rwd]
+ *
+ * Add spin_lock_t.
+ * [90/10/31 rwd]
+ *
+ * Revision 2.5 90/08/07 14:30:58 rpd
+ * Removed RCS keyword nonsense.
+ *
+ * Revision 2.4 90/06/02 15:13:49 rpd
+ * Converted to new IPC.
+ * [90/03/20 20:56:44 rpd]
+ *
+ * Revision 2.3 90/01/19 14:37:12 rwd
+ * Make cthread_init return pointer to new stack.
+ * [89/12/18 19:17:45 rwd]
+ *
+ * Revision 2.2 89/12/08 19:53:37 rwd
+ * Change cproc and cthread counters to globals with better names.
+ * [89/11/02 rwd]
+ *
+ * Revision 2.1 89/08/03 17:09:34 rwd
+ * Created.
+ *
+ *
+ * 31-Dec-87 Eric Cooper (ecc) at Carnegie Mellon University
+ * Changed cthread_exit() logic for the case of the main thread,
+ * to fix thread and stack memory leak found by Camelot group.
+ *
+ * 21-Aug-87 Eric Cooper (ecc) at Carnegie Mellon University
+ * Added consistency check in beginning of cthread_body().
+ *
+ * 11-Aug-87 Eric Cooper (ecc) at Carnegie Mellon University
+ * Removed cthread_port() and cthread_set_port().
+ * Removed port deallocation from cthread_free().
+ * Minor changes to cthread_body(), cthread_exit(), and cthread_done().
+ *
+ * 10-Aug-87 Eric Cooper (ecc) at Carnegie Mellon University
+ * Changed call to mig_init() in cthread_init() to pass 1 as argument.
+ *
+ * 31-Jul-87 Eric Cooper (ecc) at Carnegie Mellon University
+ * Added call to mig_init() from cthread_init().
+ */
+/*
+ * File: cthreads.c
+ * Author: Eric Cooper, Carnegie Mellon University
+ * Date: July, 1987
+ *
+ * Implementation of fork, join, exit, etc.
+ */
+
+#include <cthreads.h>
+#include "cthread_internals.h"
+
+/*
+ * C Threads imports:
+ */
+extern void cproc_create();
+extern vm_offset_t cproc_init();
+extern void mig_init();
+
+/*
+ * Mach imports:
+ */
+
+/*
+ * C library imports:
+ */
+
+/*
+ * Thread status bits.
+ */
+#define T_MAIN 0x1
+#define T_RETURNED 0x2
+#define T_DETACHED 0x4
+
+#ifdef DEBUG
+int cthread_debug = FALSE;
+#endif /* DEBUG */
+
+private struct cthread_queue cthreads = QUEUE_INITIALIZER;
+private struct mutex cthread_lock = MUTEX_INITIALIZER;
+private struct condition cthread_needed = CONDITION_INITIALIZER;
+private struct condition cthread_idle = CONDITION_INITIALIZER;
+int cthread_cprocs = 0;
+int cthread_cthreads = 0;
+int cthread_max_cprocs = 0;
+
+private cthread_t free_cthreads = NO_CTHREAD; /* free list */
+private spin_lock_t free_lock = SPIN_LOCK_INITIALIZER; /* unlocked */
+
+private struct cthread initial_cthread = { 0 };
+
+/*
+ * Obtain a cthread structure for FUNC/ARG: reuse one from the free
+ * list when possible, otherwise malloc a fresh one.  The structure is
+ * zeroed (copied from initial_cthread) before the fields are set.
+ */
+private cthread_t
+cthread_alloc(func, arg)
+	cthread_fn_t func;
+	any_t arg;
+{
+	register cthread_t t = NO_CTHREAD;
+
+	if (free_cthreads != NO_CTHREAD) {
+		/*
+		 * Don't try for the lock unless
+		 * the list is likely to be nonempty.
+		 * We can't be sure, though, until we lock it.
+		 */
+		spin_lock(&free_lock);
+		t = free_cthreads;
+		if (t != NO_CTHREAD)
+			free_cthreads = t->next;
+		spin_unlock(&free_lock);
+	}
+	if (t == NO_CTHREAD) {
+		/*
+		 * The free list was empty.
+		 * We may have only found this out after
+		 * locking it, which is why this isn't an
+		 * "else" branch of the previous statement.
+		 */
+		t = (cthread_t) malloc(sizeof(struct cthread));
+	}
+	/* NOTE(review): malloc's result is not checked; on allocation
+	   failure the assignment below dereferences a null pointer.  */
+	*t = initial_cthread;
+	t->func = func;
+	t->arg = arg;
+	return t;
+}
+
+/*
+ * Return T to the front of the free list for reuse by cthread_alloc.
+ * T must no longer be referenced by any other thread.
+ */
+private void
+cthread_free(t)
+	register cthread_t t;
+{
+	spin_lock(&free_lock);
+	t->next = free_cthreads;
+	free_cthreads = t;
+	spin_unlock(&free_lock);
+}
+
+/*
+ * One-time initialization of the cthreads package.  Allocates the
+ * cthread describing the initial ("main") thread, initializes the
+ * cproc layer, and enables multi-threaded MiG stubs.  Returns the new
+ * stack for crt0 to switch to, or 0 if already initialized.
+ */
+int
+cthread_init()
+{
+	static int cthreads_started = FALSE;
+	register cproc_t p;
+	register cthread_t t;
+	vm_offset_t stack;
+
+	if (cthreads_started)
+		return 0;
+	t = cthread_alloc((cthread_fn_t) 0, (any_t) 0);
+	stack = cproc_init();
+	cthread_cprocs = 1;
+
+#ifdef cthread_md_init
+	cthread_md_init();
+#endif
+
+	cthread_cthreads = 1;
+	t->state |= T_MAIN;
+	cthread_set_name(t, "main");
+
+	/* cproc_self() doesn't work yet, because
+	   we haven't yet switched to the new stack. */
+
+	p = *(cproc_t *)&ur_cthread_ptr(stack);
+	p->incarnation = t;
+	/* The original CMU code passes P to mig_init. In GNU, mig_init
+	   does not know about cproc_t; instead it expects to be passed the
+	   stack pointer of the initial thread. */
+	mig_init((void *) stack); /* enable multi-threaded mig interfaces */
+
+	cthreads_started = TRUE;
+	return stack;
+}
+
+/*
+ * Used for automatic initialization by crt0.
+ * Cast needed since too many C compilers choke on the type void (*)().
+ */
+int (*_cthread_init_routine)() = (int (*)()) cthread_init;
+
+/*
+ * Procedure invoked at the base of each cthread.
+ */
+/*
+ * Scheduler loop run by each cproc: repeatedly dequeue a cthread fork
+ * request, assume that cthread's identity, run its function, publish
+ * the result (or free the cthread if detached), then go idle again.
+ * Never returns.
+ */
+void
+cthread_body(self)
+	cproc_t self;
+{
+	register cthread_t t;
+
+	ASSERT(cproc_self() == self);
+	TRACE(printf("[idle] cthread_body(%x)\n", self));
+	mutex_lock(&cthread_lock);
+	for (;;) {
+		/*
+		 * Dequeue a thread invocation request.
+		 */
+		cthread_queue_deq(&cthreads, cthread_t, t);
+		if (t != NO_CTHREAD) {
+			/*
+			 * We have a thread to execute.
+			 */
+			mutex_unlock(&cthread_lock);
+			cthread_assoc(self, t);		/* assume thread's identity */
+			/* The _setjmp here is the landing point for
+			   cthread_exit's _longjmp.  */
+			if (_setjmp(t->catch) == 0) {	/* catch for cthread_exit() */
+				cthread_wire ();
+				/*
+				 * Execute the fork request.
+				 */
+				t->result = (*(t->func))(t->arg);
+			}
+			/*
+			 * Return result from thread.
+			 */
+			TRACE(printf("[%s] done()\n", cthread_name(t)));
+			mutex_lock(&t->lock);
+			if (t->state & T_DETACHED) {
+				mutex_unlock(&t->lock);
+				cthread_free(t);
+			} else {
+				t->state |= T_RETURNED;
+				mutex_unlock(&t->lock);
+				condition_signal(&t->done);
+			}
+			cthread_assoc(self, NO_CTHREAD);
+			mutex_lock(&cthread_lock);
+			cthread_cthreads -= 1;
+		} else {
+			/*
+			 * Queue is empty.
+			 * Signal that we're idle in case the main thread
+			 * is waiting to exit, then wait for reincarnation.
+			 */
+			condition_signal(&cthread_idle);
+			condition_wait(&cthread_needed, &cthread_lock);
+		}
+	}
+}
+
+/*
+ * Spawn a new cthread running FUNC(ARG).  The request is queued and
+ * picked up by an idle cproc in cthread_body; a new cproc is created
+ * only when all existing ones are busy and the cproc limit (0 = none)
+ * is not exceeded.  Returns the new cthread handle.
+ */
+cthread_t
+cthread_fork(func, arg)
+	cthread_fn_t func;
+	any_t arg;
+{
+	register cthread_t t;
+
+	TRACE(printf("[%s] fork()\n", cthread_name(cthread_self())));
+	mutex_lock(&cthread_lock);
+	t = cthread_alloc(func, arg);
+	cthread_queue_enq(&cthreads, t);
+	if (++cthread_cthreads > cthread_cprocs && (cthread_max_cprocs == 0 || cthread_cprocs < cthread_max_cprocs)) {
+		cthread_cprocs += 1;
+		cproc_create();
+	}
+	mutex_unlock(&cthread_lock);
+	condition_signal(&cthread_needed);
+	return t;
+}
+
+/*
+ * Detach T so its resources are reclaimed automatically when it
+ * returns.  If T has already returned we reclaim it here; otherwise
+ * we just mark it and cthread_body frees it on completion.
+ */
+void
+cthread_detach(t)
+	cthread_t t;
+{
+	TRACE(printf("[%s] detach(%s)\n", cthread_name(cthread_self()), cthread_name(t)));
+	mutex_lock(&t->lock);
+	if (!(t->state & T_RETURNED)) {
+		/* Still running: mark it and let cthread_body free it.  */
+		t->state |= T_DETACHED;
+		mutex_unlock(&t->lock);
+	} else {
+		/* Already finished: reclaim the structure ourselves.  */
+		mutex_unlock(&t->lock);
+		cthread_free(t);
+	}
+}
+
+/*
+ * Wait for T to return, yield its result, and reclaim its structure.
+ * Must not be called on a detached thread (asserted).
+ */
+any_t
+cthread_join(t)
+	cthread_t t;
+{
+	any_t result;
+
+	TRACE(printf("[%s] join(%s)\n", cthread_name(cthread_self()), cthread_name(t)));
+	mutex_lock(&t->lock);
+	ASSERT(! (t->state & T_DETACHED));
+	while (! (t->state & T_RETURNED))
+		condition_wait(&t->done, &t->lock);
+	result = t->result;
+	mutex_unlock(&t->lock);
+	cthread_free(t);
+	return result;
+}
+
+/*
+ * Terminate the calling cthread with RESULT.  The main thread waits
+ * for every other cthread to go idle and then exits the process;
+ * any other thread longjmps back into cthread_body's dispatch loop.
+ */
+void
+cthread_exit(result)
+	any_t result;
+{
+	register cthread_t t = cthread_self();
+
+	TRACE(printf("[%s] exit()\n", cthread_name(t)));
+	t->result = result;
+	if (t->state & T_MAIN) {
+		mutex_lock(&cthread_lock);
+		while (cthread_cthreads > 1)
+			condition_wait(&cthread_idle, &cthread_lock);
+		mutex_unlock(&cthread_lock);
+		exit((int) result);
+	} else {
+		/* Unwind to the _setjmp in cthread_body.  */
+		_longjmp(t->catch, TRUE);
+	}
+}
+
+/*
+ * Used for automatic finalization by crt0. Cast needed since too many C
+ * compilers choke on the type void (*)().
+ */
+int (*_cthread_exit_routine)() = (int (*)()) cthread_exit;
+
+/*
+ * Attach NAME to T for debugging/trace output.  The string is not
+ * copied; the caller must keep it alive.
+ */
+void
+cthread_set_name(t, name)
+	cthread_t t;
+	char *name;
+{
+	t->name = name;
+}
+
+/*
+ * Printable name of T: "idle" for no thread, "?" when no name was
+ * ever set, otherwise the name given to cthread_set_name.
+ */
+char *
+cthread_name(t)
+	cthread_t t;
+{
+	if (t == NO_CTHREAD)
+		return "idle";
+	if (t->name == 0)
+		return "?";
+	return t->name;
+}
+
+/*
+ * Current limit on the number of cprocs (0 means unlimited).
+ */
+int
+cthread_limit()
+{
+	return cthread_max_cprocs;
+}
+
+/*
+ * Set the limit on the number of cprocs (0 means unlimited).
+ * Only constrains future cproc creation in cthread_fork.
+ */
+void
+cthread_set_limit(n)
+	int n;
+{
+	cthread_max_cprocs = n;
+}
+
+/*
+ * Number of cthreads currently alive (including the main thread).
+ */
+int
+cthread_count()
+{
+	return cthread_cthreads;
+}
+
+/*
+ * fork() preparation: acquire the package locks (free list first,
+ * then the cthread list) so the child inherits them in a consistent
+ * state, then let the cproc layer prepare.  Paired with
+ * cthread_fork_parent / cthread_fork_child.
+ */
+cthread_fork_prepare()
+{
+	spin_lock(&free_lock);
+	mutex_lock(&cthread_lock);
+	cproc_fork_prepare();
+}
+
+/*
+ * fork() cleanup in the parent: undo cthread_fork_prepare, releasing
+ * the locks in reverse order of acquisition.
+ */
+cthread_fork_parent()
+{
+	cproc_fork_parent();
+	mutex_unlock(&cthread_lock);
+	spin_unlock(&free_lock);
+}
+
+/*
+ * fork() cleanup in the child: only the forking cthread survives.
+ * Release the prepare-time locks, reset conditions and counters, free
+ * every queued and free-listed cthread, and re-establish the caller
+ * as the sole ("main") thread.
+ */
+cthread_fork_child()
+{
+	cthread_t t;
+	cproc_t p;
+
+	cproc_fork_child();
+	mutex_unlock(&cthread_lock);
+	spin_unlock(&free_lock);
+	condition_init(&cthread_needed);
+	condition_init(&cthread_idle);
+
+	cthread_max_cprocs = 0;
+
+	stack_fork_child();
+
+	while (TRUE) {		/* Free cthread runnable list */
+		cthread_queue_deq(&cthreads, cthread_t, t);
+		if (t == NO_CTHREAD) break;
+		free((char *) t);
+	}
+
+	while (free_cthreads != NO_CTHREAD) {	/* Free cthread free list */
+		t = free_cthreads;
+		free_cthreads = free_cthreads->next;
+		free((char *) t);
+	}
+
+	cthread_cprocs = 1;
+	t = cthread_self();
+	cthread_cthreads = 1;
+	t->state |= T_MAIN;
+	cthread_set_name(t, "main");
+
+	p = cproc_self();
+	p->incarnation = t;
+	/* XXX needs hacking for GNU */
+	mig_init(p);		/* enable multi-threaded mig interfaces */
+}
diff --git a/libthreads/cthreads.h b/libthreads/cthreads.h
new file mode 100644
index 00000000..c15e0377
--- /dev/null
+++ b/libthreads/cthreads.h
@@ -0,0 +1,639 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * HISTORY
+ * $Log: cthreads.h,v $
+ * Revision 1.14 1999/05/30 01:39:48 roland
+ * 1999-05-29 Roland McGrath <roland@baalperazim.frob.com>
+ *
+ * * cthreads.h (mutex_clear): Change again, to call mutex_init.
+ *
+ * Revision 1.13 1999/05/29 18:59:10 roland
+ * 1999-05-29 Roland McGrath <roland@baalperazim.frob.com>
+ *
+ * * cthreads.h (mutex_clear): Change from syntax error to no-op (with
+ * warning avoidance).
+ *
+ * Revision 1.12 1996/05/04 10:06:31 roland
+ * [lint] (NEVER): Spurious global variable removed.
+ * [!lint] (NEVER): Useless macro removed.
+ *
+ * Revision 1.11 1996/01/24 18:37:59 roland
+ * Use prototypes for functions of zero args.
+ *
+ * Revision 1.10 1995/09/13 19:50:07 mib
+ * (CONDITION_INITIALIZER): Provide initial zero for IMPLICATIONS member.
+ * (condition_init): Bother initializing NAME and IMPLICATIONS members.
+ *
+ * Revision 1.9 1995/08/30 15:51:41 mib
+ * (condition_implies, condition_unimplies): New functions.
+ * (struct condition): New member `implications'.
+ * (cond_imp): New structure.
+ * (cond_signal): Return int now.
+ * (condition_broadcast): Always call cond_broadcast if this condition
+ * has implications.
+ * (condition_signal): Always call cond_signal if this condition has
+ * implications.
+ *
+ * Revision 1.8 1995/08/30 15:10:23 mib
+ * (hurd_condition_wait): Provide declaration.
+ *
+ * Revision 1.7 1995/07/18 17:15:51 mib
+ * Reverse previous change.
+ *
+ * Revision 1.5 1995/04/04 21:06:16 roland
+ * (mutex_lock, mutex_unlock): Use __ names for *_solid.
+ *
+ * Revision 1.4 1994/05/05 10:52:06 roland
+ * entered into RCS
+ *
+ * Revision 2.12 92/05/22 18:38:36 jfriedl
+ * From Mike Kupfer <kupfer@sprite.Berkeley.EDU>:
+ * Add declaration for cthread_wire().
+ * Merge in Jonathan Chew's changes for thread-local data.
+ * Use MACRO_BEGIN and MACRO_END.
+ *
+ * Revision 1.8 91/03/25 14:14:49 jjc
+ * For compatibility with cthread_data:
+ * 1) Added private_data field to cthread structure
+ * for use by POSIX thread specific data routines.
+ * 2) Conditionalized old data field used by cthread_data
+ * under CTHREAD_DATA for binary compatibility.
+ * 3) Changed macros, cthread_set_data and cthread_data,
+ * into routines which use the POSIX routines for
+ * source compatibility.
+ * Also, conditionalized under CTHREAD_DATA.
+ * [91/03/18 jjc]
+ * Added support for multiplexing the thread specific global
+ * variable, cthread_data, using the POSIX threads interface
+ * for thread private data.
+ * [91/03/14 jjc]
+ *
+ * Revision 2.11 91/08/03 18:20:15 jsb
+ * Removed the infamous line 122.
+ * [91/08/01 22:40:24 jsb]
+ *
+ * Revision 2.10 91/07/31 18:35:42 dbg
+ * Fix the standard-C conditional: it's __STDC__.
+ *
+ * Allow for macro-redefinition of cthread_sp, spin_try_lock,
+ * spin_unlock (from machine/cthreads.h).
+ * [91/07/30 17:34:28 dbg]
+ *
+ * Revision 2.9 91/05/14 17:56:42 mrt
+ * Correcting copyright
+ *
+ * Revision 2.8 91/02/14 14:19:52 mrt
+ * Added new Mach copyright
+ * [91/02/13 12:41:15 mrt]
+ *
+ * Revision 2.7 90/11/05 14:37:12 rpd
+ * Include machine/cthreads.h. Added spin_lock_t.
+ * [90/10/31 rwd]
+ *
+ * Revision 2.6 90/10/12 13:07:24 rpd
+ * Channge to allow for positive stack growth.
+ * [90/10/10 rwd]
+ *
+ * Revision 2.5 90/09/09 14:34:56 rpd
+ * Remove mutex_special and debug_mutex.
+ * [90/08/24 rwd]
+ *
+ * Revision 2.4 90/08/07 14:31:14 rpd
+ * Removed RCS keyword nonsense.
+ *
+ * Revision 2.3 90/01/19 14:37:18 rwd
+ * Add back pointer to cthread structure.
+ * [90/01/03 rwd]
+ * Change definition of cthread_init and change ur_cthread_self macro
+ * to reflect movement of self pointer on stack.
+ * [89/12/18 19:18:34 rwd]
+ *
+ * Revision 2.2 89/12/08 19:53:49 rwd
+ * Change spin_try_lock to int.
+ * [89/11/30 rwd]
+ * Changed mutex macros to deal with special mutexs
+ * [89/11/26 rwd]
+ * Make mutex_{set,clear}_special routines instead of macros.
+ * [89/11/25 rwd]
+ * Added mutex_special to specify a need to context switch on this
+ * mutex.
+ * [89/11/21 rwd]
+ *
+ * Made mutex_lock a macro trying to grab the spin_lock first.
+ * [89/11/13 rwd]
+ * Removed conditionals. Mutexes are more like conditions now.
+ * Changed for limited kernel thread version.
+ * [89/10/23 rwd]
+ *
+ * Revision 2.1 89/08/03 17:09:40 rwd
+ * Created.
+ *
+ *
+ * 28-Oct-88 Eric Cooper (ecc) at Carnegie Mellon University
+ * Implemented spin_lock() as test and test-and-set logic
+ * (using mutex_try_lock()) in sync.c. Changed ((char *) 0)
+ * to 0, at Mike Jones's suggestion, and turned on ANSI-style
+ * declarations in either C++ or _STDC_.
+ *
+ * 29-Sep-88 Eric Cooper (ecc) at Carnegie Mellon University
+ * Changed NULL to ((char *) 0) to avoid dependency on <stdio.h>,
+ * at Alessandro Forin's suggestion.
+ *
+ * 08-Sep-88 Alessandro Forin (af) at Carnegie Mellon University
+ * Changed queue_t to cthread_queue_t and string_t to char *
+ * to avoid conflicts.
+ *
+ * 01-Apr-88 Eric Cooper (ecc) at Carnegie Mellon University
+ * Changed compound statement macros to use the
+ * do { ... } while (0) trick, so that they work
+ * in all statement contexts.
+ *
+ * 19-Feb-88 Eric Cooper (ecc) at Carnegie Mellon University
+ * Made spin_unlock() and mutex_unlock() into procedure calls
+ * rather than macros, so that even smart compilers can't reorder
+ * the clearing of the lock. Suggested by Jeff Eppinger.
+ * Removed the now empty <machine>/cthreads.h.
+ *
+ * 01-Dec-87 Eric Cooper (ecc) at Carnegie Mellon University
+ * Changed cthread_self() to mask the current SP to find
+ * the self pointer stored at the base of the stack.
+ *
+ * 22-Jul-87 Eric Cooper (ecc) at Carnegie Mellon University
+ * Fixed bugs in mutex_set_name and condition_set_name
+ * due to bad choice of macro formal parameter name.
+ *
+ * 21-Jul-87 Eric Cooper (ecc) at Carnegie Mellon University
+ * Moved #include <machine/cthreads.h> to avoid referring
+ * to types before they are declared (required by C++).
+ *
+ * 9-Jul-87 Michael Jones (mbj) at Carnegie Mellon University
+ * Added conditional type declarations for C++.
+ * Added _cthread_init_routine and _cthread_exit_routine variables
+ * for automatic initialization and finalization by crt0.
+ */
+/*
+ * File: cthreads.h
+ * Author: Eric Cooper, Carnegie Mellon University
+ * Date: Jul, 1987
+ *
+ * Definitions for the C Threads package.
+ *
+ */
+
+
+#ifndef _CTHREADS_
+#define _CTHREADS_ 1
+
+/* MIB XXX */
+#define CTHREAD_DATA
+
+#if 0
+/* This is CMU's machine-dependent file. In GNU all of the machine
+ dependencies are dealt with in libc. */
+#include <machine/cthreads.h>
+#else
+#include <machine-sp.h>
+#define cthread_sp() ((int) __thread_stack_pointer ())
+#endif
+
+#if c_plusplus || __STDC__
+
+#ifndef C_ARG_DECLS
+#define C_ARG_DECLS(arglist) arglist
+#endif /* not C_ARG_DECLS */
+
+typedef void *any_t;
+
+#else /* not (c_plusplus || __STDC__) */
+
+#ifndef C_ARG_DECLS
+#define C_ARG_DECLS(arglist) ()
+#endif /* not C_ARG_DECLS */
+
+typedef char *any_t;
+
+#endif /* not (c_plusplus || __STDC__) */
+
+#include <mach/mach.h>
+#include <mach/machine/vm_param.h>
+
+#ifndef TRUE
+#define TRUE 1
+#define FALSE 0
+#endif /* TRUE */
+
+
+#undef MACRO_BEGIN
+#undef MACRO_END
+#define MACRO_BEGIN __extension__ ({
+#define MACRO_END 0; })
+
+
+/*
+ * C Threads package initialization.
+ */
+
+extern int cthread_init C_ARG_DECLS((void));
+#if 0
+/* This prototype is broken for GNU. */
+extern any_t calloc C_ARG_DECLS((unsigned n, unsigned size));
+#else
+#include <stdlib.h>
+#endif
+
+/*
+ * Queues.
+ */
+typedef struct cthread_queue {
+ struct cthread_queue_item *head;
+ struct cthread_queue_item *tail;
+} *cthread_queue_t;
+
+typedef struct cthread_queue_item {
+ struct cthread_queue_item *next;
+} *cthread_queue_item_t;
+
+#define NO_QUEUE_ITEM ((cthread_queue_item_t) 0)
+
+#define QUEUE_INITIALIZER { NO_QUEUE_ITEM, NO_QUEUE_ITEM }
+
+#define cthread_queue_alloc() ((cthread_queue_t) calloc(1, sizeof(struct cthread_queue)))
+#define cthread_queue_init(q) ((q)->head = (q)->tail = 0)
+#define cthread_queue_free(q) free((any_t) (q))
+
+#define cthread_queue_enq(q, x) \
+ MACRO_BEGIN \
+ (x)->next = 0; \
+ if ((q)->tail == 0) \
+ (q)->head = (cthread_queue_item_t) (x); \
+ else \
+ (q)->tail->next = (cthread_queue_item_t) (x); \
+ (q)->tail = (cthread_queue_item_t) (x); \
+ MACRO_END
+
+#define cthread_queue_preq(q, x) \
+ MACRO_BEGIN \
+ if ((q)->tail == 0) \
+ (q)->tail = (cthread_queue_item_t) (x); \
+ ((cthread_queue_item_t) (x))->next = (q)->head; \
+ (q)->head = (cthread_queue_item_t) (x); \
+ MACRO_END
+
+#define cthread_queue_head(q, t) ((t) ((q)->head))
+
+#define cthread_queue_deq(q, t, x) \
+ MACRO_BEGIN \
+ if (((x) = (t) ((q)->head)) != 0 && \
+ ((q)->head = (cthread_queue_item_t) ((x)->next)) == 0) \
+ (q)->tail = 0; \
+ MACRO_END
+
+#define cthread_queue_map(q, t, f) \
+ MACRO_BEGIN \
+ register cthread_queue_item_t x, next; \
+ for (x = (cthread_queue_item_t) ((q)->head); x != 0; x = next) { \
+ next = x->next; \
+ (*(f))((t) x); \
+ } \
+ MACRO_END
+
+#if 1
+
+/* In GNU, spin locks are implemented in libc.
+ Just include its header file. */
+#include <spin-lock.h>
+
+#else /* Unused CMU code. */
+
+/*
+ * Spin locks.
+ */
+extern void
+spin_lock_solid C_ARG_DECLS((spin_lock_t *p));
+
+#ifndef spin_unlock
+extern void
+spin_unlock C_ARG_DECLS((spin_lock_t *p));
+#endif
+
+#ifndef spin_try_lock
+extern int
+spin_try_lock C_ARG_DECLS((spin_lock_t *p));
+#endif
+
+#define spin_lock(p) ({if (!spin_try_lock(p)) spin_lock_solid(p);})
+
+#endif /* End unused CMU code. */
+
+/*
+ * Mutex objects.
+ */
+typedef struct mutex {
+ /* The `held' member must be first in GNU. The GNU C library relies on
+ being able to cast a `struct mutex *' to a `spin_lock_t *' (which is
+ kosher if it is the first member) and spin_try_lock that address to
+ see if it gets the mutex. */
+ spin_lock_t held;
+ spin_lock_t lock;
+ char *name;
+ struct cthread_queue queue;
+} *mutex_t;
+
+/* Rearranged accordingly for GNU: */
+#define MUTEX_INITIALIZER { SPIN_LOCK_INITIALIZER, SPIN_LOCK_INITIALIZER, 0, QUEUE_INITIALIZER }
+
+#define mutex_alloc() ((mutex_t) calloc(1, sizeof(struct mutex)))
+#define mutex_init(m) \
+ MACRO_BEGIN \
+ spin_lock_init(&(m)->lock); \
+ cthread_queue_init(&(m)->queue); \
+ spin_lock_init(&(m)->held); \
+ MACRO_END
+#define mutex_set_name(m, x) ((m)->name = (x))
+#define mutex_name(m) ((m)->name != 0 ? (m)->name : "?")
+#define mutex_clear(m) mutex_init(m)
+#define mutex_free(m) free((any_t) (m))
+
+extern void __mutex_lock_solid (void *mutex); /* blocking -- roland@gnu */
+extern void __mutex_unlock_solid (void *mutex); /* roland@gnu */
+
+#define mutex_try_lock(m) spin_try_lock(&(m)->held)
+#define mutex_lock(m) \
+ MACRO_BEGIN \
+ if (!spin_try_lock(&(m)->held)) { \
+ __mutex_lock_solid(m); \
+ } \
+ MACRO_END
+#define mutex_unlock(m) \
+ MACRO_BEGIN \
+ if (spin_unlock(&(m)->held), \
+ cthread_queue_head(&(m)->queue, int) != 0) { \
+ __mutex_unlock_solid(m); \
+ } \
+ MACRO_END
+
+/*
+ * Condition variables.
+ */
+typedef struct condition {
+ spin_lock_t lock;
+ struct cthread_queue queue;
+ char *name;
+ struct cond_imp *implications;
+} *condition_t;
+
+struct cond_imp
+{
+ struct condition *implicatand;
+ struct cond_imp *next;
+};
+
+#define CONDITION_INITIALIZER { SPIN_LOCK_INITIALIZER, QUEUE_INITIALIZER, 0, 0 }
+
+#define condition_alloc() ((condition_t) calloc(1, sizeof(struct condition)))
+#define condition_init(c) \
+ MACRO_BEGIN \
+ spin_lock_init(&(c)->lock); \
+ cthread_queue_init(&(c)->queue); \
+ (c)->name = 0; \
+ (c)->implications = 0; \
+ MACRO_END
+#define condition_set_name(c, x) ((c)->name = (x))
+#define condition_name(c) ((c)->name != 0 ? (c)->name : "?")
+#define condition_clear(c) \
+ MACRO_BEGIN \
+ condition_broadcast(c); \
+ spin_lock(&(c)->lock); \
+ MACRO_END
+#define condition_free(c) \
+ MACRO_BEGIN \
+ condition_clear(c); \
+ free((any_t) (c)); \
+ MACRO_END
+
+#define condition_signal(c) \
+ MACRO_BEGIN \
+ if ((c)->queue.head || (c)->implications) { \
+ cond_signal(c); \
+ } \
+ MACRO_END
+
+#define condition_broadcast(c) \
+ MACRO_BEGIN \
+ if ((c)->queue.head || (c)->implications) { \
+ cond_broadcast(c); \
+ } \
+ MACRO_END
+
+extern int
+cond_signal C_ARG_DECLS((condition_t c));
+
+extern void
+cond_broadcast C_ARG_DECLS((condition_t c));
+
+extern void
+condition_wait C_ARG_DECLS((condition_t c, mutex_t m));
+
+extern int
+hurd_condition_wait C_ARG_DECLS((condition_t c, mutex_t m));
+
+extern void
+condition_implies C_ARG_DECLS((condition_t implicator, condition_t implicatand));
+
+extern void
+condition_unimplies C_ARG_DECLS((condition_t implicator, condition_t implicatand));
+
+/*
+ * Threads.
+ */
+
+typedef any_t (*cthread_fn_t) C_ARG_DECLS((any_t arg));
+
+#include <setjmp.h>
+
+typedef struct cthread {
+ struct cthread *next;
+ struct mutex lock;
+ struct condition done;
+ int state;
+ jmp_buf catch;
+ cthread_fn_t func;
+ any_t arg;
+ any_t result;
+ char *name;
+#ifdef CTHREAD_DATA
+ any_t data;
+#endif CTHREAD_DATA
+ any_t private_data;
+ struct ur_cthread *ur;
+} *cthread_t;
+
+#define NO_CTHREAD ((cthread_t) 0)
+
+extern cthread_t
+cthread_fork C_ARG_DECLS((cthread_fn_t func, any_t arg));
+
+extern void
+cthread_detach C_ARG_DECLS((cthread_t t));
+
+extern any_t
+cthread_join C_ARG_DECLS((cthread_t t));
+
+extern void
+cthread_yield C_ARG_DECLS((void));
+
+extern void
+cthread_exit C_ARG_DECLS((any_t result));
+
+/*
+ * This structure must agree with struct cproc in cthread_internals.h
+ */
+typedef struct ur_cthread {
+ struct ur_cthread *next;
+ cthread_t incarnation;
+} *ur_cthread_t;
+
+#ifndef cthread_sp
+extern int
+cthread_sp C_ARG_DECLS((void));
+#endif
+
+extern int cthread_stack_mask;
+
+#ifdef STACK_GROWTH_UP
+#define ur_cthread_ptr(sp) \
+ (* (ur_cthread_t *) ((sp) & cthread_stack_mask))
+#else STACK_GROWTH_UP
+#define ur_cthread_ptr(sp) \
+ (* (ur_cthread_t *) ( ((sp) | cthread_stack_mask) + 1 \
+ - sizeof(ur_cthread_t *)) )
+#endif STACK_GROWTH_UP
+
+#define ur_cthread_self() (ur_cthread_ptr(cthread_sp()))
+
+#define cthread_assoc(id, t) ((((ur_cthread_t) (id))->incarnation = (t)), \
+ ((t) ? ((t)->ur = (ur_cthread_t)(id)) : 0))
+#define cthread_self() (ur_cthread_self()->incarnation)
+
+extern void
+cthread_set_name C_ARG_DECLS((cthread_t t, char *name));
+
+extern char *
+cthread_name C_ARG_DECLS((cthread_t t));
+
+extern int
+cthread_count C_ARG_DECLS((void));
+
+extern void
+cthread_set_limit C_ARG_DECLS((int n));
+
+extern int
+cthread_limit C_ARG_DECLS((void));
+
+extern void
+cthread_wire C_ARG_DECLS((void));
+
+#ifdef CTHREAD_DATA
+/*
+ * Set or get thread specific "global" variable
+ *
+ * The thread given must be the calling thread (ie. thread_self).
+ * XXX This is for compatibility with the old cthread_data. XXX
+ */
+extern int
+cthread_set_data C_ARG_DECLS((cthread_t t, any_t x));
+
+extern any_t
+cthread_data C_ARG_DECLS((cthread_t t));
+#endif CTHREAD_DATA
+
+/*
+ * Support for POSIX thread specific data
+ *
+ * Multiplexes a thread specific "global" variable
+ * into many thread specific "global" variables.
+ */
+#define CTHREAD_DATA_VALUE_NULL (any_t)0
+#define CTHREAD_KEY_INVALID (cthread_key_t)-1
+
+typedef int cthread_key_t;
+
+/*
+ * Create key to private data visible to all threads in task.
+ * Different threads may use same key, but the values bound to the key are
+ * maintained on a thread specific basis.
+ */
+extern int
+cthread_keycreate C_ARG_DECLS((cthread_key_t *key));
+
+/*
+ * Get value currently bound to key for calling thread
+ */
+extern int
+cthread_getspecific C_ARG_DECLS((cthread_key_t key, any_t *value));
+
+/*
+ * Bind value to given key for calling thread
+ */
+extern int
+cthread_setspecific C_ARG_DECLS((cthread_key_t key, any_t value));
+
+/*
+ * Debugging support.
+ */
+#ifdef DEBUG
+
+#ifndef ASSERT
+/*
+ * Assertion macro, similar to <assert.h>
+ */
+#include <stdio.h>
+#define ASSERT(p) \
+ MACRO_BEGIN \
+ if (!(p)) { \
+ fprintf(stderr, \
+ "File %s, line %d: assertion p failed.\n", \
+ __FILE__, __LINE__); \
+ abort(); \
+ } \
+ MACRO_END
+
+#endif ASSERT
+
+#define SHOULDNT_HAPPEN 0
+
+extern int cthread_debug;
+
+#else DEBUG
+
+#ifndef ASSERT
+#define ASSERT(p)
+#endif ASSERT
+
+#endif DEBUG
+
+#endif _CTHREADS_
diff --git a/libthreads/i386/csw.S b/libthreads/i386/csw.S
new file mode 100644
index 00000000..5579db5c
--- /dev/null
+++ b/libthreads/i386/csw.S
@@ -0,0 +1,185 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * HISTORY
+ * $Log: csw.S,v $
+ * Revision 1.8 1997/04/04 01:31:16 thomas
+ * Thu Apr 3 20:29:27 1997 Thomas Bushnell, n/BSG <thomas@gnu.ai.mit.edu>
+ *
+ * * i386/csw.S: Define __ELF__ too.
+ *
+ * Revision 1.7 1996/10/24 19:30:10 thomas
+ * Mon Oct 21 22:05:48 1996 Thomas Bushnell, n/BSG <thomas@gnu.ai.mit.edu>
+ *
+ * * i386/csw.S (CALL_MCOUNT): New macro.
+ * (cproc_swtich, cproc_start_wait, cproc_prepare): Use CALL_MCOUNT.
+ *
+ * Revision 1.6 1996/08/29 17:44:42 thomas
+ * *** empty log message ***
+ *
+ * Revision 1.5 1995/10/04 20:55:28 roland
+ * (JUMPTARGET): New macro, versions for [PIC] and not.
+ * Use it in place of EXT.
+ *
+ * Revision 1.4 1995/10/04 20:22:17 roland
+ * [PIC] (EXT): Redefine to use PLT.
+ *
+ * Revision 1.3 1995/05/12 18:35:55 roland
+ * Use EXT macro instead of explicit underscores.
+ *
+# Revision 1.2 1994/05/04 19:01:50 mib
+# entered into RCS
+#
+ * Revision 2.7 91/07/31 18:36:32 dbg
+ * Fix for ANSI C preprocessor.
+ * [91/07/30 17:35:16 dbg]
+ *
+ * Revision 2.6 91/05/14 17:56:56 mrt
+ * Correcting copyright
+ *
+ * Revision 2.5 91/05/08 13:35:49 dbg
+ * Unlock lock with a locked instruction (xchg).
+ * [91/03/20 dbg]
+ *
+ * Revision 2.4 91/02/14 14:20:02 mrt
+ * Changed to new Mach copyright
+ * [91/02/13 12:15:27 mrt]
+ *
+ * Revision 2.3 91/01/08 16:46:20 rpd
+ * Don't use Times - horta doesn't like it for some reason.
+ * [91/01/06 rpd]
+ *
+ * Revision 2.2 90/05/03 15:54:37 dbg
+ * Created.
+ * [90/02/05 dbg]
+ *
+ */
+#define ELF
+#define __ELF__
+#include <mach/i386/asm.h>
+
+#ifdef PIC
+#define JUMPTARGET(name) EXT(name##@PLT)
+#else
+#define JUMPTARGET(name) EXT(name)
+#endif
+
+#ifdef PROF
+#define CALL_MCOUNT \
+ pushl %ebp; movl %esp, %ebp; call JUMPTARGET(mcount); popl %ebp;
+#else
+#define CALL_MCOUNT
+#endif
+
+
+/*
+ * Suspend the current thread and resume the next one.
+ *
+ * void cproc_switch(int *cur, int *next, int *lock)
+ */
+ENTRY(cproc_switch)
+ CALL_MCOUNT
+ pushl %ebp /* save ebp */
+ movl %esp,%ebp /* set frame pointer to get arguments */
+ pushl %ebx /* save ebx */
+ pushl %esi /* esi */
+ pushl %edi /* edi */
+ movl B_ARG0,%eax /* get cur */
+ movl %esp,(%eax) /* save current esp */
+ movl B_ARG2,%edx /* get address of lock before switching */
+ /* stacks */
+ movl B_ARG1,%eax /* get next */
+ movl (%eax),%esp /* get new stack pointer */
+ xorl %eax,%eax /* unlock */
+ xchgl %eax,(%edx) /* the lock - now old thread can run */
+
+ popl %edi /* restore di */
+ popl %esi /* si */
+ popl %ebx /* bx */
+ popl %ebp /* and bp (don`t use "leave" - bp */
+ /* still points to old stack) */
+ ret
+
+/*
+ * Create a new stack frame for a 'waiting' thread,
+ * save current thread's frame, and switch to waiting thread.
+ *
+ * void cproc_start_wait(int *cur,
+ * cproc_t child,
+ * int stackp,
+ * int *lock)
+ */
+ENTRY(cproc_start_wait)
+ CALL_MCOUNT
+ pushl %ebp /* save ebp */
+ movl %esp,%ebp /* set frame pointer */
+ pushl %ebx /* save ebx */
+ pushl %esi /* esi */
+ pushl %edi /* edi */
+ movl B_ARG0,%eax /* get cur */
+ movl %esp,(%eax) /* save current esp */
+ movl B_ARG1,%eax /* get child thread */
+ movl B_ARG3,%edx /* point to lock before switching stack */
+ movl B_ARG2,%esp /* get new stack */
+ pushl %eax /* push child thread as argument */
+ movl $0,%ebp /* (clear frame pointer) */
+ xorl %eax,%eax /* unlock */
+ xchgl %eax,(%edx) /* the lock - now old thread can run */
+ call JUMPTARGET(cproc_waiting)/* call cproc_waiting */
+ /*NOTREACHED*/
+
+/*
+ * Set up a thread's stack so that when cproc_switch switches to
+ * it, it will start up as if it called
+ * cproc_body(child)
+ *
+ * void cproc_prepare(cproc_t child, int *context, int stack,
+ * void (*cthread_body)(cproc_t));
+ */
+ENTRY(cproc_prepare)
+ CALL_MCOUNT
+ pushl %ebp /* save ebp */
+ movl %esp,%ebp /* set frame pointer */
+ movl B_ARG2,%edx /* get child`s stack */
+ subl $28,%edx
+ /* make room for context: */
+ /* 0 saved edi () */
+ /* 4 saved esi () */
+ /* 8 saved ebx () */
+ /* 12 saved ebp () */
+ /* 16 return PC from cproc_switch */
+ /* 20 return PC from cthread_body */
+ /* 24 argument to cthread_body */
+ movl $0,12(%edx) /* clear frame pointer */
+ movl B_ARG3,%ecx /* get address of cthread_body passed in */
+ movl %ecx,16(%edx) /* set child to resume at cthread_body */
+ movl $0,20(%edx) /* fake return address from cthread_body */
+ movl B_ARG0,%ecx /* get child thread pointer */
+ movl %ecx,24(%edx) /* set as argument to cthread_body */
+ movl B_ARG1,%ecx /* get pointer to context */
+ movl %edx,(%ecx) /* save context */
+ leave
+ ret
diff --git a/libthreads/i386/cthread_inline.awk b/libthreads/i386/cthread_inline.awk
new file mode 100644
index 00000000..8e0cb7d4
--- /dev/null
+++ b/libthreads/i386/cthread_inline.awk
@@ -0,0 +1,86 @@
+#
+# Mach Operating System
+# Copyright (c) 1991,1990 Carnegie Mellon University
+# All Rights Reserved.
+#
+# Permission to use, copy, modify and distribute this software and its
+# documentation is hereby granted, provided that both the copyright
+# notice and this permission notice appear in all copies of the
+# software, derivative works or modified versions, and any portions
+# thereof, and that both notices appear in supporting documentation.
+#
+# CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+# CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+# ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+#
+# Carnegie Mellon requests users of this software to return to
+#
+# Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+# School of Computer Science
+# Carnegie Mellon University
+# Pittsburgh PA 15213-3890
+#
+# any improvements or extensions that they make and grant Carnegie Mellon
+# the rights to redistribute these changes.
+#
+#
+# HISTORY
+# $Log: cthread_inline.awk,v $
+# Revision 2.5 91/05/14 17:57:03 mrt
+# Correcting copyright
+#
+# Revision 2.4 91/05/08 13:36:05 dbg
+# Unlock lock with a locked instruction (xchg).
+# [91/03/20 dbg]
+#
+# Revision 2.3 91/02/14 14:20:06 mrt
+# Added new Mach copyright
+# [91/02/13 12:33:05 mrt]
+#
+# Revision 2.2 90/05/03 15:54:56 dbg
+# Created (from 68020 version).
+# [90/02/05 dbg]
+#
+# Revision 2.2 89/12/08 19:54:30 rwd
+# Inlines are now spins instead of mutexes.
+# [89/10/23 rwd]
+#
+# Revision 2.1 89/08/04 15:15:14 rwd
+# Created.
+#
+# Revision 1.3 89/05/05 19:00:33 mrt
+# Cleanup for Mach 2.5
+#
+#
+
+# sun/cthread_inline.awk
+#
+# Awk script to inline critical C Threads primitives on i386
+
+NF == 2 && $1 == "call" && $2 == "_spin_try_lock" {
+ print "/ BEGIN INLINE spin_try_lock"
+ print " movl (%esp),%ecx / point at mutex"
+ print " movl $1,%eax / set locked value in acc"
+ print " xchg %eax,(%ecx) / locked swap with mutex"
+ print " xorl $1,%eax / logical complement"
+ print "/ END INLINE spin_try_lock"
+ continue
+}
+NF == 2 && $1 == "call" && $2 == "_spin_unlock" {
+ print "/ BEGIN INLINE " $2
+ print " movl (%esp),%ecx"
+ print " xorl %eax,%eax / set unlocked value in acc"
+ print " xchg %eax,(%ecx) / locked swap with mutex"
+ print "/ END INLINE " $2
+ continue
+}
+NF == 2 && $1 == "call" && $2 == "_cthread_sp" {
+ print "/ BEGIN INLINE cthread_sp"
+ print " movl %esp,%eax"
+ print "/ END INLINE cthread_sp"
+ continue
+}
+# default:
+{
+ print
+}
diff --git a/libthreads/i386/cthreads.h b/libthreads/i386/cthreads.h
new file mode 100644
index 00000000..8ffe4b72
--- /dev/null
+++ b/libthreads/i386/cthreads.h
@@ -0,0 +1,89 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * HISTORY
+ * $Log: cthreads.h,v $
+ * Revision 2.7 92/01/03 20:36:59 dbg
+ * Add volatile to spin_lock_t. Change spin_unlock and
+ * spin_try_lock definitions back to memory operands, but rely on
+ * volatile attribute to keep from using value in memory.
+ * [91/09/04 dbg]
+ *
+ * Revision 2.6 91/08/28 20:18:39 jsb
+ * Safer definitions for spin_unlock and spin_try_lock from mib.
+ *
+ * Revision 2.5 91/07/31 18:36:49 dbg
+ * Add inline substitution for cthread_sp, spin_unlock,
+ * spin_try_lock.
+ * [91/07/30 17:35:53 dbg]
+ *
+ * Revision 2.4 91/05/14 17:57:11 mrt
+ * Correcting copyright
+ *
+ * Revision 2.3 91/02/14 14:20:14 mrt
+ * Changed to new Mach copyright
+ * [91/02/13 12:20:00 mrt]
+ *
+ * Revision 2.2 90/11/05 14:37:23 rpd
+ * Created.
+ * [90/11/01 rwd]
+ *
+ *
+ */
+
+#ifndef _MACHINE_CTHREADS_H_
+#define _MACHINE_CTHREADS_H_
+
+typedef volatile int spin_lock_t;
+#define SPIN_LOCK_INITIALIZER 0
+#define spin_lock_init(s) (*(s) = 0)
+#define spin_lock_locked(s) (*(s) != 0)
+
+#ifdef __GNUC__
+
+#define spin_unlock(p) \
+ ({ register int _u__ ; \
+ asm volatile("xorl %0, %0; \n\
+ xchgl %0, %1" \
+ : "=&r" (_u__), "=m" (*(p)) ); \
+ 0; })
+
+#define spin_try_lock(p)\
+ ({ boolean_t _r__; \
+ asm volatile("movl $1, %0; \n\
+ xchgl %0, %1" \
+ : "=&r" (_r__), "=m" (*(p)) ); \
+ !_r__; })
+
+#define cthread_sp() \
+ ({ int _sp__; \
+ asm("movl %%esp, %0" \
+ : "=g" (_sp__) ); \
+ _sp__; })
+
+#endif /* __GNUC__ */
+
+#endif _MACHINE_CTHREADS_H_
diff --git a/libthreads/i386/lock.s b/libthreads/i386/lock.s
new file mode 100644
index 00000000..e27fa7ff
--- /dev/null
+++ b/libthreads/i386/lock.s
@@ -0,0 +1,70 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * HISTORY
+ * $Log: lock.s,v $
+ * Revision 2.5 91/05/14 17:57:20 mrt
+ * Correcting copyright
+ *
+ * Revision 2.4 91/05/08 13:36:15 dbg
+ * Unlock lock with a locked instruction (xchg).
+ * [91/03/20 dbg]
+ *
+ * Revision 2.3 91/02/14 14:20:18 mrt
+ * Changed to new Mach copyright
+ * [91/02/13 12:20:06 mrt]
+ *
+ * Revision 2.2 90/05/03 15:54:59 dbg
+ * Created.
+ * [90/02/05 dbg]
+ *
+ */
+
+#include <i386/asm.h>
+
+/*
+ * boolean_t spin_try_lock(int *m)
+ */
+ENTRY(spin_try_lock)
+ movl 4(%esp),%ecx / point at mutex
+ movl $1,%eax / set locked value in acc
+ xchg %eax,(%ecx) / swap with mutex
+ / xchg with memory is automatically
+ / locked
+ xorl $1,%eax / 1 (locked) => FALSE
+ / 0 (locked) => TRUE
+ ret
+
+/*
+ * void spin_unlock(int *m)
+ */
+ENTRY(spin_unlock)
+ movl 4(%esp),%ecx / point at mutex
+ xorl %eax,%eax / set unlocked value in acc
+ xchg %eax,(%ecx) / swap with mutex
+ / xchg with memory is automatically
+ / locked
+ ret
diff --git a/libthreads/i386/thread.c b/libthreads/i386/thread.c
new file mode 100644
index 00000000..38f91192
--- /dev/null
+++ b/libthreads/i386/thread.c
@@ -0,0 +1,123 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * HISTORY
+ * $Log: thread.c,v $
+ * Revision 1.2 1994/05/04 19:05:26 mib
+ * entered into RCS
+ *
+ * Revision 2.6 91/07/31 18:37:07 dbg
+ * Undefine cthread_sp macro around function definition.
+ * [91/07/30 17:36:23 dbg]
+ *
+ * Revision 2.5 91/05/14 17:57:27 mrt
+ * Correcting copyright
+ *
+ * Revision 2.4 91/02/14 14:20:21 mrt
+ * Changed to new Mach copyright
+ * [91/02/13 12:20:10 mrt]
+ *
+ * Revision 2.3 90/06/02 15:13:53 rpd
+ * Added definition of cthread_sp.
+ * [90/06/02 rpd]
+ *
+ * Revision 2.2 90/05/03 15:55:03 dbg
+ * Created (from 68020 version).
+ * [90/02/05 dbg]
+ *
+ */
+/*
+ * i386/thread.c
+ *
+ */
+
+#ifndef lint
+static char rcs_id[] = "$Header: cvs-sans-libpthread/hurd/libthreads/i386/thread.c,v 1.3 1997/02/18 22:53:31 miles Exp $";
+#endif not lint
+
+
+#include "../cthreads.h"
+#include "../cthread_internals.h"
+
+
+#include <mach/mach.h>
+
+/*
+ * C library imports:
+ */
+extern bzero();
+
+/*
+ * Set up the initial state of a MACH thread
+ * so that it will invoke cthread_body(child)
+ * when it is resumed.
+ */
+void
+cproc_setup(child, thread, routine)
+ register cproc_t child;
+ int thread;
+ int routine;
+{
+ extern unsigned int __hurd_threadvar_max; /* GNU */
+ register int *top = (int *)
+ cproc_stack_base (child,
+ sizeof(ur_cthread_t *) +
+ /* Account for GNU per-thread variables. */
+ __hurd_threadvar_max *
+ sizeof (long int));
+ struct i386_thread_state state;
+ register struct i386_thread_state *ts = &state;
+ kern_return_t r;
+ unsigned int count;
+
+ /*
+ * Set up i386 call frame and registers.
+ * Read registers first to get correct segment values.
+ */
+ count = i386_THREAD_STATE_COUNT;
+ MACH_CALL(thread_get_state(thread,i386_THREAD_STATE,(thread_state_t) &state,&count),r);
+
+ ts->eip = routine;
+ *--top = (int) child; /* argument to function */
+ *--top = 0; /* fake return address */
+ ts->uesp = (int) top; /* set stack pointer */
+ ts->ebp = 0; /* clear frame pointer */
+
+ MACH_CALL(thread_set_state(thread,i386_THREAD_STATE,(thread_state_t) &state,i386_THREAD_STATE_COUNT),r);
+}
+
+#ifdef cthread_sp
+#undef cthread_sp
+#endif
+
+int
+cthread_sp()
+{
+ int x;
+
+ return (int) &x;
+}
+
diff --git a/libthreads/mig_support.c b/libthreads/mig_support.c
new file mode 100644
index 00000000..bb9e6a5e
--- /dev/null
+++ b/libthreads/mig_support.c
@@ -0,0 +1,194 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * HISTORY
+ * $Log: mig_support.c,v $
+ * Revision 2.6 91/05/14 17:57:41 mrt
+ * Correcting copyright
+ *
+ * Revision 2.5 91/02/14 14:20:30 mrt
+ * Added new Mach copyright
+ * [91/02/13 12:41:26 mrt]
+ *
+ * Revision 2.4 90/08/07 14:31:41 rpd
+ * Removed RCS keyword nonsense.
+ *
+ * Revision 2.3 90/08/07 14:27:48 rpd
+ * When we recycle the global reply port by giving it to the first
+ * cthread, clear the global reply port. This will take care of
+ * someone accidently calling this twice.
+ * [90/08/07 rwd]
+ *
+ * Revision 2.2 90/06/02 15:14:04 rpd
+ * Converted to new IPC.
+ * [90/03/20 20:56:50 rpd]
+ *
+ * Revision 2.1 89/08/03 17:09:50 rwd
+ * Created.
+ *
+ * 18-Jan-89 David Golub (dbg) at Carnegie-Mellon University
+ * Replaced task_data() by thread_reply().
+ *
+ *
+ * 27-Aug-87 Eric Cooper (ecc) at Carnegie Mellon University
+ * Changed mig_support.c to avoid deadlock that can occur
+ * if tracing is turned on during calls to mig_get_reply_port().
+ *
+ * 10-Aug-87 Eric Cooper (ecc) at Carnegie Mellon University
+ * Changed mig_support.c to use MACH_CALL.
+ * Changed "is_init" to "multithreaded" and reversed its sense.
+ *
+ * 30-Jul-87 Mary Thompson (mrt) at Carnegie Mellon University
+ * Created.
+ */
+/*
+ * File: mig_support.c
+ * Author: Mary R. Thompson, Carnegie Mellon University
+ * Date: July, 1987
+ *
+ * Routines to set and deallocate the mig reply port for the current thread.
+ * Called from mig-generated interfaces.
+ *
+ */
+
+
+#include <mach/mach.h>
+#include <cthreads.h>
+#include "cthread_internals.h"
+
+private boolean_t multithreaded = FALSE;
+/* use a global reply port before becoming multi-threaded */
+private mach_port_t mig_reply_port = MACH_PORT_NULL;
+
+/*
+ * Called by mach_init with 0 before cthread_init is
+ * called and again with initial cproc at end of cthread_init.
+ */
+void
+mig_init(initial)
+ register cproc_t initial;
+{
+ if (initial == NO_CPROC) {
+ /* called from mach_init before cthread_init,
+ possibly after a fork. clear global reply port. */
+
+ multithreaded = FALSE;
+ mig_reply_port = MACH_PORT_NULL;
+ } else {
+ /* recycle global reply port as this cthread's reply port */
+
+ multithreaded = TRUE;
+ initial->reply_port = mig_reply_port;
+ mig_reply_port = MACH_PORT_NULL;
+ }
+}
+
+void
+__mig_init (initial)
+ register cproc_t initial;
+{
+ mig_init (initial);
+}
+
+/*
+ * Called by mig interface code whenever a reply port is needed.
+ */
+mach_port_t
+mig_get_reply_port()
+{
+ register mach_port_t reply_port;
+
+ if (multithreaded) {
+ register cproc_t self;
+
+ self = cproc_self();
+ ASSERT(self != NO_CPROC);
+
+ if ((reply_port = self->reply_port) == MACH_PORT_NULL)
+ self->reply_port = reply_port = mach_reply_port();
+ } else {
+ if ((reply_port = mig_reply_port) == MACH_PORT_NULL)
+ mig_reply_port = reply_port = mach_reply_port();
+ }
+
+ return reply_port;
+}
+
+mach_port_t
+__mig_get_reply_port()
+{
+ return mig_get_reply_port();
+}
+
+/*
+ * Called by mig interface code after a timeout on the reply port.
+ * May also be called by user.
+ */
+void
+mig_dealloc_reply_port()
+{
+ register mach_port_t reply_port;
+
+ if (multithreaded) {
+ register cproc_t self;
+
+ self = cproc_self();
+ ASSERT(self != NO_CPROC);
+
+ reply_port = self->reply_port;
+ self->reply_port = MACH_PORT_NULL;
+ } else {
+ reply_port = mig_reply_port;
+ mig_reply_port = MACH_PORT_NULL;
+ }
+
+ (void) mach_port_mod_refs(mach_task_self(), reply_port,
+ MACH_PORT_RIGHT_RECEIVE, -1);
+}
+
+void
+__mig_dealloc_reply_port ()
+{
+ mig_dealloc_reply_port ();
+}
+
+/* XXX shouldn't need these */
+/* Called by MiG to allocate space. */
+void
+__mig_allocate (vm_address_t *addr,
+ vm_size_t size)
+{
+ if (__vm_allocate (__mach_task_self (), addr, size, 1) != KERN_SUCCESS)
+ *addr = 0;
+}
+
+/* Called by MiG to deallocate space. */
+void
+__mig_deallocate (vm_address_t addr,
+ vm_size_t size)
+{
+ (void) __vm_deallocate (__mach_task_self (), addr, size);
+}
diff --git a/libthreads/options.h b/libthreads/options.h
new file mode 100644
index 00000000..952720de
--- /dev/null
+++ b/libthreads/options.h
@@ -0,0 +1,90 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * HISTORY
+ * $Log: options.h,v $
+ * Revision 2.8 91/05/14 17:58:35 mrt
+ * Correcting copyright
+ *
+ * Revision 2.7 91/02/14 14:21:03 mrt
+ * Added new Mach copyright
+ * [91/02/13 12:41:31 mrt]
+ *
+ * Revision 2.6 90/09/09 14:35:04 rpd
+ * Remove special option , debug_mutex and thread_calls.
+ * [90/08/24 rwd]
+ *
+ * Revision 2.5 90/06/02 15:14:14 rpd
+ * Removed RCS Source, Header lines.
+ * [90/05/03 00:07:27 rpd]
+ *
+ * Revision 2.4 90/03/14 21:12:15 rwd
+ * Added new option:
+ * WAIT_DEBUG: keep track of who a blocked thread is
+ * waiting for.
+ * [90/03/01 rwd]
+ *
+ * Revision 2.3 90/01/19 14:37:25 rwd
+ * New option:
+ * THREAD_CALLS: cthread_* version of thread_* calls.
+ * [90/01/03 rwd]
+ *
+ * Revision 2.2 89/12/08 19:54:09 rwd
+ * Added code:
+ * MUTEX_SPECIAL: Have extra kernel threads available for
+ * special mutexes to avoid deadlocks
+ * Removed options:
+ * MSGOPT, RECEIVE_YIELD
+ * [89/11/25 rwd]
+ * Added option:
+ * MUTEX_SPECIAL: Allow special mutexes which will
+ * garuntee the resulting threads runs
+ * on a mutex_unlock
+ * [89/11/21 rwd]
+ * Options added are:
+ * STATISTICS: collect [kernel/c]thread state stats.
+ * SPIN_RESCHED: call swtch_pri(0) when spin will block.
+ * MSGOPT: try to minimize message sends
+ * CHECK_STATUS: check status of mach calls
+ * RECEIVE_YIELD: yield thread if no waiting threads after
+ * cthread_msg_receive
+ * RED_ZONE: make redzone at end of stacks
+ * DEBUG_MUTEX: in conjunction with same in cthreads.h
+ * use slow mutex with held=cproc_self().
+ * [89/11/13 rwd]
+ * Added copyright. Removed all options.
+ * [89/10/23 rwd]
+ *
+ */
+/*
+ * options.h
+ */
+
+/*#define STATISTICS*/
+#define SPIN_RESCHED
+/*#define CHECK_STATUS*/
+/*#define RED_ZONE*/
+#define WAIT_DEBUG
diff --git a/libthreads/rwlock.c b/libthreads/rwlock.c
new file mode 100644
index 00000000..93533a97
--- /dev/null
+++ b/libthreads/rwlock.c
@@ -0,0 +1,2 @@
+#define RWLOCK_EI
+#include "rwlock.h"
diff --git a/libthreads/rwlock.h b/libthreads/rwlock.h
new file mode 100644
index 00000000..1a61eeea
--- /dev/null
+++ b/libthreads/rwlock.h
@@ -0,0 +1,111 @@
+/* Simple reader/writer locks.
+
+ Copyright (C) 1994, 1995, 1996 Free Software Foundation, Inc.
+
+ This program is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 2, or (at
+ your option) any later version.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+#ifndef _RWLOCK_H
+#define _RWLOCK_H 1
+
+#include <cthreads.h>
+#include <assert.h>
+
+struct rwlock
+{
+ struct mutex master;
+ struct condition wakeup;
+ int readers;
+ int writers_waiting;
+ int readers_waiting;
+};
+
+#ifndef RWLOCK_EI
+#define RWLOCK_EI extern inline
+#endif
+
+/* Get a reader lock on reader-writer lock LOCK for disknode DN */
+RWLOCK_EI void
+rwlock_reader_lock (struct rwlock *lock)
+{
+ mutex_lock (&lock->master);
+ if (lock->readers == -1 || lock->writers_waiting)
+ {
+ lock->readers_waiting++;
+ do
+ condition_wait (&lock->wakeup, &lock->master);
+ while (lock->readers == -1 || lock->writers_waiting);
+ lock->readers_waiting--;
+ }
+ lock->readers++;
+ mutex_unlock (&lock->master);
+}
+
+/* Get a writer lock on reader-writer lock LOCK for disknode DN */
+RWLOCK_EI void
+rwlock_writer_lock (struct rwlock *lock)
+{
+ mutex_lock (&lock->master);
+ if (lock->readers)
+ {
+ lock->writers_waiting++;
+ do
+ condition_wait (&lock->wakeup, &lock->master);
+ while (lock->readers);
+ lock->writers_waiting--;
+ }
+ lock->readers = -1;
+ mutex_unlock (&lock->master);
+}
+
+/* Release a reader lock on reader-writer lock LOCK for disknode DN */
+RWLOCK_EI void
+rwlock_reader_unlock (struct rwlock *lock)
+{
+ mutex_lock (&lock->master);
+ assert (lock->readers);
+ lock->readers--;
+ if (lock->readers_waiting || lock->writers_waiting)
+ condition_broadcast (&lock->wakeup);
+ mutex_unlock (&lock->master);
+}
+
+/* Release a writer lock on reader-writer lock LOCK for disknode DN */
+RWLOCK_EI void
+rwlock_writer_unlock (struct rwlock *lock)
+{
+ mutex_lock (&lock->master);
+ assert (lock->readers == -1);
+ lock->readers = 0;
+ if (lock->readers_waiting || lock->writers_waiting)
+ condition_broadcast (&lock->wakeup);
+ mutex_unlock (&lock->master);
+}
+
+/* Initialize reader-writer lock LOCK */
+RWLOCK_EI void
+rwlock_init (struct rwlock *lock)
+{
+ mutex_init (&lock->master);
+ condition_init (&lock->wakeup);
+ lock->readers = 0;
+ lock->readers_waiting = 0;
+ lock->writers_waiting = 0;
+}
+
+#define RWLOCK_INITIALIZER \
+ { MUTEX_INITIALIZER, CONDITION_INITIALIZER, 0, 0, 0 }
+
+
+#endif /* rwlock.h */
diff --git a/libthreads/stack.c b/libthreads/stack.c
new file mode 100644
index 00000000..def4dc69
--- /dev/null
+++ b/libthreads/stack.c
@@ -0,0 +1,423 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * HISTORY
+ * $Log: stack.c,v $
+ * Revision 1.5 1995/12/10 13:41:30 roland
+ * (addr_range_check, probe_stack): Functions #if 0'd out.
+ * (stack_init): Don't call probe_stack or frob old stack at all.
+ * Default cthread_stack_size to 16 pages if it is zero.
+ *
+ * Revision 1.4 1994/05/05 16:00:09 roland
+ * entered into RCS
+ *
+ * Revision 2.13 92/01/14 16:48:54 rpd
+ * Fixed addr_range_check to deallocate the object port from vm_region.
+ * [92/01/14 rpd]
+ *
+ * Revision 2.12 92/01/03 20:37:10 dbg
+ * Export cthread_stack_size, and use it if non-zero instead of
+ * probing the stack. Fix error in deallocating unused initial
+ * stack (STACK_GROWTH_UP case).
+ * [91/08/28 dbg]
+ *
+ * Revision 2.11 91/07/31 18:39:34 dbg
+ * Fix some bad stack references (stack direction).
+ * [91/07/30 17:36:50 dbg]
+ *
+ * Revision 2.10 91/05/14 17:58:49 mrt
+ * Correcting copyright
+ *
+ * Revision 2.9 91/02/14 14:21:08 mrt
+ * Added new Mach copyright
+ * [91/02/13 12:41:35 mrt]
+ *
+ * Revision 2.8 90/11/05 18:10:46 rpd
+ * Added cproc_stack_base. Add stack_fork_child().
+ * [90/11/01 rwd]
+ *
+ * Revision 2.7 90/11/05 14:37:51 rpd
+ * Fixed addr_range_check for new vm_region semantics.
+ * [90/11/02 rpd]
+ *
+ * Revision 2.6 90/10/12 13:07:34 rpd
+ * Deal with positively growing stacks.
+ * [90/10/10 rwd]
+ * Deal with initial user stacks that are not perfectly aligned.
+ * [90/09/26 11:51:46 rwd]
+ *
+ * Leave extra stack page around in case it is needed before we
+ * switch stacks.
+ * [90/09/25 rwd]
+ *
+ * Revision 2.5 90/08/07 14:31:46 rpd
+ * Removed RCS keyword nonsense.
+ *
+ * Revision 2.4 90/06/02 15:14:18 rpd
+ * Moved cthread_sp to machine-dependent files.
+ * [90/04/24 rpd]
+ * Converted to new IPC.
+ * [90/03/20 20:56:35 rpd]
+ *
+ * Revision 2.3 90/01/19 14:37:34 rwd
+ * Move self pointer to top of stack
+ * [89/12/12 rwd]
+ *
+ * Revision 2.2 89/12/08 19:49:52 rwd
+ * Back out change from af.
+ * [89/12/08 rwd]
+ *
+ * Revision 2.1.1.3 89/12/06 12:54:17 rwd
+ * Gap fix from af
+ * [89/12/06 rwd]
+ *
+ * Revision 2.1.1.2 89/11/21 15:01:40 rwd
+ * Add RED_ZONE ifdef.
+ * [89/11/20 rwd]
+ *
+ * Revision 2.1.1.1 89/10/24 13:00:44 rwd
+ * Remove conditionals.
+ * [89/10/23 rwd]
+ *
+ * Revision 2.1 89/08/03 17:10:05 rwd
+ * Created.
+ *
+ * 18-Jan-89 David Golub (dbg) at Carnegie-Mellon University
+ * Altered for stand-alone use:
+ * use vm_region to probe for the bottom of the initial thread's
+ * stack.
+ *
+ *
+ * 01-Dec-87 Eric Cooper (ecc) at Carnegie Mellon University
+ * Changed cthread stack allocation to use aligned stacks
+ * and store self pointer at base of stack.
+ * Added inline expansion for cthread_sp() function.
+ */
+/*
+ * File: stack.c
+ * Author: Eric Cooper, Carnegie Mellon University
+ * Date: Dec, 1987
+ *
+ * C Thread stack allocation.
+ *
+ */
+
+#include <cthreads.h>
+#include "cthread_internals.h"
+#include <hurd/threadvar.h>
+
+#define BYTES_TO_PAGES(b) (((b) + vm_page_size - 1) / vm_page_size)
+
+int cthread_stack_mask;
+vm_size_t cthread_stack_size;
+private vm_address_t next_stack_base;
+
+vm_offset_t cproc_stack_base(); /* forward */
+
+/*
+ * Set up a stack segment for a thread.
+ * Segment has a red zone (invalid page)
+ * for early detection of stack overflow.
+ * The cproc_self pointer is stored at the top.
+ *
+ * --------- (high address)
+ * | self |
+ * | ... |
+ * | |
+ * | stack |
+ * | |
+ * | ... |
+ * | |
+ * ---------
+ * | |
+ * |invalid|
+ * | |
+ * --------- (stack base)
+ * --------- (low address)
+ *
+ * or the reverse, if the stack grows up.
+ */
+
+private void
+setup_stack(p, base)
+ register cproc_t p;
+ register vm_address_t base;
+{
+ register kern_return_t r;
+
+ p->stack_base = base;
+ /*
+ * Stack size is segment size minus size of self pointer
+ */
+ p->stack_size = cthread_stack_size;
+ /*
+ * Protect red zone.
+ */
+#ifdef RED_ZONE
+ MACH_CALL(vm_protect(mach_task_self(), base + vm_page_size, vm_page_size, FALSE, VM_PROT_NONE), r);
+#endif RED_ZONE
+ /*
+ * Store self pointer.
+ */
+ *(cproc_t *)&ur_cthread_ptr(base) = p;
+}
+
+#if 0 /* roland@gnu */
+vm_offset_t
+addr_range_check(start_addr, end_addr, desired_protection)
+ vm_offset_t start_addr, end_addr;
+ vm_prot_t desired_protection;
+{
+ register vm_offset_t addr;
+
+ addr = start_addr;
+ while (addr < end_addr) {
+ vm_offset_t r_addr;
+ vm_size_t r_size;
+ vm_prot_t r_protection,
+ r_max_protection;
+ vm_inherit_t r_inheritance;
+ boolean_t r_is_shared;
+ memory_object_name_t r_object_name;
+ vm_offset_t r_offset;
+ kern_return_t kr;
+
+ r_addr = addr;
+ kr = vm_region(mach_task_self(), &r_addr, &r_size,
+ &r_protection, &r_max_protection, &r_inheritance,
+ &r_is_shared, &r_object_name, &r_offset);
+ if ((kr == KERN_SUCCESS) && MACH_PORT_VALID(r_object_name))
+ (void) mach_port_deallocate(mach_task_self(), r_object_name);
+
+ if ((kr != KERN_SUCCESS) ||
+ (r_addr > addr) ||
+ ((r_protection & desired_protection) != desired_protection))
+ return (0);
+ addr = r_addr + r_size;
+ }
+ return (addr);
+}
+
+/*
+ * Probe for bottom and top of stack.
+ * Assume:
+ * 1. stack grows DOWN
+ * 2. There is an unallocated region below the stack.
+ */
+void
+probe_stack(stack_bottom, stack_top)
+ vm_offset_t *stack_bottom;
+ vm_offset_t *stack_top;
+{
+ /*
+ * Since vm_region returns the region starting at
+ * or ABOVE the given address, we cannot use it
+ * directly to search downwards. However, we
+ * also want a size that is the closest power of
+ * 2 to the stack size (so we can mask off the stack
+ * address and get the stack base). So we probe
+ * in increasing powers of 2 until we find a gap
+ * in the stack.
+ */
+ vm_offset_t start_addr, end_addr;
+ vm_offset_t last_start_addr, last_end_addr;
+ vm_size_t stack_size;
+
+ /*
+ * Start with a page
+ */
+ start_addr = cthread_sp() & ~(vm_page_size - 1);
+ end_addr = start_addr + vm_page_size;
+
+ stack_size = vm_page_size;
+
+ /*
+ * Increase the tentative stack size, by doubling each
+ * time, until we have exceeded the stack (some of the
+ * range is not valid).
+ */
+ do {
+ /*
+ * Save last addresses
+ */
+ last_start_addr = start_addr;
+ last_end_addr = end_addr;
+
+ /*
+ * Double the stack size
+ */
+ stack_size <<= 1;
+ start_addr = end_addr - stack_size;
+
+ /*
+ * Check that the entire range exists and is writable
+ */
+ } while (end_addr = (addr_range_check(start_addr,
+ end_addr,
+ VM_PROT_READ|VM_PROT_WRITE)));
+ /*
+ * Back off to previous power of 2.
+ */
+ *stack_bottom = last_start_addr;
+ *stack_top = last_end_addr;
+}
+#endif
+
+/* For GNU: */
+extern unsigned long int __hurd_threadvar_stack_mask;
+extern unsigned long int __hurd_threadvar_stack_offset;
+extern unsigned int __hurd_threadvar_max;
+
+vm_offset_t
+stack_init(p)
+ cproc_t p;
+{
+#if 0
+ vm_offset_t stack_bottom,
+ stack_top,
+ start;
+ vm_size_t size;
+ kern_return_t r;
+#endif
+
+ void alloc_stack();
+
+#if 0
+ /*
+ * Probe for bottom and top of stack, as a power-of-2 size.
+ */
+ probe_stack(&stack_bottom, &stack_top);
+
+ /*
+ * Use the stack size found for the Cthread stack size,
+ * if not already specified.
+ */
+ if (cthread_stack_size == 0)
+ cthread_stack_size = stack_top - stack_bottom;
+#else /* roland@gnu */
+ if (cthread_stack_size == 0)
+ cthread_stack_size = vm_page_size * 16; /* Reasonable default. */
+#endif
+
+#ifdef STACK_GROWTH_UP
+ cthread_stack_mask = ~(cthread_stack_size - 1);
+#else STACK_GROWTH_UP
+ cthread_stack_mask = cthread_stack_size - 1;
+#endif STACK_GROWTH_UP
+
+ /* Set up the variables so GNU can find its per-thread variables. */
+ __hurd_threadvar_stack_mask = ~(cthread_stack_size - 1);
+ /* The GNU per-thread variables will be stored just after the
+ cthread-self pointer at the base of the stack. */
+#ifdef STACK_GROWTH_UP
+ __hurd_threadvar_stack_offset = sizeof (ur_cthread_t *);
+#else
+ __hurd_threadvar_stack_offset = (cthread_stack_size -
+ sizeof (ur_cthread_t *) -
+ __hurd_threadvar_max * sizeof (long));
+#endif
+
+ /*
+ * Guess at first available region for stack.
+ */
+ next_stack_base = 0;
+
+ /*
+ * Set up stack for main thread.
+ */
+ alloc_stack(p);
+
+#if 0
+ /*
+ * Delete rest of old stack.
+ */
+
+#ifdef STACK_GROWTH_UP
+ start = (cthread_sp() | (vm_page_size - 1)) + 1 + vm_page_size;
+ size = stack_top - start;
+#else STACK_GROWTH_UP
+ start = stack_bottom;
+ size = (cthread_sp() & ~(vm_page_size - 1)) - stack_bottom -
+ vm_page_size;
+#endif STACK_GROWTH_UP
+ MACH_CALL(vm_deallocate(mach_task_self(),start,size),r);
+#endif
+
+ /*
+ * Return new stack; it gets passed back to the caller
+ * of cthread_init who must switch to it.
+ */
+ return cproc_stack_base(p,
+ sizeof(ur_cthread_t *) +
+ /* Account for GNU per-thread variables. */
+ __hurd_threadvar_max * sizeof (long int));
+}
+
+/*
+ * Allocate a stack segment for a thread.
+ * Stacks are never deallocated.
+ *
+ * The variable next_stack_base is used to align stacks.
+ * It may be updated by several threads in parallel,
+ * but mutual exclusion is unnecessary: at worst,
+ * the vm_allocate will fail and the thread will try again.
+ */
+
+void
+alloc_stack(p)
+ cproc_t p;
+{
+ vm_address_t base = next_stack_base;
+
+ for (base = next_stack_base;
+ vm_allocate(mach_task_self(), &base, cthread_stack_size, FALSE) != KERN_SUCCESS;
+ base += cthread_stack_size)
+ ;
+ next_stack_base = base + cthread_stack_size;
+ setup_stack(p, base);
+}
+
+vm_offset_t
+cproc_stack_base(cproc, offset)
+ register cproc_t cproc;
+ register int offset;
+{
+#ifdef STACK_GROWTH_UP
+ return (cproc->stack_base + offset);
+#else STACK_GROWTH_UP
+ return (cproc->stack_base + cproc->stack_size - offset);
+#endif STACK_GROWTH_UP
+
+}
+
+void stack_fork_child()
+/*
+ * Called in the child after a fork(). Resets stack data structures to
+ * coincide with the reality that we now have a single cproc and cthread.
+ */
+{
+ next_stack_base = 0;
+}
diff --git a/libthreads/sync.c b/libthreads/sync.c
new file mode 100644
index 00000000..e17280aa
--- /dev/null
+++ b/libthreads/sync.c
@@ -0,0 +1,83 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * HISTORY
+ * $Log: sync.c,v $
+ * Revision 2.7 92/03/06 14:09:59 rpd
+ * Replaced swtch_pri with yield.
+ * [92/03/06 rpd]
+ *
+ * Revision 2.6 91/05/14 17:59:54 mrt
+ * Correcting copyright
+ *
+ * Revision 2.5 91/02/14 14:21:38 mrt
+ * Added new Mach copyright
+ * [91/02/13 12:41:42 mrt]
+ *
+ * Revision 2.4 90/11/05 14:38:08 rpd
+ * Fix casting. Use new macros.
+ * [90/10/31 rwd]
+ *
+ * Revision 2.3 90/08/07 14:31:50 rpd
+ * Removed RCS keyword nonsense.
+ *
+ * Revision 2.2 89/12/08 19:55:01 rwd
+ * Changed spin_lock to spin_lock_solid to optimize.
+ * [89/11/13 rwd]
+ * Added copyright. Move mutexes to cproc.c. Spin locks are now
+ * old mutexes.
+ * [89/10/23 rwd]
+ *
+ */
+/*
+ * sync.c
+ *
+ * Spin locks
+ */
+
+#include <cthreads.h>
+#include "cthread_internals.h"
+
+/*
+ * Spin locks.
+ * Use test and test-and-set logic on all architectures.
+ */
+
+int cthread_spin_count=0;
+
+void
+spin_lock_solid(p)
+ register spin_lock_t *p;
+{
+ while (spin_lock_locked(p) || !spin_try_lock(p)) {
+#ifdef STATISTICS
+ cthread_spin_count++;
+#endif
+#ifdef SPIN_RESCHED
+ yield();
+#endif
+ }
+}