Diffstat (limited to 'pfinet')
-rw-r--r--  pfinet/Makefile                        |  3
-rw-r--r--  pfinet/ethernet.c                      | 21
-rw-r--r--  pfinet/glue-include/asm/spinlock.h     |  2
-rw-r--r--  pfinet/glue-include/linux/interrupt.h  | 10
-rw-r--r--  pfinet/glue-include/linux/sched.h      | 14
-rw-r--r--  pfinet/glue-include/linux/timer.h      |  2
-rw-r--r--  pfinet/glue-include/linux/wait.h       |  8
-rw-r--r--  pfinet/iioctl-ops.c                    | 24
-rw-r--r--  pfinet/io-ops.c                        | 52
-rw-r--r--  pfinet/kmem_cache.c                    | 16
-rw-r--r--  pfinet/main.c                          | 14
-rw-r--r--  pfinet/options.c                       | 10
-rw-r--r--  pfinet/pfinet-ops.c                    |  6
-rw-r--r--  pfinet/pfinet.h                        |  7
-rw-r--r--  pfinet/sched.c                         | 18
-rw-r--r--  pfinet/socket-ops.c                    | 52
-rw-r--r--  pfinet/socket.c                        | 10
-rw-r--r--  pfinet/timer-emul.c                    | 25
-rw-r--r--  pfinet/tunnel.c                        | 54
19 files changed, 188 insertions(+), 160 deletions(-)
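Most of the hunks below are the same mechanical substitution: every `__mutex_lock`/`__mutex_unlock` on `global_lock` in the RPC handlers becomes `pthread_mutex_lock`/`pthread_mutex_unlock`, with `struct mutex`/`struct condition` becoming `pthread_mutex_t`/`pthread_cond_t`. A minimal sketch of that idiom, with a hypothetical handler standing in for the real ones:

```c
#include <pthread.h>
#include <errno.h>

static pthread_mutex_t global_lock = PTHREAD_MUTEX_INITIALIZER;
static int shared_state;

/* Hypothetical RPC handler, not one from the diff: take global_lock
   before touching Linux-stack state, release it on every exit path,
   and return errno-style codes as pfinet's handlers do.  */
int
S_example_op (int arg)
{
  int err = 0;

  pthread_mutex_lock (&global_lock);
  if (arg < 0)
    err = EINVAL;
  else
    shared_state = arg;
  pthread_mutex_unlock (&global_lock);

  return err;
}
```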
diff --git a/pfinet/Makefile b/pfinet/Makefile index 48b47915..d442629d 100644 --- a/pfinet/Makefile +++ b/pfinet/Makefile @@ -114,7 +114,8 @@ FROBBEDLINUXHEADERS = autoconf.h binfmts.h config.h errno.h fcntl.h fs.h \ ASMHEADERS = atomic.h bitops.h byteorder.h delay.h errno.h hardirq.h init.h \ segment.h spinlock.h system.h types.h uaccess.h -HURDLIBS=trivfs fshelp threads ports ihash shouldbeinlibc iohelp +HURDLIBS=trivfs fshelp ports ihash shouldbeinlibc iohelp +OTHERLIBS = -lpthread target = pfinet diff --git a/pfinet/ethernet.c b/pfinet/ethernet.c index 0fd76706..447d5b06 100644 --- a/pfinet/ethernet.c +++ b/pfinet/ethernet.c @@ -98,13 +98,13 @@ static int bpf_ether_filter_len = sizeof (bpf_ether_filter) / sizeof (short); static struct port_bucket *etherport_bucket; -static any_t -ethernet_thread (any_t arg) +static void * +ethernet_thread (void *arg) { ports_manage_port_operations_one_thread (etherport_bucket, ethernet_demuxer, 0); - return 0; + return NULL; } int @@ -134,7 +134,7 @@ ethernet_demuxer (mach_msg_header_t *inp, datalen = ETH_HLEN + msg->packet_type.msgt_number - sizeof (struct packet_header); - __mutex_lock (&net_bh_lock); + pthread_mutex_lock (&net_bh_lock); skb = alloc_skb (datalen, GFP_ATOMIC); skb_put (skb, datalen); skb->dev = dev; @@ -148,7 +148,7 @@ ethernet_demuxer (mach_msg_header_t *inp, /* Drop it on the queue. */ skb->protocol = eth_type_trans (skb, dev); netif_rx (skb); - __mutex_unlock (&net_bh_lock); + pthread_mutex_unlock (&net_bh_lock); return 1; } @@ -157,10 +157,19 @@ ethernet_demuxer (mach_msg_header_t *inp, void ethernet_initialize (void) { + pthread_t thread; + error_t err; etherport_bucket = ports_create_bucket (); etherreadclass = ports_create_class (0, 0); - cthread_detach (cthread_fork (ethernet_thread, 0)); + err = pthread_create (&thread, NULL, ethernet_thread, NULL); + if (!err) + pthread_detach (thread); + else + { + errno = err; + perror ("pthread_create"); + } } int diff --git a/pfinet/glue-include/asm/spinlock.h b/pfinet/glue-include/asm/spinlock.h index 1666b0e2..ef13312d 100644 --- a/pfinet/glue-include/asm/spinlock.h +++ b/pfinet/glue-include/asm/spinlock.h @@ -1,8 +1,6 @@ #ifndef _HACK_ASM_SPINLOCK_H_ #define _HACK_ASM_SPINLOCK_H_ -#include <cthreads.h> - typedef struct { } spinlock_t; #define SPIN_LOCK_UNLOCKED { } diff --git a/pfinet/glue-include/linux/interrupt.h b/pfinet/glue-include/linux/interrupt.h index 5f485e32..df58d2f4 100644 --- a/pfinet/glue-include/linux/interrupt.h +++ b/pfinet/glue-include/linux/interrupt.h @@ -14,13 +14,13 @@ #define start_bh_atomic() ((void) 0) #define end_bh_atomic() ((void) 0) /* -extern struct mutex net_bh_lock; -#define start_bh_atomic() __mutex_lock (&net_bh_lock) -#define end_bh_atomic() __mutex_unlock (&net_bh_lock) +extern pthread_mutex_t net_bh_lock; +#define start_bh_atomic() pthread_mutex_lock (&net_bh_lock) +#define end_bh_atomic() pthread_mutex_unlock (&net_bh_lock) */ /* See sched.c::net_bh_worker comments. 
*/ -extern struct condition net_bh_wakeup; +extern pthread_cond_t net_bh_wakeup; #define NET_BH 0xb00bee51 @@ -30,7 +30,7 @@ static inline void mark_bh (int bh) { assert (bh == NET_BH); - condition_broadcast (&net_bh_wakeup); + pthread_cond_broadcast (&net_bh_wakeup); } void net_bh (void); diff --git a/pfinet/glue-include/linux/sched.h b/pfinet/glue-include/linux/sched.h index d4cae42a..aea6c47a 100644 --- a/pfinet/glue-include/linux/sched.h +++ b/pfinet/glue-include/linux/sched.h @@ -7,7 +7,7 @@ #include <hurd/hurd_types.h> #include <limits.h> #include <assert.h> -#include <cthreads.h> +#include <pthread.h> #include "mapped-time.h" @@ -90,12 +90,12 @@ capable(int cap) } -extern struct mutex global_lock; +extern pthread_mutex_t global_lock; static inline void interruptible_sleep_on (struct wait_queue **p) { - struct condition **condp = (void *) p, *c; + pthread_cond_t **condp = (void *) p, *c; int isroot; struct wait_queue **next_wait; @@ -104,14 +104,14 @@ interruptible_sleep_on (struct wait_queue **p) { c = malloc (sizeof **condp); assert (c); - condition_init (c); + pthread_cond_init (c, NULL); *condp = c; } isroot = current->isroot; /* This is our context that needs switched. */ next_wait = current->next_wait; /* This too, for multiple schedule calls. */ current->next_wait = 0; - if (hurd_condition_wait (c, &global_lock)) + if (pthread_hurd_cond_wait_np (c, &global_lock)) current->signal = 1; /* We got cancelled, mark it for later. */ current->isroot = isroot; /* Switch back to our context. */ current->next_wait = next_wait; @@ -121,9 +121,9 @@ interruptible_sleep_on (struct wait_queue **p) static inline void wake_up_interruptible (struct wait_queue **p) { - struct condition **condp = (void *) p, *c = *condp; + pthread_cond_t **condp = (void *) p, *c = *condp; if (c) - condition_broadcast (c); + pthread_cond_broadcast (c); } #define wake_up wake_up_interruptible diff --git a/pfinet/glue-include/linux/timer.h b/pfinet/glue-include/linux/timer.h index cc8dec80..5497b109 100644 --- a/pfinet/glue-include/linux/timer.h +++ b/pfinet/glue-include/linux/timer.h @@ -1,7 +1,7 @@ #ifndef _HACK_TIMER_H_ #define _HACK_TIMER_H_ -#include <cthreads.h> +#include <pthread.h> enum tstate { diff --git a/pfinet/glue-include/linux/wait.h b/pfinet/glue-include/linux/wait.h index 7ee962dc..58f4960e 100644 --- a/pfinet/glue-include/linux/wait.h +++ b/pfinet/glue-include/linux/wait.h @@ -1,14 +1,14 @@ #ifndef _HACK_WAIT_H_ #define _HACK_WAIT_H_ -#include <cthreads.h> +#include <pthread.h> /* This data structure actually represents one waiter on a wait queue, and waiters always expect to initialize it with { current, NULL }. The actual wait queue is a `struct wait_queue *' stored somewhere. We ignore these structures provided by the waiters entirely. In the `struct wait_queue *' that is the "head of the wait queue" slot, - we actually store a `struct condition *' pointing to malloc'd storage. */ + we actually store a `pthread_cond_t *' pointing to malloc'd storage. 
*/ struct wait_queue { @@ -19,13 +19,13 @@ struct wait_queue struct select_table_elt { - struct condition *dependent_condition; + pthread_cond_t *dependent_condition; struct select_table_elt *next; }; typedef struct select_table_struct { - struct condition master_condition; + pthread_cond_t master_condition; struct select_table_elt *head; } select_table; diff --git a/pfinet/iioctl-ops.c b/pfinet/iioctl-ops.c index c0dd6d5f..9904b9b3 100644 --- a/pfinet/iioctl-ops.c +++ b/pfinet/iioctl-ops.c @@ -55,7 +55,7 @@ struct device *get_dev (char *name) memcpy (ifname, name, IFNAMSIZ-1); ifname[IFNAMSIZ-1] = 0; - __mutex_lock (&global_lock); + pthread_mutex_lock (&global_lock); for (dev = dev_base; dev; dev = dev->next) if (strcmp (dev->name, ifname) == 0) @@ -110,7 +110,7 @@ siocgifXaddr (io_t port, sin->sin_addr.s_addr = addrs[type]; } - __mutex_unlock (&global_lock); + pthread_mutex_unlock (&global_lock); end_using_socket_port (user); return err; } @@ -157,7 +157,7 @@ siocsifXaddr (io_t port, err = configure_device (dev, addrs[0], addrs[1], addrs[2], addrs[3]); } - __mutex_unlock (&global_lock); + pthread_mutex_unlock (&global_lock); end_using_socket_port (user); return err; } @@ -194,7 +194,7 @@ S_iioctl_siocsifflags (io_t port, err = ethernet_change_flags (dev, flags); } - __mutex_unlock (&global_lock); + pthread_mutex_unlock (&global_lock); end_using_socket_port (user); return err; } @@ -215,7 +215,7 @@ S_iioctl_siocgifflags (io_t port, { *flags = dev->flags; } - __mutex_unlock (&global_lock); + pthread_mutex_unlock (&global_lock); return err; } @@ -241,7 +241,7 @@ S_iioctl_siocgifmetric (io_t port, { *metric = 0; /* Not supported. */ } - __mutex_unlock (&global_lock); + pthread_mutex_unlock (&global_lock); return err; } @@ -296,7 +296,7 @@ S_iioctl_siocgifhwaddr (io_t port, addr->sa_family = dev->type; } - __mutex_unlock (&global_lock); + pthread_mutex_unlock (&global_lock); return err; } @@ -316,7 +316,7 @@ S_iioctl_siocgifmtu (io_t port, { *mtu = dev->mtu; } - __mutex_unlock (&global_lock); + pthread_mutex_unlock (&global_lock); return err; } @@ -351,7 +351,7 @@ S_iioctl_siocsifmtu (io_t port, notifier_call_chain (&netdev_chain, NETDEV_CHANGEMTU, dev); } - __mutex_unlock (&global_lock); + pthread_mutex_unlock (&global_lock); end_using_socket_port (user); return err; } @@ -372,7 +372,7 @@ S_iioctl_siocgifindex (io_t port, { *index = dev->ifindex; } - __mutex_unlock (&global_lock); + pthread_mutex_unlock (&global_lock); return err; } @@ -385,7 +385,7 @@ S_iioctl_siocgifname (io_t port, error_t err = 0; struct device *dev; - __mutex_lock (&global_lock); + pthread_mutex_lock (&global_lock); dev = dev_get_by_index (*index); if (!dev) err = ENODEV; @@ -394,7 +394,7 @@ S_iioctl_siocgifname (io_t port, strncpy (ifnam, dev->name, IFNAMSIZ); ifnam[IFNAMSIZ-1] = '\0'; } - __mutex_unlock (&global_lock); + pthread_mutex_unlock (&global_lock); return err; } diff --git a/pfinet/io-ops.c b/pfinet/io-ops.c index ef8d8513..0236c594 100644 --- a/pfinet/io-ops.c +++ b/pfinet/io-ops.c @@ -48,12 +48,12 @@ S_io_write (struct sock_user *user, if (!user) return EOPNOTSUPP; - __mutex_lock (&global_lock); + pthread_mutex_lock (&global_lock); become_task (user); if (user->sock->flags & O_NONBLOCK) m.msg_flags |= MSG_DONTWAIT; err = (*user->sock->ops->sendmsg) (user->sock, &m, datalen, 0); - __mutex_unlock (&global_lock); + pthread_mutex_unlock (&global_lock); if (err < 0) err = -err; @@ -98,13 +98,13 @@ S_io_read (struct sock_user *user, iov.iov_base = *data; iov.iov_len = amount; - __mutex_lock (&global_lock); + 
pthread_mutex_lock (&global_lock); become_task (user); err = (*user->sock->ops->recvmsg) (user->sock, &m, amount, ((user->sock->flags & O_NONBLOCK) ? MSG_DONTWAIT : 0), 0); - __mutex_unlock (&global_lock); + pthread_mutex_unlock (&global_lock); if (err < 0) { @@ -142,7 +142,7 @@ S_io_readable (struct sock_user *user, if (!user) return EOPNOTSUPP; - __mutex_lock (&global_lock); + pthread_mutex_lock (&global_lock); become_task (user); /* We need to avoid calling the Linux ioctl routines, @@ -178,7 +178,7 @@ S_io_readable (struct sock_user *user, break; } - __mutex_unlock (&global_lock); + pthread_mutex_unlock (&global_lock); return err; } @@ -189,12 +189,12 @@ S_io_set_all_openmodes (struct sock_user *user, if (!user) return EOPNOTSUPP; - __mutex_lock (&global_lock); + pthread_mutex_lock (&global_lock); if (bits & O_NONBLOCK) user->sock->flags |= O_NONBLOCK; else user->sock->flags &= ~O_NONBLOCK; - __mutex_unlock (&global_lock); + pthread_mutex_unlock (&global_lock); return 0; } @@ -207,7 +207,7 @@ S_io_get_openmodes (struct sock_user *user, if (!user) return EOPNOTSUPP; - __mutex_lock (&global_lock); + pthread_mutex_lock (&global_lock); sk = user->sock->sk; *bits = 0; @@ -218,7 +218,7 @@ S_io_get_openmodes (struct sock_user *user, if (user->sock->flags & O_NONBLOCK) *bits |= O_NONBLOCK; - __mutex_unlock (&global_lock); + pthread_mutex_unlock (&global_lock); return 0; } @@ -229,10 +229,10 @@ S_io_set_some_openmodes (struct sock_user *user, if (!user) return EOPNOTSUPP; - __mutex_lock (&global_lock); + pthread_mutex_lock (&global_lock); if (bits & O_NONBLOCK) user->sock->flags |= O_NONBLOCK; - __mutex_unlock (&global_lock); + pthread_mutex_unlock (&global_lock); return 0; } @@ -243,10 +243,10 @@ S_io_clear_some_openmodes (struct sock_user *user, if (!user) return EOPNOTSUPP; - __mutex_lock (&global_lock); + pthread_mutex_lock (&global_lock); if (bits & O_NONBLOCK) user->sock->flags &= ~O_NONBLOCK; - __mutex_unlock (&global_lock); + pthread_mutex_unlock (&global_lock); return 0; } @@ -262,7 +262,7 @@ S_io_select (struct sock_user *user, if (!user) return EOPNOTSUPP; - __mutex_lock (&global_lock); + pthread_mutex_lock (&global_lock); become_task (user); /* In Linux, this means (supposedly) that I/O will never be possible. @@ -283,7 +283,7 @@ S_io_select (struct sock_user *user, interruptible_sleep_on (user->sock->sk->sleep); if (signal_pending (current)) /* This means we were cancelled. */ { - __mutex_unlock (&global_lock); + pthread_mutex_unlock (&global_lock); return EINTR; } avail = (*user->sock->ops->poll) ((void *) 0xdeadbeef, @@ -296,7 +296,7 @@ S_io_select (struct sock_user *user, /* We got something. 
*/ *select_type = avail; - __mutex_unlock (&global_lock); + pthread_mutex_unlock (&global_lock); return 0; } @@ -342,7 +342,7 @@ S_io_reauthenticate (struct sock_user *user, aux_uids = aubuf; aux_gids = agbuf; - __mutex_lock (&global_lock); + pthread_mutex_lock (&global_lock); newuser = make_sock_user (user->sock, 0, 1, 0); auth = getauth (); @@ -380,7 +380,7 @@ S_io_reauthenticate (struct sock_user *user, mach_port_move_member (mach_task_self (), newuser->pi.port_right, pfinet_bucket->portset); - __mutex_unlock (&global_lock); + pthread_mutex_unlock (&global_lock); ports_port_deref (newuser); @@ -410,7 +410,7 @@ S_io_restrict_auth (struct sock_user *user, if (!user) return EOPNOTSUPP; - __mutex_lock (&global_lock); + pthread_mutex_lock (&global_lock); isroot = 0; if (user->isroot) @@ -429,7 +429,7 @@ S_io_restrict_auth (struct sock_user *user, *newobject = ports_get_right (newuser); *newobject_type = MACH_MSG_TYPE_MAKE_SEND; ports_port_deref (newuser); - __mutex_unlock (&global_lock); + pthread_mutex_unlock (&global_lock); return 0; } @@ -442,12 +442,12 @@ S_io_duplicate (struct sock_user *user, if (!user) return EOPNOTSUPP; - __mutex_lock (&global_lock); + pthread_mutex_lock (&global_lock); newuser = make_sock_user (user->sock, user->isroot, 0, 0); *newobject = ports_get_right (newuser); *newobject_type = MACH_MSG_TYPE_MAKE_SEND; ports_port_deref (newuser); - __mutex_unlock (&global_lock); + pthread_mutex_unlock (&global_lock); return 0; } @@ -464,14 +464,14 @@ S_io_identity (struct sock_user *user, if (!user) return EOPNOTSUPP; - __mutex_lock (&global_lock); + pthread_mutex_lock (&global_lock); if (user->sock->identity == MACH_PORT_NULL) { err = mach_port_allocate (mach_task_self (), MACH_PORT_RIGHT_RECEIVE, &user->sock->identity); if (err) { - __mutex_unlock (&global_lock); + pthread_mutex_unlock (&global_lock); return err; } } @@ -482,7 +482,7 @@ S_io_identity (struct sock_user *user, *fsystype = MACH_MSG_TYPE_MAKE_SEND; *fileno = user->sock->st_ino; - __mutex_unlock (&global_lock); + pthread_mutex_unlock (&global_lock); return 0; } diff --git a/pfinet/kmem_cache.c b/pfinet/kmem_cache.c index 8c73c9bf..aab192e1 100644 --- a/pfinet/kmem_cache.c +++ b/pfinet/kmem_cache.c @@ -20,12 +20,12 @@ /* Hack replacement for Linux's kmem_cache_t allocator, using plain malloc and cthreads locking. The locking here is probably unnecessary. 
*/ -#include <cthreads.h> +#include <pthread.h> #include <linux/malloc.h> struct kmem_cache_s { - struct mutex lock; + pthread_mutex_t lock; void *freelist; size_t item_size; @@ -43,7 +43,7 @@ kmem_cache_create (const char *name, size_t item_size, kmem_cache_t *new = malloc (sizeof *new); if (!new) return 0; - mutex_init (&new->lock); + pthread_mutex_init (&new->lock, NULL); new->freelist = 0; new->item_size = item_size; new->ctor = ctor; @@ -58,14 +58,14 @@ kmem_cache_alloc (kmem_cache_t *cache, int flags) { void *p; - __mutex_lock (&cache->lock); + pthread_mutex_lock (&cache->lock); p = cache->freelist; if (p != 0) { cache->freelist = *(void **)(p + cache->item_size); - __mutex_unlock (&cache->lock); + pthread_mutex_unlock (&cache->lock); return p; } - __mutex_unlock (&cache->lock); + pthread_mutex_unlock (&cache->lock); p = malloc (cache->item_size + sizeof (void *)); if (p && cache->ctor) @@ -79,10 +79,10 @@ kmem_cache_free (kmem_cache_t *cache, void *p) { void **const nextp = (void **) (p + cache->item_size); - __mutex_lock (&cache->lock); + pthread_mutex_lock (&cache->lock); *nextp = cache->freelist; cache->freelist = p; - __mutex_unlock (&cache->lock); + pthread_mutex_unlock (&cache->lock); /* XXX eventually destroy some... */ } diff --git a/pfinet/main.c b/pfinet/main.c index 1357b037..7ec1bf1c 100644 --- a/pfinet/main.c +++ b/pfinet/main.c @@ -249,6 +249,7 @@ main (int argc, error_t err; mach_port_t bootstrap; struct stat st; + pthread_t thread; pfinet_bucket = ports_create_bucket (); addrport_class = ports_create_class (clean_addrport, 0); @@ -261,9 +262,16 @@ main (int argc, init_time (); ethernet_initialize (); - cthread_detach (cthread_fork (net_bh_worker, 0)); + err = pthread_create (&thread, NULL, net_bh_worker, NULL); + if (!err) + pthread_detach (thread); + else + { + errno = err; + perror ("pthread_create"); + } - __mutex_lock (&global_lock); + pthread_mutex_lock (&global_lock); prepare_current (1); /* Set up to call into Linux initialization. */ @@ -284,7 +292,7 @@ main (int argc, htonl (INADDR_LOOPBACK), htonl (IN_CLASSA_NET), htonl (INADDR_NONE), htonl (INADDR_NONE)); - __mutex_unlock (&global_lock); + pthread_mutex_unlock (&global_lock); /* Parse options. When successful, this configures the interfaces before returning; to do so, it will acquire the global_lock. diff --git a/pfinet/options.c b/pfinet/options.c index 21a35c61..1d0a9e1f 100644 --- a/pfinet/options.c +++ b/pfinet/options.c @@ -350,7 +350,7 @@ parse_opt (int opt, char *arg, struct argp_state *state) } /* Successfully finished parsing, return a result. 
*/ - __mutex_lock (&global_lock); + pthread_mutex_lock (&global_lock); for (in = h->interfaces; in < h->interfaces + h->num_interfaces; in++) { @@ -367,7 +367,7 @@ parse_opt (int opt, char *arg, struct argp_state *state) in->peer, INADDR_NONE); if (err) { - __mutex_unlock (&global_lock); + pthread_mutex_unlock (&global_lock); FAIL (err, 16, 0, "cannot configure interface"); } } @@ -436,7 +436,7 @@ parse_opt (int opt, char *arg, struct argp_state *state) err = - (*tb->tb_delete) (tb, &req.rtm, &rta, &req.nlh, 0); if (err && err != ESRCH) { - __mutex_unlock (&global_lock); + pthread_mutex_unlock (&global_lock); FAIL (err, 17, 0, "cannot remove old default gateway"); } err = 0; @@ -452,7 +452,7 @@ parse_opt (int opt, char *arg, struct argp_state *state) : - (*tb->tb_insert) (tb, &req.rtm, &rta, &req.nlh, 0)); if (err) { - __mutex_unlock (&global_lock); + pthread_mutex_unlock (&global_lock); FAIL (err, 17, 0, "cannot set default gateway"); } } @@ -474,7 +474,7 @@ parse_opt (int opt, char *arg, struct argp_state *state) } #endif - __mutex_unlock (&global_lock); + pthread_mutex_unlock (&global_lock); /* Fall through to free hook. */ diff --git a/pfinet/pfinet-ops.c b/pfinet/pfinet-ops.c index 8e251020..6724575c 100644 --- a/pfinet/pfinet-ops.c +++ b/pfinet/pfinet-ops.c @@ -48,7 +48,7 @@ S_pfinet_siocgifconf (io_t port, error_t err = 0; struct ifconf ifc; - __mutex_lock (&global_lock); + pthread_mutex_lock (&global_lock); if (amount == (vm_size_t) -1) { /* Get the needed buffer length. */ @@ -57,7 +57,7 @@ S_pfinet_siocgifconf (io_t port, err = dev_ifconf ((char *) &ifc); if (err) { - __mutex_unlock (&global_lock); + pthread_mutex_unlock (&global_lock); return -err; } amount = ifc.ifc_len; @@ -88,6 +88,6 @@ S_pfinet_siocgifconf (io_t port, *ifr = ifc.ifc_buf; } - __mutex_unlock (&global_lock); + pthread_mutex_unlock (&global_lock); return err; } diff --git a/pfinet/pfinet.h b/pfinet/pfinet.h index 050ba6f7..66353956 100644 --- a/pfinet/pfinet.h +++ b/pfinet/pfinet.h @@ -28,9 +28,10 @@ #include <hurd/trivfs.h> #include <sys/mman.h> #include <sys/socket.h> +#include <pthread.h> -extern struct mutex global_lock; -extern struct mutex net_bh_lock; +extern pthread_mutex_t global_lock; +extern pthread_mutex_t net_bh_lock; struct port_bucket *pfinet_bucket; struct port_class *addrport_class; @@ -75,7 +76,7 @@ struct sock_user *make_sock_user (struct socket *, int, int, int); error_t make_sockaddr_port (struct socket *, int, mach_port_t *, mach_msg_type_name_t *); void init_devices (void); -any_t net_bh_worker (any_t); +void *net_bh_worker (void *); void init_time (void); void ip_rt_add (short, u_long, u_long, u_long, struct device *, u_short, u_long); diff --git a/pfinet/sched.c b/pfinet/sched.c index 37e4ecbd..89927741 100644 --- a/pfinet/sched.c +++ b/pfinet/sched.c @@ -23,9 +23,9 @@ #include <linux/sched.h> #include <linux/interrupt.h> -struct mutex global_lock = MUTEX_INITIALIZER; -struct mutex net_bh_lock = MUTEX_INITIALIZER; -struct condition net_bh_wakeup = CONDITION_INITIALIZER; +pthread_mutex_t global_lock = PTHREAD_MUTEX_INITIALIZER; +pthread_mutex_t net_bh_lock = PTHREAD_MUTEX_INITIALIZER; +pthread_cond_t net_bh_wakeup = PTHREAD_COND_INITIALIZER; struct task_struct current_contents; /* zeros are right default values */ @@ -55,16 +55,16 @@ sock_wake_async (struct socket *sock, int how) queue, or dropped, without synchronizing with RPC service threads. (The RPC service threads lock out the running of net_bh, but not the queuing/dropping of packets in netif_rx.) 
*/ -any_t -net_bh_worker (any_t arg) +void * +net_bh_worker (void *arg) { - __mutex_lock (&net_bh_lock); + pthread_mutex_lock (&net_bh_lock); while (1) { - condition_wait (&net_bh_wakeup, &net_bh_lock); - __mutex_lock (&global_lock); + pthread_cond_wait (&net_bh_wakeup, &net_bh_lock); + pthread_mutex_lock (&global_lock); net_bh (); - __mutex_unlock (&global_lock); + pthread_mutex_unlock (&global_lock); } /*NOTREACHED*/ return 0; diff --git a/pfinet/socket-ops.c b/pfinet/socket-ops.c index b4172dc4..3f8b7fbc 100644 --- a/pfinet/socket-ops.c +++ b/pfinet/socket-ops.c @@ -60,7 +60,7 @@ S_socket_create (struct trivfs_protid *master, if (protocol < 0) return EPROTONOSUPPORT; - __mutex_lock (&global_lock); + pthread_mutex_lock (&global_lock); become_task_protid (master); @@ -97,7 +97,7 @@ S_socket_create (struct trivfs_protid *master, ports_port_deref (user); } - __mutex_unlock (&global_lock); + pthread_mutex_unlock (&global_lock); return err; } @@ -112,10 +112,10 @@ S_socket_listen (struct sock_user *user, int queue_limit) if (!user) return EOPNOTSUPP; - __mutex_lock (&global_lock); + pthread_mutex_lock (&global_lock); become_task (user); err = - (*user->sock->ops->listen) (user->sock, queue_limit); - __mutex_unlock (&global_lock); + pthread_mutex_unlock (&global_lock); return err; } @@ -136,7 +136,7 @@ S_socket_accept (struct sock_user *user, sock = user->sock; - __mutex_lock (&global_lock); + pthread_mutex_lock (&global_lock); become_task (user); @@ -169,7 +169,7 @@ S_socket_accept (struct sock_user *user, sock_release (newsock); } - __mutex_unlock (&global_lock); + pthread_mutex_unlock (&global_lock); return err; } @@ -186,14 +186,14 @@ S_socket_connect (struct sock_user *user, sock = user->sock; - __mutex_lock (&global_lock); + pthread_mutex_lock (&global_lock); become_task (user); err = - (*sock->ops->connect) (sock, &addr->address, addr->address.sa_len, sock->flags); - __mutex_unlock (&global_lock); + pthread_mutex_unlock (&global_lock); /* MiG should do this for us, but it doesn't. */ if (!err) @@ -213,11 +213,11 @@ S_socket_bind (struct sock_user *user, if (! addr) return EADDRNOTAVAIL; - __mutex_lock (&global_lock); + pthread_mutex_lock (&global_lock); become_task (user); err = - (*user->sock->ops->bind) (user->sock, &addr->address, addr->address.sa_len); - __mutex_unlock (&global_lock); + pthread_mutex_unlock (&global_lock); /* MiG should do this for us, but it doesn't. */ if (!err) @@ -234,10 +234,10 @@ S_socket_name (struct sock_user *user, if (!user) return EOPNOTSUPP; - __mutex_lock (&global_lock); + pthread_mutex_lock (&global_lock); become_task (user); make_sockaddr_port (user->sock, 0, addr_port, addr_port_name); - __mutex_unlock (&global_lock); + pthread_mutex_unlock (&global_lock); return 0; } @@ -251,10 +251,10 @@ S_socket_peername (struct sock_user *user, if (!user) return EOPNOTSUPP; - __mutex_lock (&global_lock); + pthread_mutex_lock (&global_lock); become_task (user); err = make_sockaddr_port (user->sock, 1, addr_port, addr_port_name); - __mutex_unlock (&global_lock); + pthread_mutex_unlock (&global_lock); return err; } @@ -268,7 +268,7 @@ S_socket_connect2 (struct sock_user *user1, if (!user1 || !user2) return EOPNOTSUPP; - __mutex_lock (&global_lock); + pthread_mutex_lock (&global_lock); become_task (user1); @@ -280,7 +280,7 @@ S_socket_connect2 (struct sock_user *user1, else err = - (*user1->sock->ops->socketpair) (user1->sock, user2->sock); - __mutex_unlock (&global_lock); + pthread_mutex_unlock (&global_lock); /* MiG should do this for us, but it doesn't. 
*/ if (!err) @@ -362,10 +362,10 @@ S_socket_shutdown (struct sock_user *user, if (!user) return EOPNOTSUPP; - __mutex_lock (&global_lock); + pthread_mutex_lock (&global_lock); become_task (user); err = - (*user->sock->ops->shutdown) (user->sock, direction); - __mutex_unlock (&global_lock); + pthread_mutex_unlock (&global_lock); return err; } @@ -382,7 +382,7 @@ S_socket_getopt (struct sock_user *user, if (! user) return EOPNOTSUPP; - __mutex_lock (&global_lock); + pthread_mutex_lock (&global_lock); become_task (user); int len = *datalen; @@ -391,7 +391,7 @@ S_socket_getopt (struct sock_user *user, (user->sock, level, option, *data, &len); *datalen = len; - __mutex_unlock (&global_lock); + pthread_mutex_unlock (&global_lock); /* XXX option data not properly typed, needs byte-swapping for netmsgserver. Most options are ints, some like IP_OPTIONS are bytesex-neutral. */ @@ -414,14 +414,14 @@ S_socket_setopt (struct sock_user *user, /* XXX option data not properly typed, needs byte-swapping for netmsgserver. Most options are ints, some like IP_OPTIONS are bytesex-neutral. */ - __mutex_lock (&global_lock); + pthread_mutex_lock (&global_lock); become_task (user); err = - (level == SOL_SOCKET ? sock_setsockopt : *user->sock->ops->setsockopt) (user->sock, level, option, data, datalen); - __mutex_unlock (&global_lock); + pthread_mutex_unlock (&global_lock); return err; } @@ -452,12 +452,12 @@ S_socket_send (struct sock_user *user, if (nports != 0 || controllen != 0) return EINVAL; - __mutex_lock (&global_lock); + pthread_mutex_lock (&global_lock); become_task (user); if (user->sock->flags & O_NONBLOCK) m.msg_flags |= MSG_DONTWAIT; sent = (*user->sock->ops->sendmsg) (user->sock, &m, datalen, 0); - __mutex_unlock (&global_lock); + pthread_mutex_unlock (&global_lock); /* MiG should do this for us, but it doesn't. */ if (addr && sent >= 0) @@ -513,12 +513,12 @@ S_socket_recv (struct sock_user *user, iov.iov_base = *data; iov.iov_len = amount; - __mutex_lock (&global_lock); + pthread_mutex_lock (&global_lock); become_task (user); if (user->sock->flags & O_NONBLOCK) flags |= MSG_DONTWAIT; err = (*user->sock->ops->recvmsg) (user->sock, &m, amount, flags, 0); - __mutex_unlock (&global_lock); + pthread_mutex_unlock (&global_lock); if (err < 0) err = -err; diff --git a/pfinet/socket.c b/pfinet/socket.c index d56c1794..23a2dd91 100644 --- a/pfinet/socket.c +++ b/pfinet/socket.c @@ -43,13 +43,13 @@ sock_alloc (void) { static ino_t nextino; /* locked by global_lock */ struct socket *sock; - struct condition *c; + pthread_cond_t *c; - sock = malloc (sizeof *sock + sizeof (struct condition)); + sock = malloc (sizeof *sock + sizeof (pthread_cond_t)); if (!sock) return 0; c = (void *) &sock[1]; - condition_init (c); + pthread_cond_init (c, NULL); bzero (sock, sizeof *sock); sock->state = SS_UNCONNECTED; sock->identity = MACH_PORT_NULL; @@ -120,7 +120,7 @@ clean_socketport (void *arg) { struct sock_user *const user = arg; - __mutex_lock (&global_lock); + pthread_mutex_lock (&global_lock); sock_release (user->sock); - __mutex_unlock (&global_lock); + pthread_mutex_unlock (&global_lock); } diff --git a/pfinet/timer-emul.c b/pfinet/timer-emul.c index 6eb20bc4..5a503597 100644 --- a/pfinet/timer-emul.c +++ b/pfinet/timer-emul.c @@ -18,6 +18,9 @@ along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111, USA. 
*/ +/* Do not include glue-include/linux/errno.h */ +#define _HACK_ERRNO_H + #include <linux/timer.h> #include <asm/system.h> #include <linux/sched.h> @@ -31,8 +34,8 @@ volatile struct mapped_time_value *mapped_time; struct timer_list *timers; thread_t timer_thread = 0; -static int -timer_function (int this_is_a_pointless_variable_with_a_rather_long_name) +static void * +timer_function (void *this_is_a_pointless_variable_with_a_rather_long_name) { mach_port_t recv; int wait = 0; @@ -41,7 +44,7 @@ timer_function (int this_is_a_pointless_variable_with_a_rather_long_name) timer_thread = mach_thread_self (); - __mutex_lock (&global_lock); + pthread_mutex_lock (&global_lock); while (1) { int jiff = jiffies; @@ -53,13 +56,13 @@ timer_function (int this_is_a_pointless_variable_with_a_rather_long_name) else wait = ((timers->expires - jiff) * 1000) / HZ; - __mutex_unlock (&global_lock); + pthread_mutex_unlock (&global_lock); mach_msg (NULL, (MACH_RCV_MSG | MACH_RCV_INTERRUPT | (wait == -1 ? 0 : MACH_RCV_TIMEOUT)), 0, 0, recv, wait, MACH_PORT_NULL); - __mutex_lock (&global_lock); + pthread_mutex_lock (&global_lock); while (timers->expires < jiffies) { @@ -78,7 +81,7 @@ timer_function (int this_is_a_pointless_variable_with_a_rather_long_name) } } - return 0; + return NULL; } @@ -157,6 +160,7 @@ init_time () { error_t err; struct timeval tp; + pthread_t thread; err = maptime_map (0, 0, &mapped_time); if (err) @@ -167,5 +171,12 @@ init_time () root_jiffies = (long long) tp.tv_sec * HZ + ((long long) tp.tv_usec * HZ) / 1000000; - cthread_detach (cthread_fork ((cthread_fn_t) timer_function, 0)); + err = pthread_create (&thread, NULL, timer_function, NULL); + if (!err) + pthread_detach (thread); + else + { + errno = err; + perror ("pthread_create"); + } } diff --git a/pfinet/tunnel.c b/pfinet/tunnel.c index 6f9e1498..cf907f59 100644 --- a/pfinet/tunnel.c +++ b/pfinet/tunnel.c @@ -21,7 +21,7 @@ #include "pfinet.h" #include <hurd.h> -#include <cthreads.h> +#include <pthread.h> #include <fcntl.h> #include <device/device.h> #include <device/net_status.h> @@ -47,9 +47,9 @@ struct tunnel_device file_t underlying; struct iouser *user; struct sk_buff_head xq; /* Transmit queue. */ - struct condition wait; /* For read and select. */ - struct condition select_alert; /* For read and select. */ - struct mutex lock; /* For read and select. */ + pthread_cond_t wait; /* For read and select. */ + pthread_cond_t select_alert; /* For read and select. */ + pthread_mutex_t lock; /* For read and select. */ int read_blocked; /* For read and select. */ struct device dev; struct net_device_stats stats; @@ -117,7 +117,7 @@ tunnel_xmit (struct sk_buff *skb, struct device *dev) assert (tdev); - __mutex_lock (&tdev->lock); + pthread_mutex_lock (&tdev->lock); /* Avoid unlimited growth. 
*/ if (skb_queue_len(&tdev->xq) > 128) @@ -134,11 +134,11 @@ tunnel_xmit (struct sk_buff *skb, struct device *dev) if (tdev->read_blocked) { tdev->read_blocked = 0; - condition_broadcast (&tdev->wait); - condition_broadcast (&tdev->select_alert); + pthread_cond_broadcast (&tdev->wait); + pthread_cond_broadcast (&tdev->select_alert); } - __mutex_unlock (&tdev->lock); + pthread_mutex_unlock (&tdev->lock); return 0; } @@ -211,9 +211,9 @@ setup_tunnel_device (char *name, struct device **device) if (err) error (2, err, "%s", tdev->dev.name); - __mutex_init (&tdev->lock); - condition_init (&tdev->wait); - condition_init (&tdev->select_alert); + pthread_mutex_init (&tdev->lock, NULL); + pthread_cond_init (&tdev->wait, NULL); + pthread_cond_init (&tdev->select_alert, NULL); /* This call adds the device to the `dev_base' chain, initializes its `ifindex' member (which matters!), @@ -294,20 +294,20 @@ trivfs_S_io_read (struct trivfs_protid *cred, tdev = (struct tunnel_device *) cred->po->cntl->hook; - __mutex_lock (&tdev->lock); + pthread_mutex_lock (&tdev->lock); while (skb_queue_len(&tdev->xq) == 0) { if (cred->po->openmodes & O_NONBLOCK) { - __mutex_unlock (&tdev->lock); + pthread_mutex_unlock (&tdev->lock); return EWOULDBLOCK; } tdev->read_blocked = 1; - if (hurd_condition_wait (&tdev->wait, &tdev->lock)) + if (pthread_hurd_cond_wait_np (&tdev->wait, &tdev->lock)) { - __mutex_unlock (&tdev->lock); + pthread_mutex_unlock (&tdev->lock); return EINTR; } /* See term/users.c for possible race? */ @@ -327,7 +327,7 @@ trivfs_S_io_read (struct trivfs_protid *cred, if (*data == MAP_FAILED) { dev_kfree_skb (skb); - __mutex_unlock (&tdev->lock); + pthread_mutex_unlock (&tdev->lock); return ENOMEM; } } @@ -340,7 +340,7 @@ trivfs_S_io_read (struct trivfs_protid *cred, /* Set atime, see term/users.c */ - __mutex_unlock (&tdev->lock); + pthread_mutex_unlock (&tdev->lock); return 0; } @@ -375,9 +375,9 @@ trivfs_S_io_write (struct trivfs_protid *cred, tdev = (struct tunnel_device *) cred->po->cntl->hook; - __mutex_lock (&tdev->lock); + pthread_mutex_lock (&tdev->lock); - __mutex_lock (&net_bh_lock); + pthread_mutex_lock (&net_bh_lock); skb = alloc_skb (datalen, GFP_ATOMIC); skb->len = datalen; skb->dev = &tdev->dev; @@ -388,11 +388,11 @@ trivfs_S_io_write (struct trivfs_protid *cred, skb->mac.raw = skb->data; skb->protocol = htons (ETH_P_IP); netif_rx (skb); - __mutex_unlock (&net_bh_lock); + pthread_mutex_unlock (&net_bh_lock); *amount = datalen; - __mutex_unlock (&tdev->lock); + pthread_mutex_unlock (&tdev->lock); return 0; } @@ -418,7 +418,7 @@ trivfs_S_io_readable (struct trivfs_protid *cred, tdev = (struct tunnel_device *) cred->po->cntl->hook; - __mutex_lock (&tdev->lock); + pthread_mutex_lock (&tdev->lock); /* XXX: Now return the length of the next entry in the queue. 
From the BSD manual: @@ -442,7 +442,7 @@ trivfs_S_io_readable (struct trivfs_protid *cred, else *amount = 0; - __mutex_unlock (&tdev->lock); + pthread_mutex_unlock (&tdev->lock); return 0; } @@ -474,23 +474,23 @@ trivfs_S_io_select (struct trivfs_protid *cred, if (*type == 0) return 0; - __mutex_lock (&tdev->lock); + pthread_mutex_lock (&tdev->lock); while (1) { if (skb_queue_len (&tdev->xq) != 0) { *type = SELECT_READ; - __mutex_unlock (&tdev->lock); + pthread_mutex_unlock (&tdev->lock); return 0; } ports_interrupt_self_on_port_death (cred, reply); tdev->read_blocked = 1; - if (hurd_condition_wait (&tdev->select_alert, &tdev->lock)) + if (pthread_hurd_cond_wait_np (&tdev->select_alert, &tdev->lock)) { *type = 0; - __mutex_unlock (&tdev->lock); + pthread_mutex_unlock (&tdev->lock); return EINTR; } } |
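ethernet_initialize, main and init_time all replace `cthread_detach (cthread_fork (...))` with the same pthread_create/pthread_detach sequence. A standalone sketch of that idiom (`spawn_detached` and `worker` are hypothetical names, not functions from the diff):

```c
#include <pthread.h>
#include <errno.h>
#include <stdio.h>

static void *
worker (void *arg)
{
  /* ... thread body ... */
  return NULL;
}

static void
spawn_detached (void)
{
  pthread_t thread;
  int err = pthread_create (&thread, NULL, worker, NULL);

  if (!err)
    pthread_detach (thread);
  else
    {
      /* pthread_create returns an error number instead of setting
         errno, so copy it over before calling perror.  */
      errno = err;
      perror ("pthread_create");
    }
}
```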
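The wakeup in glue-include/linux/interrupt.h and the worker loop in sched.c form a condition-variable pair: mark_bh broadcasts `net_bh_wakeup`, and net_bh_worker sleeps on it while holding `net_bh_lock`, taking `global_lock` only to run the bottom half. Condensed from the diff (the body of net_bh is elided):

```c
#include <pthread.h>

pthread_mutex_t global_lock = PTHREAD_MUTEX_INITIALIZER;
pthread_mutex_t net_bh_lock = PTHREAD_MUTEX_INITIALIZER;
pthread_cond_t net_bh_wakeup = PTHREAD_COND_INITIALIZER;

extern void net_bh (void);      /* Linux bottom half, defined elsewhere */

/* Producer side (mark_bh, called after packets are queued under
   net_bh_lock): wake the worker thread.  */
static void
wake_net_bh (void)
{
  pthread_cond_broadcast (&net_bh_wakeup);
}

/* Consumer side (sched.c::net_bh_worker): pthread_cond_wait atomically
   drops net_bh_lock while sleeping and reacquires it before returning.  */
void *
net_bh_worker (void *arg)
{
  pthread_mutex_lock (&net_bh_lock);
  while (1)
    {
      pthread_cond_wait (&net_bh_wakeup, &net_bh_lock);
      pthread_mutex_lock (&global_lock);
      net_bh ();
      pthread_mutex_unlock (&global_lock);
    }
  /* NOTREACHED */
  return NULL;
}
```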
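Where the old code used hurd_condition_wait, the interruptible waits now go through pthread_hurd_cond_wait_np, the Hurd-specific (nonportable, hence `_np`) libpthread call that behaves like pthread_cond_wait but returns nonzero if the calling thread was cancelled; the handlers map that to EINTR. A sketch of the pattern from tunnel.c's read path, assuming the tunnel_device fields shown in the diff (`wait_for_packet` is a hypothetical wrapper, not a function in the diff):

```c
/* Assumes struct tunnel_device from tunnel.c with the fields in the
   diff: pthread_mutex_t lock, pthread_cond_t wait, the read_blocked
   flag and the xq packet queue.  The caller holds tdev->lock.  */
static error_t
wait_for_packet (struct tunnel_device *tdev)
{
  while (skb_queue_len (&tdev->xq) == 0)
    {
      tdev->read_blocked = 1;
      if (pthread_hurd_cond_wait_np (&tdev->wait, &tdev->lock))
        /* Cancelled (e.g. the caller's reply port died): report EINTR.  */
        return EINTR;
    }
  return 0;
}
```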
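The wait-queue glue keeps its trick of storing a condition pointer in the `struct wait_queue *` head slot; only the type changes, and the condition now needs an explicit pthread_cond_init. A sketch of the lazy-allocation step in interruptible_sleep_on (`get_wait_cond` is a hypothetical helper factored out for illustration):

```c
#include <pthread.h>
#include <stdlib.h>
#include <assert.h>

struct wait_queue;   /* opaque here; see glue-include/linux/wait.h */

/* The "head of the wait queue" slot really holds a pthread_cond_t *,
   malloc'd and initialized the first time someone sleeps on it.  */
static inline pthread_cond_t *
get_wait_cond (struct wait_queue **p)
{
  pthread_cond_t **condp = (void *) p;

  if (*condp == NULL)
    {
      pthread_cond_t *c = malloc (sizeof *c);
      assert (c);
      pthread_cond_init (c, NULL);
      *condp = c;
    }
  return *condp;
}
```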