-rw-r--r--  libmachdev/Makefile            31
-rw-r--r--  libmachdev/dev_hdr.h          148
-rw-r--r--  libmachdev/device.defs        204
-rw-r--r--  libmachdev/device_emul.h       64
-rw-r--r--  libmachdev/device_reply.defs    1
-rw-r--r--  libmachdev/ds_routines.c      802
-rw-r--r--  libmachdev/ds_routines.h       55
-rw-r--r--  libmachdev/if_ether.h          87
-rw-r--r--  libmachdev/io_req.h           135
-rw-r--r--  libmachdev/mach.defs          779
-rw-r--r--  libmachdev/net.c              679
-rw-r--r--  libmachdev/notify.defs          1
-rw-r--r--  libmachdev/queue.c            131
-rw-r--r--  libmachdev/queue.h            370
-rw-r--r--  libmachdev/trivfs_server.c    154
-rw-r--r--  libmachdev/util.h              33
-rw-r--r--  libmachdev/vm_param.h           7
17 files changed, 3681 insertions, 0 deletions
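
For context, the new library lets a user-space driver translator serve the Mach device RPC interface (device.defs below, handled by the ds_* routines in ds_routines.c) on a port of its own. The following is a minimal client-side sketch, not part of the commit: it assumes an illustrative translator node /dev/eth0 and the standard Mach device_open/device_close user stubs; the node path and device name are hypothetical.

/* Hypothetical client of a libmachdev-based translator: look up the
   translator's node and use it as the device master port.  The server
   side of device_open here is ds_device_open in ds_routines.c.  */
#include <hurd.h>
#include <mach.h>
#include <device/device.h>
#include <device/device_types.h>
#include <errno.h>
#include <error.h>

int
main (void)
{
  /* The translator node path is an assumption for illustration.  */
  mach_port_t master = file_name_lookup ("/dev/eth0", 0, 0);
  device_t dev;
  kern_return_t err;

  if (master == MACH_PORT_NULL)
    error (1, errno, "file_name_lookup");

  err = device_open (master, D_READ | D_WRITE, "eth0", &dev);
  if (err != KERN_SUCCESS)
    error (1, err, "device_open");

  /* ... device_read / device_write / device_set_filter as with
     kernel devices ...  */

  device_close (dev);
  mach_port_deallocate (mach_task_self (), dev);
  return 0;
}
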
diff --git a/libmachdev/Makefile b/libmachdev/Makefile new file mode 100644 index 00000000..daf13a82 --- /dev/null +++ b/libmachdev/Makefile @@ -0,0 +1,31 @@ +# Copyright (C) 2009 Free Software Foundation, Inc. +# This file is part of the GNU Hurd. +# +# The GNU Hurd is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2, or (at your option) +# any later version. +# +# The GNU Hurd is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with the GNU Hurd; see the file COPYING. If not, write to +# the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. + +dir := libmachdev +makemode := library +libname = libmachdev + +SRCS = deviceUser.c machUser.c net.c ds_routines.c queue.c trivfs_server.c \ + device_replyUser.c deviceServer.c notifyServer.c +LCLHDRS = dev_hdr.h device_emul.h ds_routines.h vm_param.h \ + util.h queue.h io_req.h if_ether.h +HURDLIBS = ports threads trivfs +OBJS = $(SRCS:.c=.o) $(MIGSTUBS) + +include ../Makeconf + +CFLAGS += diff --git a/libmachdev/dev_hdr.h b/libmachdev/dev_hdr.h new file mode 100644 index 00000000..db44f712 --- /dev/null +++ b/libmachdev/dev_hdr.h @@ -0,0 +1,148 @@ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + * Author: David B. Golub, Carnegie Mellon University + * Date: 3/89 + */ + +/* + * Mach device emulation definitions (i386at version). + * + * Copyright (c) 1996 The University of Utah and + * the Computer Systems Laboratory at the University of Utah (CSL). + * All rights reserved. + * + * Permission to use, copy, modify and distribute this software is hereby + * granted provided that (1) source code retains these copyright, permission, + * and disclaimer notices, and (2) redistributions including binaries + * reproduce the notices in supporting documentation, and (3) all advertising + * materials mentioning features or use of this software display the following + * acknowledgement: ``This product includes software developed by the + * Computer Systems Laboratory at the University of Utah.'' + * + * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS + * IS" CONDITION. 
THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF + * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * CSL requests users of this software to return to csl-dist@cs.utah.edu any + * improvements that they make and grant CSL redistribution rights. + * + * Author: Shantanu Goel, University of Utah CSL + */ + +#ifndef _DEVICE_DEV_HDR_H_ +#define _DEVICE_DEV_HDR_H_ + +#include <mach.h> +#include <hurd.h> +#include <hurd/ports.h> +#include <cthreads.h> + +#include "device_emul.h" + +/* + * Operations list for major device types. + */ +struct dev_ops { + char * d_name; /* name for major device */ + int (*d_open)(); /* open device */ + int (*d_close)(); /* close device */ + int (*d_read)(); /* read */ + int (*d_write)(); /* write */ + int (*d_getstat)(); /* get status/control */ + int (*d_setstat)(); /* set status/control */ + vm_offset_t (*d_mmap)(); /* map memory */ + int (*d_async_in)();/* asynchronous input setup */ + int (*d_reset)(); /* reset device */ + int (*d_port_death)(); + /* clean up reply ports */ + int d_subdev; /* number of sub-devices per + unit */ + int (*d_dev_info)(); /* driver info for kernel */ +}; +typedef struct dev_ops *dev_ops_t; + +/* This structure is associated with each open device port. + * The port representing the device points to this structure. */ +struct emul_device +{ + struct device_emulation_ops *emul_ops; + void *emul_data; +}; + +typedef struct emul_device *emul_device_t; + +#define DEVICE_NULL ((device_t) 0) + +/* + * Generic device header. May be allocated with the device, + * or built when the device is opened. + */ +struct mach_device { + struct port_info port; + struct emul_device dev; /* the real device structure */ + struct mutex lock; + short state; /* state: */ +#define DEV_STATE_INIT 0 /* not open */ +#define DEV_STATE_OPENING 1 /* being opened */ +#define DEV_STATE_OPEN 2 /* open */ +#define DEV_STATE_CLOSING 3 /* being closed */ + short flag; /* random flags: */ +#define D_EXCL_OPEN 0x0001 /* open only once */ + short open_count; /* number of times open */ + short io_in_progress; /* number of IOs in progress */ + boolean_t io_wait; /* someone waiting for IO to finish */ + + int dev_number; /* device number */ + int bsize; /* replacement for DEV_BSIZE */ + struct dev_ops *dev_ops; /* and operations vector */ +}; +typedef struct mach_device *mach_device_t; +#define MACH_DEVICE_NULL ((mach_device_t)0) + +/* + * To find and remove device entries + */ +mach_device_t device_lookup(char *); /* by name */ + +/* + * To find and remove port-to-device mappings + */ +void dev_port_enter(mach_device_t); +void dev_port_remove(mach_device_t); + +/* + * To call a routine on each device + */ +boolean_t dev_map(boolean_t (*)(), mach_port_t); + +/* + * To lock and unlock state and open-count + */ +#define device_lock(device) mutex_lock(&(device)->lock) +#define device_unlock(device) mutex_unlock(&(device)->lock) + +#endif /* _DEVICE_DEV_HDR_H_ */ diff --git a/libmachdev/device.defs b/libmachdev/device.defs new file mode 100644 index 00000000..6a73853a --- /dev/null +++ b/libmachdev/device.defs @@ -0,0 +1,204 @@ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989 Carnegie Mellon University + * All Rights Reserved. 
+ * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + * File: device/device.defs + * Author: Douglas Orr + * Feb 10, 1988 + * Abstract: + * Mach device support. Mach devices are accessed through + * block and character device interfaces to the kernel. + */ + +#ifdef MACH_KERNEL +simport <kern/compat_xxx_defs.h>; /* for obsolete routines */ +#endif + +subsystem +#if KERNEL_SERVER + KernelServer +#endif + device 2800; + +#include <mach/std_types.defs> +#include <mach/mach_types.defs> +#include <device/device_types.defs> + +serverprefix ds_; + +type pci_config_data_t = array[*:4] of char; +type reply_port_t = MACH_MSG_TYPE_MAKE_SEND_ONCE | polymorphic + ctype: mach_port_t; + +routine device_open( + master_port : mach_port_t; + sreplyport reply_port : reply_port_t; + mode : dev_mode_t; + name : dev_name_t; + out device : device_t + ); + +routine device_close( + device : device_t + ); + +routine device_write( + device : device_t; + sreplyport reply_port : reply_port_t; + in mode : dev_mode_t; + in recnum : recnum_t; + in data : io_buf_ptr_t; + out bytes_written : int + ); + +routine device_write_inband( + device : device_t; + sreplyport reply_port : reply_port_t; + in mode : dev_mode_t; + in recnum : recnum_t; + in data : io_buf_ptr_inband_t; + out bytes_written : int + ); + +routine device_read( + device : device_t; + sreplyport reply_port : reply_port_t; + in mode : dev_mode_t; + in recnum : recnum_t; + in bytes_wanted : int; + out data : io_buf_ptr_t + ); + +routine device_read_inband( + device : device_t; + sreplyport reply_port : reply_port_t; + in mode : dev_mode_t; + in recnum : recnum_t; + in bytes_wanted : int; + out data : io_buf_ptr_inband_t + ); + +/* obsolete */ +routine xxx_device_set_status( + device : device_t; + in flavor : dev_flavor_t; + in status : dev_status_t, IsLong + ); + +/* obsolete */ +routine xxx_device_get_status( + device : device_t; + in flavor : dev_flavor_t; + out status : dev_status_t, IsLong + ); + +/* obsolete */ +routine xxx_device_set_filter( + device : device_t; + in receive_port : mach_port_send_t; + in priority : int; + in filter : filter_array_t, IsLong + ); + +routine device_map( + device : device_t; + in prot : vm_prot_t; + in offset : vm_offset_t; + in size : vm_size_t; + out pager : memory_object_t; + in unmap : int + ); + +routine device_set_status( + device : device_t; + in flavor : dev_flavor_t; + in status : dev_status_t + ); + +routine device_get_status( + device : device_t; + in flavor : dev_flavor_t; + out status : dev_status_t, CountInOut + ); + +routine device_set_filter( + device : device_t; + in receive_port : mach_port_send_t; + in priority : int; + in filter : 
filter_array_t + ); + +routine device_intr_notify( + master_port : mach_port_t; + in irq : int; + in id : int; + in receive_port : mach_port_send_t + ); + +/* + * Test whether IPC devices exist. + */ +routine pci_present( + master_port : mach_port_t); + +/* + * Find the specified PCI device. + */ +routine pci_find_device( + master_port : mach_port_t; + vendor : short; + device_id : short; + index : short; + out bus : char; + out device_fn : char); + +/* + * Read the configuration space of a IPC device. + */ +routine pci_read_config( + master_port : mach_port_t; + bus : char; + device_fn : char; + where : char; + bytes_wanted : int; + out result : pci_config_data_t); + +/* + * Write the configuration space of a IPC device. + */ +routine pci_write_config( + master_port : mach_port_t; + bus : char; + device_fn : char; + where : char; + data : pci_config_data_t); + +/* + * enable/disable the specified irq. + */ +routine device_irq_enable( + master_port : mach_port_t; + irq : int; + status : char); diff --git a/libmachdev/device_emul.h b/libmachdev/device_emul.h new file mode 100644 index 00000000..edcf6d07 --- /dev/null +++ b/libmachdev/device_emul.h @@ -0,0 +1,64 @@ +/* + * Mach device emulation definitions (i386at version). + * + * Copyright (c) 1996 The University of Utah and + * the Computer Systems Laboratory at the University of Utah (CSL). + * All rights reserved. + * + * Permission to use, copy, modify and distribute this software is hereby + * granted provided that (1) source code retains these copyright, permission, + * and disclaimer notices, and (2) redistributions including binaries + * reproduce the notices in supporting documentation, and (3) all advertising + * materials mentioning features or use of this software display the following + * acknowledgement: ``This product includes software developed by the + * Computer Systems Laboratory at the University of Utah.'' + * + * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS + * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF + * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * CSL requests users of this software to return to csl-dist@cs.utah.edu any + * improvements that they make and grant CSL redistribution rights. + * + * Author: Shantanu Goel, University of Utah CSL + */ + +#ifndef _I386AT_DEVICE_EMUL_H_ +#define _I386AT_DEVICE_EMUL_H_ + +#include <mach.h> + +/* Each emulation layer provides these operations. 
*/ +struct device_emulation_ops +{ + void (*init) (); + void (*reference) (void *); + void (*dealloc) (void *); + mach_port_t (*dev_to_port) (void *); + io_return_t (*open) (mach_port_t, mach_msg_type_name_t, + dev_mode_t, char *, device_t *); + io_return_t (*close) (void *); + io_return_t (*write) (void *, mach_port_t, mach_msg_type_name_t, + dev_mode_t, recnum_t, io_buf_ptr_t, unsigned, int *); + io_return_t (*write_inband) (void *, mach_port_t, mach_msg_type_name_t, + dev_mode_t, recnum_t, io_buf_ptr_inband_t, + unsigned, int *); + io_return_t (*read) (void *, mach_port_t, mach_msg_type_name_t, + dev_mode_t, recnum_t, int, io_buf_ptr_t *, unsigned *); + io_return_t (*read_inband) (void *, mach_port_t, mach_msg_type_name_t, + dev_mode_t, recnum_t, int, char *, unsigned *); + io_return_t (*set_status) (void *, dev_flavor_t, dev_status_t, + mach_msg_type_number_t); + io_return_t (*get_status) (void *, dev_flavor_t, dev_status_t, + mach_msg_type_number_t *); + io_return_t (*set_filter) (void *, mach_port_t, int, filter_t [], unsigned); + io_return_t (*map) (void *, vm_prot_t, vm_offset_t, + vm_size_t, mach_port_t *, boolean_t); + void (*no_senders) (mach_no_senders_notification_t *); + io_return_t (*write_trap) (void *, dev_mode_t, + recnum_t, vm_offset_t, vm_size_t); + io_return_t (*writev_trap) (void *, dev_mode_t, + recnum_t, io_buf_vec_t *, vm_size_t); +}; + +#endif /* _I386AT_DEVICE_EMUL_H_ */ diff --git a/libmachdev/device_reply.defs b/libmachdev/device_reply.defs new file mode 100644 index 00000000..69930311 --- /dev/null +++ b/libmachdev/device_reply.defs @@ -0,0 +1 @@ +#include <device/device_reply.defs> diff --git a/libmachdev/ds_routines.c b/libmachdev/ds_routines.c new file mode 100644 index 00000000..1cd06359 --- /dev/null +++ b/libmachdev/ds_routines.c @@ -0,0 +1,802 @@ +/* + * Mach Operating System + * Copyright (c) 1993,1991,1990,1989 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + * Author: David B. Golub, Carnegie Mellon University + * Date: 3/89 + */ + +/* + * Mach device server routines (i386at version). + * + * Copyright (c) 1996 The University of Utah and + * the Computer Systems Laboratory at the University of Utah (CSL). + * All rights reserved. 
+ * + * Permission to use, copy, modify and distribute this software is hereby + * granted provided that (1) source code retains these copyright, permission, + * and disclaimer notices, and (2) redistributions including binaries + * reproduce the notices in supporting documentation, and (3) all advertising + * materials mentioning features or use of this software display the following + * acknowledgement: ``This product includes software developed by the + * Computer Systems Laboratory at the University of Utah.'' + * + * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS + * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF + * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * CSL requests users of this software to return to csl-dist@cs.utah.edu any + * improvements that they make and grant CSL redistribution rights. + * + * Author: Shantanu Goel, University of Utah CSL + */ + +#include <stdio.h> +#include <string.h> +#include <error.h> + +#include <hurd.h> +#include <mach.h> +#include <cthreads.h> + +#include "vm_param.h" +#include "device_reply_U.h" +#include "io_req.h" +#include "dev_hdr.h" +#include "util.h" +#include "queue.h" + +struct port_bucket *port_bucket; +struct port_class *dev_class; + +extern struct device_emulation_ops linux_net_emulation_ops; + +#define NUM_EMULATION (sizeof (emulation_list) / sizeof (emulation_list[0])) + +/* List of emulations. */ +static struct device_emulation_ops *emulation_list[] = +{ + &linux_net_emulation_ops, +}; + +boolean_t is_master_device (mach_port_t port); + +static inline void +mach_device_deallocate (void *device) +{ + ports_port_deref (device); +} + +static inline void +mach_device_reference (mach_device_t device) +{ + ports_port_ref (device); +} + +static inline emul_device_t +mach_convert_port_to_device (device_t device) +{ + mach_device_t dev = ports_lookup_port (port_bucket, device, dev_class); + if (dev == NULL) + return NULL; + + return &dev->dev; +} + +static inline void * +device_to_pi (emul_device_t device) +{ + return ((void *) device) - (int) &((mach_device_t) 0)->dev; +} + +/* + * What follows is the interface for the native Mach devices. + */ + +static inline mach_port_t +mach_convert_device_to_port (mach_device_t device) +{ + if (device == NULL) + return MACH_PORT_NULL; + + // TODO I have to somehow dereference it when it is called at the first time. 
+ return ports_get_right (device); +} + +/* Implementation of device interface */ +kern_return_t +ds_xxx_device_set_status (device_t device, dev_flavor_t flavor, + dev_status_t status, size_t statu_cnt) +{ + return D_INVALID_OPERATION; +} + +kern_return_t +ds_xxx_device_get_status (device_t device, dev_flavor_t flavor, + dev_status_t status, size_t *statuscnt) +{ + return D_INVALID_OPERATION; +} + +kern_return_t +ds_xxx_device_set_filter (device_t device, mach_port_t rec, + int pri, filter_array_t filt, size_t len) +{ + return D_INVALID_OPERATION; +} + +io_return_t +ds_device_intr_notify (mach_port_t master_port, int irq, + int id, mach_port_t receive_port) +{ + return D_INVALID_OPERATION; +} + +kern_return_t +ds_pci_write_config (mach_port_t master_port, char bus, char device_fn, + char where, pci_config_data_t data, + mach_msg_type_number_t dataCnt) +{ + return D_INVALID_OPERATION; +} + +kern_return_t +ds_pci_read_config (mach_port_t master_port, char bus, char device_fn, + char where, int bytes_wanted, pci_config_data_t result, + mach_msg_type_number_t *resultCnt) +{ + return D_INVALID_OPERATION; +} + +kern_return_t +ds_pci_find_device (mach_port_t master_port, short vendor, short device_id, + short index, short *bus, char *device_fn) +{ + return D_INVALID_OPERATION; +} + +kern_return_t +ds_pci_present (mach_port_t master_port) +{ + return D_INVALID_OPERATION; +} + +kern_return_t +ds_device_irq_enable (mach_port_t master_port, + int irq, char status) +{ + return D_INVALID_OPERATION; +} + +io_return_t +ds_device_open (mach_port_t open_port, mach_port_t reply_port, + mach_msg_type_name_t reply_port_type, dev_mode_t mode, + char *name, device_t *devp) +{ + int i; + io_return_t err; + + /* Open must be called on the master device port. */ + if (!is_master_device (open_port)) + return D_INVALID_OPERATION; + + /* There must be a reply port. */ + if (! MACH_PORT_VALID (reply_port)) + { + fprintf (stderr, "ds_* invalid reply port\n"); + return MIG_NO_REPLY; + } + + /* Call each emulation's open routine to find the device. */ + for (i = 0; i < NUM_EMULATION; i++) + { + err = (*emulation_list[i]->open) (reply_port, reply_port_type, + mode, name, devp); + if (err != D_NO_SUCH_DEVICE) + break; + } + + return err; +} + +io_return_t +ds_device_close (device_t dev) +{ + emul_device_t device; + io_return_t ret; + + /* Refuse if device is dead or not completely open. */ + if (dev == MACH_PORT_NULL) + return D_NO_SUCH_DEVICE; + + device = mach_convert_port_to_device (dev); + ret = (device->emul_ops->close + ? (*device->emul_ops->close) (device->emul_data) + : D_SUCCESS); + mach_device_deallocate (device_to_pi (device)); + + ports_port_deref (device_to_pi (device)); + return ret; +} + +io_return_t +ds_device_write (device_t dev, mach_port_t reply_port, + mach_msg_type_name_t reply_port_type, dev_mode_t mode, + recnum_t recnum, io_buf_ptr_t data, unsigned int count, + int *bytes_written) +{ + emul_device_t device; + io_return_t ret; + + /* Refuse if device is dead or not completely open. */ + if (dev == MACH_PORT_NULL) + return D_NO_SUCH_DEVICE; + + if (data == 0) + return D_INVALID_SIZE; + + device = mach_convert_port_to_device (dev); + + if (! 
device->emul_ops->write) + return D_INVALID_OPERATION; + + ret = (*device->emul_ops->write) (device->emul_data, reply_port, + reply_port_type, mode, recnum, + data, count, bytes_written); + ports_port_deref (device_to_pi (device)); + + return ret; +} + +io_return_t +ds_device_write_inband (device_t dev, mach_port_t reply_port, + mach_msg_type_name_t reply_port_type, + dev_mode_t mode, recnum_t recnum, + io_buf_ptr_inband_t data, unsigned count, + int *bytes_written) +{ + emul_device_t device; + io_return_t ret; + + /* Refuse if device is dead or not completely open. */ + if (dev == MACH_PORT_NULL) + return D_NO_SUCH_DEVICE; + + if (data == 0) + return D_INVALID_SIZE; + + device = mach_convert_port_to_device (dev); + + if (! device->emul_ops->write_inband) + return D_INVALID_OPERATION; + + ret = (*device->emul_ops->write_inband) (device->emul_data, reply_port, + reply_port_type, mode, recnum, + data, count, bytes_written); + ports_port_deref (device_to_pi (device)); + + return ret; +} + +io_return_t +ds_device_read (device_t dev, mach_port_t reply_port, + mach_msg_type_name_t reply_port_type, dev_mode_t mode, + recnum_t recnum, int count, io_buf_ptr_t *data, + unsigned *bytes_read) +{ + emul_device_t device; + io_return_t ret; + + /* Refuse if device is dead or not completely open. */ + if (dev == MACH_PORT_NULL) + return D_NO_SUCH_DEVICE; + + device = mach_convert_port_to_device (dev); + + if (! device->emul_ops->read) + return D_INVALID_OPERATION; + + ret = (*device->emul_ops->read) (device->emul_data, reply_port, + reply_port_type, mode, recnum, + count, data, bytes_read); + ports_port_deref (device_to_pi (device)); + return ret; +} + +io_return_t +ds_device_read_inband (device_t dev, mach_port_t reply_port, + mach_msg_type_name_t reply_port_type, dev_mode_t mode, + recnum_t recnum, int count, char *data, + unsigned *bytes_read) +{ + emul_device_t device; + io_return_t ret; + + /* Refuse if device is dead or not completely open. */ + if (dev == MACH_PORT_NULL) + return D_NO_SUCH_DEVICE; + + device = mach_convert_port_to_device (dev); + + if (! device->emul_ops->read_inband) + return D_INVALID_OPERATION; + + ret = (*device->emul_ops->read_inband) (device->emul_data, reply_port, + reply_port_type, mode, recnum, + count, data, bytes_read); + ports_port_deref (device_to_pi (device)); + return ret; +} + +io_return_t +ds_device_set_status (device_t dev, dev_flavor_t flavor, + dev_status_t status, mach_msg_type_number_t status_count) +{ + emul_device_t device; + io_return_t ret; + + /* Refuse if device is dead or not completely open. */ + if (dev == MACH_PORT_NULL) + return D_NO_SUCH_DEVICE; + + device = mach_convert_port_to_device (dev); + + if (! device->emul_ops->set_status) + return D_INVALID_OPERATION; + + ret = (*device->emul_ops->set_status) (device->emul_data, flavor, + status, status_count); + ports_port_deref (device_to_pi (device)); + return ret; +} + +io_return_t +ds_device_get_status (device_t dev, dev_flavor_t flavor, dev_status_t status, + mach_msg_type_number_t *status_count) +{ + emul_device_t device; + io_return_t ret; + + /* Refuse if device is dead or not completely open. */ + if (dev == MACH_PORT_NULL) + return D_NO_SUCH_DEVICE; + + device = mach_convert_port_to_device (dev); + + if (! 
device->emul_ops->get_status) + return D_INVALID_OPERATION; + + ret = (*device->emul_ops->get_status) (device->emul_data, flavor, + status, status_count); + ports_port_deref (device_to_pi (device)); + return ret; +} + +io_return_t +ds_device_set_filter (device_t dev, mach_port_t receive_port, int priority, + filter_t *filter, unsigned filter_count) +{ + emul_device_t device; + io_return_t ret; + + /* Refuse if device is dead or not completely open. */ + if (dev == MACH_PORT_NULL) + return D_NO_SUCH_DEVICE; + + device = mach_convert_port_to_device (dev); + + if (! device->emul_ops->set_filter) + return D_INVALID_OPERATION; + + ret = (*device->emul_ops->set_filter) (device->emul_data, receive_port, + priority, filter, filter_count); + ports_port_deref (device_to_pi (device)); + return ret; +} + +io_return_t +ds_device_map (device_t dev, vm_prot_t prot, vm_offset_t offset, + vm_size_t size, mach_port_t *pager, boolean_t unmap) +{ + /* Refuse if device is dead or not completely open. */ + if (dev == MACH_PORT_NULL) + return D_NO_SUCH_DEVICE; + + return D_INVALID_OPERATION; +} + +boolean_t +ds_open_done(ior) + register io_req_t ior; +{ + kern_return_t result; + register mach_device_t device; + + device = ior->io_device; + result = ior->io_error; + + if (result != D_SUCCESS) { + /* + * Open failed. Deallocate port and device. + */ +// dev_port_remove(device); +// ipc_port_dealloc_kernel(device->port); + + device_lock(device); + device->state = DEV_STATE_INIT; + if (device->io_wait) { + device->io_wait = FALSE; +// thread_wakeup((event_t)device); + } + device_unlock(device); + +// mach_device_deallocate(device); + device = MACH_DEVICE_NULL; + } + else { + /* + * Open succeeded. + */ + device_lock(device); + device->state = DEV_STATE_OPEN; + device->open_count = 1; + if (device->io_wait) { + device->io_wait = FALSE; +// thread_wakeup((event_t)device); + } + device_unlock(device); + + /* donate device reference to get port */ + } + /* + * Must explicitly convert device to port, since + * device_reply interface is built as 'user' side + * (thus cannot get translation). + */ + if (MACH_PORT_VALID(ior->io_reply_port)) { + (void) ds_device_open_reply(ior->io_reply_port, + ior->io_reply_port_type, + result, + mach_convert_device_to_port(device)); + } +// else +// mach_device_deallocate(device); + + mach_device_deallocate (device); + return (TRUE); +} + +boolean_t ds_read_done(ior) + io_req_t ior; +{ + vm_offset_t start_data, end_data; + vm_offset_t start_sent, end_sent; + register vm_size_t size_read; + + if (ior->io_error) + size_read = 0; + else + size_read = ior->io_count - ior->io_residual; + + start_data = (vm_offset_t)ior->io_data; + end_data = start_data + size_read; + + start_sent = (ior->io_op & IO_INBAND) ? start_data : + trunc_page(start_data); + end_sent = (ior->io_op & IO_INBAND) ? + start_data + ior->io_alloc_size : round_page(end_data); + + /* + * Zero memory that the device did not fill. + */ + if (start_sent < start_data) + memset((char *)start_sent, 0, start_data - start_sent); + if (end_sent > end_data) + memset((char *)end_data, 0, end_sent - end_data); + + + /* + * Touch the data being returned, to mark it dirty. + * If the pages were filled by DMA, the pmap module + * may think that they are clean. + */ + { + register vm_offset_t touch; + register int c; + + for (touch = start_sent; touch < end_sent; touch += PAGE_SIZE) { + c = *(volatile char *)touch; + *(volatile char *)touch = c; + } + } + + /* + * Send the data to the reply port - this + * unwires and deallocates it. 
+ */ + if (ior->io_op & IO_INBAND) { + (void)ds_device_read_reply_inband(ior->io_reply_port, + ior->io_reply_port_type, + ior->io_error, + (char *) start_data, + size_read); + } else { +// vm_map_copy_t copy; +// kern_return_t kr; +// +// kr = vm_map_copyin_page_list(kernel_map, start_data, +// size_read, TRUE, TRUE, +// ©, FALSE); +// +// if (kr != KERN_SUCCESS) +// panic("read_done: vm_map_copyin_page_list failed"); + + (void)ds_device_read_reply(ior->io_reply_port, + ior->io_reply_port_type, + ior->io_error, + (char *) start_data, + size_read); + } + + /* + * Free any memory that was allocated but not sent. + */ + if (ior->io_count != 0) { + if (ior->io_op & IO_INBAND) { + if (ior->io_alloc_size > 0) + free (ior->io_data); +// zfree(io_inband_zone, (vm_offset_t)ior->io_data); + } else { + register vm_offset_t end_alloc; + + end_alloc = start_sent + round_page(ior->io_alloc_size); + if (end_alloc > end_sent) + vm_deallocate(mach_task_self (), + end_sent, + end_alloc - end_sent); + } + } + + mach_device_deallocate(ior->io_device); + + return (TRUE); +} + +/* + * Allocate wired-down memory for device read. + */ +kern_return_t device_read_alloc(ior, size) + register io_req_t ior; + register vm_size_t size; +{ + vm_offset_t addr; + kern_return_t kr; + + /* + * Nothing to do if no data. + */ + if (ior->io_count == 0) + return (KERN_SUCCESS); + + if (ior->io_op & IO_INBAND) { + ior->io_data = (io_buf_ptr_t) malloc(sizeof(io_buf_ptr_inband_t)); + ior->io_alloc_size = sizeof(io_buf_ptr_inband_t); + } else { + size = round_page(size); + kr = vm_allocate (mach_task_self (), &addr, size, TRUE); +// kr = kmem_alloc(kernel_map, &addr, size); + if (kr != KERN_SUCCESS) + return (kr); + + ior->io_data = (io_buf_ptr_t) addr; + ior->io_alloc_size = size; + } + + return (KERN_SUCCESS); +} + +struct thread_wait +{ + struct condition cond; + struct mutex mutex; + int v; +}; + +static struct thread_wait io_done_wait; + +void thread_wait_init (struct thread_wait *t) +{ + mutex_init (&t->mutex); + condition_init (&t->cond); + t->v = 0; +} + +void thread_block (struct thread_wait *t) +{ + mutex_lock (&t->mutex); + t->v = 1; + while (t->v) + hurd_condition_wait (&t->cond, &t->mutex); + mutex_unlock (&t->mutex); +} + +void thread_wakeup (struct thread_wait *t) +{ + mutex_lock (&t->mutex); + t->v = 0; + condition_signal (&t->cond); + mutex_unlock (&t->mutex); +} + +queue_head_t io_done_list; +struct mutex io_done_list_lock; + +#define splio splsched /* XXX must block ALL io devices */ + +void iodone(ior) + register io_req_t ior; +{ + /* + * If this ior was loaned to us, return it directly. + */ + if (ior->io_op & IO_LOANED) { + (*ior->io_done)(ior); + return; + } + /* + * If !IO_CALL, some thread is waiting for this. Must lock + * structure to interlock correctly with iowait(). Else can + * toss on queue for io_done thread to call completion. + */ + // TODO need a lock here? 
+// s = splio(); + if ((ior->io_op & IO_CALL) == 0) { + ior_lock(ior); + ior->io_op |= IO_DONE; + ior->io_op &= ~IO_WANTED; + ior_unlock(ior); +// thread_wakeup((event_t)ior); + } else { + ior->io_op |= IO_DONE; + mutex_lock (&io_done_list_lock); + enqueue_tail(&io_done_list, (queue_entry_t)ior); + thread_wakeup (&io_done_wait); +// thread_wakeup((event_t)&io_done_list); + mutex_unlock (&io_done_list_lock); + } +// splx(s); +} + +void wakeup_io_done_thread () +{ + thread_wakeup (&io_done_wait); +} + +void io_done_thread_continue() +{ + for (;;) { + extern void free_skbuffs (); + register io_req_t ior; + + free_skbuffs (); + mutex_lock(&io_done_list_lock); + while ((ior = (io_req_t)dequeue_head(&io_done_list)) != 0) { + mutex_unlock(&io_done_list_lock); + + if ((*ior->io_done)(ior)) { + /* + * IO done - free io_req_elt + */ + io_req_free(ior); + } + /* else routine has re-queued it somewhere */ + + mutex_lock(&io_done_list_lock); + } + +// assert_wait(&io_done_list, FALSE); + mutex_unlock(&io_done_list_lock); +// counter(c_io_done_thread_block++); +// thread_block(io_done_thread_continue); + thread_block (&io_done_wait); + } +} + + +void +wire_thread() +{ + kern_return_t kr; + mach_port_t priv_host_port; + + kr = get_privileged_ports (&priv_host_port, NULL); + if (kr != KERN_SUCCESS) + panic("get privileged port: %d", kr); + + kr = thread_wire(priv_host_port, + mach_thread_self(), + TRUE); + if (kr != KERN_SUCCESS) + panic("wire_thread: %d", kr); +} + +void +thread_set_own_priority (int priority) +{ + kern_return_t kr; + mach_port_t priv_host_port; + mach_port_t pset, psetcntl; + + kr = get_privileged_ports (&priv_host_port, NULL); + if (kr != KERN_SUCCESS) + panic("get privileged port: %d", kr); + + kr = thread_get_assignment (mach_thread_self (), &pset); + if (kr != KERN_SUCCESS) + panic("thread get assignment: %d", kr); + kr = host_processor_set_priv (priv_host_port, pset, &psetcntl); + if (kr != KERN_SUCCESS) + panic("processor set priv: %d", kr); + kr = thread_max_priority (mach_thread_self (), psetcntl, 0); + if (kr != KERN_SUCCESS) + panic("set thread max priority: %d", kr); + kr = thread_priority (mach_thread_self (), 0, FALSE); + if (kr != KERN_SUCCESS) + panic("set thread priority: %d", kr); +} + +static any_t io_done_thread(any_t unused) +{ + /* + * Set thread privileges and highest priority. + */ +// current_thread()->vm_privilege = TRUE; +// stack_privilege(current_thread()); + wire_thread (); + + thread_set_own_priority(0); + + io_done_thread_continue(); + /*NOTREACHED*/ + return 0; +} + +void mach_device_init() +{ + int i; + + queue_init(&io_done_list); + mutex_init (&io_done_list_lock); + thread_wait_init (&io_done_wait); + + port_bucket = ports_create_bucket (); + dev_class = ports_create_class (0, 0); + + for (i = 0; i < NUM_EMULATION; i++) { + emulation_list[i]->init(); + } + + cthread_detach (cthread_fork (io_done_thread, 0)); +} diff --git a/libmachdev/ds_routines.h b/libmachdev/ds_routines.h new file mode 100644 index 00000000..e314e80e --- /dev/null +++ b/libmachdev/ds_routines.h @@ -0,0 +1,55 @@ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. 
+ * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + * Author: David B. Golub, Carnegie Mellon University + * Date: 8/89 + * + * Device service utility routines. + */ + +#ifndef DS_ROUTINES_H +#define DS_ROUTINES_H + +#include <mach.h> + +#include "io_req.h" + +/* + * Map for device IO memory. + */ +//vm_map_t device_io_map; + +kern_return_t device_read_alloc(io_req_t, vm_size_t); +kern_return_t device_write_get(io_req_t, boolean_t *); +boolean_t device_write_dealloc(io_req_t); + +boolean_t ds_open_done(io_req_t); +boolean_t ds_read_done(io_req_t); +boolean_t ds_write_done(io_req_t); + +void iowait (io_req_t ior); + +#endif /* DS_ROUTINES_H */ diff --git a/libmachdev/if_ether.h b/libmachdev/if_ether.h new file mode 100644 index 00000000..29974674 --- /dev/null +++ b/libmachdev/if_ether.h @@ -0,0 +1,87 @@ +/* + * INET An implementation of the TCP/IP protocol suite for the LINUX + * operating system. INET is implemented using the BSD Socket + * interface as the means of communication with the user level. + * + * Global definitions for the Ethernet IEEE 802.3 interface. + * + * Version: @(#)if_ether.h 1.0.1a 02/08/94 + * + * Author: Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> + * Donald Becker, <becker@super.org> + * Alan Cox, <alan@cymru.net> + * Steve Whitehouse, <gw7rrm@eeshack3.swan.ac.uk> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ + +#ifndef _LINUX_IF_ETHER_H +#define _LINUX_IF_ETHER_H + +/* + * IEEE 802.3 Ethernet magic constants. The frame sizes omit the preamble + * and FCS/CRC (frame check sequence). + */ + +#define ETH_ALEN 6 /* Octets in one ethernet addr */ +#define ETH_HLEN 14 /* Total octets in header. */ +#define ETH_ZLEN 60 /* Min. octets in frame sans FCS */ +#define ETH_DATA_LEN 1500 /* Max. octets in payload */ +#define ETH_FRAME_LEN 1514 /* Max. octets in frame sans FCS */ + +/* + * These are the defined Ethernet Protocol ID's. 
+ */ + +#define ETH_P_LOOP 0x0060 /* Ethernet Loopback packet */ +#define ETH_P_ECHO 0x0200 /* Ethernet Echo packet */ +#define ETH_P_PUP 0x0400 /* Xerox PUP packet */ +#define ETH_P_IP 0x0800 /* Internet Protocol packet */ +#define ETH_P_X25 0x0805 /* CCITT X.25 */ +#define ETH_P_ARP 0x0806 /* Address Resolution packet */ +#define ETH_P_BPQ 0x08FF /* G8BPQ AX.25 Ethernet Packet [ NOT AN OFFICIALLY REGISTERED ID ] */ +#define ETH_P_DEC 0x6000 /* DEC Assigned proto */ +#define ETH_P_DNA_DL 0x6001 /* DEC DNA Dump/Load */ +#define ETH_P_DNA_RC 0x6002 /* DEC DNA Remote Console */ +#define ETH_P_DNA_RT 0x6003 /* DEC DNA Routing */ +#define ETH_P_LAT 0x6004 /* DEC LAT */ +#define ETH_P_DIAG 0x6005 /* DEC Diagnostics */ +#define ETH_P_CUST 0x6006 /* DEC Customer use */ +#define ETH_P_SCA 0x6007 /* DEC Systems Comms Arch */ +#define ETH_P_RARP 0x8035 /* Reverse Addr Res packet */ +#define ETH_P_ATALK 0x809B /* Appletalk DDP */ +#define ETH_P_AARP 0x80F3 /* Appletalk AARP */ +#define ETH_P_IPX 0x8137 /* IPX over DIX */ +#define ETH_P_IPV6 0x86DD /* IPv6 over bluebook */ + +/* + * Non DIX types. Won't clash for 1500 types. + */ + +#define ETH_P_802_3 0x0001 /* Dummy type for 802.3 frames */ +#define ETH_P_AX25 0x0002 /* Dummy protocol id for AX.25 */ +#define ETH_P_ALL 0x0003 /* Every packet (be careful!!!) */ +#define ETH_P_802_2 0x0004 /* 802.2 frames */ +#define ETH_P_SNAP 0x0005 /* Internal only */ +#define ETH_P_DDCMP 0x0006 /* DEC DDCMP: Internal only */ +#define ETH_P_WAN_PPP 0x0007 /* Dummy type for WAN PPP frames*/ +#define ETH_P_PPP_MP 0x0008 /* Dummy type for PPP MP frames */ +#define ETH_P_LOCALTALK 0x0009 /* Localtalk pseudo type */ +#define ETH_P_PPPTALK 0x0010 /* Dummy type for Atalk over PPP*/ +#define ETH_P_TR_802_2 0x0011 /* 802.2 frames */ + +/* + * This is an Ethernet frame header. + */ + +struct ethhdr +{ + unsigned char h_dest[ETH_ALEN]; /* destination eth addr */ + unsigned char h_source[ETH_ALEN]; /* source ether addr */ + unsigned short h_proto; /* packet type ID field */ +}; + +#endif /* _LINUX_IF_ETHER_H */ diff --git a/libmachdev/io_req.h b/libmachdev/io_req.h new file mode 100644 index 00000000..df8d743c --- /dev/null +++ b/libmachdev/io_req.h @@ -0,0 +1,135 @@ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + * Author: David B. Golub, Carnegie Mellon University + * Date: 10/88 + */ + +#ifndef _IO_REQ_ +#define _IO_REQ_ + +#include <mach.h> +#include <cthreads.h> + +#include "dev_hdr.h" + +/* + * IO request element, queued on device for delayed replies. 
+ */ +typedef struct io_req *io_req_t; +struct io_req { + struct io_req * io_next; /* next, ... */ + struct io_req * io_prev; /* prev pointers: link in done, + defered, or in-progress list */ + mach_device_t io_device; /* pointer to open-device structure */ + char * io_dev_ptr; /* pointer to driver structure - + filled in by driver if necessary */ + int io_unit; /* unit number ('minor') of device */ + int io_op; /* IO operation */ + dev_mode_t io_mode; /* operation mode (wait, truncate) */ + recnum_t io_recnum; /* starting record number for + random-access devices */ + + union io_un { + io_buf_ptr_t data; /* data, for IO requests */ + } io_un; +#define io_data io_un.data + + long io_count; /* amount requested */ + long io_alloc_size; /* amount allocated */ + long io_residual; /* amount NOT done */ + io_return_t io_error; /* error code */ + /* call when done - returns TRUE if IO really finished */ + boolean_t (*io_done)(io_req_t); + mach_port_t io_reply_port; /* reply port, for asynchronous + messages */ + mach_msg_type_name_t io_reply_port_type; + /* send or send-once right? */ + struct io_req * io_link; /* forward link (for driver header) */ + struct io_req * io_rlink; /* reverse link (for driver header) */ +// vm_map_copy_t io_copy; /* vm_map_copy obj. for this op. */ + long io_total; /* total op size, for write */ + struct mutex io_req_lock; +// decl_simple_lock_data(,io_req_lock) + /* Lock for this structure */ + long io_physrec; /* mapping to the physical block + number */ + long io_rectotal; /* total number of blocks to move */ +}; + +/* + * LOCKING NOTE: Operations on io_req's are in general single threaded by + * the invoking code, obviating the need for a lock. The usual IO_CALL + * path through the code is: Initiating thread hands io_req to device driver, + * driver passes it to io_done thread, io_done thread sends reply message. No + * locking is needed in this sequence. Unfortunately, a synchronous wait + * for a buffer requires a lock to avoid problems if the wait and interrupt + * happen simultaneously on different processors. + */ + +#define ior_lock(ior) mutex_lock(&(ior)->io_req_lock) +#define ior_unlock(ior) mutex_unlock(&(ior)->io_req_lock) + +/* + * Flags and operations + */ + +#define IO_WRITE 0x00000000 /* operation is write */ +#define IO_READ 0x00000001 /* operation is read */ +#define IO_OPEN 0x00000002 /* operation is open */ +#define IO_DONE 0x00000100 /* operation complete */ +#define IO_ERROR 0x00000200 /* error on operation */ +#define IO_BUSY 0x00000400 /* operation in progress */ +#define IO_WANTED 0x00000800 /* wakeup when no longer BUSY */ +#define IO_BAD 0x00001000 /* bad disk block */ +#define IO_CALL 0x00002000 /* call io_done_thread when done */ +#define IO_INBAND 0x00004000 /* mig call was inband */ +#define IO_INTERNAL 0x00008000 /* internal, device-driver specific */ +#define IO_LOANED 0x00010000 /* ior loaned by another module */ + +#define IO_SPARE_START 0x00020000 /* start of spare flags */ + +/* + * Standard completion routine for io_requests. + */ +void iodone(io_req_t); + +/* + * Macros to allocate and free IORs - will convert to zones later. 
+ */ +#define io_req_alloc(ior,size) \ + MACRO_BEGIN \ + (ior) = (io_req_t)malloc(sizeof(struct io_req)); \ + mutex_init(&(ior)->io_req_lock); \ + MACRO_END + +#define io_req_free(ior) \ + (free(ior)) + + +//zone_t io_inband_zone; /* for inband reads */ + +#endif /* _IO_REQ_ */ diff --git a/libmachdev/mach.defs b/libmachdev/mach.defs new file mode 100644 index 00000000..764bd451 --- /dev/null +++ b/libmachdev/mach.defs @@ -0,0 +1,779 @@ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University. + * Copyright (c) 1993,1994 The University of Utah and + * the Computer Systems Laboratory (CSL). + * All rights reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON, THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF + * THIS SOFTWARE IN ITS "AS IS" CONDITION, AND DISCLAIM ANY LIABILITY + * OF ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF + * THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + * Matchmaker definitions file for Mach kernel interface. + */ + +#ifdef MACH_KERNEL +simport <kern/compat_xxx_defs.h>; /* for obsolete routines */ +#endif /* MACH_KERNEL */ + +subsystem +#if KERNEL_USER + KernelUser +#endif /* KERNEL_USER */ +#if KERNEL_SERVER + KernelServer +#endif /* KERNEL_SERVER */ + mach 2000; + +#ifdef KERNEL_USER +userprefix r_; +#endif /* KERNEL_USER */ + +#include <mach/std_types.defs> +#include <mach/mach_types.defs> + +skip; /* old port_allocate */ +skip; /* old port_deallocate */ +skip; /* old port_enable */ +skip; /* old port_disable */ +skip; /* old port_select */ +skip; /* old port_set_backlog */ +skip; /* old port_status */ + +/* + * Create a new task with an empty set of IPC rights, + * and having an address space constructed from the + * target task (or empty, if inherit_memory is FALSE). + */ +routine task_create( + target_task : task_t; + inherit_memory : boolean_t; + out child_task : task_t); + +/* + * Destroy the target task, causing all of its threads + * to be destroyed, all of its IPC rights to be deallocated, + * and all of its address space to be deallocated. + */ +routine task_terminate( + target_task : task_t); + +/* + * Get user-level handler entry points for all + * emulated system calls. + */ +routine task_get_emulation_vector( + task : task_t; + out vector_start : int; + out emulation_vector: emulation_vector_t); + +/* + * Establish user-level handlers for the specified + * system calls. Non-emulated system calls are specified + * with emulation_vector[i] == EML_ROUTINE_NULL. + */ +routine task_set_emulation_vector( + task : task_t; + vector_start : int; + emulation_vector: emulation_vector_t); + + +/* + * Returns the set of threads belonging to the target task. + */ +routine task_threads( + target_task : task_t; + out thread_list : thread_array_t); + +/* + * Returns information about the target task. 
+ */ +routine task_info( + target_task : task_t; + flavor : int; + out task_info_out : task_info_t, CountInOut); + + +skip; /* old task_status */ +skip; /* old task_set_notify */ +skip; /* old thread_create */ + +/* + * Destroy the target thread. + */ +routine thread_terminate( + target_thread : thread_t); + +/* + * Return the selected state information for the target + * thread. If the thread is currently executing, the results + * may be stale. [Flavor THREAD_STATE_FLAVOR_LIST provides a + * list of valid flavors for the target thread.] + */ +routine thread_get_state( + target_thread : thread_t; + flavor : int; + out old_state : thread_state_t, CountInOut); + +/* + * Set the selected state information for the target thread. + * If the thread is currently executing, the state change + * may be ill-defined. + */ +routine thread_set_state( + target_thread : thread_t; + flavor : int; + new_state : thread_state_t); + +/* + * Returns information about the target thread. + */ +routine thread_info( + target_thread : thread_t; + flavor : int; + out thread_info_out : thread_info_t, CountInOut); + +skip; /* old thread_mutate */ + +/* + * Allocate zero-filled memory in the address space + * of the target task, either at the specified address, + * or wherever space can be found (if anywhere is TRUE), + * of the specified size. The address at which the + * allocation actually took place is returned. + */ +#ifdef EMULATOR +skip; /* the emulator redefines vm_allocate using vm_map */ +#else /* EMULATOR */ +routine vm_allocate( + target_task : vm_task_t; + inout address : vm_address_t; + size : vm_size_t; + anywhere : boolean_t); +#endif /* EMULATOR */ + +skip; /* old vm_allocate_with_pager */ + +/* + * Deallocate the specified range from the virtual + * address space of the target task. + */ +routine vm_deallocate( + target_task : vm_task_t; + address : vm_address_t; + size : vm_size_t); + +/* + * Set the current or maximum protection attribute + * for the specified range of the virtual address + * space of the target task. The current protection + * limits the memory access rights of threads within + * the task; the maximum protection limits the accesses + * that may be given in the current protection. + * Protections are specified as a set of {read, write, execute} + * *permissions*. + */ +routine vm_protect( + target_task : vm_task_t; + address : vm_address_t; + size : vm_size_t; + set_maximum : boolean_t; + new_protection : vm_prot_t); + +/* + * Set the inheritance attribute for the specified range + * of the virtual address space of the target task. + * The inheritance value is one of {none, copy, share}, and + * specifies how the child address space should acquire + * this memory at the time of a task_create call. + */ +routine vm_inherit( + target_task : vm_task_t; + address : vm_address_t; + size : vm_size_t; + new_inheritance : vm_inherit_t); + +/* + * Returns the contents of the specified range of the + * virtual address space of the target task. [The + * range must be aligned on a virtual page boundary, + * and must be a multiple of pages in extent. The + * protection on the specified range must permit reading.] + */ +routine vm_read( + target_task : vm_task_t; + address : vm_address_t; + size : vm_size_t; + out data : pointer_t); + +/* + * Writes the contents of the specified range of the + * virtual address space of the target task. [The + * range must be aligned on a virtual page boundary, + * and must be a multiple of pages in extent. 
The + * protection on the specified range must permit writing.] + */ +routine vm_write( + target_task : vm_task_t; + address : vm_address_t; + data : pointer_t); + +/* + * Copy the contents of the source range of the virtual + * address space of the target task to the destination + * range in that same address space. [Both of the + * ranges must be aligned on a virtual page boundary, + * and must be multiples of pages in extent. The + * protection on the source range must permit reading, + * and the protection on the destination range must + * permit writing.] + */ +routine vm_copy( + target_task : vm_task_t; + source_address : vm_address_t; + size : vm_size_t; + dest_address : vm_address_t); + +/* + * Returns information about the contents of the virtual + * address space of the target task at the specified + * address. The returned protection, inheritance, sharing + * and memory object values apply to the entire range described + * by the address range returned; the memory object offset + * corresponds to the beginning of the address range. + * [If the specified address is not allocated, the next + * highest address range is described. If no addresses beyond + * the one specified are allocated, the call returns KERN_NO_SPACE.] + */ +routine vm_region( + target_task : vm_task_t; + inout address : vm_address_t; + out size : vm_size_t; + out protection : vm_prot_t; + out max_protection : vm_prot_t; + out inheritance : vm_inherit_t; + out is_shared : boolean_t; + /* avoid out-translation of the argument */ + out object_name : memory_object_name_t = + MACH_MSG_TYPE_MOVE_SEND + ctype: mach_port_t; + out offset : vm_offset_t); + +/* + * Return virtual memory statistics for the host + * on which the target task resides. [Note that the + * statistics are not specific to the target task.] + */ +routine vm_statistics( + target_task : vm_task_t; + out vm_stats : vm_statistics_data_t); + +skip; /* old task_by_u*x_pid */ +skip; /* old vm_pageable */ + +/* + * Stash a handful of ports for the target task; child + * tasks inherit this stash at task_create time. + */ +routine mach_ports_register( + target_task : task_t; + init_port_set : mach_port_array_t = + ^array[] of mach_port_t); + +/* + * Retrieve the stashed ports for the target task. + */ +routine mach_ports_lookup( + target_task : task_t; + out init_port_set : mach_port_array_t = + ^array[] of mach_port_t); + +skip; /* old u*x_pid */ +skip; /* old netipc_listen */ +skip; /* old netipc_ignore */ + +/* + * Provide the data contents of a range of the given memory + * object, with the access restriction specified. [Only + * whole virtual pages of data can be accepted; partial pages + * will be discarded. Data should be provided on request, but + * may be provided in advance as desired. When data already + * held by this kernel is provided again, the new data is ignored. + * The access restriction is the subset of {read, write, execute} + * which are prohibited. The kernel may not provide any data (or + * protection) consistency among pages with different virtual page + * alignments within the same object.] + */ +simpleroutine memory_object_data_provided( + memory_control : memory_object_control_t; + offset : vm_offset_t; + data : pointer_t; + lock_value : vm_prot_t); + +/* + * Indicate that a range of the given temporary memory object does + * not exist, and that the backing memory object should be used + * instead (or zero-fill memory be used, if no backing object exists). + * [This call is intended for use only by the default memory manager. 
+ * It should not be used to indicate a real error -- + * memory_object_data_error should be used for that purpose.] + */ +simpleroutine memory_object_data_unavailable( + memory_control : memory_object_control_t; + offset : vm_offset_t; + size : vm_size_t); + +/* + * Retrieves the attributes currently associated with + * a memory object. + */ +routine memory_object_get_attributes( + memory_control : memory_object_control_t; + out object_ready : boolean_t; + out may_cache : boolean_t; + out copy_strategy : memory_object_copy_strategy_t); + +/* + * Sets the default memory manager, the port to which + * newly-created temporary memory objects are delivered. + * [See (memory_object_default)memory_object_create.] + * The old memory manager port is returned. + */ +routine vm_set_default_memory_manager( + host_priv : host_priv_t; + inout default_manager : mach_port_make_send_t); + +skip; /* old pager_flush_request */ + +/* + * Control use of the data associated with the given + * memory object. For each page in the given range, + * perform the following operations, in order: + * 1) restrict access to the page (disallow + * forms specified by "prot"); + * 2) write back modifications (if "should_return" + * is RETURN_DIRTY and the page is dirty, or + * "should_return" is RETURN_ALL and the page + * is either dirty or precious); and, + * 3) flush the cached copy (if "should_flush" + * is asserted). + * The set of pages is defined by a starting offset + * ("offset") and size ("size"). Only pages with the + * same page alignment as the starting offset are + * considered. + * + * A single acknowledgement is sent (to the "reply_to" + * port) when these actions are complete. + * + * There are two versions of this routine because IPC distinguishes + * between booleans and integers (a 2-valued integer is NOT a + * boolean). The new routine is backwards compatible at the C + * language interface. + */ +simpleroutine xxx_memory_object_lock_request( + memory_control : memory_object_control_t; + offset : vm_offset_t; + size : vm_size_t; + should_clean : boolean_t; + should_flush : boolean_t; + lock_value : vm_prot_t; + reply_to : mach_port_t = + MACH_MSG_TYPE_MAKE_SEND_ONCE|polymorphic); + + +simpleroutine memory_object_lock_request( + memory_control : memory_object_control_t; + offset : vm_offset_t; + size : vm_size_t; + should_return : memory_object_return_t; + should_flush : boolean_t; + lock_value : vm_prot_t; + reply_to : mach_port_t = + MACH_MSG_TYPE_MAKE_SEND_ONCE|polymorphic); + +/* obsolete */ +routine xxx_task_get_emulation_vector( + task : task_t; + out vector_start : int; + out emulation_vector: xxx_emulation_vector_t, IsLong); + +/* obsolete */ +routine xxx_task_set_emulation_vector( + task : task_t; + vector_start : int; + emulation_vector: xxx_emulation_vector_t, IsLong); + +/* + * Returns information about the host on which the + * target object resides. [This object may be + * a task, thread, or memory_object_control port.] + */ +routine xxx_host_info( + target_task : mach_port_t; + out info : machine_info_data_t); + +/* + * Returns information about a particular processor on + * the host on which the target task resides. + */ +routine xxx_slot_info( + target_task : task_t; + slot : int; + out info : machine_slot_data_t); + +/* + * Performs control operations (currently only + * turning off or on) on a particular processor on + * the host on which the target task resides. 
+ */ +routine xxx_cpu_control( + target_task : task_t; + cpu : int; + running : boolean_t); + +skip; /* old thread_statistics */ +skip; /* old task_statistics */ +skip; /* old netport_init */ +skip; /* old netport_enter */ +skip; /* old netport_remove */ +skip; /* old thread_set_priority */ + +/* + * Increment the suspend count for the target task. + * No threads within a task may run when the suspend + * count for that task is non-zero. + */ +routine task_suspend( + target_task : task_t); + +/* + * Decrement the suspend count for the target task, + * if the count is currently non-zero. If the resulting + * suspend count is zero, then threads within the task + * that also have non-zero suspend counts may execute. + */ +routine task_resume( + target_task : task_t); + +/* + * Returns the current value of the selected special port + * associated with the target task. + */ +routine task_get_special_port( + task : task_t; + which_port : int; + out special_port : mach_port_t); + +/* + * Set one of the special ports associated with the + * target task. + */ +routine task_set_special_port( + task : task_t; + which_port : int; + special_port : mach_port_t); + +/* obsolete */ +routine xxx_task_info( + target_task : task_t; + flavor : int; + out task_info_out : task_info_t, IsLong); + + +/* + * Create a new thread within the target task, returning + * the port representing that new thread. The + * initial execution state of the thread is undefined. + */ +routine thread_create( + parent_task : task_t; + out child_thread : thread_t); + +/* + * Increment the suspend count for the target thread. + * Once this call has completed, the thread will not + * execute any further user or meta- instructions. + * Once suspended, a thread may not execute again until + * its suspend count is zero, and the suspend count + * for its task is also zero. + */ +routine thread_suspend( + target_thread : thread_t); + +/* + * Decrement the suspend count for the target thread, + * if that count is not already zero. + */ +routine thread_resume( + target_thread : thread_t); + +/* + * Cause any user or meta- instructions currently being + * executed by the target thread to be aborted. [Meta- + * instructions consist of the basic traps for IPC + * (e.g., msg_send, msg_receive) and self-identification + * (e.g., task_self, thread_self, thread_reply). Calls + * described by MiG interfaces are not meta-instructions + * themselves.] + */ +routine thread_abort( + target_thread : thread_t); + +/* obsolete */ +routine xxx_thread_get_state( + target_thread : thread_t; + flavor : int; + out old_state : thread_state_t, IsLong); + +/* obsolete */ +routine xxx_thread_set_state( + target_thread : thread_t; + flavor : int; + new_state : thread_state_t, IsLong); + +/* + * Returns the current value of the selected special port + * associated with the target thread. + */ +routine thread_get_special_port( + thread : thread_t; + which_port : int; + out special_port : mach_port_t); + +/* + * Set one of the special ports associated with the + * target thread. + */ +routine thread_set_special_port( + thread : thread_t; + which_port : int; + special_port : mach_port_t); + +/* obsolete */ +routine xxx_thread_info( + target_thread : thread_t; + flavor : int; + out thread_info_out : thread_info_t, IsLong); + +/* + * Establish a user-level handler for the specified + * system call. 
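+ * [The handler address is recorded in the task's emulation
+ * vector entry for the given system call number; threads in
+ * the task that subsequently invoke that system call are
+ * redirected to the handler in user mode.]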
+ */ +routine task_set_emulation( + target_port : task_t; + routine_entry_pt: vm_address_t; + routine_number : int); + +/* + * Establish restart pc for interrupted atomic sequences. + * This reuses the message number for the old task_get_io_port. + * See task_info.h for description of flavors. + * + */ +routine task_ras_control( + target_task : task_t; + basepc : vm_address_t; + boundspc : vm_address_t; + flavor : int); + + + +skip; /* old host_ipc_statistics */ +skip; /* old port_names */ +skip; /* old port_type */ +skip; /* old port_rename */ +skip; /* old port_allocate */ +skip; /* old port_deallocate */ +skip; /* old port_set_backlog */ +skip; /* old port_status */ +skip; /* old port_set_allocate */ +skip; /* old port_set_deallocate */ +skip; /* old port_set_add */ +skip; /* old port_set_remove */ +skip; /* old port_set_status */ +skip; /* old port_insert_send */ +skip; /* old port_extract_send */ +skip; /* old port_insert_receive */ +skip; /* old port_extract_receive */ + +/* + * Map a user-defined memory object into the virtual address + * space of the target task. If desired (anywhere is TRUE), + * the kernel will find a suitable address range of the + * specified size; else, the specific address will be allocated. + * + * The beginning address of the range will be aligned on a virtual + * page boundary, be at or beyond the address specified, and + * meet the mask requirements (bits turned on in the mask must not + * be turned on in the result); the size of the range, in bytes, + * will be rounded up to an integral number of virtual pages. + * + * The memory in the resulting range will be associated with the + * specified memory object, with the beginning of the memory range + * referring to the specified offset into the memory object. + * + * The mapping will take the current and maximum protections and + * the inheritance attributes specified; see the vm_protect and + * vm_inherit calls for a description of these attributes. + * + * If desired (copy is TRUE), the memory range will be filled + * with a copy of the data from the memory object; this copy will + * be private to this mapping in this target task. Otherwise, + * the memory in this mapping will be shared with other mappings + * of the same memory object at the same offset (in this task or + * in other tasks). [The Mach kernel only enforces shared memory + * consistency among mappings on one host with similar page alignments. + * The user-defined memory manager for this object is responsible + * for further consistency.] + */ +#ifdef EMULATOR +routine htg_vm_map( + target_task : vm_task_t; + ureplyport reply_port : mach_port_make_send_once_t; + inout address : vm_address_t; + size : vm_size_t; + mask : vm_address_t; + anywhere : boolean_t; + memory_object : memory_object_t; + offset : vm_offset_t; + copy : boolean_t; + cur_protection : vm_prot_t; + max_protection : vm_prot_t; + inheritance : vm_inherit_t); +#else /* EMULATOR */ +routine vm_map( + target_task : vm_task_t; + inout address : vm_address_t; + size : vm_size_t; + mask : vm_address_t; + anywhere : boolean_t; + memory_object : memory_object_t; + offset : vm_offset_t; + copy : boolean_t; + cur_protection : vm_prot_t; + max_protection : vm_prot_t; + inheritance : vm_inherit_t); +#endif /* EMULATOR */ + +/* + * Indicate that a range of the specified memory object cannot + * be provided at this time. [Threads waiting for memory pages + * specified by this call will experience a memory exception. + * Only threads waiting at the time of the call are affected.] 
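+ * [A memory manager would typically use this call when an I/O
+ * error keeps it from supplying the requested pages, rather
+ * than returning stale or zero-filled data.]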
+ */ +simpleroutine memory_object_data_error( + memory_control : memory_object_control_t; + offset : vm_offset_t; + size : vm_size_t; + error_value : kern_return_t); + +/* + * Make decisions regarding the use of the specified + * memory object. + */ +simpleroutine memory_object_set_attributes( + memory_control : memory_object_control_t; + object_ready : boolean_t; + may_cache : boolean_t; + copy_strategy : memory_object_copy_strategy_t); + +/* + */ +simpleroutine memory_object_destroy( + memory_control : memory_object_control_t; + reason : kern_return_t); + +/* + * Provide the data contents of a range of the given memory + * object, with the access restriction specified, optional + * precious attribute, and reply message. [Only + * whole virtual pages of data can be accepted; partial pages + * will be discarded. Data should be provided on request, but + * may be provided in advance as desired. When data already + * held by this kernel is provided again, the new data is ignored. + * The access restriction is the subset of {read, write, execute} + * which are prohibited. The kernel may not provide any data (or + * protection) consistency among pages with different virtual page + * alignments within the same object. The precious value controls + * how the kernel treats the data. If it is FALSE, the kernel treats + * its copy as a temporary and may throw it away if it hasn't been + * changed. If the precious value is TRUE, the kernel treats its + * copy as a data repository and promises to return it to the manager; + * the manager may tell the kernel to throw it away instead by flushing + * and not cleaning the data -- see memory_object_lock_request. The + * reply_to port is for a compeletion message; it will be + * memory_object_supply_completed.] + */ + +simpleroutine memory_object_data_supply( + memory_control : memory_object_control_t; + offset : vm_offset_t; + data : pointer_t, Dealloc[]; + lock_value : vm_prot_t; + precious : boolean_t; + reply_to : mach_port_t = + MACH_MSG_TYPE_MAKE_SEND_ONCE|polymorphic); + +simpleroutine memory_object_ready( + memory_control : memory_object_control_t; + may_cache : boolean_t; + copy_strategy : memory_object_copy_strategy_t); + +simpleroutine memory_object_change_attributes( + memory_control : memory_object_control_t; + may_cache : boolean_t; + copy_strategy : memory_object_copy_strategy_t; + reply_to : mach_port_t = + MACH_MSG_TYPE_MAKE_SEND_ONCE|polymorphic); + +skip; /* old host_callout_statistics_reset */ +skip; /* old port_set_select */ +skip; /* old port_set_backup */ + +/* + * Set/Get special properties of memory associated + * to some virtual address range, such as cachability, + * migrability, replicability. Machine-dependent. + */ +routine vm_machine_attribute( + target_task : vm_task_t; + address : vm_address_t; + size : vm_size_t; + attribute : vm_machine_attribute_t; + inout value : vm_machine_attribute_val_t); + +/*skip;*/ /* old host_fpa_counters_reset */ + +/* + * This routine is created for allocating DMA buffers. + * We are going to get a contiguous physical memory + * and its physical address in addition to the virtual address. + */ +routine vm_dma_buff_alloc( + host_priv : host_priv_t; + target_task : vm_task_t; + size : vm_size_t; + out vaddr : vm_address_t; + out paddr : vm_address_t); + +/* + * There is no more room in this interface for additional calls. 
+ */ diff --git a/libmachdev/net.c b/libmachdev/net.c new file mode 100644 index 00000000..f14e9350 --- /dev/null +++ b/libmachdev/net.c @@ -0,0 +1,679 @@ +/* + * Linux network driver support. + * + * Copyright (C) 1996 The University of Utah and the Computer Systems + * Laboratory at the University of Utah (CSL) + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. + * + * Author: Shantanu Goel, University of Utah CSL + */ + +/* + * INET An implementation of the TCP/IP protocol suite for the LINUX + * operating system. INET is implemented using the BSD Socket + * interface as the means of communication with the user level. + * + * Ethernet-type device handling. + * + * Version: @(#)eth.c 1.0.7 05/25/93 + * + * Authors: Ross Biro, <bir7@leland.Stanford.Edu> + * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> + * Mark Evans, <evansmp@uhura.aston.ac.uk> + * Florian La Roche, <rzsfl@rz.uni-sb.de> + * Alan Cox, <gw4pts@gw4pts.ampr.org> + * + * Fixes: + * Mr Linux : Arp problems + * Alan Cox : Generic queue tidyup (very tiny here) + * Alan Cox : eth_header ntohs should be htons + * Alan Cox : eth_rebuild_header missing an htons and + * minor other things. + * Tegge : Arp bug fixes. + * Florian : Removed many unnecessary functions, code cleanup + * and changes for new arp and skbuff. + * Alan Cox : Redid header building to reflect new format. + * Alan Cox : ARP only when compiled with CONFIG_INET + * Greg Page : 802.2 and SNAP stuff. + * Alan Cox : MAC layer pointers/new format. + * Paul Gortmaker : eth_copy_and_sum shouldn't csum padding. + * Alan Cox : Protect against forwarding explosions with + * older network drivers and IFF_ALLMULTI + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ +#include <assert.h> +#include <string.h> +#include <arpa/inet.h> + +#include "mach_U.h" + +#include <mach.h> +#include <hurd.h> + +#define MACH_INCLUDE + +#include "vm_param.h" +#include "device_reply_U.h" +#include "dev_hdr.h" +#include "if_ether.h" +#include "util.h" + +#define ether_header ethhdr + +extern int linux_intr_pri; +extern struct port_bucket *port_bucket; +extern struct port_class *dev_class; + +/* One of these is associated with each instance of a device. 
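+   It ties the port for the Mach device to the underlying Linux
+   net_device and records the delivery port to which received
+   packets are forwarded.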
*/ +struct net_data +{ + struct port_info port; /* device port */ +// struct ifnet ifnet; /* Mach ifnet structure (needed for filters) */ + struct emul_device device; /* generic device structure */ + mach_port_t delivery_port; + struct net_device *dev; /* Linux network device structure */ + struct net_data *next; +}; + +struct skb_reply +{ + mach_port_t reply; + mach_msg_type_name_t reply_type; + int pkglen; +}; + +struct sk_buff; +void skb_done_queue(struct sk_buff *skb); +struct sk_buff *skb_done_dequeue(); +void linux_net_emulation_init (); +void *skb_reply(struct sk_buff *skb); +int netdev_flags(struct net_device *dev); +char *netdev_addr(struct net_device *dev); +int dev_change_flags (struct net_device *dev, short flags); +int linux_pkg_xmit (char *pkg_data, int len, void *del_data, + int (*del_func) (struct sk_buff *, void *), + struct net_device *dev); +struct net_device *search_netdev (char *name); +void kfree_skb (struct sk_buff *skb); +int dev_open(struct net_device *dev); +void *l4dde26_register_rx_callback(void *cb); +void skb_done_head_init(); + +struct net_data *nd_head; + +/* Forward declarations. */ + +extern struct device_emulation_ops linux_net_emulation_ops; + +static int print_packet_size = 0; + +mach_msg_type_t header_type = +{ + MACH_MSG_TYPE_BYTE, + 8, + NET_HDW_HDR_MAX, + TRUE, + FALSE, + FALSE, + 0 +}; + +mach_msg_type_t packet_type = +{ + MACH_MSG_TYPE_BYTE, /* name */ + 8, /* size */ + 0, /* number */ + TRUE, /* inline */ + FALSE, /* longform */ + FALSE /* deallocate */ +}; + +struct net_data *search_nd (struct net_device *dev) +{ + struct net_data *nd = nd_head; + + //TODO protected by locks. + while (nd) + { + if (nd->dev == dev) + return nd; + nd = nd->next; + } + return NULL; +} + +/* Linux kernel network support routines. */ + +/* Free all sk_buffs on the done list. + This routine is called by the iodone thread in ds_routines.c. */ +void +free_skbuffs () +{ + struct sk_buff *skb; + + while (1) + { + skb = skb_done_dequeue (); + if (skb) + { + struct skb_reply *reply = skb_reply(skb); + if (MACH_PORT_VALID (reply->reply)) + { + ds_device_write_reply (reply->reply, reply->reply_type, + 0, reply->pkglen); + reply->reply = MACH_PORT_NULL; + } + kfree_skb (skb); + } + else + break; + } +} + +/* actions before freeing the sk_buff SKB. + * If it returns 1, the packet will be deallocated later. */ +int +pre_kfree_skb (struct sk_buff *skb, void *data) +{ + struct skb_reply *reply = data; + extern void wakeup_io_done_thread (); + + /* Queue sk_buff on done list if there is a + page list attached or we need to send a reply. + Wakeup the iodone thread to process the list. */ + if (reply && MACH_PORT_VALID (reply->reply)) + { + skb_done_queue (skb); + wakeup_io_done_thread (); + return 1; + } + return 0; +} + +/* + * Deliver the message to all right pfinet servers that + * connects to the virtual network interface. 
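+ * The send is done with a timeout so a slow or dead receiver
+ * cannot stall the driver; on failure the send right is
+ * deallocated and the error is returned to the caller.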
+ */ +int +deliver_msg(mach_port_t dest, struct net_rcv_msg *msg) +{ + mach_msg_return_t err; + + msg->msg_hdr.msgh_bits = MACH_MSGH_BITS (MACH_MSG_TYPE_COPY_SEND, 0); + /* remember message sizes must be rounded up */ + msg->msg_hdr.msgh_local_port = MACH_PORT_NULL; + msg->msg_hdr.msgh_kind = MACH_MSGH_KIND_NORMAL; + msg->msg_hdr.msgh_id = NET_RCV_MSG_ID; + + msg->msg_hdr.msgh_remote_port = dest; + err = mach_msg ((mach_msg_header_t *)msg, + MACH_SEND_MSG|MACH_SEND_TIMEOUT, + msg->msg_hdr.msgh_size, 0, MACH_PORT_NULL, + 0, MACH_PORT_NULL); + if (err != MACH_MSG_SUCCESS) + { + mach_port_deallocate(mach_task_self (), + ((mach_msg_header_t *)msg)->msgh_remote_port); + return err; + } + + return MACH_MSG_SUCCESS; +} + +/* Accept packet SKB received on an interface. */ +void +netif_rx_handle (char *data, int len, struct net_device *dev) +{ + int pack_size; + net_rcv_msg_t net_msg; + struct ether_header *eh; + struct packet_header *ph; + struct net_data *nd; + + if (print_packet_size) + printf ("netif_rx: length %d\n", len); + + nd = search_nd(dev); + assert (nd); + + /* Allocate a kernel message buffer. */ + net_msg = malloc (sizeof (*net_msg)); + if (!net_msg) + return; + + pack_size = len - sizeof (struct ethhdr); + /* remember message sizes must be rounded up */ + net_msg->msg_hdr.msgh_size = (((mach_msg_size_t) (sizeof(struct net_rcv_msg) + - NET_RCV_MAX + pack_size)) + 3) & ~3; + + /* Copy packet into message buffer. */ + eh = (struct ether_header *) (net_msg->header); + ph = (struct packet_header *) (net_msg->packet); + memcpy (eh, data, sizeof (struct ether_header)); + /* packet is prefixed with a struct packet_header, + see include/device/net_status.h. */ + memcpy (ph + 1, data + sizeof (struct ether_header), pack_size); + ph->type = eh->h_proto; + ph->length = pack_size + sizeof (struct packet_header); + + net_msg->sent = FALSE; /* Mark packet as received. */ + + net_msg->header_type = header_type; + net_msg->packet_type = packet_type; + net_msg->net_rcv_msg_packet_count = ph->length; + deliver_msg (nd->delivery_port, net_msg); + free (net_msg); +} + +/* Mach device interface routines. */ + +/* Return a send right associated with network device ND. */ +static mach_port_t +dev_to_port (void *nd) +{ + return (nd + ? ports_get_send_right (nd) + : MACH_PORT_NULL); +} + +#if 0 +/* + * Initialize send and receive queues on an interface. + */ +void if_init_queues(ifp) + register struct ifnet *ifp; +{ + IFQ_INIT(&ifp->if_snd); + queue_init(&ifp->if_rcv_port_list); + queue_init(&ifp->if_snd_port_list); + simple_lock_init(&ifp->if_rcv_port_list_lock); + simple_lock_init(&ifp->if_snd_port_list_lock); +} +#endif + +static io_return_t +device_open (mach_port_t reply_port, mach_msg_type_name_t reply_port_type, + dev_mode_t mode, char *name, device_t *devp) +{ + io_return_t err = D_SUCCESS; + struct net_device *dev; + struct net_data *nd; +// struct ifnet *ifp; + + /* Search for the device. */ + dev = search_netdev (name); + if (!dev) + return D_NO_SUCH_DEVICE; + + /* Allocate and initialize device data if this is the first open. 
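+     Subsequent opens find the existing net_data by its Linux
+     net_device pointer and return the same device port.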
*/
+  nd = search_nd (dev);
+  if (!nd)
+    {
+      err = ports_create_port (dev_class, port_bucket,
+                               sizeof (*nd), &nd);
+      if (err)
+        goto out;
+
+      nd->dev = dev;
+      nd->device.emul_data = nd;
+      nd->device.emul_ops = &linux_net_emulation_ops;
+      nd->next = nd_head;
+      nd_head = nd;
+#if 0
+      ipc_kobject_set (nd->port, (ipc_kobject_t) & nd->device, IKOT_DEVICE);
+      notify = ipc_port_make_sonce (nd->port);
+      ip_lock (nd->port);
+      ipc_port_nsrequest (nd->port, 1, notify, &notify);
+      assert (notify == IP_NULL);
+
+      ifp = &nd->ifnet;
+      ifp->if_unit = dev->name[strlen (dev->name) - 1] - '0';
+      ifp->if_flags = IFF_UP | IFF_RUNNING;
+      ifp->if_mtu = dev->mtu;
+      ifp->if_header_size = dev->hard_header_len;
+      ifp->if_header_format = dev->type;
+      ifp->if_address_size = dev->addr_len;
+      ifp->if_address = dev->dev_addr;
+      if_init_queues (ifp);
+#endif
+
+      if (dev_open(dev) < 0)
+        err = D_NO_SUCH_DEVICE;
+
+    out:
+      if (err)
+        {
+          if (nd)
+            {
+              ports_destroy_right (nd);
+              nd = NULL;
+            }
+        }
+      else
+        {
+#if 0
+          /* IPv6 heavily relies on multicasting (especially router and
+             neighbor solicits and advertisements), so enable reception of
+             those multicast packets by setting `LINUX_IFF_ALLMULTI'. */
+          dev->flags |= LINUX_IFF_UP | LINUX_IFF_RUNNING | LINUX_IFF_ALLMULTI;
+          skb_queue_head_init (&dev->buffs[0]);
+
+          if (dev->set_multicast_list)
+            dev->set_multicast_list (dev);
+#endif
+        }
+      if (MACH_PORT_VALID (reply_port))
+        ds_device_open_reply (reply_port, reply_port_type,
+                              err, dev_to_port (nd));
+      return MIG_NO_REPLY;
+    }
+
+  *devp = ports_get_right (nd);
+  return D_SUCCESS;
+}
+
+static io_return_t
+device_write (void *d, mach_port_t reply_port,
+              mach_msg_type_name_t reply_port_type, dev_mode_t mode,
+              recnum_t bn, io_buf_ptr_t data, unsigned int count,
+              int *bytes_written)
+{
+  struct net_data *nd = d;
+  struct net_device *dev = nd->dev;
+  struct skb_reply *skb_reply = malloc (sizeof (*skb_reply));
+
+  if (skb_reply == NULL)
+    return D_NO_MEMORY;
+
+  skb_reply->pkglen = count;
+  skb_reply->reply = reply_port;
+  skb_reply->reply_type = reply_port_type;
+
+  linux_pkg_xmit (data, count, skb_reply, pre_kfree_skb, dev);
+  vm_deallocate (mach_task_self (), (vm_address_t) data, count);
+
+  /* Send packet to filters. */
+  // TODO should I deliver the packet to other network stacks?
+#if 0
+  {
+    struct packet_header *packet;
+    struct ether_header *header;
+    ipc_kmsg_t kmsg;
+
+    kmsg = net_kmsg_get ();
+
+    if (kmsg != IKM_NULL)
+      {
+        /* Suitable for Ethernet only. */
+        header = (struct ether_header *) (net_kmsg (kmsg)->header);
+        packet = (struct packet_header *) (net_kmsg (kmsg)->packet);
+        memcpy (header, skb->data, sizeof (struct ether_header));
+
+        /* packet is prefixed with a struct packet_header,
+           see include/device/net_status.h. */
+        memcpy (packet + 1, skb->data + sizeof (struct ether_header),
+                skb->len - sizeof (struct ether_header));
+        packet->length = skb->len - sizeof (struct ether_header)
+                         + sizeof (struct packet_header);
+        packet->type = header->ether_type;
+        net_kmsg (kmsg)->sent = TRUE;  /* Mark packet as sent.
*/ + s = splimp (); + net_packet (&dev->net_data->ifnet, kmsg, packet->length, + ethernet_priority (kmsg)); + splx (s); + } + } +#endif + + return MIG_NO_REPLY; +} + +/* + * Other network operations + */ +io_return_t +net_getstat(dev, flavor, status, count) + struct net_device *dev; + dev_flavor_t flavor; + dev_status_t status; /* pointer to OUT array */ + natural_t *count; /* OUT */ +{ +#define ETHERMTU 1500 + switch (flavor) { + case NET_STATUS: + { + register struct net_status *ns = (struct net_status *)status; + + if (*count < NET_STATUS_COUNT) + return (D_INVALID_OPERATION); + + ns->min_packet_size = 60; + ns->max_packet_size = ETH_HLEN + ETHERMTU; + ns->header_format = HDR_ETHERNET; + ns->header_size = ETH_HLEN; + ns->address_size = ETH_ALEN; + ns->flags = 0; + ns->mapped_size = 0; + + *count = NET_STATUS_COUNT; + break; + } + case NET_ADDRESS: + { + register int addr_byte_count; + register int addr_int_count; + register int i; + + addr_byte_count = ETH_ALEN; + addr_int_count = (addr_byte_count + (sizeof(int)-1)) + / sizeof(int); + + if (*count < addr_int_count) + { + /* XXX debug hack. */ + printf ("net_getstat: count: %d, addr_int_count: %d\n", + *count, addr_int_count); + return (D_INVALID_OPERATION); + } + + memcpy(status, netdev_addr(dev), addr_byte_count); + if (addr_byte_count < addr_int_count * sizeof(int)) + memset((char *)status + addr_byte_count, 0, + (addr_int_count * sizeof(int) + - addr_byte_count)); + + for (i = 0; i < addr_int_count; i++) { + register int word; + + word = status[i]; + status[i] = htonl(word); + } + *count = addr_int_count; + break; + } + default: + return (D_INVALID_OPERATION); + } + return (D_SUCCESS); +} + +static io_return_t +device_get_status (void *d, dev_flavor_t flavor, dev_status_t status, + mach_msg_type_number_t *count) +{ + struct net_data *net = (struct net_data *) d; + + if (flavor == NET_FLAGS) + { + if (*count != sizeof(short)) + return D_INVALID_SIZE; + + *(short *) status = netdev_flags (net->dev); + return D_SUCCESS; + } + +#if 0 + if(flavor >= SIOCIWFIRST && flavor <= SIOCIWLAST) + { + /* handle wireless ioctl */ + if(! IW_IS_GET(flavor)) + return D_INVALID_OPERATION; + + if(*count * sizeof(int) < sizeof(struct ifreq)) + return D_INVALID_OPERATION; + + struct net_data *nd = d; + struct linux_device *dev = nd->dev; + + if(! dev->do_ioctl) + return D_INVALID_OPERATION; + + int result; + + if (flavor == SIOCGIWRANGE || flavor == SIOCGIWENCODE + || flavor == SIOCGIWESSID || flavor == SIOCGIWNICKN + || flavor == SIOCGIWSPY) + { + /* + * These ioctls require an `iw_point' as their argument (i.e. + * they want to return some data to userspace. + * Therefore supply some sane values and carry the data back + * to userspace right behind the `struct iwreq'. + */ + struct iw_point *iwp = &((struct iwreq *) status)->u.data; + iwp->length = *count * sizeof (dev_status_t) - sizeof (struct ifreq); + iwp->pointer = (void *) status + sizeof (struct ifreq); + + result = dev->do_ioctl (dev, (struct ifreq *) status, flavor); + + *count = ((sizeof (struct ifreq) + iwp->length) + / sizeof (dev_status_t)); + if (iwp->length % sizeof (dev_status_t)) + (*count) ++; + } + else + { + *count = sizeof(struct ifreq) / sizeof(int); + result = dev->do_ioctl(dev, (struct ifreq *) status, flavor); + } + + return result ? 
D_IO_ERROR : D_SUCCESS; + } + else +#endif + { + /* common get_status request */ + return net_getstat (net->dev, flavor, status, count); + } +} + +static io_return_t +device_set_status(void *d, dev_flavor_t flavor, dev_status_t status, + mach_msg_type_number_t count) +{ + if (flavor == NET_FLAGS) + { + if (count != sizeof(short)) + return D_INVALID_SIZE; + + short flags = *(short *) status; + struct net_data *net = (struct net_data *) d; + + dev_change_flags (net->dev, flags); + + return D_SUCCESS; + } + return D_INVALID_OPERATION; + +#if 0 + if(flavor < SIOCIWFIRST || flavor > SIOCIWLAST) + return D_INVALID_OPERATION; + + if(! IW_IS_SET(flavor)) + return D_INVALID_OPERATION; + + if(count * sizeof(int) < sizeof(struct ifreq)) + return D_INVALID_OPERATION; + + struct net_data *nd = d; + struct linux_device *dev = nd->dev; + + if(! dev->do_ioctl) + return D_INVALID_OPERATION; + + if((flavor == SIOCSIWENCODE || flavor == SIOCSIWESSID + || flavor == SIOCSIWNICKN || flavor == SIOCSIWSPY) + && ((struct iwreq *) status)->u.data.pointer) + { + struct iw_point *iwp = &((struct iwreq *) status)->u.data; + + /* safety check whether the status array is long enough ... */ + if(count * sizeof(int) < sizeof(struct ifreq) + iwp->length) + return D_INVALID_OPERATION; + + /* make sure, iwp->pointer points to the correct address */ + if(iwp->pointer) iwp->pointer = (void *) status + sizeof(struct ifreq); + } + + int result = dev->do_ioctl(dev, (struct ifreq *) status, flavor); + return result ? D_IO_ERROR : D_SUCCESS; +#endif +} + + +static io_return_t +device_set_filter (void *d, mach_port_t port, int priority, + filter_t * filter, unsigned filter_count) +{ + ((struct net_data *) d)->delivery_port = port; + return 0; +#if 0 + return net_set_filter (&((struct net_data *) d)->ifnet, + port, priority, filter, filter_count); +#endif +} + +/* Do any initialization required for network devices. */ +void linux_net_emulation_init () +{ + skb_done_head_init(); + l4dde26_register_rx_callback(netif_rx_handle); +} + +struct device_emulation_ops linux_net_emulation_ops = +{ + linux_net_emulation_init, + NULL, + NULL, + dev_to_port, + device_open, + NULL, + device_write, + NULL, + NULL, + NULL, + device_set_status, + device_get_status, + device_set_filter, + NULL, + NULL, + NULL, + NULL +}; diff --git a/libmachdev/notify.defs b/libmachdev/notify.defs new file mode 100644 index 00000000..2014be5c --- /dev/null +++ b/libmachdev/notify.defs @@ -0,0 +1 @@ +#include <mach/notify.defs> diff --git a/libmachdev/queue.c b/libmachdev/queue.c new file mode 100644 index 00000000..a43a21b0 --- /dev/null +++ b/libmachdev/queue.c @@ -0,0 +1,131 @@ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. 
+ * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon + * the rights to redistribute these changes. + */ +/* + * Routines to implement queue package. + */ + +#include "queue.h" + + + +/* + * Insert element at head of queue. + */ +void enqueue_head( + register queue_t que, + register queue_entry_t elt) +{ + elt->next = que->next; + elt->prev = que; + elt->next->prev = elt; + que->next = elt; +} + +/* + * Insert element at tail of queue. + */ +void enqueue_tail( + register queue_t que, + register queue_entry_t elt) +{ + elt->next = que; + elt->prev = que->prev; + elt->prev->next = elt; + que->prev = elt; +} + +/* + * Remove and return element at head of queue. + */ +queue_entry_t dequeue_head( + register queue_t que) +{ + register queue_entry_t elt; + + if (que->next == que) + return((queue_entry_t)0); + + elt = que->next; + elt->next->prev = que; + que->next = elt->next; + return(elt); +} + +/* + * Remove and return element at tail of queue. + */ +queue_entry_t dequeue_tail( + register queue_t que) +{ + register queue_entry_t elt; + + if (que->prev == que) + return((queue_entry_t)0); + + elt = que->prev; + elt->prev->next = que; + que->prev = elt->prev; + return(elt); +} + +/* + * Remove arbitrary element from queue. + * Does not check whether element is on queue - the world + * will go haywire if it isn't. + */ + +/*ARGSUSED*/ +void remqueue( + queue_t que, + register queue_entry_t elt) +{ + elt->next->prev = elt->prev; + elt->prev->next = elt->next; +} + +/* + * Routines to directly imitate the VAX hardware queue + * package. + */ +void insque( + register struct queue_entry *entry, + register struct queue_entry *pred) +{ + entry->next = pred->next; + entry->prev = pred; + (pred->next)->prev = entry; + pred->next = entry; +} + +struct queue_entry +*remque( + register struct queue_entry *elt) +{ + (elt->next)->prev = elt->prev; + (elt->prev)->next = elt->next; + return(elt); +} + diff --git a/libmachdev/queue.h b/libmachdev/queue.h new file mode 100644 index 00000000..0637dede --- /dev/null +++ b/libmachdev/queue.h @@ -0,0 +1,370 @@ +/* + * Mach Operating System + * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University + * All Rights Reserved. + * + * Permission to use, copy, modify and distribute this software and its + * documentation is hereby granted, provided that both the copyright + * notice and this permission notice appear in all copies of the + * software, derivative works or modified versions, and any portions + * thereof, and that both notices appear in supporting documentation. + * + * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" + * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR + * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. + * + * Carnegie Mellon requests users of this software to return to + * + * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU + * School of Computer Science + * Carnegie Mellon University + * Pittsburgh PA 15213-3890 + * + * any improvements or extensions that they make and grant Carnegie Mellon rights + * to redistribute these changes. + */ +/* + * File: queue.h + * Author: Avadis Tevanian, Jr. + * Date: 1985 + * + * Type definitions for generic queues. 
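+ *	This is the Mach kernel's generic queue package, carried
+ *	here so it can be used in user space; the locked-queue
+ *	variants at the end rely on cthreads mutexes.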
+ * + */ + +#ifndef _KERN_QUEUE_H_ +#define _KERN_QUEUE_H_ + +#include <cthreads.h> + +/* + * Queue of abstract objects. Queue is maintained + * within that object. + * + * Supports fast removal from within the queue. + * + * How to declare a queue of elements of type "foo_t": + * In the "*foo_t" type, you must have a field of + * type "queue_chain_t" to hold together this queue. + * There may be more than one chain through a + * "foo_t", for use by different queues. + * + * Declare the queue as a "queue_t" type. + * + * Elements of the queue (of type "foo_t", that is) + * are referred to by reference, and cast to type + * "queue_entry_t" within this module. + */ + +/* + * A generic doubly-linked list (queue). + */ + +struct queue_entry { + struct queue_entry *next; /* next element */ + struct queue_entry *prev; /* previous element */ +}; + +typedef struct queue_entry *queue_t; +typedef struct queue_entry queue_head_t; +typedef struct queue_entry queue_chain_t; +typedef struct queue_entry *queue_entry_t; + +/* + * enqueue puts "elt" on the "queue". + * dequeue returns the first element in the "queue". + * remqueue removes the specified "elt" from the specified "queue". + */ + +#define enqueue(queue,elt) enqueue_tail(queue, elt) +#define dequeue(queue) dequeue_head(queue) + +void enqueue_head(queue_t, queue_entry_t); +void enqueue_tail(queue_t, queue_entry_t); +queue_entry_t dequeue_head(queue_t); +queue_entry_t dequeue_tail(queue_t); +void remqueue(queue_t, queue_entry_t); +void insque(queue_entry_t, queue_entry_t); + +/* + * Macro: queue_init + * Function: + * Initialize the given queue. + * Header: + * void queue_init(q) + * queue_t q; *MODIFIED* + */ +#define queue_init(q) ((q)->next = (q)->prev = q) + +/* + * Macro: queue_first + * Function: + * Returns the first entry in the queue, + * Header: + * queue_entry_t queue_first(q) + * queue_t q; *IN* + */ +#define queue_first(q) ((q)->next) + +/* + * Macro: queue_next + * Function: + * Returns the entry after an item in the queue. + * Header: + * queue_entry_t queue_next(qc) + * queue_t qc; + */ +#define queue_next(qc) ((qc)->next) + +/* + * Macro: queue_last + * Function: + * Returns the last entry in the queue. + * Header: + * queue_entry_t queue_last(q) + * queue_t q; *IN* + */ +#define queue_last(q) ((q)->prev) + +/* + * Macro: queue_prev + * Function: + * Returns the entry before an item in the queue. + * Header: + * queue_entry_t queue_prev(qc) + * queue_t qc; + */ +#define queue_prev(qc) ((qc)->prev) + +/* + * Macro: queue_end + * Function: + * Tests whether a new entry is really the end of + * the queue. + * Header: + * boolean_t queue_end(q, qe) + * queue_t q; + * queue_entry_t qe; + */ +#define queue_end(q, qe) ((q) == (qe)) + +/* + * Macro: queue_empty + * Function: + * Tests whether a queue is empty. + * Header: + * boolean_t queue_empty(q) + * queue_t q; + */ +#define queue_empty(q) queue_end((q), queue_first(q)) + + +/*----------------------------------------------------------------*/ +/* + * Macros that operate on generic structures. The queue + * chain may be at any location within the structure, and there + * may be more than one chain. + */ + +/* + * Macro: queue_enter + * Function: + * Insert a new element at the tail of the queue. 
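+ *		A minimal usage sketch (the "foo" names are purely
+ *		illustrative):
+ *			struct foo { int data; queue_chain_t link; };
+ *			queue_head_t q;
+ *			struct foo *f = ...;
+ *			queue_init(&q);
+ *			queue_enter(&q, f, struct foo *, link);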
+ * Header: + * void queue_enter(q, elt, type, field) + * queue_t q; + * <type> elt; + * <type> is what's in our queue + * <field> is the chain field in (*<type>) + */ +#define queue_enter(head, elt, type, field) \ +{ \ + register queue_entry_t prev; \ + \ + prev = (head)->prev; \ + if ((head) == prev) { \ + (head)->next = (queue_entry_t) (elt); \ + } \ + else { \ + ((type)prev)->field.next = (queue_entry_t)(elt);\ + } \ + (elt)->field.prev = prev; \ + (elt)->field.next = head; \ + (head)->prev = (queue_entry_t) elt; \ +} + +/* + * Macro: queue_enter_first + * Function: + * Insert a new element at the head of the queue. + * Header: + * void queue_enter_first(q, elt, type, field) + * queue_t q; + * <type> elt; + * <type> is what's in our queue + * <field> is the chain field in (*<type>) + */ +#define queue_enter_first(head, elt, type, field) \ +{ \ + register queue_entry_t next; \ + \ + next = (head)->next; \ + if ((head) == next) { \ + (head)->prev = (queue_entry_t) (elt); \ + } \ + else { \ + ((type)next)->field.prev = (queue_entry_t)(elt);\ + } \ + (elt)->field.next = next; \ + (elt)->field.prev = head; \ + (head)->next = (queue_entry_t) elt; \ +} + +/* + * Macro: queue_field [internal use only] + * Function: + * Find the queue_chain_t (or queue_t) for the + * given element (thing) in the given queue (head) + */ +#define queue_field(head, thing, type, field) \ + (((head) == (thing)) ? (head) : &((type)(thing))->field) + +/* + * Macro: queue_remove + * Function: + * Remove an arbitrary item from the queue. + * Header: + * void queue_remove(q, qe, type, field) + * arguments as in queue_enter + */ +#define queue_remove(head, elt, type, field) \ +{ \ + register queue_entry_t next, prev; \ + \ + next = (elt)->field.next; \ + prev = (elt)->field.prev; \ + \ + if ((head) == next) \ + (head)->prev = prev; \ + else \ + ((type)next)->field.prev = prev; \ + \ + if ((head) == prev) \ + (head)->next = next; \ + else \ + ((type)prev)->field.next = next; \ +} + +/* + * Macro: queue_remove_first + * Function: + * Remove and return the entry at the head of + * the queue. + * Header: + * queue_remove_first(head, entry, type, field) + * entry is returned by reference + */ +#define queue_remove_first(head, entry, type, field) \ +{ \ + register queue_entry_t next; \ + \ + (entry) = (type) ((head)->next); \ + next = (entry)->field.next; \ + \ + if ((head) == next) \ + (head)->prev = (head); \ + else \ + ((type)(next))->field.prev = (head); \ + (head)->next = next; \ +} + +/* + * Macro: queue_remove_last + * Function: + * Remove and return the entry at the tail of + * the queue. + * Header: + * queue_remove_last(head, entry, type, field) + * entry is returned by reference + */ +#define queue_remove_last(head, entry, type, field) \ +{ \ + register queue_entry_t prev; \ + \ + (entry) = (type) ((head)->prev); \ + prev = (entry)->field.prev; \ + \ + if ((head) == prev) \ + (head)->next = (head); \ + else \ + ((type)(prev))->field.next = (head); \ + (head)->prev = prev; \ +} + +/* + * Macro: queue_assign + */ +#define queue_assign(to, from, type, field) \ +{ \ + ((type)((from)->prev))->field.next = (to); \ + ((type)((from)->next))->field.prev = (to); \ + *to = *from; \ +} + +/* + * Macro: queue_iterate + * Function: + * iterate over each item in the queue. + * Generates a 'for' loop, setting elt to + * each item in turn (by reference). 
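+ *	For example, with the illustrative "foo" type above:
+ *		struct foo *f;
+ *		queue_iterate(&q, f, struct foo *, link)
+ *			f->data++;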
+ * Header: + * queue_iterate(q, elt, type, field) + * queue_t q; + * <type> elt; + * <type> is what's in our queue + * <field> is the chain field in (*<type>) + */ +#define queue_iterate(head, elt, type, field) \ + for ((elt) = (type) queue_first(head); \ + !queue_end((head), (queue_entry_t)(elt)); \ + (elt) = (type) queue_next(&(elt)->field)) + + + +/*----------------------------------------------------------------*/ +/* + * Define macros for queues with locks. + */ +struct mpqueue_head { + struct queue_entry head; /* header for queue */ + struct mutex lock; /* lock for queue */ +}; + +typedef struct mpqueue_head mpqueue_head_t; + +#define round_mpq(size) (size) + +#define mpqueue_init(q) \ + { \ + queue_init(&(q)->head); \ + mutex_init(&(q)->lock); \ + } + +#define mpenqueue_tail(q, elt) \ + mutex_lock(&(q)->lock); \ + enqueue_tail(&(q)->head, elt); \ + mutex_unlock(&(q)->lock); + +#define mpdequeue_head(q, elt) \ + mutex_lock(&(q)->lock); \ + if (queue_empty(&(q)->head)) \ + *(elt) = 0; \ + else \ + *(elt) = dequeue_head(&(q)->head); \ + mutex_unlock(&(q)->lock); + +/* + * Old queue stuff, will go away soon. + */ + +#endif /* _KERN_QUEUE_H_ */ diff --git a/libmachdev/trivfs_server.c b/libmachdev/trivfs_server.c new file mode 100644 index 00000000..74b8e6b7 --- /dev/null +++ b/libmachdev/trivfs_server.c @@ -0,0 +1,154 @@ +#include <stdio.h> +#include <fcntl.h> +#include <pciaccess.h> +#include <error.h> +#include <hurd/ports.h> +#include <hurd/trivfs.h> +#include <hurd.h> + +extern struct port_bucket *port_bucket; + +/* Trivfs hooks. */ +int trivfs_fstype = FSTYPE_MISC; +int trivfs_fsid = 0; +int trivfs_support_read = 0; +int trivfs_support_write = 0; +int trivfs_support_exec = 0; +int trivfs_allow_open = O_READ | O_WRITE; + +struct port_class *trivfs_protid_portclasses[1]; +struct port_class *trivfs_cntl_portclasses[1]; +int trivfs_protid_nportclasses = 1; +int trivfs_cntl_nportclasses = 1; + +/* Implementation of notify interface */ +kern_return_t +do_mach_notify_port_deleted (mach_port_t notify, + mach_port_t name) +{ + return EOPNOTSUPP; +} + +kern_return_t +do_mach_notify_msg_accepted (mach_port_t notify, + mach_port_t name) +{ + return EOPNOTSUPP; +} + +kern_return_t +do_mach_notify_port_destroyed (mach_port_t notify, + mach_port_t port) +{ + return EOPNOTSUPP; +} + +kern_return_t +do_mach_notify_no_senders (mach_port_t notify, + mach_port_mscount_t mscount) +{ + return ports_do_mach_notify_no_senders (notify, mscount); +} + +kern_return_t +do_mach_notify_send_once (mach_port_t notify) +{ + return EOPNOTSUPP; +} + +kern_return_t +do_mach_notify_dead_name (mach_port_t notify, + mach_port_t name) +{ + return EOPNOTSUPP; +} + +boolean_t +is_master_device (mach_port_t port) +{ + struct port_info *pi = ports_lookup_port (port_bucket, port, + trivfs_protid_portclasses[0]); + if (pi == NULL) + return FALSE; + + ports_port_deref (pi); + return TRUE; +} + +int trivfs_init() +{ + trivfs_cntl_portclasses[0] = ports_create_class (trivfs_clean_cntl, 0); + trivfs_protid_portclasses[0] = ports_create_class (trivfs_clean_protid, 0); + return 0; +} + +error_t +trivfs_goaway (struct trivfs_control *fsys, int flags) +{ + int count; + + fprintf (stderr, "check point 1\n"); + /* Stop new requests. 
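+      If protid ports are still live and FSYS_GOAWAY_FORCE is not
+      set, the classes are re-enabled below and EBUSY is returned.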
*/ + ports_inhibit_class_rpcs (trivfs_cntl_portclasses[0]); + ports_inhibit_class_rpcs (trivfs_protid_portclasses[0]); + + count = ports_count_class (trivfs_protid_portclasses[0]); + + fprintf (stderr, "check point 2\n"); + if (count && !(flags & FSYS_GOAWAY_FORCE)) + { + fprintf (stderr, "check point 4\n"); + /* We won't go away, so start things going again... */ + ports_enable_class (trivfs_protid_portclasses[0]); + ports_resume_class_rpcs (trivfs_cntl_portclasses[0]); + ports_resume_class_rpcs (trivfs_protid_portclasses[0]); + fprintf (stderr, "check point 5\n"); + return EBUSY; + } + + fprintf (stderr, "check point 3\n"); + pci_system_cleanup (); + fprintf (stderr, "trivfs goes away\n"); + exit (0); +} + +static int +demuxer (mach_msg_header_t *inp, mach_msg_header_t *outp) +{ + extern int device_server (mach_msg_header_t *, mach_msg_header_t *); + extern int notify_server (mach_msg_header_t *, mach_msg_header_t *); + return device_server (inp, outp) || notify_server (inp, outp) + || trivfs_demuxer (inp, outp); +} + +void +trivfs_modify_stat (struct trivfs_protid *cred, io_statbuf_t *stat) +{ +} + +void trivfs_server() +{ + mach_port_t bootstrap; + struct trivfs_control *fsys; + int err; + + task_get_bootstrap_port (mach_task_self (), &bootstrap); + if (bootstrap == MACH_PORT_NULL) + error (1, 0, "must be started as a translator"); + + fprintf (stderr, "after get bootstrap port\n"); + /* Reply to our parent. */ + err = trivfs_startup (bootstrap, 0, + trivfs_cntl_portclasses[0], port_bucket, + trivfs_protid_portclasses[0], port_bucket, &fsys); + mach_port_deallocate (mach_task_self (), bootstrap); + if (err) + error (1, err, "Contacting parent"); + fprintf (stderr, "start trivfs\n"); + + /* Launch. */ + do + { + ports_manage_port_operations_one_thread (port_bucket, demuxer, 0); + } while (trivfs_goaway (fsys, 0)); +} diff --git a/libmachdev/util.h b/libmachdev/util.h new file mode 100644 index 00000000..6fb1db28 --- /dev/null +++ b/libmachdev/util.h @@ -0,0 +1,33 @@ +#ifndef __UTIL_H__ +#define __UTIL_H__ + +#include <stdio.h> + +#define panic(format, ...) do \ +{ \ + char buf[1024]; \ + snprintf (buf, 1024, "devnode: %s", format); \ + fprintf (stderr , buf, ## __VA_ARGS__); \ + fflush (stderr); \ + abort (); \ +} while (0) + +#define DEBUG + +#ifdef DEBUG + +#define debug(format, ...) do \ +{ \ + char buf[1024]; \ + snprintf (buf, 1024, "pcnet32: %s: %s\n", __func__, format); \ + fprintf (stderr , buf, ## __VA_ARGS__); \ + fflush (stderr); \ +} while (0) + +#else + +#define debug(format, ...) do {} while (0) + +#endif + +#endif diff --git a/libmachdev/vm_param.h b/libmachdev/vm_param.h new file mode 100644 index 00000000..7b615c8a --- /dev/null +++ b/libmachdev/vm_param.h @@ -0,0 +1,7 @@ +#ifndef __VM_PARAM_H__ +#define __VM_PARAM_H__ + +#define PAGE_SIZE __vm_page_size +#define PAGE_MASK (PAGE_SIZE-1) + +#endif |