author    Thomas Bushnell <thomas@gnu.org>    1999-04-26 05:58:44 +0000
committer Thomas Bushnell <thomas@gnu.org>    1999-04-26 05:58:44 +0000
commit    86297c41a26f18d924e64fc93321c59cbc4c48dd (patch)
tree      376954c6b95b735d361875319a1a2a9db6a27527 /linux/dev
parent    851137902d3e7ad87af177487df3eea53e940a1c (diff)
1998-11-30 OKUJI Yoshinori <okuji@kuicr.kyoto-u.ac.jp>
Clean up linux emulation code to make it architecture-independent
as much as possible.

* linux: Renamed from linuxdev.
* Makefile.in (objfiles): Add linux.o instead of linuxdev.o.
(MAKE): New variable. Used for the linux.o target.
* configure.in: Add AC_CHECK_TOOL(MAKE, make).
* i386/i386/spl.h: Include <i386/ipl.h>, for compatibility with
OSF Mach 3.0. Suggested by Elgin Lee <ehl@funghi.com>.
* linux/src: Renamed from linux/linux.
* linux/dev: Renamed from linux/mach.
* linux/Drivers.in (AC_INIT): Use dev/include/linux/autoconf.h,
instead of mach/include/linux/autoconf.h.
* Makefile.in (all): Target ../linux.o instead of ../linuxdev.o.
* linux/dev/drivers/block/genhd.c: Include <machine/spl.h>
instead of <i386/ipl.h>.
* linux/dev/drivers/net/auto_irq.c: Remove unneeded header files,
<i386/ipl.h> and <i386/pic.h>.
* linux/dev/init/main.c: Much i386-dependent code moved to ...
* linux/dev/arch/i386/irq.c: ... here.
* linux/dev/arch/i386/setup.c: New file.
* linux/dev/arch/i386/linux_emul.h: Likewise.
* linux/dev/arch/i386/glue/timer.c: Merged into sched.c.
* linux/dev/arch/i386/glue/sched.c: Include <machine/spl.h>
instead of <i386/ipl.h>, and moved to ...
* linux/dev/kernel/sched.c: ... here.
* linux/dev/arch/i386/glue/block.c: Include <machine/spl.h> and
<linux_emul.h>, instead of i386-dependent header files, and
moved to ...
* linux/dev/glue/block.c: ... here.
* linux/dev/arch/i386/glue/net.c: Include <machine/spl.h> and
<linux_emul.h>, instead of i386-dependent header files, and
moved to ...
* linux/dev/glue/net.c: ... here.
* linux/dev/arch/i386/glue/misc.c: Remove `x86' and moved to ...
* linux/dev/glue/misc.c: ... here.
* linux/dev/arch/i386/glue/kmem.c: Moved to ...
* linux/dev/glue/kmem.c: ... here.
Diffstat (limited to 'linux/dev')
-rw-r--r--  linux/dev/include/asm-i386/errno.h     |  266
-rw-r--r--  linux/dev/include/asm-i386/page.h      |   64
-rw-r--r--  linux/dev/include/asm-i386/segment.h   |  351
-rw-r--r--  linux/dev/include/asm-i386/string.h    |  631
-rw-r--r--  linux/dev/include/asm-i386/system.h    |  336
-rw-r--r--  linux/dev/include/linux/blk.h          |  467
-rw-r--r--  linux/dev/include/linux/blkdev.h       |   72
-rw-r--r--  linux/dev/include/linux/compile.h      |    6
-rw-r--r--  linux/dev/include/linux/etherdevice.h  |   62
-rw-r--r--  linux/dev/include/linux/fs.h           |  803
-rw-r--r--  linux/dev/include/linux/genhd.h        |  141
-rw-r--r--  linux/dev/include/linux/if.h           |  184
-rw-r--r--  linux/dev/include/linux/kernel.h       |  107
-rw-r--r--  linux/dev/include/linux/locks.h        |   66
-rw-r--r--  linux/dev/include/linux/malloc.h       |   17
-rw-r--r--  linux/dev/include/linux/mm.h           |  377
-rw-r--r--  linux/dev/include/linux/netdevice.h    |  338
-rw-r--r--  linux/dev/include/linux/nfs.h          |  175
-rw-r--r--  linux/dev/include/linux/notifier.h     |  100
-rw-r--r--  linux/dev/include/linux/pagemap.h      |  150
-rw-r--r--  linux/dev/include/linux/proc_fs.h      |  292
-rw-r--r--  linux/dev/include/linux/sched.h        |  521
-rw-r--r--  linux/dev/include/linux/skbuff.h       |  470
-rw-r--r--  linux/dev/include/linux/types.h        |  112
-rw-r--r--  linux/dev/lib/vsprintf.c               |  350
-rw-r--r--  linux/dev/net/core/dev.c               | 1620
26 files changed, 8078 insertions(+), 0 deletions(-)
diff --git a/linux/dev/include/asm-i386/errno.h b/linux/dev/include/asm-i386/errno.h
new file mode 100644
index 0000000..1683367
--- /dev/null
+++ b/linux/dev/include/asm-i386/errno.h
@@ -0,0 +1,266 @@
+#ifndef _I386_ERRNO_H
+#define _I386_ERRNO_H
+
+#ifdef MACH_INCLUDE
+
+#define LINUX_EPERM 1 /* Operation not permitted */
+#define LINUX_ENOENT 2 /* No such file or directory */
+#define LINUX_ESRCH 3 /* No such process */
+#define LINUX_EINTR 4 /* Interrupted system call */
+#define LINUX_EIO 5 /* I/O error */
+#define LINUX_ENXIO 6 /* No such device or address */
+#define LINUX_E2BIG 7 /* Arg list too long */
+#define LINUX_ENOEXEC 8 /* Exec format error */
+#define LINUX_EBADF 9 /* Bad file number */
+#define LINUX_ECHILD 10 /* No child processes */
+#define LINUX_EAGAIN 11 /* Try again */
+#define LINUX_ENOMEM 12 /* Out of memory */
+#define LINUX_EACCES 13 /* Permission denied */
+#define LINUX_EFAULT 14 /* Bad address */
+#define LINUX_ENOTBLK 15 /* Block device required */
+#define LINUX_EBUSY 16 /* Device or resource busy */
+#define LINUX_EEXIST 17 /* File exists */
+#define LINUX_EXDEV 18 /* Cross-device link */
+#define LINUX_ENODEV 19 /* No such device */
+#define LINUX_ENOTDIR 20 /* Not a directory */
+#define LINUX_EISDIR 21 /* Is a directory */
+#define LINUX_EINVAL 22 /* Invalid argument */
+#define LINUX_ENFILE 23 /* File table overflow */
+#define LINUX_EMFILE 24 /* Too many open files */
+#define LINUX_ENOTTY 25 /* Not a typewriter */
+#define LINUX_ETXTBSY 26 /* Text file busy */
+#define LINUX_EFBIG 27 /* File too large */
+#define LINUX_ENOSPC 28 /* No space left on device */
+#define LINUX_ESPIPE 29 /* Illegal seek */
+#define LINUX_EROFS 30 /* Read-only file system */
+#define LINUX_EMLINK 31 /* Too many links */
+#define LINUX_EPIPE 32 /* Broken pipe */
+#define LINUX_EDOM 33 /* Math argument out of domain of func */
+#define LINUX_ERANGE 34 /* Math result not representable */
+#define LINUX_EDEADLK 35 /* Resource deadlock would occur */
+#define LINUX_ENAMETOOLONG 36 /* File name too long */
+#define LINUX_ENOLCK 37 /* No record locks available */
+#define LINUX_ENOSYS 38 /* Function not implemented */
+#define LINUX_ENOTEMPTY 39 /* Directory not empty */
+#define LINUX_ELOOP 40 /* Too many symbolic links encountered */
+#define LINUX_EWOULDBLOCK LINUX_EAGAIN /* Operation would block */
+#define LINUX_ENOMSG 42 /* No message of desired type */
+#define LINUX_EIDRM 43 /* Identifier removed */
+#define LINUX_ECHRNG 44 /* Channel number out of range */
+#define LINUX_EL2NSYNC 45 /* Level 2 not synchronized */
+#define LINUX_EL3HLT 46 /* Level 3 halted */
+#define LINUX_EL3RST 47 /* Level 3 reset */
+#define LINUX_ELNRNG 48 /* Link number out of range */
+#define LINUX_EUNATCH 49 /* Protocol driver not attached */
+#define LINUX_ENOCSI 50 /* No CSI structure available */
+#define LINUX_EL2HLT 51 /* Level 2 halted */
+#define LINUX_EBADE 52 /* Invalid exchange */
+#define LINUX_EBADR 53 /* Invalid request descriptor */
+#define LINUX_EXFULL 54 /* Exchange full */
+#define LINUX_ENOANO 55 /* No anode */
+#define LINUX_EBADRQC 56 /* Invalid request code */
+#define LINUX_EBADSLT 57 /* Invalid slot */
+
+#define LINUX_EDEADLOCK LINUX_EDEADLK
+
+#define LINUX_EBFONT 59 /* Bad font file format */
+#define LINUX_ENOSTR 60 /* Device not a stream */
+#define LINUX_ENODATA 61 /* No data available */
+#define LINUX_ETIME 62 /* Timer expired */
+#define LINUX_ENOSR 63 /* Out of streams resources */
+#define LINUX_ENONET 64 /* Machine is not on the network */
+#define LINUX_ENOPKG 65 /* Package not installed */
+#define LINUX_EREMOTE 66 /* Object is remote */
+#define LINUX_ENOLINK 67 /* Link has been severed */
+#define LINUX_EADV 68 /* Advertise error */
+#define LINUX_ESRMNT 69 /* Srmount error */
+#define LINUX_ECOMM 70 /* Communication error on send */
+#define LINUX_EPROTO 71 /* Protocol error */
+#define LINUX_EMULTIHOP 72 /* Multihop attempted */
+#define LINUX_EDOTDOT 73 /* RFS specific error */
+#define LINUX_EBADMSG 74 /* Not a data message */
+#define LINUX_EOVERFLOW 75 /* Value too large for defined data type */
+#define LINUX_ENOTUNIQ 76 /* Name not unique on network */
+#define LINUX_EBADFD 77 /* File descriptor in bad state */
+#define LINUX_EREMCHG 78 /* Remote address changed */
+#define LINUX_ELIBACC 79 /* Can not access a needed shared library */
+#define LINUX_ELIBBAD 80 /* Accessing a corrupted shared library */
+#define LINUX_ELIBSCN 81 /* .lib section in a.out corrupted */
+#define LINUX_ELIBMAX 82 /* Attempting to link in too many shared libraries */
+#define LINUX_ELIBEXEC 83 /* Cannot exec a shared library directly */
+#define LINUX_EILSEQ 84 /* Illegal byte sequence */
+#define LINUX_ERESTART 85 /* Interrupted system call should be restarted */
+#define LINUX_ESTRPIPE 86 /* Streams pipe error */
+#define LINUX_EUSERS 87 /* Too many users */
+#define LINUX_ENOTSOCK 88 /* Socket operation on non-socket */
+#define LINUX_EDESTADDRREQ 89 /* Destination address required */
+#define LINUX_EMSGSIZE 90 /* Message too long */
+#define LINUX_EPROTOTYPE 91 /* Protocol wrong type for socket */
+#define LINUX_ENOPROTOOPT 92 /* Protocol not available */
+#define LINUX_EPROTONOSUPPORT 93 /* Protocol not supported */
+#define LINUX_ESOCKTNOSUPPORT 94 /* Socket type not supported */
+#define LINUX_EOPNOTSUPP 95 /* Operation not supported on transport endpoint */
+#define LINUX_EPFNOSUPPORT 96 /* Protocol family not supported */
+#define LINUX_EAFNOSUPPORT 97 /* Address family not supported by protocol */
+#define LINUX_EADDRINUSE 98 /* Address already in use */
+#define LINUX_EADDRNOTAVAIL 99 /* Cannot assign requested address */
+#define LINUX_ENETDOWN 100 /* Network is down */
+#define LINUX_ENETUNREACH 101 /* Network is unreachable */
+#define LINUX_ENETRESET 102 /* Network dropped connection because of reset */
+#define LINUX_ECONNABORTED 103 /* Software caused connection abort */
+#define LINUX_ECONNRESET 104 /* Connection reset by peer */
+#define LINUX_ENOBUFS 105 /* No buffer space available */
+#define LINUX_EISCONN 106 /* Transport endpoint is already connected */
+#define LINUX_ENOTCONN 107 /* Transport endpoint is not connected */
+#define LINUX_ESHUTDOWN 108 /* Cannot send after transport endpoint shutdown */
+#define LINUX_ETOOMANYREFS 109 /* Too many references: cannot splice */
+#define LINUX_ETIMEDOUT 110 /* Connection timed out */
+#define LINUX_ECONNREFUSED 111 /* Connection refused */
+#define LINUX_EHOSTDOWN 112 /* Host is down */
+#define LINUX_EHOSTUNREACH 113 /* No route to host */
+#define LINUX_EALREADY 114 /* Operation already in progress */
+#define LINUX_EINPROGRESS 115 /* Operation now in progress */
+#define LINUX_ESTALE 116 /* Stale NFS file handle */
+#define LINUX_EUCLEAN 117 /* Structure needs cleaning */
+#define LINUX_ENOTNAM 118 /* Not a XENIX named type file */
+#define LINUX_ENAVAIL 119 /* No XENIX semaphores available */
+#define LINUX_EISNAM 120 /* Is a named type file */
+#define LINUX_EREMOTEIO 121 /* Remote I/O error */
+#define LINUX_EDQUOT 122 /* Quota exceeded */
+
+#define LINUX_ENOMEDIUM 123 /* No medium found */
+#define LINUX_EMEDIUMTYPE 124 /* Wrong medium type */
+
+#else /* !MACH_INCLUDE */
+
+#define EPERM 1 /* Operation not permitted */
+#define ENOENT 2 /* No such file or directory */
+#define ESRCH 3 /* No such process */
+#define EINTR 4 /* Interrupted system call */
+#define EIO 5 /* I/O error */
+#define ENXIO 6 /* No such device or address */
+#define E2BIG 7 /* Arg list too long */
+#define ENOEXEC 8 /* Exec format error */
+#define EBADF 9 /* Bad file number */
+#define ECHILD 10 /* No child processes */
+#define EAGAIN 11 /* Try again */
+#define ENOMEM 12 /* Out of memory */
+#define EACCES 13 /* Permission denied */
+#define EFAULT 14 /* Bad address */
+#define ENOTBLK 15 /* Block device required */
+#define EBUSY 16 /* Device or resource busy */
+#define EEXIST 17 /* File exists */
+#define EXDEV 18 /* Cross-device link */
+#define ENODEV 19 /* No such device */
+#define ENOTDIR 20 /* Not a directory */
+#define EISDIR 21 /* Is a directory */
+#define EINVAL 22 /* Invalid argument */
+#define ENFILE 23 /* File table overflow */
+#define EMFILE 24 /* Too many open files */
+#define ENOTTY 25 /* Not a typewriter */
+#define ETXTBSY 26 /* Text file busy */
+#define EFBIG 27 /* File too large */
+#define ENOSPC 28 /* No space left on device */
+#define ESPIPE 29 /* Illegal seek */
+#define EROFS 30 /* Read-only file system */
+#define EMLINK 31 /* Too many links */
+#define EPIPE 32 /* Broken pipe */
+#define EDOM 33 /* Math argument out of domain of func */
+#define ERANGE 34 /* Math result not representable */
+#define EDEADLK 35 /* Resource deadlock would occur */
+#define ENAMETOOLONG 36 /* File name too long */
+#define ENOLCK 37 /* No record locks available */
+#define ENOSYS 38 /* Function not implemented */
+#define ENOTEMPTY 39 /* Directory not empty */
+#define ELOOP 40 /* Too many symbolic links encountered */
+#define EWOULDBLOCK EAGAIN /* Operation would block */
+#define ENOMSG 42 /* No message of desired type */
+#define EIDRM 43 /* Identifier removed */
+#define ECHRNG 44 /* Channel number out of range */
+#define EL2NSYNC 45 /* Level 2 not synchronized */
+#define EL3HLT 46 /* Level 3 halted */
+#define EL3RST 47 /* Level 3 reset */
+#define ELNRNG 48 /* Link number out of range */
+#define EUNATCH 49 /* Protocol driver not attached */
+#define ENOCSI 50 /* No CSI structure available */
+#define EL2HLT 51 /* Level 2 halted */
+#define EBADE 52 /* Invalid exchange */
+#define EBADR 53 /* Invalid request descriptor */
+#define EXFULL 54 /* Exchange full */
+#define ENOANO 55 /* No anode */
+#define EBADRQC 56 /* Invalid request code */
+#define EBADSLT 57 /* Invalid slot */
+
+#define EDEADLOCK EDEADLK
+
+#define EBFONT 59 /* Bad font file format */
+#define ENOSTR 60 /* Device not a stream */
+#define ENODATA 61 /* No data available */
+#define ETIME 62 /* Timer expired */
+#define ENOSR 63 /* Out of streams resources */
+#define ENONET 64 /* Machine is not on the network */
+#define ENOPKG 65 /* Package not installed */
+#define EREMOTE 66 /* Object is remote */
+#define ENOLINK 67 /* Link has been severed */
+#define EADV 68 /* Advertise error */
+#define ESRMNT 69 /* Srmount error */
+#define ECOMM 70 /* Communication error on send */
+#define EPROTO 71 /* Protocol error */
+#define EMULTIHOP 72 /* Multihop attempted */
+#define EDOTDOT 73 /* RFS specific error */
+#define EBADMSG 74 /* Not a data message */
+#define EOVERFLOW 75 /* Value too large for defined data type */
+#define ENOTUNIQ 76 /* Name not unique on network */
+#define EBADFD 77 /* File descriptor in bad state */
+#define EREMCHG 78 /* Remote address changed */
+#define ELIBACC 79 /* Can not access a needed shared library */
+#define ELIBBAD 80 /* Accessing a corrupted shared library */
+#define ELIBSCN 81 /* .lib section in a.out corrupted */
+#define ELIBMAX 82 /* Attempting to link in too many shared libraries */
+#define ELIBEXEC 83 /* Cannot exec a shared library directly */
+#define EILSEQ 84 /* Illegal byte sequence */
+#define ERESTART 85 /* Interrupted system call should be restarted */
+#define ESTRPIPE 86 /* Streams pipe error */
+#define EUSERS 87 /* Too many users */
+#define ENOTSOCK 88 /* Socket operation on non-socket */
+#define EDESTADDRREQ 89 /* Destination address required */
+#define EMSGSIZE 90 /* Message too long */
+#define EPROTOTYPE 91 /* Protocol wrong type for socket */
+#define ENOPROTOOPT 92 /* Protocol not available */
+#define EPROTONOSUPPORT 93 /* Protocol not supported */
+#define ESOCKTNOSUPPORT 94 /* Socket type not supported */
+#define EOPNOTSUPP 95 /* Operation not supported on transport endpoint */
+#define EPFNOSUPPORT 96 /* Protocol family not supported */
+#define EAFNOSUPPORT 97 /* Address family not supported by protocol */
+#define EADDRINUSE 98 /* Address already in use */
+#define EADDRNOTAVAIL 99 /* Cannot assign requested address */
+#define ENETDOWN 100 /* Network is down */
+#define ENETUNREACH 101 /* Network is unreachable */
+#define ENETRESET 102 /* Network dropped connection because of reset */
+#define ECONNABORTED 103 /* Software caused connection abort */
+#define ECONNRESET 104 /* Connection reset by peer */
+#define ENOBUFS 105 /* No buffer space available */
+#define EISCONN 106 /* Transport endpoint is already connected */
+#define ENOTCONN 107 /* Transport endpoint is not connected */
+#define ESHUTDOWN 108 /* Cannot send after transport endpoint shutdown */
+#define ETOOMANYREFS 109 /* Too many references: cannot splice */
+#define ETIMEDOUT 110 /* Connection timed out */
+#define ECONNREFUSED 111 /* Connection refused */
+#define EHOSTDOWN 112 /* Host is down */
+#define EHOSTUNREACH 113 /* No route to host */
+#define EALREADY 114 /* Operation already in progress */
+#define EINPROGRESS 115 /* Operation now in progress */
+#define ESTALE 116 /* Stale NFS file handle */
+#define EUCLEAN 117 /* Structure needs cleaning */
+#define ENOTNAM 118 /* Not a XENIX named type file */
+#define ENAVAIL 119 /* No XENIX semaphores available */
+#define EISNAM 120 /* Is a named type file */
+#define EREMOTEIO 121 /* Remote I/O error */
+#define EDQUOT 122 /* Quota exceeded */
+
+#define ENOMEDIUM 123 /* No medium found */
+#define EMEDIUMTYPE 124 /* Wrong medium type */
+
+#endif /* !MACH_INCLUDE */
+
+#endif
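
The point of the MACH_INCLUDE branch above is namespace separation: when these headers are pulled into the Mach kernel, every Linux error number keeps its value but gains a LINUX_ prefix so it cannot collide with the host's own E* macros, and glue code can then map between the two sets. A minimal standalone sketch of that pattern (the host values and the translate helper are illustrative, not taken from this commit):

#include <stdio.h>

/* Host (Mach-side) error codes -- illustrative values only. */
#define HOST_EIO    5
#define HOST_ENOMEM 12

/* Linux driver codes, redefined here for self-containment; with
   MACH_INCLUDE set, errno.h above provides exactly these names. */
#define LINUX_EIO    5
#define LINUX_ENOMEM 12
#define LINUX_EINVAL 22

/* Hypothetical glue helper: map a Linux driver error onto a host
   error code.  Because the Linux names are prefixed, both macro
   sets can coexist in one translation unit. */
static int linux_to_host_error(int linux_err)
{
  switch (linux_err) {
    case LINUX_EIO:    return HOST_EIO;
    case LINUX_ENOMEM: return HOST_ENOMEM;
    default:           return HOST_EIO;   /* conservative fallback */
  }
}

int main(void)
{
  printf("LINUX_EINVAL = %d, translated LINUX_EIO = %d\n",
         LINUX_EINVAL, linux_to_host_error(LINUX_EIO));
  return 0;
}
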
diff --git a/linux/dev/include/asm-i386/page.h b/linux/dev/include/asm-i386/page.h
new file mode 100644
index 0000000..2bb6837
--- /dev/null
+++ b/linux/dev/include/asm-i386/page.h
@@ -0,0 +1,64 @@
+#ifndef _I386_PAGE_H
+#define _I386_PAGE_H
+
+#ifndef MACH_INCLUDE
+/* PAGE_SHIFT determines the page size */
+#define PAGE_SHIFT 12
+#define PAGE_SIZE (1UL << PAGE_SHIFT)
+#define PAGE_MASK (~(PAGE_SIZE-1))
+#endif
+
+#ifdef __KERNEL__
+
+#define STRICT_MM_TYPECHECKS
+
+#ifdef STRICT_MM_TYPECHECKS
+/*
+ * These are used to make use of C type-checking..
+ */
+typedef struct { unsigned long pte; } pte_t;
+typedef struct { unsigned long pmd; } pmd_t;
+typedef struct { unsigned long pgd; } pgd_t;
+typedef struct { unsigned long pgprot; } pgprot_t;
+
+#define pte_val(x) ((x).pte)
+#define pmd_val(x) ((x).pmd)
+#define pgd_val(x) ((x).pgd)
+#define pgprot_val(x) ((x).pgprot)
+
+#define __pte(x) ((pte_t) { (x) } )
+#define __pmd(x) ((pmd_t) { (x) } )
+#define __pgd(x) ((pgd_t) { (x) } )
+#define __pgprot(x) ((pgprot_t) { (x) } )
+
+#else
+/*
+ * .. while these make it easier on the compiler
+ */
+typedef unsigned long pte_t;
+typedef unsigned long pmd_t;
+typedef unsigned long pgd_t;
+typedef unsigned long pgprot_t;
+
+#define pte_val(x) (x)
+#define pmd_val(x) (x)
+#define pgd_val(x) (x)
+#define pgprot_val(x) (x)
+
+#define __pte(x) (x)
+#define __pmd(x) (x)
+#define __pgd(x) (x)
+#define __pgprot(x) (x)
+
+#endif
+
+/* to align the pointer to the (next) page boundary */
+#define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE-1)&PAGE_MASK)
+
+/* This handles the memory map.. */
+#define PAGE_OFFSET 0
+#define MAP_NR(addr) (((unsigned long)(addr)) >> PAGE_SHIFT)
+
+#endif /* __KERNEL__ */
+
+#endif /* _I386_PAGE_H */
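
STRICT_MM_TYPECHECKS above wraps each page-table level in its own one-member struct so the compiler rejects accidental mixing of levels, at no runtime cost. A self-contained sketch of the same trick (PAGE_SHIFT value taken from the header; the frame helper is illustrative):

#include <stdio.h>

/* One-member struct wrappers, as in the STRICT_MM_TYPECHECKS branch. */
typedef struct { unsigned long pte; } pte_t;
typedef struct { unsigned long pgd; } pgd_t;

#define pte_val(x) ((x).pte)
#define __pte(x)   ((pte_t) { (x) })

/* Illustrative helper: extract the page frame number from a pte. */
static unsigned long pte_page_frame(pte_t e)
{
  return pte_val(e) >> 12;          /* PAGE_SHIFT == 12, as above */
}

int main(void)
{
  pte_t e = __pte(0x1234000UL | 0x1UL);   /* frame bits + present bit */
  pgd_t d = { 0 };

  (void) d;
  /* pte_page_frame(d); */  /* would NOT compile: pgd_t is not pte_t --
                               exactly the protection the struct
                               wrappers buy over bare unsigned longs. */
  printf("frame = 0x%lx\n", pte_page_frame(e));
  return 0;
}
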
diff --git a/linux/dev/include/asm-i386/segment.h b/linux/dev/include/asm-i386/segment.h
new file mode 100644
index 0000000..73952b0
--- /dev/null
+++ b/linux/dev/include/asm-i386/segment.h
@@ -0,0 +1,351 @@
+#ifndef _ASM_SEGMENT_H
+#define _ASM_SEGMENT_H
+
+#ifdef MACH
+
+#define KERNEL_CS 0x08
+#define KERNEL_DS 0x10
+
+#define USER_CS 0x17
+#define USER_DS 0x1F
+
+#else /* !MACH */
+
+#define KERNEL_CS 0x10
+#define KERNEL_DS 0x18
+
+#define USER_CS 0x23
+#define USER_DS 0x2B
+
+#endif /* !MACH */
+
+#ifndef __ASSEMBLY__
+
+/*
+ * Uh, these should become the main single-value transfer routines..
+ * They automatically use the right size if we just have the right
+ * pointer type..
+ */
+#define put_user(x,ptr) __put_user((unsigned long)(x),(ptr),sizeof(*(ptr)))
+#define get_user(ptr) ((__typeof__(*(ptr)))__get_user((ptr),sizeof(*(ptr))))
+
+/*
+ * This is a silly but good way to make sure that
+ * the __put_user function is indeed always optimized,
+ * and that we use the correct sizes..
+ */
+extern int bad_user_access_length(void);
+
+/*
+ * dummy pointer type structure.. gcc won't try to do something strange
+ * this way..
+ */
+struct __segment_dummy { unsigned long a[100]; };
+#define __sd(x) ((struct __segment_dummy *) (x))
+#define __const_sd(x) ((const struct __segment_dummy *) (x))
+
+static inline void __put_user(unsigned long x, void * y, int size)
+{
+ switch (size) {
+ case 1:
+ __asm__ ("movb %b1,%%fs:%0"
+ :"=m" (*__sd(y))
+ :"iq" ((unsigned char) x), "m" (*__sd(y)));
+ break;
+ case 2:
+ __asm__ ("movw %w1,%%fs:%0"
+ :"=m" (*__sd(y))
+ :"ir" ((unsigned short) x), "m" (*__sd(y)));
+ break;
+ case 4:
+ __asm__ ("movl %1,%%fs:%0"
+ :"=m" (*__sd(y))
+ :"ir" (x), "m" (*__sd(y)));
+ break;
+ default:
+ bad_user_access_length();
+ }
+}
+
+static inline unsigned long __get_user(const void * y, int size)
+{
+ unsigned long result;
+
+ switch (size) {
+ case 1:
+ __asm__ ("movb %%fs:%1,%b0"
+ :"=q" (result)
+ :"m" (*__const_sd(y)));
+ return (unsigned char) result;
+ case 2:
+ __asm__ ("movw %%fs:%1,%w0"
+ :"=r" (result)
+ :"m" (*__const_sd(y)));
+ return (unsigned short) result;
+ case 4:
+ __asm__ ("movl %%fs:%1,%0"
+ :"=r" (result)
+ :"m" (*__const_sd(y)));
+ return result;
+ default:
+ return bad_user_access_length();
+ }
+}
+
+static inline void __generic_memcpy_tofs(void * to, const void * from, unsigned long n)
+{
+ __asm__ volatile
+ (" cld
+ push %%es
+ push %%fs
+ cmpl $3,%0
+ pop %%es
+ jbe 1f
+ movl %%edi,%%ecx
+ negl %%ecx
+ andl $3,%%ecx
+ subl %%ecx,%0
+ rep; movsb
+ movl %0,%%ecx
+ shrl $2,%%ecx
+ rep; movsl
+ andl $3,%0
+ 1: movl %0,%%ecx
+ rep; movsb
+ pop %%es"
+ :"=abd" (n)
+ :"0" (n),"D" ((long) to),"S" ((long) from)
+ :"cx","di","si");
+}
+
+static inline void __constant_memcpy_tofs(void * to, const void * from, unsigned long n)
+{
+ switch (n) {
+ case 0:
+ return;
+ case 1:
+ __put_user(*(const char *) from, (char *) to, 1);
+ return;
+ case 2:
+ __put_user(*(const short *) from, (short *) to, 2);
+ return;
+ case 3:
+ __put_user(*(const short *) from, (short *) to, 2);
+ __put_user(*(2+(const char *) from), 2+(char *) to, 1);
+ return;
+ case 4:
+ __put_user(*(const int *) from, (int *) to, 4);
+ return;
+ case 8:
+ __put_user(*(const int *) from, (int *) to, 4);
+ __put_user(*(1+(const int *) from), 1+(int *) to, 4);
+ return;
+ case 12:
+ __put_user(*(const int *) from, (int *) to, 4);
+ __put_user(*(1+(const int *) from), 1+(int *) to, 4);
+ __put_user(*(2+(const int *) from), 2+(int *) to, 4);
+ return;
+ case 16:
+ __put_user(*(const int *) from, (int *) to, 4);
+ __put_user(*(1+(const int *) from), 1+(int *) to, 4);
+ __put_user(*(2+(const int *) from), 2+(int *) to, 4);
+ __put_user(*(3+(const int *) from), 3+(int *) to, 4);
+ return;
+ }
+#define COMMON(x) \
+__asm__("cld\n\t" \
+ "push %%es\n\t" \
+ "push %%fs\n\t" \
+ "pop %%es\n\t" \
+ "rep ; movsl\n\t" \
+ x \
+ "pop %%es" \
+ : /* no outputs */ \
+ :"c" (n/4),"D" ((long) to),"S" ((long) from) \
+ :"cx","di","si")
+
+ switch (n % 4) {
+ case 0:
+ COMMON("");
+ return;
+ case 1:
+ COMMON("movsb\n\t");
+ return;
+ case 2:
+ COMMON("movsw\n\t");
+ return;
+ case 3:
+ COMMON("movsw\n\tmovsb\n\t");
+ return;
+ }
+#undef COMMON
+}
+
+static inline void __generic_memcpy_fromfs(void * to, const void * from, unsigned long n)
+{
+ __asm__ volatile
+ (" cld
+ cmpl $3,%0
+ jbe 1f
+ movl %%edi,%%ecx
+ negl %%ecx
+ andl $3,%%ecx
+ subl %%ecx,%0
+ fs; rep; movsb
+ movl %0,%%ecx
+ shrl $2,%%ecx
+ fs; rep; movsl
+ andl $3,%0
+ 1: movl %0,%%ecx
+ fs; rep; movsb"
+ :"=abd" (n)
+ :"0" (n),"D" ((long) to),"S" ((long) from)
+ :"cx","di","si", "memory");
+}
+
+static inline void __constant_memcpy_fromfs(void * to, const void * from, unsigned long n)
+{
+ switch (n) {
+ case 0:
+ return;
+ case 1:
+ *(char *)to = __get_user((const char *) from, 1);
+ return;
+ case 2:
+ *(short *)to = __get_user((const short *) from, 2);
+ return;
+ case 3:
+ *(short *) to = __get_user((const short *) from, 2);
+ *((char *) to + 2) = __get_user(2+(const char *) from, 1);
+ return;
+ case 4:
+ *(int *) to = __get_user((const int *) from, 4);
+ return;
+ case 8:
+ *(int *) to = __get_user((const int *) from, 4);
+ *(1+(int *) to) = __get_user(1+(const int *) from, 4);
+ return;
+ case 12:
+ *(int *) to = __get_user((const int *) from, 4);
+ *(1+(int *) to) = __get_user(1+(const int *) from, 4);
+ *(2+(int *) to) = __get_user(2+(const int *) from, 4);
+ return;
+ case 16:
+ *(int *) to = __get_user((const int *) from, 4);
+ *(1+(int *) to) = __get_user(1+(const int *) from, 4);
+ *(2+(int *) to) = __get_user(2+(const int *) from, 4);
+ *(3+(int *) to) = __get_user(3+(const int *) from, 4);
+ return;
+ }
+#define COMMON(x) \
+__asm__("cld\n\t" \
+ "rep ; fs ; movsl\n\t" \
+ x \
+ : /* no outputs */ \
+ :"c" (n/4),"D" ((long) to),"S" ((long) from) \
+ :"cx","di","si","memory")
+
+ switch (n % 4) {
+ case 0:
+ COMMON("");
+ return;
+ case 1:
+ COMMON("fs ; movsb");
+ return;
+ case 2:
+ COMMON("fs ; movsw");
+ return;
+ case 3:
+ COMMON("fs ; movsw\n\tfs ; movsb");
+ return;
+ }
+#undef COMMON
+}
+
+#define memcpy_fromfs(to, from, n) \
+(__builtin_constant_p(n) ? \
+ __constant_memcpy_fromfs((to),(from),(n)) : \
+ __generic_memcpy_fromfs((to),(from),(n)))
+
+#define memcpy_tofs(to, from, n) \
+(__builtin_constant_p(n) ? \
+ __constant_memcpy_tofs((to),(from),(n)) : \
+ __generic_memcpy_tofs((to),(from),(n)))
+
+/*
+ * These are deprecated..
+ *
+ * Use "put_user()" and "get_user()" with the proper pointer types instead.
+ */
+
+#define get_fs_byte(addr) __get_user((const unsigned char *)(addr),1)
+#define get_fs_word(addr) __get_user((const unsigned short *)(addr),2)
+#define get_fs_long(addr) __get_user((const unsigned int *)(addr),4)
+
+#define put_fs_byte(x,addr) __put_user((x),(unsigned char *)(addr),1)
+#define put_fs_word(x,addr) __put_user((x),(unsigned short *)(addr),2)
+#define put_fs_long(x,addr) __put_user((x),(unsigned int *)(addr),4)
+
+#ifdef WE_REALLY_WANT_TO_USE_A_BROKEN_INTERFACE
+
+static inline unsigned short get_user_word(const short *addr)
+{
+ return __get_user(addr, 2);
+}
+
+static inline unsigned char get_user_byte(const char * addr)
+{
+ return __get_user(addr,1);
+}
+
+static inline unsigned long get_user_long(const int *addr)
+{
+ return __get_user(addr, 4);
+}
+
+static inline void put_user_byte(char val,char *addr)
+{
+ __put_user(val, addr, 1);
+}
+
+static inline void put_user_word(short val,short * addr)
+{
+ __put_user(val, addr, 2);
+}
+
+static inline void put_user_long(unsigned long val,int * addr)
+{
+ __put_user(val, addr, 4);
+}
+
+#endif
+
+/*
+ * Someone who knows GNU asm better than I should double check the following.
+ * It seems to work, but I don't know if I'm doing something subtly wrong.
+ * --- TYT, 11/24/91
+ * [ nothing wrong here, Linus: I just changed the ax to be any reg ]
+ */
+
+static inline unsigned long get_fs(void)
+{
+ unsigned long _v;
+ __asm__("mov %%fs,%w0":"=r" (_v):"0" (0));
+ return _v;
+}
+
+static inline unsigned long get_ds(void)
+{
+ unsigned long _v;
+ __asm__("mov %%ds,%w0":"=r" (_v):"0" (0));
+ return _v;
+}
+
+static inline void set_fs(unsigned long val)
+{
+ __asm__ __volatile__("mov %w0,%%fs": /* no output */ :"r" (val));
+}
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_SEGMENT_H */
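
The put_user()/get_user() macros above pick the transfer width from sizeof(*(ptr)) and let the compiler discard the unused switch arms. A portable user-space sketch of that dispatch pattern (ordinary stores stand in for the %fs-relative moves; all names here are illustrative):

#include <stdio.h>
#include <string.h>

/* Size-dispatched store, mirroring __put_user()'s switch above but
   using plain memory stores instead of %fs-segment moves. */
static void put_sized(unsigned long x, void *dst, int size)
{
  switch (size) {
    case 1: { unsigned char  v = (unsigned char)  x; memcpy(dst, &v, 1); break; }
    case 2: { unsigned short v = (unsigned short) x; memcpy(dst, &v, 2); break; }
    case 4: { unsigned int   v = (unsigned int)   x; memcpy(dst, &v, 4); break; }
    default: break;   /* bad_user_access_length() in the real header */
  }
}

/* Same front-end trick as put_user(): the width comes from the
   pointer type, so callers never spell out a size. */
#define put_user_sketch(x, ptr) \
  put_sized((unsigned long)(x), (ptr), (int) sizeof(*(ptr)))

int main(void)
{
  unsigned short s = 0;
  unsigned int   w = 0;

  put_user_sketch(0x1234, &s);       /* 2-byte store chosen at compile time */
  put_user_sketch(0xdeadbeef, &w);   /* 4-byte store */
  printf("s=0x%x w=0x%x\n", s, w);
  return 0;
}
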
diff --git a/linux/dev/include/asm-i386/string.h b/linux/dev/include/asm-i386/string.h
new file mode 100644
index 0000000..144e86e
--- /dev/null
+++ b/linux/dev/include/asm-i386/string.h
@@ -0,0 +1,631 @@
+#ifndef _I386_STRING_H_
+#define _I386_STRING_H_
+
+/*
+ * On a 486 or Pentium, we are better off not using the
+ * byte string operations. But on a 386 or a PPro the
+ * byte string ops are faster than doing it by hand
+ * (MUCH faster on a Pentium).
+ *
+ * Also, the byte strings actually work correctly. Forget
+ * the i486 routines for now as they may be broken..
+ */
+#if FIXED_486_STRING && (CPU == 486 || CPU == 586)
+#include <asm/string-486.h>
+#else
+
+/*
+ * This string-include defines all string functions as inline
+ * functions. Use gcc. It also assumes ds=es=data space, this should be
+ * normal. Most of the string-functions are rather heavily hand-optimized,
+ * see especially strtok,strstr,str[c]spn. They should work, but are not
+ * very easy to understand. Everything is done entirely within the register
+ * set, making the functions fast and clean. String instructions have been
+ * used through-out, making for "slightly" unclear code :-)
+ *
+ * Copyright (C) 1991, 1992 Linus Torvalds
+ */
+
+#define __HAVE_ARCH_STRCPY
+extern inline char * strcpy(char * dest,const char *src)
+{
+__asm__ __volatile__(
+ "cld\n"
+ "1:\tlodsb\n\t"
+ "stosb\n\t"
+ "testb %%al,%%al\n\t"
+ "jne 1b"
+ : /* no output */
+ :"S" (src),"D" (dest):"si","di","ax","memory");
+return dest;
+}
+
+#define __HAVE_ARCH_STRNCPY
+extern inline char * strncpy(char * dest,const char *src,size_t count)
+{
+__asm__ __volatile__(
+ "cld\n"
+ "1:\tdecl %2\n\t"
+ "js 2f\n\t"
+ "lodsb\n\t"
+ "stosb\n\t"
+ "testb %%al,%%al\n\t"
+ "jne 1b\n\t"
+ "rep\n\t"
+ "stosb\n"
+ "2:"
+ : /* no output */
+ :"S" (src),"D" (dest),"c" (count):"si","di","ax","cx","memory");
+return dest;
+}
+
+#define __HAVE_ARCH_STRCAT
+extern inline char * strcat(char * dest,const char * src)
+{
+__asm__ __volatile__(
+ "cld\n\t"
+ "repne\n\t"
+ "scasb\n\t"
+ "decl %1\n"
+ "1:\tlodsb\n\t"
+ "stosb\n\t"
+ "testb %%al,%%al\n\t"
+ "jne 1b"
+ : /* no output */
+ :"S" (src),"D" (dest),"a" (0),"c" (0xffffffff):"si","di","ax","cx");
+return dest;
+}
+
+#define __HAVE_ARCH_STRNCAT
+extern inline char * strncat(char * dest,const char * src,size_t count)
+{
+__asm__ __volatile__(
+ "cld\n\t"
+ "repne\n\t"
+ "scasb\n\t"
+ "decl %1\n\t"
+ "movl %4,%3\n"
+ "1:\tdecl %3\n\t"
+ "js 2f\n\t"
+ "lodsb\n\t"
+ "stosb\n\t"
+ "testb %%al,%%al\n\t"
+ "jne 1b\n"
+ "2:\txorl %2,%2\n\t"
+ "stosb"
+ : /* no output */
+ :"S" (src),"D" (dest),"a" (0),"c" (0xffffffff),"g" (count)
+ :"si","di","ax","cx","memory");
+return dest;
+}
+
+#define __HAVE_ARCH_STRCMP
+extern inline int strcmp(const char * cs,const char * ct)
+{
+register int __res;
+__asm__ __volatile__(
+ "cld\n"
+ "1:\tlodsb\n\t"
+ "scasb\n\t"
+ "jne 2f\n\t"
+ "testb %%al,%%al\n\t"
+ "jne 1b\n\t"
+ "xorl %%eax,%%eax\n\t"
+ "jmp 3f\n"
+ "2:\tsbbl %%eax,%%eax\n\t"
+ "orb $1,%%eax\n"
+ "3:"
+ :"=a" (__res):"S" (cs),"D" (ct):"si","di");
+return __res;
+}
+
+#define __HAVE_ARCH_STRNCMP
+extern inline int strncmp(const char * cs,const char * ct,size_t count)
+{
+register int __res;
+__asm__ __volatile__(
+ "cld\n"
+ "1:\tdecl %3\n\t"
+ "js 2f\n\t"
+ "lodsb\n\t"
+ "scasb\n\t"
+ "jne 3f\n\t"
+ "testb %%al,%%al\n\t"
+ "jne 1b\n"
+ "2:\txorl %%eax,%%eax\n\t"
+ "jmp 4f\n"
+ "3:\tsbbl %%eax,%%eax\n\t"
+ "orb $1,%%al\n"
+ "4:"
+ :"=a" (__res):"S" (cs),"D" (ct),"c" (count):"si","di","cx");
+return __res;
+}
+
+#define __HAVE_ARCH_STRCHR
+extern inline char * strchr(const char * s, int c)
+{
+register char * __res;
+__asm__ __volatile__(
+ "cld\n\t"
+ "movb %%al,%%ah\n"
+ "1:\tlodsb\n\t"
+ "cmpb %%ah,%%al\n\t"
+ "je 2f\n\t"
+ "testb %%al,%%al\n\t"
+ "jne 1b\n\t"
+ "movl $1,%1\n"
+ "2:\tmovl %1,%0\n\t"
+ "decl %0"
+ :"=a" (__res):"S" (s),"0" (c):"si");
+return __res;
+}
+
+#define __HAVE_ARCH_STRRCHR
+extern inline char * strrchr(const char * s, int c)
+{
+register char * __res;
+__asm__ __volatile__(
+ "cld\n\t"
+ "movb %%al,%%ah\n"
+ "1:\tlodsb\n\t"
+ "cmpb %%ah,%%al\n\t"
+ "jne 2f\n\t"
+ "leal -1(%%esi),%0\n"
+ "2:\ttestb %%al,%%al\n\t"
+ "jne 1b"
+ :"=d" (__res):"0" (0),"S" (s),"a" (c):"ax","si");
+return __res;
+}
+
+#define __HAVE_ARCH_STRSPN
+extern inline size_t strspn(const char * cs, const char * ct)
+{
+register char * __res;
+__asm__ __volatile__(
+ "cld\n\t"
+ "movl %4,%%edi\n\t"
+ "repne\n\t"
+ "scasb\n\t"
+ "notl %%ecx\n\t"
+ "decl %%ecx\n\t"
+ "movl %%ecx,%%edx\n"
+ "1:\tlodsb\n\t"
+ "testb %%al,%%al\n\t"
+ "je 2f\n\t"
+ "movl %4,%%edi\n\t"
+ "movl %%edx,%%ecx\n\t"
+ "repne\n\t"
+ "scasb\n\t"
+ "je 1b\n"
+ "2:\tdecl %0"
+ :"=S" (__res):"a" (0),"c" (0xffffffff),"0" (cs),"g" (ct)
+ :"ax","cx","dx","di");
+return __res-cs;
+}
+
+#define __HAVE_ARCH_STRCSPN
+extern inline size_t strcspn(const char * cs, const char * ct)
+{
+register char * __res;
+__asm__ __volatile__(
+ "cld\n\t"
+ "movl %4,%%edi\n\t"
+ "repne\n\t"
+ "scasb\n\t"
+ "notl %%ecx\n\t"
+ "decl %%ecx\n\t"
+ "movl %%ecx,%%edx\n"
+ "1:\tlodsb\n\t"
+ "testb %%al,%%al\n\t"
+ "je 2f\n\t"
+ "movl %4,%%edi\n\t"
+ "movl %%edx,%%ecx\n\t"
+ "repne\n\t"
+ "scasb\n\t"
+ "jne 1b\n"
+ "2:\tdecl %0"
+ :"=S" (__res):"a" (0),"c" (0xffffffff),"0" (cs),"g" (ct)
+ :"ax","cx","dx","di");
+return __res-cs;
+}
+
+#define __HAVE_ARCH_STRPBRK
+extern inline char * strpbrk(const char * cs,const char * ct)
+{
+register char * __res;
+__asm__ __volatile__(
+ "cld\n\t"
+ "movl %4,%%edi\n\t"
+ "repne\n\t"
+ "scasb\n\t"
+ "notl %%ecx\n\t"
+ "decl %%ecx\n\t"
+ "movl %%ecx,%%edx\n"
+ "1:\tlodsb\n\t"
+ "testb %%al,%%al\n\t"
+ "je 2f\n\t"
+ "movl %4,%%edi\n\t"
+ "movl %%edx,%%ecx\n\t"
+ "repne\n\t"
+ "scasb\n\t"
+ "jne 1b\n\t"
+ "decl %0\n\t"
+ "jmp 3f\n"
+ "2:\txorl %0,%0\n"
+ "3:"
+ :"=S" (__res):"a" (0),"c" (0xffffffff),"0" (cs),"g" (ct)
+ :"ax","cx","dx","di");
+return __res;
+}
+
+#define __HAVE_ARCH_STRSTR
+extern inline char * strstr(const char * cs,const char * ct)
+{
+register char * __res;
+__asm__ __volatile__(
+ "cld\n\t" \
+ "repne\n\t"
+ "scasb\n\t"
+ "notl %%ecx\n\t"
+ "decl %%ecx\n\t" /* NOTE! This also sets Z if searchstring='' */
+ "movl %%ecx,%%edx\n"
+ "1:\tmovl %4,%%edi\n\t"
+ "movl %%esi,%%eax\n\t"
+ "movl %%edx,%%ecx\n\t"
+ "repe\n\t"
+ "cmpsb\n\t"
+ "je 2f\n\t" /* also works for empty string, see above */
+ "xchgl %%eax,%%esi\n\t"
+ "incl %%esi\n\t"
+ "cmpb $0,-1(%%eax)\n\t"
+ "jne 1b\n\t"
+ "xorl %%eax,%%eax\n\t"
+ "2:"
+ :"=a" (__res):"0" (0),"c" (0xffffffff),"S" (cs),"D" (ct),
+ "g" (*cs), "g" (*ct)
+ :"cx","dx","di","si");
+return __res;
+}
+
+#define __HAVE_ARCH_STRLEN
+extern inline size_t strlen(const char * s)
+{
+register int __res;
+__asm__ __volatile__(
+ "cld\n\t"
+ "repne\n\t"
+ "scasb\n\t"
+ "notl %0\n\t"
+ "decl %0"
+ :"=c" (__res):"D" (s),"a" (0),"0" (0xffffffff):"di");
+return __res;
+}
+
+#define __HAVE_ARCH_STRTOK
+extern inline char * strtok(char * s,const char * ct)
+{
+register char * __res;
+__asm__ __volatile__(
+ "testl %1,%1\n\t"
+ "jne 1f\n\t"
+ "testl %0,%0\n\t"
+ "je 8f\n\t"
+ "movl %0,%1\n"
+ "1:\txorl %0,%0\n\t"
+ "movl $-1,%%ecx\n\t"
+ "xorl %%eax,%%eax\n\t"
+ "cld\n\t"
+ "movl %4,%%edi\n\t"
+ "repne\n\t"
+ "scasb\n\t"
+ "notl %%ecx\n\t"
+ "decl %%ecx\n\t"
+ "je 7f\n\t" /* empty delimiter-string */
+ "movl %%ecx,%%edx\n"
+ "2:\tlodsb\n\t"
+ "testb %%al,%%al\n\t"
+ "je 7f\n\t"
+ "movl %4,%%edi\n\t"
+ "movl %%edx,%%ecx\n\t"
+ "repne\n\t"
+ "scasb\n\t"
+ "je 2b\n\t"
+ "decl %1\n\t"
+ "cmpb $0,(%1)\n\t"
+ "je 7f\n\t"
+ "movl %1,%0\n"
+ "3:\tlodsb\n\t"
+ "testb %%al,%%al\n\t"
+ "je 5f\n\t"
+ "movl %4,%%edi\n\t"
+ "movl %%edx,%%ecx\n\t"
+ "repne\n\t"
+ "scasb\n\t"
+ "jne 3b\n\t"
+ "decl %1\n\t"
+ "cmpb $0,(%1)\n\t"
+ "je 5f\n\t"
+ "movb $0,(%1)\n\t"
+ "incl %1\n\t"
+ "jmp 6f\n"
+ "5:\txorl %1,%1\n"
+ "6:\tcmpb $0,(%0)\n\t"
+ "jne 7f\n\t"
+ "xorl %0,%0\n"
+ "7:\ttestl %0,%0\n\t"
+ "jne 8f\n\t"
+ "movl %0,%1\n"
+ "8:"
+ :"=b" (__res),"=S" (___strtok)
+ :"0" (___strtok),"1" (s),"g" (ct)
+ :"ax","cx","dx","di","memory");
+return __res;
+}
+
+extern inline void * __memcpy(void * to, const void * from, size_t n)
+{
+__asm__ __volatile__(
+ "cld\n\t"
+ "rep ; movsl\n\t"
+ "testb $2,%b1\n\t"
+ "je 1f\n\t"
+ "movsw\n"
+ "1:\ttestb $1,%b1\n\t"
+ "je 2f\n\t"
+ "movsb\n"
+ "2:"
+ : /* no output */
+ :"c" (n/4), "q" (n),"D" ((long) to),"S" ((long) from)
+ : "cx","di","si","memory");
+return (to);
+}
+
+/*
+ * This looks horribly ugly, but the compiler can optimize it totally,
+ * as the count is constant.
+ */
+extern inline void * __constant_memcpy(void * to, const void * from, size_t n)
+{
+ switch (n) {
+ case 0:
+ return to;
+ case 1:
+ *(unsigned char *)to = *(const unsigned char *)from;
+ return to;
+ case 2:
+ *(unsigned short *)to = *(const unsigned short *)from;
+ return to;
+ case 3:
+ *(unsigned short *)to = *(const unsigned short *)from;
+ *(2+(unsigned char *)to) = *(2+(const unsigned char *)from);
+ return to;
+ case 4:
+ *(unsigned long *)to = *(const unsigned long *)from;
+ return to;
+ case 8:
+ *(unsigned long *)to = *(const unsigned long *)from;
+ *(1+(unsigned long *)to) = *(1+(const unsigned long *)from);
+ return to;
+ case 12:
+ *(unsigned long *)to = *(const unsigned long *)from;
+ *(1+(unsigned long *)to) = *(1+(const unsigned long *)from);
+ *(2+(unsigned long *)to) = *(2+(const unsigned long *)from);
+ return to;
+ case 16:
+ *(unsigned long *)to = *(const unsigned long *)from;
+ *(1+(unsigned long *)to) = *(1+(const unsigned long *)from);
+ *(2+(unsigned long *)to) = *(2+(const unsigned long *)from);
+ *(3+(unsigned long *)to) = *(3+(const unsigned long *)from);
+ return to;
+ case 20:
+ *(unsigned long *)to = *(const unsigned long *)from;
+ *(1+(unsigned long *)to) = *(1+(const unsigned long *)from);
+ *(2+(unsigned long *)to) = *(2+(const unsigned long *)from);
+ *(3+(unsigned long *)to) = *(3+(const unsigned long *)from);
+ *(4+(unsigned long *)to) = *(4+(const unsigned long *)from);
+ return to;
+ }
+#define COMMON(x) \
+__asm__("cld\n\t" \
+ "rep ; movsl" \
+ x \
+ : /* no outputs */ \
+ : "c" (n/4),"D" ((long) to),"S" ((long) from) \
+ : "cx","di","si","memory");
+
+ switch (n % 4) {
+ case 0: COMMON(""); return to;
+ case 1: COMMON("\n\tmovsb"); return to;
+ case 2: COMMON("\n\tmovsw"); return to;
+ default: COMMON("\n\tmovsw\n\tmovsb"); return to;
+ }
+#undef COMMON
+}
+
+#define __HAVE_ARCH_MEMCPY
+#define memcpy(t, f, n) \
+(__builtin_constant_p(n) ? \
+ __constant_memcpy((t),(f),(n)) : \
+ __memcpy((t),(f),(n)))
+
+#define __HAVE_ARCH_MEMMOVE
+extern inline void * memmove(void * dest,const void * src, size_t n)
+{
+if (dest<src)
+__asm__ __volatile__(
+ "cld\n\t"
+ "rep\n\t"
+ "movsb"
+ : /* no output */
+ :"c" (n),"S" (src),"D" (dest)
+ :"cx","si","di");
+else
+__asm__ __volatile__(
+ "std\n\t"
+ "rep\n\t"
+ "movsb\n\t"
+ "cld"
+ : /* no output */
+ :"c" (n),
+ "S" (n-1+(const char *)src),
+ "D" (n-1+(char *)dest)
+ :"cx","si","di","memory");
+return dest;
+}
+
+#define memcmp __builtin_memcmp
+
+#define __HAVE_ARCH_MEMCHR
+extern inline void * memchr(const void * cs,int c,size_t count)
+{
+register void * __res;
+if (!count)
+ return NULL;
+__asm__ __volatile__(
+ "cld\n\t"
+ "repne\n\t"
+ "scasb\n\t"
+ "je 1f\n\t"
+ "movl $1,%0\n"
+ "1:\tdecl %0"
+ :"=D" (__res):"a" (c),"D" (cs),"c" (count)
+ :"cx");
+return __res;
+}
+
+extern inline void * __memset_generic(void * s, char c,size_t count)
+{
+__asm__ __volatile__(
+ "cld\n\t"
+ "rep\n\t"
+ "stosb"
+ : /* no output */
+ :"a" (c),"D" (s),"c" (count)
+ :"cx","di","memory");
+return s;
+}
+
+/* we might want to write optimized versions of these later */
+#define __constant_count_memset(s,c,count) __memset_generic((s),(c),(count))
+
+/*
+ * memset(x,0,y) is a reasonably common thing to do, so we want to fill
+ * things 32 bits at a time even when we don't know the size of the
+ * area at compile-time..
+ */
+extern inline void * __constant_c_memset(void * s, unsigned long c, size_t count)
+{
+__asm__ __volatile__(
+ "cld\n\t"
+ "rep ; stosl\n\t"
+ "testb $2,%b1\n\t"
+ "je 1f\n\t"
+ "stosw\n"
+ "1:\ttestb $1,%b1\n\t"
+ "je 2f\n\t"
+ "stosb\n"
+ "2:"
+ : /* no output */
+ :"a" (c), "q" (count), "c" (count/4), "D" ((long) s)
+ :"cx","di","memory");
+return (s);
+}
+
+/* Added by Gertjan van Wingerde to make minix and sysv module work */
+#define __HAVE_ARCH_STRNLEN
+extern inline size_t strnlen(const char * s, size_t count)
+{
+register int __res;
+__asm__ __volatile__(
+ "movl %1,%0\n\t"
+ "jmp 2f\n"
+ "1:\tcmpb $0,(%0)\n\t"
+ "je 3f\n\t"
+ "incl %0\n"
+ "2:\tdecl %2\n\t"
+ "cmpl $-1,%2\n\t"
+ "jne 1b\n"
+ "3:\tsubl %1,%0"
+ :"=a" (__res)
+ :"c" (s),"d" (count)
+ :"dx");
+return __res;
+}
+/* end of additional stuff */
+
+/*
+ * This looks horribly ugly, but the compiler can optimize it totally,
+ * as we by now know that both pattern and count is constant..
+ */
+extern inline void * __constant_c_and_count_memset(void * s, unsigned long pattern, size_t count)
+{
+ switch (count) {
+ case 0:
+ return s;
+ case 1:
+ *(unsigned char *)s = pattern;
+ return s;
+ case 2:
+ *(unsigned short *)s = pattern;
+ return s;
+ case 3:
+ *(unsigned short *)s = pattern;
+ *(2+(unsigned char *)s) = pattern;
+ return s;
+ case 4:
+ *(unsigned long *)s = pattern;
+ return s;
+ }
+#define COMMON(x) \
+__asm__("cld\n\t" \
+ "rep ; stosl" \
+ x \
+ : /* no outputs */ \
+ : "a" (pattern),"c" (count/4),"D" ((long) s) \
+ : "cx","di","memory")
+
+ switch (count % 4) {
+ case 0: COMMON(""); return s;
+ case 1: COMMON("\n\tstosb"); return s;
+ case 2: COMMON("\n\tstosw"); return s;
+ default: COMMON("\n\tstosw\n\tstosb"); return s;
+ }
+#undef COMMON
+}
+
+#define __constant_c_x_memset(s, c, count) \
+(__builtin_constant_p(count) ? \
+ __constant_c_and_count_memset((s),(c),(count)) : \
+ __constant_c_memset((s),(c),(count)))
+
+#define __memset(s, c, count) \
+(__builtin_constant_p(count) ? \
+ __constant_count_memset((s),(c),(count)) : \
+ __memset_generic((s),(c),(count)))
+
+#define __HAVE_ARCH_MEMSET
+#define memset(s, c, count) \
+(__builtin_constant_p(c) ? \
+ __constant_c_x_memset((s),(0x01010101UL*(unsigned char)c),(count)) : \
+ __memset((s),(c),(count)))
+
+/*
+ * find the first occurrence of byte 'c', or 1 past the area if none
+ */
+#define __HAVE_ARCH_MEMSCAN
+extern inline void * memscan(void * addr, int c, size_t size)
+{
+ if (!size)
+ return addr;
+ __asm__("cld
+ repnz; scasb
+ jnz 1f
+ dec %%edi
+1: "
+ : "=D" (addr), "=c" (size)
+ : "0" (addr), "1" (size), "a" (c));
+ return addr;
+}
+
+#endif
+#endif
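
Two tricks above are worth spelling out: memset() multiplies the fill byte by 0x01010101UL so one 32-bit store writes four copies of it, and __builtin_constant_p() routes compile-time-constant arguments to the unrolled variants. A small user-space sketch of both (the macro and helper names are illustrative, not from the header):

#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* Replicate a fill byte across a 32-bit word, exactly as the memset()
   macro above does with 0x01010101UL * (unsigned char) c. */
static uint32_t spread_byte(unsigned char c)
{
  return 0x01010101UL * c;
}

/* Sketch of the __builtin_constant_p() dispatch: a known 4-byte fill
   becomes a single word store, anything else falls back to memset(). */
#define memset_sketch(s, c, n)                        \
  (__builtin_constant_p(n) && (n) == 4                \
     ? (void) (*(uint32_t *)(s) = spread_byte(c))     \
     : (void) memset((s), (c), (n)))

int main(void)
{
  uint32_t words[2] = { 0, 0 };

  memset_sketch(&words[0], 0xab, 4);                 /* constant-size path */
  memset_sketch(&words[1], 0xcd, sizeof(uint32_t));  /* also constant */
  printf("%08x %08x spread=%08x\n",
         (unsigned) words[0], (unsigned) words[1],
         (unsigned) spread_byte(0xab));
  return 0;
}
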
diff --git a/linux/dev/include/asm-i386/system.h b/linux/dev/include/asm-i386/system.h
new file mode 100644
index 0000000..ecd2d75
--- /dev/null
+++ b/linux/dev/include/asm-i386/system.h
@@ -0,0 +1,336 @@
+#ifndef __ASM_SYSTEM_H
+#define __ASM_SYSTEM_H
+
+#include <asm/segment.h>
+
+/*
+ * Entry into gdt where to find first TSS. GDT layout:
+ * 0 - null
+ * 1 - not used
+ * 2 - kernel code segment
+ * 3 - kernel data segment
+ * 4 - user code segment
+ * 5 - user data segment
+ * ...
+ * 8 - TSS #0
+ * 9 - LDT #0
+ * 10 - TSS #1
+ * 11 - LDT #1
+ */
+#define FIRST_TSS_ENTRY 8
+#define FIRST_LDT_ENTRY (FIRST_TSS_ENTRY+1)
+#define _TSS(n) ((((unsigned long) n)<<4)+(FIRST_TSS_ENTRY<<3))
+#define _LDT(n) ((((unsigned long) n)<<4)+(FIRST_LDT_ENTRY<<3))
+#define load_TR(n) __asm__("ltr %%ax": /* no output */ :"a" (_TSS(n)))
+#define load_ldt(n) __asm__("lldt %%ax": /* no output */ :"a" (_LDT(n)))
+#define store_TR(n) \
+__asm__("str %%ax\n\t" \
+ "subl %2,%%eax\n\t" \
+ "shrl $4,%%eax" \
+ :"=a" (n) \
+ :"0" (0),"i" (FIRST_TSS_ENTRY<<3))
+
+/* This special macro can be used to load a debugging register */
+
+#define loaddebug(tsk,register) \
+ __asm__("movl %0,%%edx\n\t" \
+ "movl %%edx,%%db" #register "\n\t" \
+ : /* no output */ \
+ :"m" (tsk->debugreg[register]) \
+ :"dx");
+
+
+/*
+ * switch_to(n) should switch tasks to task nr n, first
+ * checking that n isn't the current task, in which case it does nothing.
+ * This also clears the TS-flag if the task we switched to has used
+ * the math co-processor latest.
+ *
+ * It also reloads the debug regs if necessary..
+ */
+
+
+#ifdef __SMP__
+ /*
+ * Keep the lock depth straight. If we switch on an interrupt from
+ * kernel->user task we need to lose a depth, and if we switch the
+ * other way we need to gain a depth. Same layer switches come out
+ * the same.
+ *
+ * We spot a switch in user mode because the kernel counter is the
+ * same as the interrupt counter depth. (We never switch during the
+ * message/invalidate IPI).
+ *
+ * We fsave/fwait so that an exception goes off at the right time
+ * (as a call from the fsave or fwait in effect) rather than to
+ * the wrong process.
+ */
+
+#define switch_to(prev,next) do { \
+ cli();\
+ if(prev->flags&PF_USEDFPU) \
+ { \
+ __asm__ __volatile__("fnsave %0":"=m" (prev->tss.i387.hard)); \
+ __asm__ __volatile__("fwait"); \
+ prev->flags&=~PF_USEDFPU; \
+ } \
+ prev->lock_depth=syscall_count; \
+ kernel_counter+=next->lock_depth-prev->lock_depth; \
+ syscall_count=next->lock_depth; \
+__asm__("pushl %%edx\n\t" \
+ "movl "SYMBOL_NAME_STR(apic_reg)",%%edx\n\t" \
+ "movl 0x20(%%edx), %%edx\n\t" \
+ "shrl $22,%%edx\n\t" \
+ "and $0x3C,%%edx\n\t" \
+ "movl %%ecx,"SYMBOL_NAME_STR(current_set)"(,%%edx)\n\t" \
+ "popl %%edx\n\t" \
+ "ljmp %0\n\t" \
+ "sti\n\t" \
+ : /* no output */ \
+ :"m" (*(((char *)&next->tss.tr)-4)), \
+ "c" (next)); \
+ /* Now maybe reload the debug registers */ \
+ if(prev->debugreg[7]){ \
+ loaddebug(prev,0); \
+ loaddebug(prev,1); \
+ loaddebug(prev,2); \
+ loaddebug(prev,3); \
+ loaddebug(prev,6); \
+ } \
+} while (0)
+
+#else
+#define switch_to(prev,next) do { \
+__asm__("movl %2,"SYMBOL_NAME_STR(current_set)"\n\t" \
+ "ljmp %0\n\t" \
+ "cmpl %1,"SYMBOL_NAME_STR(last_task_used_math)"\n\t" \
+ "jne 1f\n\t" \
+ "clts\n" \
+ "1:" \
+ : /* no outputs */ \
+ :"m" (*(((char *)&next->tss.tr)-4)), \
+ "r" (prev), "r" (next)); \
+ /* Now maybe reload the debug registers */ \
+ if(prev->debugreg[7]){ \
+ loaddebug(prev,0); \
+ loaddebug(prev,1); \
+ loaddebug(prev,2); \
+ loaddebug(prev,3); \
+ loaddebug(prev,6); \
+ } \
+} while (0)
+#endif
+
+#define _set_base(addr,base) \
+__asm__("movw %%dx,%0\n\t" \
+ "rorl $16,%%edx\n\t" \
+ "movb %%dl,%1\n\t" \
+ "movb %%dh,%2" \
+ : /* no output */ \
+ :"m" (*((addr)+2)), \
+ "m" (*((addr)+4)), \
+ "m" (*((addr)+7)), \
+ "d" (base) \
+ :"dx")
+
+#define _set_limit(addr,limit) \
+__asm__("movw %%dx,%0\n\t" \
+ "rorl $16,%%edx\n\t" \
+ "movb %1,%%dh\n\t" \
+ "andb $0xf0,%%dh\n\t" \
+ "orb %%dh,%%dl\n\t" \
+ "movb %%dl,%1" \
+ : /* no output */ \
+ :"m" (*(addr)), \
+ "m" (*((addr)+6)), \
+ "d" (limit) \
+ :"dx")
+
+#define set_base(ldt,base) _set_base( ((char *)&(ldt)) , base )
+#define set_limit(ldt,limit) _set_limit( ((char *)&(ldt)) , (limit-1)>>12 )
+
+static inline unsigned long _get_base(char * addr)
+{
+ unsigned long __base;
+ __asm__("movb %3,%%dh\n\t"
+ "movb %2,%%dl\n\t"
+ "shll $16,%%edx\n\t"
+ "movw %1,%%dx"
+ :"=&d" (__base)
+ :"m" (*((addr)+2)),
+ "m" (*((addr)+4)),
+ "m" (*((addr)+7)));
+ return __base;
+}
+
+#define get_base(ldt) _get_base( ((char *)&(ldt)) )
+
+static inline unsigned long get_limit(unsigned long segment)
+{
+ unsigned long __limit;
+ __asm__("lsll %1,%0"
+ :"=r" (__limit):"r" (segment));
+ return __limit+1;
+}
+
+#define nop() __asm__ __volatile__ ("nop")
+
+/*
+ * Clear and set 'TS' bit respectively
+ */
+#define clts() __asm__ __volatile__ ("clts")
+#define stts() \
+__asm__ __volatile__ ( \
+ "movl %%cr0,%%eax\n\t" \
+ "orl $8,%%eax\n\t" \
+ "movl %%eax,%%cr0" \
+ : /* no outputs */ \
+ : /* no inputs */ \
+ :"ax")
+
+
+#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
+#define tas(ptr) (xchg((ptr),1))
+
+struct __xchg_dummy { unsigned long a[100]; };
+#define __xg(x) ((struct __xchg_dummy *)(x))
+
+static inline unsigned long __xchg(unsigned long x, void * ptr, int size)
+{
+ switch (size) {
+ case 1:
+ __asm__("xchgb %b0,%1"
+ :"=q" (x)
+ :"m" (*__xg(ptr)), "0" (x)
+ :"memory");
+ break;
+ case 2:
+ __asm__("xchgw %w0,%1"
+ :"=r" (x)
+ :"m" (*__xg(ptr)), "0" (x)
+ :"memory");
+ break;
+ case 4:
+ __asm__("xchgl %0,%1"
+ :"=r" (x)
+ :"m" (*__xg(ptr)), "0" (x)
+ :"memory");
+ break;
+ }
+ return x;
+}
+
+#define mb() __asm__ __volatile__ ("" : : :"memory")
+#define sti() __asm__ __volatile__ ("sti": : :"memory")
+#define cli() __asm__ __volatile__ ("cli": : :"memory")
+
+#define save_flags(x) \
+__asm__ __volatile__("pushfl ; popl %0":"=g" (x): /* no input */ :"memory")
+
+#define restore_flags(x) \
+__asm__ __volatile__("pushl %0 ; popfl": /* no output */ :"g" (x):"memory")
+
+#define iret() __asm__ __volatile__ ("iret": : :"memory")
+
+#define _set_gate(gate_addr,type,dpl,addr) \
+__asm__ __volatile__ ("movw %%dx,%%ax\n\t" \
+ "movw %2,%%dx\n\t" \
+ "movl %%eax,%0\n\t" \
+ "movl %%edx,%1" \
+ :"=m" (*((long *) (gate_addr))), \
+ "=m" (*(1+(long *) (gate_addr))) \
+ :"i" ((short) (0x8000+(dpl<<13)+(type<<8))), \
+ "d" ((char *) (addr)),"a" (KERNEL_CS << 16) \
+ :"ax","dx")
+
+#define set_intr_gate(n,addr) \
+ _set_gate(&idt[n],14,0,addr)
+
+#define set_trap_gate(n,addr) \
+ _set_gate(&idt[n],15,0,addr)
+
+#define set_system_gate(n,addr) \
+ _set_gate(&idt[n],15,3,addr)
+
+#define set_call_gate(a,addr) \
+ _set_gate(a,12,3,addr)
+
+#define _set_seg_desc(gate_addr,type,dpl,base,limit) {\
+ *((gate_addr)+1) = ((base) & 0xff000000) | \
+ (((base) & 0x00ff0000)>>16) | \
+ ((limit) & 0xf0000) | \
+ ((dpl)<<13) | \
+ (0x00408000) | \
+ ((type)<<8); \
+ *(gate_addr) = (((base) & 0x0000ffff)<<16) | \
+ ((limit) & 0x0ffff); }
+
+#define _set_tssldt_desc(n,addr,limit,type) \
+__asm__ __volatile__ ("movw $" #limit ",%1\n\t" \
+ "movw %%ax,%2\n\t" \
+ "rorl $16,%%eax\n\t" \
+ "movb %%al,%3\n\t" \
+ "movb $" type ",%4\n\t" \
+ "movb $0x00,%5\n\t" \
+ "movb %%ah,%6\n\t" \
+ "rorl $16,%%eax" \
+ : /* no output */ \
+ :"a" (addr+0xc0000000), "m" (*(n)), "m" (*(n+2)), "m" (*(n+4)), \
+ "m" (*(n+5)), "m" (*(n+6)), "m" (*(n+7)) \
+ )
+
+#define set_tss_desc(n,addr) _set_tssldt_desc(((char *) (n)),((int)(addr)),235,"0x89")
+#define set_ldt_desc(n,addr,size) \
+ _set_tssldt_desc(((char *) (n)),((int)(addr)),((size << 3) - 1),"0x82")
+
+/*
+ * This is the ldt that every process will get unless we need
+ * something other than this.
+ */
+extern struct desc_struct default_ldt;
+
+/*
+ * disable hlt during certain critical i/o operations
+ */
+#ifndef MACH
+#define HAVE_DISABLE_HLT
+#endif
+void disable_hlt(void);
+void enable_hlt(void);
+
+static __inline__ unsigned long long rdmsr(unsigned int msr)
+{
+ unsigned long long ret;
+ __asm__ __volatile__("rdmsr"
+ : "=A" (ret)
+ : "c" (msr));
+ return ret;
+}
+
+static __inline__ void wrmsr(unsigned int msr,unsigned long long val)
+{
+ __asm__ __volatile__("wrmsr"
+ : /* no Outputs */
+ : "c" (msr), "A" (val));
+}
+
+
+static __inline__ unsigned long long rdtsc(void)
+{
+ unsigned long long ret;
+ __asm__ __volatile__("rdtsc"
+ : "=A" (ret)
+ : /* no inputs */);
+ return ret;
+}
+
+static __inline__ unsigned long long rdpmc(unsigned int counter)
+{
+ unsigned long long ret;
+ __asm__ __volatile__("rdpmc"
+ : "=A" (ret)
+ : "c" (counter));
+ return ret;
+}
+
+#endif
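
tas(ptr), defined above as xchg(ptr, 1), is the classic test-and-set primitive used to build spinlocks: atomically write 1 and look at the old value. A user-space sketch of the same idea, using GCC's portable atomic builtins in place of the raw xchgl instruction (names here are illustrative):

#include <stdio.h>

static volatile int lock_word = 0;

/* tas() above is xchg(ptr, 1): atomically store 1 and return the old
   value.  __atomic_exchange_n is the portable GCC spelling of the
   same exchange operation. */
static int tas_sketch(volatile int *p)
{
  return __atomic_exchange_n(p, 1, __ATOMIC_ACQUIRE);
}

static void unlock_sketch(volatile int *p)
{
  __atomic_store_n(p, 0, __ATOMIC_RELEASE);
}

int main(void)
{
  if (tas_sketch(&lock_word) == 0)
    printf("acquired the lock (old value was 0)\n");

  if (tas_sketch(&lock_word) != 0)
    printf("second attempt fails until the holder releases it\n");

  unlock_sketch(&lock_word);
  return 0;
}
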
diff --git a/linux/dev/include/linux/blk.h b/linux/dev/include/linux/blk.h
new file mode 100644
index 0000000..ff4e46b
--- /dev/null
+++ b/linux/dev/include/linux/blk.h
@@ -0,0 +1,467 @@
+/* Is this okay? by OKUJI Yoshinori */
+#ifndef _BLK_H
+#define _BLK_H
+
+#include <linux/blkdev.h>
+#include <linux/locks.h>
+#include <linux/malloc.h>
+#include <linux/config.h>
+#include <linux/md.h>
+
+/*
+ * NR_REQUEST is the number of entries in the request-queue.
+ * NOTE that writes may use only the low 2/3 of these: reads
+ * take precedence.
+ */
+#define NR_REQUEST 64
+
+/*
+ * This is used in the elevator algorithm. We don't prioritise reads
+ * over writes any more --- although reads are more time-critical than
+ * writes, by treating them equally we increase filesystem throughput.
+ * This turns out to give better overall performance. -- sct
+ */
+#define IN_ORDER(s1,s2) \
+((s1)->rq_dev < (s2)->rq_dev || (((s1)->rq_dev == (s2)->rq_dev && \
+(s1)->sector < (s2)->sector)))
+
+/*
+ * These will have to be changed to be aware of different buffer
+ * sizes etc.. It actually needs a major cleanup.
+ */
+#if defined(IDE_DRIVER) || defined(MD_DRIVER)
+#define SECTOR_MASK ((BLOCK_SIZE >> 9) - 1)
+#else
+#define SECTOR_MASK (blksize_size[MAJOR_NR] && \
+ blksize_size[MAJOR_NR][MINOR(CURRENT->rq_dev)] ? \
+ ((blksize_size[MAJOR_NR][MINOR(CURRENT->rq_dev)] >> 9) - 1) : \
+ ((BLOCK_SIZE >> 9) - 1))
+#endif /* IDE_DRIVER */
+
+#define SUBSECTOR(block) (CURRENT->current_nr_sectors > 0)
+
+#ifdef CONFIG_CDU31A
+extern int cdu31a_init(void);
+#endif CONFIG_CDU31A
+#ifdef CONFIG_MCD
+extern int mcd_init(void);
+#endif CONFIG_MCD
+#ifdef CONFIG_MCDX
+extern int mcdx_init(void);
+#endif CONFIG_MCDX
+#ifdef CONFIG_SBPCD
+extern int sbpcd_init(void);
+#endif CONFIG_SBPCD
+#ifdef CONFIG_AZTCD
+extern int aztcd_init(void);
+#endif CONFIG_AZTCD
+#ifdef CONFIG_CDU535
+extern int sony535_init(void);
+#endif CONFIG_CDU535
+#ifdef CONFIG_GSCD
+extern int gscd_init(void);
+#endif CONFIG_GSCD
+#ifdef CONFIG_CM206
+extern int cm206_init(void);
+#endif CONFIG_CM206
+#ifdef CONFIG_OPTCD
+extern int optcd_init(void);
+#endif CONFIG_OPTCD
+#ifdef CONFIG_SJCD
+extern int sjcd_init(void);
+#endif CONFIG_SJCD
+#ifdef CONFIG_CDI_INIT
+extern int cdi_init(void);
+#endif CONFIG_CDI_INIT
+#ifdef CONFIG_BLK_DEV_HD
+extern int hd_init(void);
+#endif
+#ifdef CONFIG_BLK_DEV_IDE
+extern int ide_init(void);
+#endif
+#ifdef CONFIG_BLK_DEV_XD
+extern int xd_init(void);
+#endif
+#ifdef CONFIG_BLK_DEV_LOOP
+extern int loop_init(void);
+#endif
+#ifdef CONFIG_BLK_DEV_MD
+extern int md_init(void);
+#endif CONFIG_BLK_DEV_MD
+
+extern void set_device_ro(kdev_t dev,int flag);
+void add_blkdev_randomness(int major);
+
+extern int floppy_init(void);
+extern void rd_load(void);
+extern int rd_init(void);
+extern int rd_doload; /* 1 = load ramdisk, 0 = don't load */
+extern int rd_prompt; /* 1 = prompt for ramdisk, 0 = don't prompt */
+extern int rd_image_start; /* starting block # of image */
+
+#ifdef CONFIG_BLK_DEV_INITRD
+
+#define INITRD_MINOR 250 /* shouldn't collide with /dev/ram* too soon ... */
+
+extern unsigned long initrd_start,initrd_end;
+extern int mount_initrd; /* zero if initrd should not be mounted */
+void initrd_init(void);
+
+#endif
+
+#define RO_IOCTLS(dev,where) \
+ case BLKROSET: { int __err; if (!suser()) return -EACCES; \
+ __err = verify_area(VERIFY_READ, (void *) (where), sizeof(long)); \
+ if (!__err) set_device_ro((dev),get_fs_long((long *) (where))); return __err; } \
+ case BLKROGET: { int __err = verify_area(VERIFY_WRITE, (void *) (where), sizeof(long)); \
+ if (!__err) put_fs_long(0!=is_read_only(dev),(long *) (where)); return __err; }
+
+#if defined(MAJOR_NR) || defined(IDE_DRIVER)
+
+/*
+ * Add entries as needed.
+ */
+
+#ifdef IDE_DRIVER
+
+#define DEVICE_NR(device) (MINOR(device) >> PARTN_BITS)
+#define DEVICE_ON(device) /* nothing */
+#define DEVICE_OFF(device) /* nothing */
+
+#elif (MAJOR_NR == RAMDISK_MAJOR)
+
+/* ram disk */
+#define DEVICE_NAME "ramdisk"
+#define DEVICE_REQUEST rd_request
+#define DEVICE_NR(device) (MINOR(device))
+#define DEVICE_ON(device)
+#define DEVICE_OFF(device)
+#define DEVICE_NO_RANDOM
+
+#elif (MAJOR_NR == FLOPPY_MAJOR)
+
+static void floppy_off(unsigned int nr);
+
+#define DEVICE_NAME "floppy"
+#define DEVICE_INTR do_floppy
+#define DEVICE_REQUEST do_fd_request
+#define DEVICE_NR(device) ( (MINOR(device) & 3) | ((MINOR(device) & 0x80 ) >> 5 ))
+#define DEVICE_ON(device)
+#define DEVICE_OFF(device) floppy_off(DEVICE_NR(device))
+
+#elif (MAJOR_NR == HD_MAJOR)
+
+/* harddisk: timeout is 6 seconds.. */
+#define DEVICE_NAME "harddisk"
+#define DEVICE_INTR do_hd
+#define DEVICE_TIMEOUT HD_TIMER
+#define TIMEOUT_VALUE (6*HZ)
+#define DEVICE_REQUEST do_hd_request
+#define DEVICE_NR(device) (MINOR(device)>>6)
+#define DEVICE_ON(device)
+#define DEVICE_OFF(device)
+
+#elif (MAJOR_NR == SCSI_DISK_MAJOR)
+
+#define DEVICE_NAME "scsidisk"
+#define DEVICE_INTR do_sd
+#define TIMEOUT_VALUE (2*HZ)
+#define DEVICE_REQUEST do_sd_request
+#define DEVICE_NR(device) (MINOR(device) >> 4)
+#define DEVICE_ON(device)
+#define DEVICE_OFF(device)
+
+/* Kludge to use the same number for both char and block major numbers */
+#elif (MAJOR_NR == MD_MAJOR) && defined(MD_DRIVER)
+
+#define DEVICE_NAME "Multiple devices driver"
+#define DEVICE_REQUEST do_md_request
+#define DEVICE_NR(device) (MINOR(device))
+#define DEVICE_ON(device)
+#define DEVICE_OFF(device)
+
+#elif (MAJOR_NR == SCSI_TAPE_MAJOR)
+
+#define DEVICE_NAME "scsitape"
+#define DEVICE_INTR do_st
+#define DEVICE_NR(device) (MINOR(device) & 0x7f)
+#define DEVICE_ON(device)
+#define DEVICE_OFF(device)
+
+#elif (MAJOR_NR == SCSI_CDROM_MAJOR)
+
+#define DEVICE_NAME "CD-ROM"
+#define DEVICE_INTR do_sr
+#define DEVICE_REQUEST do_sr_request
+#define DEVICE_NR(device) (MINOR(device))
+#define DEVICE_ON(device)
+#define DEVICE_OFF(device)
+
+#elif (MAJOR_NR == XT_DISK_MAJOR)
+
+#define DEVICE_NAME "xt disk"
+#define DEVICE_REQUEST do_xd_request
+#define DEVICE_NR(device) (MINOR(device) >> 6)
+#define DEVICE_ON(device)
+#define DEVICE_OFF(device)
+
+#elif (MAJOR_NR == CDU31A_CDROM_MAJOR)
+
+#define DEVICE_NAME "CDU31A"
+#define DEVICE_REQUEST do_cdu31a_request
+#define DEVICE_NR(device) (MINOR(device))
+#define DEVICE_ON(device)
+#define DEVICE_OFF(device)
+
+#elif (MAJOR_NR == MITSUMI_CDROM_MAJOR)
+
+#define DEVICE_NAME "Mitsumi CD-ROM"
+/* #define DEVICE_INTR do_mcd */
+#define DEVICE_REQUEST do_mcd_request
+#define DEVICE_NR(device) (MINOR(device))
+#define DEVICE_ON(device)
+#define DEVICE_OFF(device)
+
+#elif (MAJOR_NR == MITSUMI_X_CDROM_MAJOR)
+
+#define DEVICE_NAME "Mitsumi CD-ROM"
+/* #define DEVICE_INTR do_mcdx */
+#define DEVICE_REQUEST do_mcdx_request
+#define DEVICE_NR(device) (MINOR(device))
+#define DEVICE_ON(device)
+#define DEVICE_OFF(device)
+
+#elif (MAJOR_NR == MATSUSHITA_CDROM_MAJOR)
+
+#define DEVICE_NAME "Matsushita CD-ROM controller #1"
+#define DEVICE_REQUEST do_sbpcd_request
+#define DEVICE_NR(device) (MINOR(device))
+#define DEVICE_ON(device)
+#define DEVICE_OFF(device)
+
+#elif (MAJOR_NR == MATSUSHITA_CDROM2_MAJOR)
+
+#define DEVICE_NAME "Matsushita CD-ROM controller #2"
+#define DEVICE_REQUEST do_sbpcd2_request
+#define DEVICE_NR(device) (MINOR(device))
+#define DEVICE_ON(device)
+#define DEVICE_OFF(device)
+
+#elif (MAJOR_NR == MATSUSHITA_CDROM3_MAJOR)
+
+#define DEVICE_NAME "Matsushita CD-ROM controller #3"
+#define DEVICE_REQUEST do_sbpcd3_request
+#define DEVICE_NR(device) (MINOR(device))
+#define DEVICE_ON(device)
+#define DEVICE_OFF(device)
+
+#elif (MAJOR_NR == MATSUSHITA_CDROM4_MAJOR)
+
+#define DEVICE_NAME "Matsushita CD-ROM controller #4"
+#define DEVICE_REQUEST do_sbpcd4_request
+#define DEVICE_NR(device) (MINOR(device))
+#define DEVICE_ON(device)
+#define DEVICE_OFF(device)
+
+#elif (MAJOR_NR == AZTECH_CDROM_MAJOR)
+
+#define DEVICE_NAME "Aztech CD-ROM"
+#define DEVICE_REQUEST do_aztcd_request
+#define DEVICE_NR(device) (MINOR(device))
+#define DEVICE_ON(device)
+#define DEVICE_OFF(device)
+
+#elif (MAJOR_NR == CDU535_CDROM_MAJOR)
+
+#define DEVICE_NAME "SONY-CDU535"
+#define DEVICE_INTR do_cdu535
+#define DEVICE_REQUEST do_cdu535_request
+#define DEVICE_NR(device) (MINOR(device))
+#define DEVICE_ON(device)
+#define DEVICE_OFF(device)
+
+#elif (MAJOR_NR == GOLDSTAR_CDROM_MAJOR)
+
+#define DEVICE_NAME "Goldstar R420"
+#define DEVICE_REQUEST do_gscd_request
+#define DEVICE_NR(device) (MINOR(device))
+#define DEVICE_ON(device)
+#define DEVICE_OFF(device)
+
+#elif (MAJOR_NR == CM206_CDROM_MAJOR)
+#define DEVICE_NAME "Philips/LMS cd-rom cm206"
+#define DEVICE_REQUEST do_cm206_request
+#define DEVICE_NR(device) (MINOR(device))
+#define DEVICE_ON(device)
+#define DEVICE_OFF(device)
+
+#elif (MAJOR_NR == OPTICS_CDROM_MAJOR)
+
+#define DEVICE_NAME "DOLPHIN 8000AT CD-ROM"
+#define DEVICE_REQUEST do_optcd_request
+#define DEVICE_NR(device) (MINOR(device))
+#define DEVICE_ON(device)
+#define DEVICE_OFF(device)
+
+#elif (MAJOR_NR == SANYO_CDROM_MAJOR)
+
+#define DEVICE_NAME "Sanyo H94A CD-ROM"
+#define DEVICE_REQUEST do_sjcd_request
+#define DEVICE_NR(device) (MINOR(device))
+#define DEVICE_ON(device)
+#define DEVICE_OFF(device)
+
+#endif /* MAJOR_NR == whatever */
+
+#if (MAJOR_NR != SCSI_TAPE_MAJOR)
+#if !defined(IDE_DRIVER)
+
+#ifndef CURRENT
+#define CURRENT (blk_dev[MAJOR_NR].current_request)
+#endif
+
+#define CURRENT_DEV DEVICE_NR(CURRENT->rq_dev)
+
+#ifdef DEVICE_INTR
+static void (*DEVICE_INTR)(void) = NULL;
+#endif
+#ifdef DEVICE_TIMEOUT
+
+#define SET_TIMER \
+((timer_table[DEVICE_TIMEOUT].expires = jiffies + TIMEOUT_VALUE), \
+(timer_active |= 1<<DEVICE_TIMEOUT))
+
+#define CLEAR_TIMER \
+timer_active &= ~(1<<DEVICE_TIMEOUT)
+
+#define SET_INTR(x) \
+if ((DEVICE_INTR = (x)) != NULL) \
+ SET_TIMER; \
+else \
+ CLEAR_TIMER;
+
+#else
+
+#define SET_INTR(x) (DEVICE_INTR = (x))
+
+#endif /* DEVICE_TIMEOUT */
+
+static void (DEVICE_REQUEST)(void);
+
+#ifdef DEVICE_INTR
+#define CLEAR_INTR SET_INTR(NULL)
+#else
+#define CLEAR_INTR
+#endif
+
+#define INIT_REQUEST \
+ if (!CURRENT) {\
+ CLEAR_INTR; \
+ return; \
+ } \
+ if (MAJOR(CURRENT->rq_dev) != MAJOR_NR) \
+ panic(DEVICE_NAME ": request list destroyed"); \
+ if (CURRENT->bh) { \
+ if (!buffer_locked(CURRENT->bh)) \
+ panic(DEVICE_NAME ": block not locked"); \
+ }
+
+#endif /* !defined(IDE_DRIVER) */
+
+/* end_request() - SCSI devices have their own version */
+/* - IDE drivers have their own copy too */
+
+#if ! SCSI_BLK_MAJOR(MAJOR_NR)
+
+#if defined(IDE_DRIVER) && !defined(_IDE_C) /* shared copy for IDE modules */
+void ide_end_request(byte uptodate, ide_hwgroup_t *hwgroup);
+#else
+
+#ifdef IDE_DRIVER
+void ide_end_request(byte uptodate, ide_hwgroup_t *hwgroup) {
+ struct request *req = hwgroup->rq;
+#else
+static void end_request(int uptodate) {
+ struct request *req = CURRENT;
+#endif /* IDE_DRIVER */
+ struct buffer_head * bh;
+ int nsect;
+
+ req->errors = 0;
+ if (!uptodate) {
+ printk("end_request: I/O error, dev %s, sector %lu\n",
+ kdevname(req->rq_dev), req->sector);
+#ifdef MACH
+ for (bh = req->bh; bh; )
+ {
+ struct buffer_head *next = bh->b_reqnext;
+ bh->b_reqnext = NULL;
+ mark_buffer_uptodate (bh, 0);
+ unlock_buffer (bh);
+ bh = next;
+ }
+ req->bh = NULL;
+#else
+ if ((bh = req->bh) != NULL) {
+ nsect = bh->b_size >> 9;
+ req->nr_sectors--;
+ req->nr_sectors &= ~(nsect - 1);
+ req->sector += nsect;
+ req->sector &= ~(nsect - 1);
+ }
+#endif
+ }
+
+ if ((bh = req->bh) != NULL) {
+ req->bh = bh->b_reqnext;
+ bh->b_reqnext = NULL;
+
+ /*
+ * This is our 'MD IO has finished' event handler.
+ * Note that b_state should be cached in a register
+ * anyway, so the overhead of this check is almost
+ * zero. But then, we never get OO for free :)
+ */
+ if (test_bit(BH_MD, &bh->b_state)) {
+ struct md_personality * pers=(struct md_personality *)bh->personality;
+ pers->end_request(bh,uptodate);
+ }
+ /*
+ * the normal (nonmirrored and no RAID5) case:
+ */
+ else {
+ mark_buffer_uptodate(bh, uptodate);
+ unlock_buffer(bh);
+ }
+ if ((bh = req->bh) != NULL) {
+ req->current_nr_sectors = bh->b_size >> 9;
+ if (req->nr_sectors < req->current_nr_sectors) {
+ req->nr_sectors = req->current_nr_sectors;
+ printk("end_request: buffer-list destroyed\n");
+ }
+ req->buffer = bh->b_data;
+ return;
+ }
+ }
+#ifndef DEVICE_NO_RANDOM
+ add_blkdev_randomness(MAJOR(req->rq_dev));
+#endif
+#ifdef IDE_DRIVER
+ blk_dev[MAJOR(req->rq_dev)].current_request = req->next;
+ hwgroup->rq = NULL;
+#else
+ DEVICE_OFF(req->rq_dev);
+ CURRENT = req->next;
+#endif /* IDE_DRIVER */
+ if (req->sem != NULL)
+ up(req->sem);
+ req->rq_status = RQ_INACTIVE;
+ wake_up(&wait_for_request);
+}
+#endif /* defined(IDE_DRIVER) && !defined(_IDE_C) */
+#endif /* ! SCSI_BLK_MAJOR(MAJOR_NR) */
+#endif /* (MAJOR_NR != SCSI_TAPE_MAJOR) */
+
+#endif /* defined(MAJOR_NR) || defined(IDE_DRIVER) */
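
Taken together, INIT_REQUEST, CURRENT and end_request() give the canonical shape of a 2.0-era block driver's request routine. A minimal sketch, assuming a driver that defined MAJOR_NR and the DEVICE_* macros before including this header; do_my_request() stands for whatever name the driver put in DEVICE_REQUEST, and my_do_transfer() is a hypothetical hardware routine:

static void do_my_request(void)     /* the function named by DEVICE_REQUEST */
{
    for (;;) {
        INIT_REQUEST;               /* returns when the queue is empty */
        if (CURRENT->cmd != READ && CURRENT->cmd != WRITE) {
            end_request(0);         /* unknown command: fail it */
            continue;
        }
        /* my_do_transfer() stands in for the real device I/O on the
         * chunk described by CURRENT (sector, current_nr_sectors,
         * buffer); any bookkeeping across buffer heads is the
         * driver's own business.  */
        end_request(my_do_transfer(CURRENT));
    }
}
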
+
+#endif /* _BLK_H */
diff --git a/linux/dev/include/linux/blkdev.h b/linux/dev/include/linux/blkdev.h
new file mode 100644
index 0000000..e9a40d7
--- /dev/null
+++ b/linux/dev/include/linux/blkdev.h
@@ -0,0 +1,72 @@
+#ifndef _LINUX_BLKDEV_H
+#define _LINUX_BLKDEV_H
+
+#include <linux/major.h>
+#include <linux/sched.h>
+#include <linux/genhd.h>
+#include <linux/tqueue.h>
+
+/*
+ * Ok, this is an expanded form so that we can use the same
+ * request for paging requests when that is implemented. In
+ * paging, 'bh' is NULL, and the semaphore is used to wait
+ * for read/write completion.
+ */
+struct request {
+ volatile int rq_status; /* should split this into a few status bits */
+#define RQ_INACTIVE (-1)
+#define RQ_ACTIVE 1
+#define RQ_SCSI_BUSY 0xffff
+#define RQ_SCSI_DONE 0xfffe
+#define RQ_SCSI_DISCONNECTING 0xffe0
+
+ kdev_t rq_dev;
+ int cmd; /* READ or WRITE */
+ int errors;
+ unsigned long sector;
+ unsigned long nr_sectors;
+ unsigned long current_nr_sectors;
+ char * buffer;
+ struct semaphore * sem;
+ struct buffer_head * bh;
+ struct buffer_head * bhtail;
+ struct request * next;
+};
+
+struct blk_dev_struct {
+ void (*request_fn)(void);
+ struct request * current_request;
+ struct request plug;
+ struct tq_struct plug_tq;
+};
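
blk_dev[] is the table that ll_rw_block()/make_request() consult to start I/O, so a block driver's initialization boils down to claiming its major with register_blkdev() (declared in linux/fs.h, later in this patch) and filling in its slot. A hedged sketch in which MY_MAJOR, my_fops, my_request, my_sizes and my_blksizes are all hypothetical:

int my_blkdev_init(void)
{
    if (register_blkdev(MY_MAJOR, "mydisk", &my_fops) < 0) {
        printk("mydisk: unable to get major %d\n", MY_MAJOR);
        return -EBUSY;
    }
    blk_dev[MY_MAJOR].request_fn = my_request;  /* start-I/O hook */
    blk_size[MY_MAJOR] = my_sizes;              /* per-minor device sizes */
    blksize_size[MY_MAJOR] = my_blksizes;       /* per-minor block sizes */
    return 0;
}
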
+
+struct sec_size {
+ unsigned block_size;
+ unsigned block_size_bits;
+};
+
+extern struct sec_size * blk_sec[MAX_BLKDEV];
+extern struct blk_dev_struct blk_dev[MAX_BLKDEV];
+extern struct wait_queue * wait_for_request;
+extern void resetup_one_dev(struct gendisk *dev, int drive);
+
+#ifdef MACH
+extern inline void unplug_device(void *data) { }
+#else
+extern void unplug_device(void * data);
+#endif
+
+extern void make_request(int major,int rw, struct buffer_head * bh);
+
+/* md needs this function to remap requests */
+extern int md_map (int minor, kdev_t *rdev, unsigned long *rsector, unsigned long size);
+extern int md_make_request (int major, int rw, struct buffer_head * bh);
+extern int md_error (kdev_t mddev, kdev_t rdev);
+
+extern int * blk_size[MAX_BLKDEV];
+
+extern int * blksize_size[MAX_BLKDEV];
+
+extern int * hardsect_size[MAX_BLKDEV];
+
+#endif
diff --git a/linux/dev/include/linux/compile.h b/linux/dev/include/linux/compile.h
new file mode 100644
index 0000000..7d43a20
--- /dev/null
+++ b/linux/dev/include/linux/compile.h
@@ -0,0 +1,6 @@
+#define UTS_VERSION "#11 Fri Apr 24 23:03:10 JST 1998"
+#define LINUX_COMPILE_TIME "23:03:10"
+#define LINUX_COMPILE_BY "somebody"
+#define LINUX_COMPILE_HOST "unknown"
+#define LINUX_COMPILE_DOMAIN "somewhere.org"
+#define LINUX_COMPILER "gcc version 2.7.2.3"
diff --git a/linux/dev/include/linux/etherdevice.h b/linux/dev/include/linux/etherdevice.h
new file mode 100644
index 0000000..eb262b2
--- /dev/null
+++ b/linux/dev/include/linux/etherdevice.h
@@ -0,0 +1,62 @@
+/*
+ * INET An implementation of the TCP/IP protocol suite for the LINUX
+ * operating system. NET is implemented using the BSD Socket
+ * interface as the means of communication with the user level.
+ *
+ * Definitions for the Ethernet handlers.
+ *
+ * Version: @(#)eth.h 1.0.4 05/13/93
+ *
+ * Authors: Ross Biro, <bir7@leland.Stanford.Edu>
+ * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
+ *
+ * Relocated to include/linux where it belongs by Alan Cox
+ * <gw4pts@gw4pts.ampr.org>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * WARNING: This move may well be temporary. This file will get merged with others RSN.
+ *
+ */
+#ifndef _LINUX_ETHERDEVICE_H
+#define _LINUX_ETHERDEVICE_H
+
+
+#include <linux/if_ether.h>
+
+#ifdef __KERNEL__
+extern int eth_header(struct sk_buff *skb, struct device *dev,
+ unsigned short type, void *daddr,
+ void *saddr, unsigned len);
+extern int eth_rebuild_header(void *buff, struct device *dev,
+ unsigned long dst, struct sk_buff *skb);
+
+/* This causes skb->protocol = 0. I'm not sure whether this is really OK.
+ * Last modified: 19980402 by OKUJI Yoshinori <okuji@kmc.kyoto-u.ac.jp>
+ */
+#ifdef MACH
+#define eth_type_trans(skb, dev) ((unsigned short)0)
+#else
+extern unsigned short eth_type_trans(struct sk_buff *skb, struct device *dev);
+#endif
+
+extern void eth_header_cache_bind(struct hh_cache ** hhp, struct device *dev,
+ unsigned short htype, __u32 daddr);
+extern void eth_header_cache_update(struct hh_cache *hh, struct device *dev, unsigned char * haddr);
+
+#ifdef MACH
+#define eth_copy_and_sum(dest, src, length, base) \
+ memcpy((dest)->data, src, length)
+#else
+extern void eth_copy_and_sum(struct sk_buff *dest,
+ unsigned char *src, int length, int base);
+#endif
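
With eth_type_trans() stubbed to 0 and eth_copy_and_sum() reduced to a memcpy under MACH, the common driver receive sequence still compiles unchanged. A hedged sketch of that sequence; dev_alloc_skb(), skb_reserve(), skb_put() and netif_rx() come from <linux/skbuff.h> and <linux/netdevice.h> (not quoted here), and rx_data/pkt_len are illustrative:

static void my_rx_packet(struct device *dev, unsigned char *rx_data, int pkt_len)
{
    struct sk_buff *skb = dev_alloc_skb(pkt_len + 2);

    if (!skb)
        return;                     /* out of memory: drop the packet */
    skb->dev = dev;
    skb_reserve(skb, 2);            /* longword-align the IP header */
    eth_copy_and_sum(skb, rx_data, pkt_len, 0);
    skb_put(skb, pkt_len);          /* mark pkt_len bytes as valid */
    skb->protocol = eth_type_trans(skb, dev);   /* constant 0 under MACH */
    netif_rx(skb);                  /* queue it for the protocol layers */
}
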
+
+extern struct device * init_etherdev(struct device *, int);
+
+#endif
+
+#endif /* _LINUX_ETHERDEVICE_H */
diff --git a/linux/dev/include/linux/fs.h b/linux/dev/include/linux/fs.h
new file mode 100644
index 0000000..974e9e7
--- /dev/null
+++ b/linux/dev/include/linux/fs.h
@@ -0,0 +1,803 @@
+#ifndef _LINUX_FS_H
+#define _LINUX_FS_H
+
+/*
+ * This file has definitions for some important file table
+ * structures etc.
+ */
+
+#include <linux/config.h>
+#include <linux/linkage.h>
+#include <linux/limits.h>
+#include <linux/wait.h>
+#include <linux/types.h>
+#include <linux/vfs.h>
+#include <linux/net.h>
+#include <linux/kdev_t.h>
+#include <linux/ioctl.h>
+
+/*
+ * It's silly to have NR_OPEN bigger than NR_FILE, but I'll fix
+ * that later. Anyway, now the file code is no longer dependent
+ * on bitmaps in unsigned longs, but uses the new fd_set structure..
+ *
+ * Some programs (notably those using select()) may have to be
+ * recompiled to take full advantage of the new limits..
+ */
+
+/* Fixed constants first: */
+#undef NR_OPEN
+#define NR_OPEN 256
+
+#define NR_SUPER 64
+#define BLOCK_SIZE 1024
+#define BLOCK_SIZE_BITS 10
+
+/* And dynamically-tunable limits and defaults: */
+extern int max_inodes, nr_inodes;
+extern int max_files, nr_files;
+#define NR_INODE 3072 /* this should be bigger than NR_FILE */
+#define NR_FILE 1024 /* this can well be larger on a larger system */
+
+#define MAY_EXEC 1
+#define MAY_WRITE 2
+#define MAY_READ 4
+
+#define FMODE_READ 1
+#define FMODE_WRITE 2
+
+#define READ 0
+#define WRITE 1
+#define READA 2 /* read-ahead - don't block if no resources */
+#define WRITEA 3 /* write-ahead - don't block if no resources */
+
+#ifndef NULL
+#define NULL ((void *) 0)
+#endif
+
+#define NIL_FILP ((struct file *)0)
+#define SEL_IN 1
+#define SEL_OUT 2
+#define SEL_EX 4
+
+/*
+ * These are the fs-independent mount-flags: up to 16 flags are supported
+ */
+#define MS_RDONLY 1 /* Mount read-only */
+#define MS_NOSUID 2 /* Ignore suid and sgid bits */
+#define MS_NODEV 4 /* Disallow access to device special files */
+#define MS_NOEXEC 8 /* Disallow program execution */
+#define MS_SYNCHRONOUS 16 /* Writes are synced at once */
+#define MS_REMOUNT 32 /* Alter flags of a mounted FS */
+#define MS_MANDLOCK 64 /* Allow mandatory locks on an FS */
+#define S_WRITE 128 /* Write on file/directory/symlink */
+#define S_APPEND 256 /* Append-only file */
+#define S_IMMUTABLE 512 /* Immutable file */
+#define MS_NOATIME 1024 /* Do not update access times. */
+#define S_BAD_INODE 2048 /* Marker for unreadable inodes */
+#define S_ZERO_WR 4096 /* Device accepts 0 length writes */
+/*
+ * Flags that can be altered by MS_REMOUNT
+ */
+#define MS_RMT_MASK (MS_RDONLY|MS_NOSUID|MS_NODEV|MS_NOEXEC|MS_SYNCHRONOUS|MS_MANDLOCK|MS_NOATIME)
+
+/*
+ * Magic mount flag number. Has to be or-ed to the flag values.
+ */
+#define MS_MGC_VAL 0xC0ED0000 /* magic flag number to indicate "new" flags */
+#define MS_MGC_MSK 0xffff0000 /* magic flag number mask */
+
+/*
+ * Note that read-only etc flags are inode-specific: setting some file-system
+ * flags just means all the inodes inherit those flags by default. It might be
+ * possible to override it selectively if you really wanted to with some
+ * ioctl() that is not currently implemented.
+ *
+ * Exception: MS_RDONLY is always applied to the entire file system.
+ */
+#define IS_RDONLY(inode) (((inode)->i_sb) && ((inode)->i_sb->s_flags & MS_RDONLY))
+#define IS_NOSUID(inode) ((inode)->i_flags & MS_NOSUID)
+#define IS_NODEV(inode) ((inode)->i_flags & MS_NODEV)
+#define IS_NOEXEC(inode) ((inode)->i_flags & MS_NOEXEC)
+#define IS_SYNC(inode) ((inode)->i_flags & MS_SYNCHRONOUS)
+#define IS_MANDLOCK(inode) ((inode)->i_flags & MS_MANDLOCK)
+
+#define IS_WRITABLE(inode) ((inode)->i_flags & S_WRITE)
+#define IS_APPEND(inode) ((inode)->i_flags & S_APPEND)
+#define IS_IMMUTABLE(inode) ((inode)->i_flags & S_IMMUTABLE)
+#define IS_NOATIME(inode) ((inode)->i_flags & MS_NOATIME)
+#define IS_ZERO_WR(inode) ((inode)->i_flags & S_ZERO_WR)
+
+#define UPDATE_ATIME(inode) \
+ if (!IS_NOATIME(inode) && !IS_RDONLY(inode)) { \
+ inode->i_atime = CURRENT_TIME; \
+ inode->i_dirt = 1; \
+ }
+
+/* the read-only stuff doesn't really belong here, but any other place is
+ probably as bad and I don't want to create yet another include file. */
+
+#define BLKROSET _IO(0x12,93) /* set device read-only (0 = read-write) */
+#define BLKROGET _IO(0x12,94) /* get read-only status (0 = read-write) */
+#define BLKRRPART _IO(0x12,95) /* re-read partition table */
+#define BLKGETSIZE _IO(0x12,96) /* return device size */
+#define BLKFLSBUF _IO(0x12,97) /* flush buffer cache */
+#define BLKRASET _IO(0x12,98) /* Set read ahead for block device */
+#define BLKRAGET _IO(0x12,99) /* get current read ahead setting */
+
+#define BMAP_IOCTL 1 /* obsolete - kept for compatibility */
+#define FIBMAP _IO(0x00,1) /* bmap access */
+#define FIGETBSZ _IO(0x00,2) /* get the block size used for bmap */
+
+#ifdef __KERNEL__
+
+#include <asm/semaphore.h>
+#include <asm/bitops.h>
+
+extern void buffer_init(void);
+extern unsigned long inode_init(unsigned long start, unsigned long end);
+extern unsigned long file_table_init(unsigned long start, unsigned long end);
+extern unsigned long name_cache_init(unsigned long start, unsigned long end);
+
+typedef char buffer_block[BLOCK_SIZE];
+
+/* bh state bits */
+#define BH_Uptodate 0 /* 1 if the buffer contains valid data */
+#define BH_Dirty 1 /* 1 if the buffer is dirty */
+#define BH_Lock 2 /* 1 if the buffer is locked */
+#define BH_Req 3 /* 0 if the buffer has been invalidated */
+#define BH_Touched 4 /* 1 if the buffer has been touched (aging) */
+#define BH_Has_aged 5 /* 1 if the buffer has been aged (aging) */
+#define BH_Protected 6 /* 1 if the buffer is protected */
+#define BH_FreeOnIO 7 /* 1 to discard the buffer_head after IO */
+#define BH_MD 8 /* 1 if the buffer is an MD request */
+
+/*
+ * Try to keep the most commonly used fields in single cache lines (16
+ * bytes) to improve performance. This ordering should be
+ * particularly beneficial on 32-bit processors.
+ *
+ * We use the first 16 bytes for the data which is used in searches
+ * over the block hash lists (ie. getblk(), find_buffer() and
+ * friends).
+ *
+ * The second 16 bytes we use for lru buffer scans, as used by
+ * sync_buffers() and refill_freelist(). -- sct
+ */
+struct buffer_head {
+ /* First cache line: */
+ unsigned long b_blocknr; /* block number */
+ kdev_t b_dev; /* device (B_FREE = free) */
+ kdev_t b_rdev; /* Real device */
+ unsigned long b_rsector; /* Real buffer location on disk */
+ struct buffer_head * b_next; /* Hash queue list */
+ struct buffer_head * b_this_page; /* circular list of buffers in one page */
+
+ /* Second cache line: */
+ unsigned long b_state; /* buffer state bitmap (see above) */
+ struct buffer_head * b_next_free;
+ unsigned int b_count; /* users using this block */
+ unsigned long b_size; /* block size */
+
+ /* Non-performance-critical data follows. */
+ char * b_data; /* pointer to data block (1024 bytes) */
+ unsigned int b_list; /* List that this buffer appears */
+ unsigned long b_flushtime; /* Time when this (dirty) buffer
+ * should be written */
+ unsigned long b_lru_time; /* Time when this buffer was
+ * last used. */
+ struct wait_queue * b_wait;
+ struct buffer_head * b_prev; /* doubly linked list of hash-queue */
+ struct buffer_head * b_prev_free; /* doubly linked list of buffers */
+ struct buffer_head * b_reqnext; /* request queue */
+
+/*
+ * Some MD stuff like RAID5 needs special event handlers and
+ * special private buffer_head fields:
+ */
+ void * personality;
+ void * private_bh;
+};
+
+static inline int buffer_uptodate(struct buffer_head * bh)
+{
+ return test_bit(BH_Uptodate, &bh->b_state);
+}
+
+static inline int buffer_dirty(struct buffer_head * bh)
+{
+ return test_bit(BH_Dirty, &bh->b_state);
+}
+
+static inline int buffer_locked(struct buffer_head * bh)
+{
+ return test_bit(BH_Lock, &bh->b_state);
+}
+
+static inline int buffer_req(struct buffer_head * bh)
+{
+ return test_bit(BH_Req, &bh->b_state);
+}
+
+static inline int buffer_touched(struct buffer_head * bh)
+{
+ return test_bit(BH_Touched, &bh->b_state);
+}
+
+static inline int buffer_has_aged(struct buffer_head * bh)
+{
+ return test_bit(BH_Has_aged, &bh->b_state);
+}
+
+static inline int buffer_protected(struct buffer_head * bh)
+{
+ return test_bit(BH_Protected, &bh->b_state);
+}
+
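
The BH_* bits and the test_bit() wrappers above are what callers use to interrogate a buffer; the usual consumer pattern goes through bread()/brelse(), declared further down in this header. A minimal sketch, assuming <linux/errno.h> and <linux/string.h> are in scope and that dev/block/out are caller-supplied:

static int read_one_block(kdev_t dev, int block, char *out)
{
    struct buffer_head *bh = bread(dev, block, BLOCK_SIZE);

    if (!bh)
        return -EIO;    /* I/O error or no buffer available */
    /* bh->b_data now holds BLOCK_SIZE valid bytes and BH_Uptodate is set. */
    memcpy(out, bh->b_data, BLOCK_SIZE);
    brelse(bh);         /* drop our reference to the buffer */
    return 0;
}
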
+#ifndef MACH
+#include <linux/pipe_fs_i.h>
+#include <linux/minix_fs_i.h>
+#include <linux/ext_fs_i.h>
+#include <linux/ext2_fs_i.h>
+#include <linux/hpfs_fs_i.h>
+#include <linux/msdos_fs_i.h>
+#include <linux/umsdos_fs_i.h>
+#include <linux/iso_fs_i.h>
+#include <linux/nfs_fs_i.h>
+#include <linux/xia_fs_i.h>
+#include <linux/sysv_fs_i.h>
+#include <linux/affs_fs_i.h>
+#include <linux/ufs_fs_i.h>
+#endif
+
+/*
+ * Attribute flags. These should be or-ed together to figure out what
+ * has been changed!
+ */
+#define ATTR_MODE 1
+#define ATTR_UID 2
+#define ATTR_GID 4
+#define ATTR_SIZE 8
+#define ATTR_ATIME 16
+#define ATTR_MTIME 32
+#define ATTR_CTIME 64
+#define ATTR_ATIME_SET 128
+#define ATTR_MTIME_SET 256
+#define ATTR_FORCE 512 /* Not a change in itself, but force the change through */
+
+/*
+ * This is the Inode Attributes structure, used for notify_change(). It
+ * uses the above definitions as flags, to know which values have changed.
+ * Also, in this manner, a Filesystem can look at only the values it cares
+ * about. Basically, these are the attributes that the VFS layer can
+ * request to change from the FS layer.
+ *
+ * Derek Atkins <warlord@MIT.EDU> 94-10-20
+ */
+struct iattr {
+ unsigned int ia_valid;
+ umode_t ia_mode;
+ uid_t ia_uid;
+ gid_t ia_gid;
+ off_t ia_size;
+ time_t ia_atime;
+ time_t ia_mtime;
+ time_t ia_ctime;
+};
+
+#include <linux/quota.h>
+
+#ifdef MACH
+/* Suppress GCC's warnings. by OKUJI Yoshinori. */
+struct vm_area_struct;
+struct page;
+
+struct inode
+{
+ umode_t i_mode;
+ kdev_t i_rdev;
+};
+
+struct file
+{
+ mode_t f_mode;
+ loff_t f_pos;
+ unsigned short f_flags;
+ int f_resid;
+ void *f_object;
+ void *f_np;
+};
+
+#else /* !MACH */
+
+struct inode {
+ kdev_t i_dev;
+ unsigned long i_ino;
+ umode_t i_mode;
+ nlink_t i_nlink;
+ uid_t i_uid;
+ gid_t i_gid;
+ kdev_t i_rdev;
+ off_t i_size;
+ time_t i_atime;
+ time_t i_mtime;
+ time_t i_ctime;
+ unsigned long i_blksize;
+ unsigned long i_blocks;
+ unsigned long i_version;
+ unsigned long i_nrpages;
+ struct semaphore i_sem;
+ struct inode_operations *i_op;
+ struct super_block *i_sb;
+ struct wait_queue *i_wait;
+ struct file_lock *i_flock;
+ struct vm_area_struct *i_mmap;
+ struct page *i_pages;
+ struct dquot *i_dquot[MAXQUOTAS];
+ struct inode *i_next, *i_prev;
+ struct inode *i_hash_next, *i_hash_prev;
+ struct inode *i_bound_to, *i_bound_by;
+ struct inode *i_mount;
+ unsigned long i_count;
+ unsigned short i_flags;
+ unsigned short i_writecount;
+ unsigned char i_lock;
+ unsigned char i_dirt;
+ unsigned char i_pipe;
+ unsigned char i_sock;
+ unsigned char i_seek;
+ unsigned char i_update;
+ unsigned char i_condemned;
+ union {
+ struct pipe_inode_info pipe_i;
+ struct minix_inode_info minix_i;
+ struct ext_inode_info ext_i;
+ struct ext2_inode_info ext2_i;
+ struct hpfs_inode_info hpfs_i;
+ struct msdos_inode_info msdos_i;
+ struct umsdos_inode_info umsdos_i;
+ struct iso_inode_info isofs_i;
+ struct nfs_inode_info nfs_i;
+ struct xiafs_inode_info xiafs_i;
+ struct sysv_inode_info sysv_i;
+ struct affs_inode_info affs_i;
+ struct ufs_inode_info ufs_i;
+ struct socket socket_i;
+ void * generic_ip;
+ } u;
+};
+
+struct fown_struct {
+ int pid; /* pid or -pgrp where SIGIO should be sent */
+ uid_t uid, euid; /* uid/euid of process setting the owner */
+};
+
+struct file {
+ mode_t f_mode;
+ loff_t f_pos;
+ unsigned short f_flags;
+ unsigned short f_count;
+ unsigned long f_reada, f_ramax, f_raend, f_ralen, f_rawin;
+ struct file *f_next, *f_prev;
+ struct fown_struct f_owner;
+ struct inode * f_inode;
+ struct file_operations * f_op;
+ unsigned long f_version;
+ void *private_data; /* needed for tty driver, and maybe others */
+};
+#endif /* !MACH */
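
Under MACH the inode and file structures are cut down to the handful of fields that Linux drivers actually read, which is all the glue layer needs when it calls a driver's file_operations directly. A purely hypothetical sketch of such a call; glue_open_blkdev() is an invented name, and memset() is assumed from <linux/string.h>:

static int glue_open_blkdev(kdev_t dev, struct file_operations *fops)
{
    struct inode inode;
    struct file file;

    memset(&inode, 0, sizeof inode);
    memset(&file, 0, sizeof file);
    inode.i_rdev = dev;                     /* the field most drivers read */
    file.f_mode = FMODE_READ | FMODE_WRITE;

    return fops->open ? (*fops->open)(&inode, &file) : 0;
}
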
+
+#define FL_POSIX 1
+#define FL_FLOCK 2
+#define FL_BROKEN 4 /* broken flock() emulation */
+#define FL_ACCESS 8 /* for processes suspended by mandatory locking */
+
+struct file_lock {
+ struct file_lock *fl_next; /* singly linked list for this inode */
+ struct file_lock *fl_nextlink; /* doubly linked list of all locks */
+ struct file_lock *fl_prevlink; /* used to simplify lock removal */
+ struct file_lock *fl_nextblock; /* circular list of blocked processes */
+ struct file_lock *fl_prevblock;
+ struct task_struct *fl_owner;
+ struct wait_queue *fl_wait;
+ struct file *fl_file;
+ unsigned char fl_flags;
+ unsigned char fl_type;
+ off_t fl_start;
+ off_t fl_end;
+};
+
+#include <linux/fcntl.h>
+
+extern int fcntl_getlk(unsigned int fd, struct flock *l);
+extern int fcntl_setlk(unsigned int fd, unsigned int cmd, struct flock *l);
+extern void locks_remove_locks(struct task_struct *task, struct file *filp);
+
+#include <linux/stat.h>
+
+#define FLOCK_VERIFY_READ 1
+#define FLOCK_VERIFY_WRITE 2
+
+extern int locks_mandatory_locked(struct inode *inode);
+extern int locks_mandatory_area(int read_write, struct inode *inode,
+ struct file *filp, unsigned int offset,
+ unsigned int count);
+
+#ifndef MACH
+extern inline int locks_verify_locked(struct inode *inode)
+{
+ /* Candidates for mandatory locking have the setgid bit set
+ * but no group execute bit - an otherwise meaningless combination.
+ */
+ if (IS_MANDLOCK(inode) &&
+ (inode->i_mode & (S_ISGID | S_IXGRP)) == S_ISGID)
+ return (locks_mandatory_locked(inode));
+ return (0);
+}
+extern inline int locks_verify_area(int read_write, struct inode *inode,
+ struct file *filp, unsigned int offset,
+ unsigned int count)
+{
+ /* Candidates for mandatory locking have the setgid bit set
+ * but no group execute bit - an otherwise meaningless combination.
+ */
+ if (IS_MANDLOCK(inode) &&
+ (inode->i_mode & (S_ISGID | S_IXGRP)) == S_ISGID)
+ return (locks_mandatory_area(read_write, inode, filp, offset,
+ count));
+ return (0);
+}
+#endif
+
+struct fasync_struct {
+ int magic;
+ struct fasync_struct *fa_next; /* singly linked list */
+ struct file *fa_file;
+};
+
+#define FASYNC_MAGIC 0x4601
+
+extern int fasync_helper(struct inode *, struct file *, int, struct fasync_struct **);
+
+#ifndef MACH
+#include <linux/minix_fs_sb.h>
+#include <linux/ext_fs_sb.h>
+#include <linux/ext2_fs_sb.h>
+#include <linux/hpfs_fs_sb.h>
+#include <linux/msdos_fs_sb.h>
+#include <linux/iso_fs_sb.h>
+#include <linux/nfs_fs_sb.h>
+#include <linux/xia_fs_sb.h>
+#include <linux/sysv_fs_sb.h>
+#include <linux/affs_fs_sb.h>
+#include <linux/ufs_fs_sb.h>
+
+struct super_block {
+ kdev_t s_dev;
+ unsigned long s_blocksize;
+ unsigned char s_blocksize_bits;
+ unsigned char s_lock;
+ unsigned char s_rd_only;
+ unsigned char s_dirt;
+ struct file_system_type *s_type;
+ struct super_operations *s_op;
+ struct dquot_operations *dq_op;
+ unsigned long s_flags;
+ unsigned long s_magic;
+ unsigned long s_time;
+ struct inode * s_covered;
+ struct inode * s_mounted;
+ struct wait_queue * s_wait;
+ union {
+ struct minix_sb_info minix_sb;
+ struct ext_sb_info ext_sb;
+ struct ext2_sb_info ext2_sb;
+ struct hpfs_sb_info hpfs_sb;
+ struct msdos_sb_info msdos_sb;
+ struct isofs_sb_info isofs_sb;
+ struct nfs_sb_info nfs_sb;
+ struct xiafs_sb_info xiafs_sb;
+ struct sysv_sb_info sysv_sb;
+ struct affs_sb_info affs_sb;
+ struct ufs_sb_info ufs_sb;
+ void *generic_sbp;
+ } u;
+};
+#endif /* !MACH */
+
+/*
+ * This is the "filldir" function type, used by readdir() to let
+ * the kernel specify what kind of dirent layout it wants to have.
+ * This allows the kernel to read directories into kernel space or
+ * to have different dirent layouts depending on the binary type.
+ */
+typedef int (*filldir_t)(void *, const char *, int, off_t, ino_t);
+
+struct file_operations {
+ int (*lseek) (struct inode *, struct file *, off_t, int);
+ int (*read) (struct inode *, struct file *, char *, int);
+ int (*write) (struct inode *, struct file *, const char *, int);
+ int (*readdir) (struct inode *, struct file *, void *, filldir_t);
+ int (*select) (struct inode *, struct file *, int, select_table *);
+ int (*ioctl) (struct inode *, struct file *, unsigned int, unsigned long);
+ int (*mmap) (struct inode *, struct file *, struct vm_area_struct *);
+ int (*open) (struct inode *, struct file *);
+ void (*release) (struct inode *, struct file *);
+ int (*fsync) (struct inode *, struct file *);
+ int (*fasync) (struct inode *, struct file *, int);
+ int (*check_media_change) (kdev_t dev);
+ int (*revalidate) (kdev_t dev);
+};
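
In 2.0 these tables are initialized positionally, with NULL in every slot the driver does not implement; block drivers typically reuse block_read(), block_write() and block_fsync(), declared near the end of this header. A sketch in which every my_* routine is hypothetical:

static int my_open(struct inode *, struct file *);
static void my_release(struct inode *, struct file *);
static int my_ioctl(struct inode *, struct file *, unsigned int, unsigned long);
static int my_media_change(kdev_t);

static struct file_operations my_blk_fops = {
    NULL,               /* lseek: default */
    block_read,         /* read */
    block_write,        /* write */
    NULL,               /* readdir */
    NULL,               /* select */
    my_ioctl,           /* ioctl */
    NULL,               /* mmap */
    my_open,            /* open */
    my_release,         /* release */
    block_fsync,        /* fsync */
    NULL,               /* fasync */
    my_media_change,    /* check_media_change */
    NULL                /* revalidate */
};
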
+
+struct inode_operations {
+ struct file_operations * default_file_ops;
+ int (*create) (struct inode *,const char *,int,int,struct inode **);
+ int (*lookup) (struct inode *,const char *,int,struct inode **);
+ int (*link) (struct inode *,struct inode *,const char *,int);
+ int (*unlink) (struct inode *,const char *,int);
+ int (*symlink) (struct inode *,const char *,int,const char *);
+ int (*mkdir) (struct inode *,const char *,int,int);
+ int (*rmdir) (struct inode *,const char *,int);
+ int (*mknod) (struct inode *,const char *,int,int,int);
+ int (*rename) (struct inode *,const char *,int,struct inode *,const char *,int, int);
+ int (*readlink) (struct inode *,char *,int);
+ int (*follow_link) (struct inode *,struct inode *,int,int,struct inode **);
+ int (*readpage) (struct inode *, struct page *);
+ int (*writepage) (struct inode *, struct page *);
+ int (*bmap) (struct inode *,int);
+ void (*truncate) (struct inode *);
+ int (*permission) (struct inode *, int);
+ int (*smap) (struct inode *,int);
+};
+
+struct super_operations {
+ void (*read_inode) (struct inode *);
+ int (*notify_change) (struct inode *, struct iattr *);
+ void (*write_inode) (struct inode *);
+ void (*put_inode) (struct inode *);
+ void (*put_super) (struct super_block *);
+ void (*write_super) (struct super_block *);
+ void (*statfs) (struct super_block *, struct statfs *, int);
+ int (*remount_fs) (struct super_block *, int *, char *);
+};
+
+struct dquot_operations {
+ void (*initialize) (struct inode *, short);
+ void (*drop) (struct inode *);
+ int (*alloc_block) (const struct inode *, unsigned long);
+ int (*alloc_inode) (const struct inode *, unsigned long);
+ void (*free_block) (const struct inode *, unsigned long);
+ void (*free_inode) (const struct inode *, unsigned long);
+ int (*transfer) (struct inode *, struct iattr *, char);
+};
+
+struct file_system_type {
+ struct super_block *(*read_super) (struct super_block *, void *, int);
+ const char *name;
+ int requires_dev;
+ struct file_system_type * next;
+};
+
+extern int register_filesystem(struct file_system_type *);
+extern int unregister_filesystem(struct file_system_type *);
+
+asmlinkage int sys_open(const char *, int, int);
+asmlinkage int sys_close(unsigned int); /* yes, it's really unsigned */
+asmlinkage int sys_read(unsigned int, char *, int);
+
+extern void kill_fasync(struct fasync_struct *fa, int sig);
+
+extern int getname(const char * filename, char **result);
+extern void putname(char * name);
+extern int do_truncate(struct inode *, unsigned long);
+extern int register_blkdev(unsigned int, const char *, struct file_operations *);
+extern int unregister_blkdev(unsigned int major, const char * name);
+extern int blkdev_open(struct inode * inode, struct file * filp);
+extern void blkdev_release (struct inode * inode);
+extern struct file_operations def_blk_fops;
+extern struct inode_operations blkdev_inode_operations;
+
+extern int register_chrdev(unsigned int, const char *, struct file_operations *);
+extern int unregister_chrdev(unsigned int major, const char * name);
+extern int chrdev_open(struct inode * inode, struct file * filp);
+extern struct file_operations def_chr_fops;
+extern struct inode_operations chrdev_inode_operations;
+
+extern void init_fifo(struct inode * inode);
+extern struct inode_operations fifo_inode_operations;
+
+extern struct file_operations connecting_fifo_fops;
+extern struct file_operations read_fifo_fops;
+extern struct file_operations write_fifo_fops;
+extern struct file_operations rdwr_fifo_fops;
+extern struct file_operations read_pipe_fops;
+extern struct file_operations write_pipe_fops;
+extern struct file_operations rdwr_pipe_fops;
+
+extern struct file_system_type *get_fs_type(const char *name);
+
+extern int fs_may_mount(kdev_t dev);
+extern int fs_may_umount(kdev_t dev, struct inode * mount_root);
+extern int fs_may_remount_ro(kdev_t dev);
+
+extern struct file *first_file;
+extern struct super_block super_blocks[NR_SUPER];
+
+extern void refile_buffer(struct buffer_head * buf);
+extern void set_writetime(struct buffer_head * buf, int flag);
+extern int try_to_free_buffer(struct buffer_head*, struct buffer_head**, int);
+
+extern int nr_buffers;
+extern int buffermem;
+extern int nr_buffer_heads;
+
+#define BUF_CLEAN 0
+#define BUF_LOCKED 1 /* Buffers scheduled for write */
+#define BUF_LOCKED1 2 /* Supers, inodes */
+#define BUF_DIRTY 3 /* Dirty buffers, not yet scheduled for write */
+#define NR_LIST 4
+
+#ifdef MACH
+extern inline void
+mark_buffer_uptodate (struct buffer_head *bh, int on)
+{
+ if (on)
+ set_bit (BH_Uptodate, &bh->b_state);
+ else
+ clear_bit (BH_Uptodate, &bh->b_state);
+}
+#else
+void mark_buffer_uptodate(struct buffer_head * bh, int on);
+#endif
+
+extern inline void mark_buffer_clean(struct buffer_head * bh)
+{
+#ifdef MACH
+ clear_bit (BH_Dirty, &bh->b_state);
+#else
+ if (clear_bit(BH_Dirty, &bh->b_state)) {
+ if (bh->b_list == BUF_DIRTY)
+ refile_buffer(bh);
+ }
+#endif
+}
+
+extern inline void mark_buffer_dirty(struct buffer_head * bh, int flag)
+{
+#ifdef MACH
+ set_bit (BH_Dirty, &bh->b_state);
+#else
+ if (!set_bit(BH_Dirty, &bh->b_state)) {
+ set_writetime(bh, flag);
+ if (bh->b_list != BUF_DIRTY)
+ refile_buffer(bh);
+ }
+#endif
+}
+
+extern int check_disk_change(kdev_t dev);
+
+#ifdef MACH
+#define invalidate_inodes(dev)
+#else
+extern void invalidate_inodes(kdev_t dev);
+#endif
+
+extern void invalidate_inode_pages(struct inode *);
+
+#ifdef MACH
+#define invalidate_buffers(dev)
+#else
+extern void invalidate_buffers(kdev_t dev);
+#endif
+
+extern int floppy_is_wp(int minor);
+extern void sync_inodes(kdev_t dev);
+
+#ifdef MACH
+#define sync_dev(dev)
+#define fsync_dev(dev)
+#else
+extern void sync_dev(kdev_t dev);
+extern int fsync_dev(kdev_t dev);
+#endif
+
+extern void sync_supers(kdev_t dev);
+extern int bmap(struct inode * inode,int block);
+extern int notify_change(struct inode *, struct iattr *);
+extern int namei(const char * pathname, struct inode ** res_inode);
+extern int lnamei(const char * pathname, struct inode ** res_inode);
+
+#ifdef MACH
+#define permission(inode, mask) 0
+#else
+extern int permission(struct inode * inode,int mask);
+#endif
+
+extern int get_write_access(struct inode *inode);
+extern void put_write_access(struct inode *inode);
+extern int open_namei(const char * pathname, int flag, int mode,
+ struct inode ** res_inode, struct inode * base);
+extern int do_mknod(const char * filename, int mode, dev_t dev);
+extern int do_pipe(int *);
+extern void iput(struct inode * inode);
+extern struct inode * __iget(struct super_block * sb,int nr,int crsmnt);
+extern struct inode * get_empty_inode(void);
+extern void insert_inode_hash(struct inode *);
+extern void clear_inode(struct inode *);
+extern struct inode * get_pipe_inode(void);
+extern void make_bad_inode(struct inode *);
+extern int get_unused_fd(void);
+extern void put_unused_fd(int);
+extern struct file * get_empty_filp(void);
+extern int close_fp(struct file *filp);
+extern struct buffer_head * get_hash_table(kdev_t dev, int block, int size);
+extern struct buffer_head * getblk(kdev_t dev, int block, int size);
+extern void ll_rw_block(int rw, int nr, struct buffer_head * bh[]);
+extern void ll_rw_page(int rw, kdev_t dev, unsigned long nr, char * buffer);
+extern void ll_rw_swap_file(int rw, kdev_t dev, unsigned int *b, int nb, char *buffer);
+extern int is_read_only(kdev_t dev);
+extern void __brelse(struct buffer_head *buf);
+extern inline void brelse(struct buffer_head *buf)
+{
+ if (buf)
+ __brelse(buf);
+}
+extern void __bforget(struct buffer_head *buf);
+extern inline void bforget(struct buffer_head *buf)
+{
+ if (buf)
+ __bforget(buf);
+}
+extern void set_blocksize(kdev_t dev, int size);
+extern struct buffer_head * bread(kdev_t dev, int block, int size);
+extern struct buffer_head * breada(kdev_t dev,int block, int size,
+ unsigned int pos, unsigned int filesize);
+
+extern int generic_readpage(struct inode *, struct page *);
+extern int generic_file_read(struct inode *, struct file *, char *, int);
+extern int generic_file_mmap(struct inode *, struct file *, struct vm_area_struct *);
+extern int brw_page(int, struct page *, kdev_t, int [], int, int);
+
+extern void put_super(kdev_t dev);
+unsigned long generate_cluster(kdev_t dev, int b[], int size);
+extern kdev_t ROOT_DEV;
+
+extern void show_buffers(void);
+extern void mount_root(void);
+
+#ifdef CONFIG_BLK_DEV_INITRD
+extern kdev_t real_root_dev;
+extern int change_root(kdev_t new_root_dev,const char *put_old);
+#endif
+
+extern int char_read(struct inode *, struct file *, char *, int);
+extern int block_read(struct inode *, struct file *, char *, int);
+extern int read_ahead[];
+
+extern int char_write(struct inode *, struct file *, const char *, int);
+extern int block_write(struct inode *, struct file *, const char *, int);
+
+extern int block_fsync(struct inode *, struct file *);
+extern int file_fsync(struct inode *, struct file *);
+
+extern void dcache_add(struct inode *, const char *, int, unsigned long);
+extern int dcache_lookup(struct inode *, const char *, int, unsigned long *);
+
+extern int inode_change_ok(struct inode *, struct iattr *);
+extern void inode_setattr(struct inode *, struct iattr *);
+
+extern inline struct inode * iget(struct super_block * sb,int nr)
+{
+ return __iget(sb, nr, 1);
+}
+
+/* kludge to get SCSI modules working */
+#ifndef MACH
+#include <linux/minix_fs.h>
+#include <linux/minix_fs_sb.h>
+#endif
+
+#endif /* __KERNEL__ */
+
+#endif
diff --git a/linux/dev/include/linux/genhd.h b/linux/dev/include/linux/genhd.h
new file mode 100644
index 0000000..20a6c97
--- /dev/null
+++ b/linux/dev/include/linux/genhd.h
@@ -0,0 +1,141 @@
+#ifndef _LINUX_GENHD_H
+#define _LINUX_GENHD_H
+
+/*
+ * genhd.h Copyright (C) 1992 Drew Eckhardt
+ * Generic hard disk header file by
+ * Drew Eckhardt
+ *
+ * <drew@colorado.edu>
+ */
+
+#include <linux/config.h>
+
+#define CONFIG_MSDOS_PARTITION 1
+
+#ifdef __alpha__
+#define CONFIG_OSF_PARTITION 1
+#endif
+
+#if defined(__sparc__) || defined(CONFIG_SMD_DISKLABEL)
+#define CONFIG_SUN_PARTITION 1
+#endif
+
+/* These three have identical behaviour; use the second one if DOS fdisk gets
+ confused about extended/logical partitions starting past cylinder 1023. */
+#define DOS_EXTENDED_PARTITION 5
+#define LINUX_EXTENDED_PARTITION 0x85
+#define WIN98_EXTENDED_PARTITION 0x0f
+
+#define DM6_PARTITION 0x54 /* has DDO: use xlated geom & offset */
+#define EZD_PARTITION 0x55 /* EZ-DRIVE: same as DM6 (we think) */
+#define DM6_AUX1PARTITION 0x51 /* no DDO: use xlated geom */
+#define DM6_AUX3PARTITION 0x53 /* no DDO: use xlated geom */
+
+#ifdef MACH_INCLUDE
+struct linux_partition
+{
+#else
+struct partition {
+#endif
+ unsigned char boot_ind; /* 0x80 - active */
+ unsigned char head; /* starting head */
+ unsigned char sector; /* starting sector */
+ unsigned char cyl; /* starting cylinder */
+ unsigned char sys_ind; /* What partition type */
+ unsigned char end_head; /* end head */
+ unsigned char end_sector; /* end sector */
+ unsigned char end_cyl; /* end cylinder */
+ unsigned int start_sect; /* starting sector counting from 0 */
+ unsigned int nr_sects; /* nr of sectors in partition */
+} __attribute((packed)); /* Give a polite hint to egcs/alpha to generate
+ unaligned operations */
+
+struct hd_struct {
+ long start_sect;
+ long nr_sects;
+};
+
+struct gendisk {
+ int major; /* major number of driver */
+ const char *major_name; /* name of major driver */
+ int minor_shift; /* number of times minor is shifted to
+ get real minor */
+ int max_p; /* maximum partitions per device */
+ int max_nr; /* maximum number of real devices */
+
+ void (*init)(struct gendisk *); /* Initialization called before we do our thing */
+ struct hd_struct *part; /* partition table */
+ int *sizes; /* device size in blocks, copied to blk_size[] */
+ int nr_real; /* number of real devices */
+
+ void *real_devices; /* internal use */
+ struct gendisk *next;
+};
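
A partitioned driver describes itself with one of these and links it onto gendisk_head so the partition-scanning code can call its init hook and fill in part[] and sizes[]. A hedged sketch in which every MY_*/my_* name is hypothetical:

#define MY_MAJOR  42                    /* hypothetical major number */
#define MY_SHIFT  4                     /* 16 minors per disk */

static void my_geninit(struct gendisk *);

static struct hd_struct my_part[4 << MY_SHIFT];  /* up to 4 disks */
static int my_sizes[4 << MY_SHIFT];

static struct gendisk my_gendisk = {
    MY_MAJOR,           /* major */
    "myd",              /* major_name, used by disk_name() */
    MY_SHIFT,           /* minor_shift */
    1 << MY_SHIFT,      /* max_p: partitions per disk */
    4,                  /* max_nr */
    my_geninit,         /* init */
    my_part,            /* part */
    my_sizes,           /* sizes */
    0,                  /* nr_real, filled in by init */
    NULL,               /* real_devices */
    NULL                /* next */
};

static void my_register_gendisk(void)   /* call from driver init */
{
    my_gendisk.next = gendisk_head;
    gendisk_head = &my_gendisk;
}
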
+
+#ifdef CONFIG_BSD_DISKLABEL
+/*
+ * BSD disklabel support by Yossi Gottlieb <yogo@math.tau.ac.il>
+ */
+
+#define BSD_PARTITION 0xa5 /* Partition ID */
+
+#define BSD_DISKMAGIC (0x82564557UL) /* The disk magic number */
+#define BSD_MAXPARTITIONS 8
+#define BSD_FS_UNUSED 0 /* disklabel unused partition entry ID */
+struct bsd_disklabel {
+ __u32 d_magic; /* the magic number */
+ __s16 d_type; /* drive type */
+ __s16 d_subtype; /* controller/d_type specific */
+ char d_typename[16]; /* type name, e.g. "eagle" */
+ char d_packname[16]; /* pack identifier */
+ __u32 d_secsize; /* # of bytes per sector */
+ __u32 d_nsectors; /* # of data sectors per track */
+ __u32 d_ntracks; /* # of tracks per cylinder */
+ __u32 d_ncylinders; /* # of data cylinders per unit */
+ __u32 d_secpercyl; /* # of data sectors per cylinder */
+ __u32 d_secperunit; /* # of data sectors per unit */
+ __u16 d_sparespertrack; /* # of spare sectors per track */
+ __u16 d_sparespercyl; /* # of spare sectors per cylinder */
+ __u32 d_acylinders; /* # of alt. cylinders per unit */
+ __u16 d_rpm; /* rotational speed */
+ __u16 d_interleave; /* hardware sector interleave */
+ __u16 d_trackskew; /* sector 0 skew, per track */
+ __u16 d_cylskew; /* sector 0 skew, per cylinder */
+ __u32 d_headswitch; /* head switch time, usec */
+ __u32 d_trkseek; /* track-to-track seek, usec */
+ __u32 d_flags; /* generic flags */
+#define NDDATA 5
+ __u32 d_drivedata[NDDATA]; /* drive-type specific information */
+#define NSPARE 5
+ __u32 d_spare[NSPARE]; /* reserved for future use */
+ __u32 d_magic2; /* the magic number (again) */
+ __u16 d_checksum; /* xor of data incl. partitions */
+
+ /* filesystem and partition information: */
+ __u16 d_npartitions; /* number of partitions in following */
+ __u32 d_bbsize; /* size of boot area at sn0, bytes */
+ __u32 d_sbsize; /* max size of fs superblock, bytes */
+ struct bsd_partition { /* the partition table */
+ __u32 p_size; /* number of sectors in partition */
+ __u32 p_offset; /* starting sector */
+ __u32 p_fsize; /* filesystem basic fragment size */
+ __u8 p_fstype; /* filesystem type, see below */
+ __u8 p_frag; /* filesystem fragments per block */
+ __u16 p_cpg; /* filesystem cylinders per group */
+ } d_partitions[BSD_MAXPARTITIONS]; /* actually may be more */
+};
+
+#endif /* CONFIG_BSD_DISKLABEL */
+
+extern struct gendisk *gendisk_head; /* linked list of disks */
+
+/*
+ * disk_name() is used by genhd.c and md.c.
+ * It formats the devicename of the indicated disk
+ * into the supplied buffer, and returns a pointer
+ * to that same buffer (for convenience).
+ */
+char *disk_name (struct gendisk *hd, int minor, char *buf);
+
+#endif
diff --git a/linux/dev/include/linux/if.h b/linux/dev/include/linux/if.h
new file mode 100644
index 0000000..50dd138
--- /dev/null
+++ b/linux/dev/include/linux/if.h
@@ -0,0 +1,184 @@
+/*
+ * INET An implementation of the TCP/IP protocol suite for the LINUX
+ * operating system. INET is implemented using the BSD Socket
+ * interface as the means of communication with the user level.
+ *
+ * Global definitions for the INET interface module.
+ *
+ * Version: @(#)if.h 1.0.2 04/18/93
+ *
+ * Authors: Original taken from Berkeley UNIX 4.3, (c) UCB 1982-1988
+ * Ross Biro, <bir7@leland.Stanford.Edu>
+ * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _LINUX_IF_H
+#define _LINUX_IF_H
+
+#include <linux/types.h> /* for "caddr_t" et al */
+#include <linux/socket.h> /* for "struct sockaddr" et al */
+
+/* Standard interface flags. */
+
+#ifdef MACH_INCLUDE
+
+#define LINUX_IFF_UP 0x1 /* interface is up */
+#define LINUX_IFF_BROADCAST 0x2 /* broadcast address valid */
+#define LINUX_IFF_DEBUG 0x4 /* turn on debugging */
+#define LINUX_IFF_LOOPBACK 0x8 /* is a loopback net */
+#define LINUX_IFF_POINTOPOINT 0x10 /* interface has a p-p link */
+#define LINUX_IFF_NOTRAILERS 0x20 /* avoid use of trailers */
+#define LINUX_IFF_RUNNING 0x40 /* resources allocated */
+#define LINUX_IFF_NOARP 0x80 /* no ARP protocol */
+#define LINUX_IFF_PROMISC 0x100 /* receive all packets */
+/* Not supported */
+#define LINUX_IFF_ALLMULTI 0x200 /* receive all multicast packets*/
+
+#define LINUX_IFF_MASTER 0x400 /* master of a load balancer */
+#define LINUX_IFF_SLAVE 0x800 /* slave of a load balancer */
+
+#define LINUX_IFF_MULTICAST 0x1000 /* Supports multicast */
+#define LINUX_IFF_SOFTHEADERS 0x2000 /* Device cannot construct headers
+ * until broadcast time. Therefore
+ * SOCK_PACKET must call header
+ * construction. Private flag.
+ * Never visible outside of kernel.
+ */
+
+#else /* !MACH_INCLUDE */
+
+#define IFF_UP 0x1 /* interface is up */
+#define IFF_BROADCAST 0x2 /* broadcast address valid */
+#define IFF_DEBUG 0x4 /* turn on debugging */
+#define IFF_LOOPBACK 0x8 /* is a loopback net */
+#define IFF_POINTOPOINT 0x10 /* interface has a p-p link */
+#define IFF_NOTRAILERS 0x20 /* avoid use of trailers */
+#define IFF_RUNNING 0x40 /* resources allocated */
+#define IFF_NOARP 0x80 /* no ARP protocol */
+#define IFF_PROMISC 0x100 /* receive all packets */
+/* Not supported */
+#define IFF_ALLMULTI 0x200 /* receive all multicast packets*/
+
+#define IFF_MASTER 0x400 /* master of a load balancer */
+#define IFF_SLAVE 0x800 /* slave of a load balancer */
+
+#define IFF_MULTICAST 0x1000 /* Supports multicast */
+#define IFF_SOFTHEADERS 0x2000 /* Device cannot construct headers
+ * until broadcast time. Therefore
+ * SOCK_PACKET must call header
+ * construction. Private flag.
+ * Never visible outside of kernel.
+ */
+#endif /* !MACH_INCLUDE */
+
+/*
+ * The ifaddr structure contains information about one address
+ * of an interface. They are maintained by the different address
+ * families, are allocated and attached when an address is set,
+ * and are linked together so all addresses for an interface can
+ * be located.
+ */
+
+struct ifaddr
+{
+ struct sockaddr ifa_addr; /* address of interface */
+ union {
+ struct sockaddr ifu_broadaddr;
+ struct sockaddr ifu_dstaddr;
+ } ifa_ifu;
+ struct iface *ifa_ifp; /* back-pointer to interface */
+ struct ifaddr *ifa_next; /* next address for interface */
+};
+
+#define ifa_broadaddr ifa_ifu.ifu_broadaddr /* broadcast address */
+#define ifa_dstaddr ifa_ifu.ifu_dstaddr /* other end of link */
+
+/*
+ * Device mapping structure. I'd just gone off and designed a
+ * beautiful scheme using only loadable modules with arguments
+ * for driver options and along come the PCMCIA people 8)
+ *
+ * Ah well. The get() side of this is good for WDSETUP, and it'll
+ * be handy for debugging things. The set side is fine for now and
+ * being very small might be worth keeping for clean configuration.
+ */
+
+struct ifmap
+{
+ unsigned long mem_start;
+ unsigned long mem_end;
+ unsigned short base_addr;
+ unsigned char irq;
+ unsigned char dma;
+ unsigned char port;
+ /* 3 bytes spare */
+};
+
+/*
+ * Interface request structure used for socket
+ * ioctl's. All interface ioctl's must have parameter
+ * definitions which begin with ifr_name. The
+ * remainder may be interface specific.
+ */
+
+struct ifreq
+{
+#define IFHWADDRLEN 6
+#define IFNAMSIZ 16
+ union
+ {
+ char ifrn_name[IFNAMSIZ]; /* if name, e.g. "en0" */
+ } ifr_ifrn;
+
+ union {
+ struct sockaddr ifru_addr;
+ struct sockaddr ifru_dstaddr;
+ struct sockaddr ifru_broadaddr;
+ struct sockaddr ifru_netmask;
+ struct sockaddr ifru_hwaddr;
+ short ifru_flags;
+ int ifru_metric;
+ int ifru_mtu;
+ struct ifmap ifru_map;
+ char ifru_slave[IFNAMSIZ]; /* Just fits the size */
+ caddr_t ifru_data;
+ } ifr_ifru;
+};
+
+#define ifr_name ifr_ifrn.ifrn_name /* interface name */
+#define ifr_hwaddr ifr_ifru.ifru_hwaddr /* MAC address */
+#define ifr_addr ifr_ifru.ifru_addr /* address */
+#define ifr_dstaddr ifr_ifru.ifru_dstaddr /* other end of p-p lnk */
+#define ifr_broadaddr ifr_ifru.ifru_broadaddr /* broadcast address */
+#define ifr_netmask ifr_ifru.ifru_netmask /* interface net mask */
+#define ifr_flags ifr_ifru.ifru_flags /* flags */
+#define ifr_metric ifr_ifru.ifru_metric /* metric */
+#define ifr_mtu ifr_ifru.ifru_mtu /* mtu */
+#define ifr_map ifr_ifru.ifru_map /* device map */
+#define ifr_slave ifr_ifru.ifru_slave /* slave device */
+#define ifr_data ifr_ifru.ifru_data /* for use by interface */
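
From user space this same ifreq layout is what the interface socket ioctls consume; the kernel only ever looks at ifr_name plus whichever union member the particular request uses. A hedged user-level sketch, where SIOCGIFFLAGS and the unprefixed IFF_* names come from the C library's <net/if.h> and <sys/ioctl.h>, mirroring this header:

#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <net/if.h>

int iface_is_up(const char *name)
{
    struct ifreq ifr;
    int s = socket(AF_INET, SOCK_DGRAM, 0);

    if (s < 0)
        return -1;
    strncpy(ifr.ifr_name, name, IFNAMSIZ - 1);
    ifr.ifr_name[IFNAMSIZ - 1] = '\0';
    if (ioctl(s, SIOCGIFFLAGS, &ifr) < 0) {
        close(s);
        return -1;
    }
    close(s);
    return (ifr.ifr_flags & IFF_UP) != 0;
}
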
+
+/*
+ * Structure used in SIOCGIFCONF request.
+ * Used to retrieve interface configuration
+ * for machine (useful for programs which
+ * must know all networks accessible).
+ */
+
+struct ifconf
+{
+ int ifc_len; /* size of buffer */
+ union
+ {
+ caddr_t ifcu_buf;
+ struct ifreq *ifcu_req;
+ } ifc_ifcu;
+};
+#define ifc_buf ifc_ifcu.ifcu_buf /* buffer address */
+#define ifc_req ifc_ifcu.ifcu_req /* array of structures */
+
+#endif /* _LINUX_IF_H */
diff --git a/linux/dev/include/linux/kernel.h b/linux/dev/include/linux/kernel.h
new file mode 100644
index 0000000..aef73ac
--- /dev/null
+++ b/linux/dev/include/linux/kernel.h
@@ -0,0 +1,107 @@
+#ifndef _LINUX_KERNEL_H
+#define _LINUX_KERNEL_H
+
+/*
+ * 'kernel.h' contains some often-used function prototypes etc
+ */
+
+#ifdef __KERNEL__
+
+#include <stdarg.h>
+#include <linux/linkage.h>
+
+/* Optimization barrier */
+#define barrier() __asm__("": : :"memory")
+
+#define INT_MAX ((int)(~0U>>1))
+#define UINT_MAX (~0U)
+#define LONG_MAX ((long)(~0UL>>1))
+#define ULONG_MAX (~0UL)
+
+#define STACK_MAGIC 0xdeadbeef
+
+#define KERN_EMERG "<0>" /* system is unusable */
+#define KERN_ALERT "<1>" /* action must be taken immediately */
+#define KERN_CRIT "<2>" /* critical conditions */
+#define KERN_ERR "<3>" /* error conditions */
+#define KERN_WARNING "<4>" /* warning conditions */
+#define KERN_NOTICE "<5>" /* normal but significant condition */
+#define KERN_INFO "<6>" /* informational */
+#define KERN_DEBUG "<7>" /* debug-level messages */
+
+# define NORET_TYPE /**/
+# define ATTRIB_NORET __attribute__((noreturn))
+# define NORET_AND noreturn,
+
+extern void math_error(void);
+NORET_TYPE void panic(const char * fmt, ...)
+ __attribute__ ((NORET_AND format (printf, 1, 2)));
+NORET_TYPE void do_exit(long error_code)
+ ATTRIB_NORET;
+extern unsigned long simple_strtoul(const char *,char **,unsigned int);
+
+extern int linux_sprintf(char *buf, const char *fmt, ...);
+extern int linux_vsprintf(char *buf, const char *fmt, va_list args);
+
+#ifndef MACH_INCLUDE
+#define sprintf linux_sprintf
+#define vsprintf linux_vsprintf
+#endif
+
+extern int session_of_pgrp(int pgrp);
+
+extern int kill_proc(int pid, int sig, int priv);
+extern int kill_pg(int pgrp, int sig, int priv);
+extern int kill_sl(int sess, int sig, int priv);
+
+asmlinkage int printk(const char * fmt, ...)
+ __attribute__ ((format (printf, 1, 2)));
+
+#if DEBUG
+#define pr_debug(fmt,arg...) \
+ printk(KERN_DEBUG fmt,##arg)
+#else
+#define pr_debug(fmt,arg...) \
+ do { } while (0)
+#endif
+
+#define pr_info(fmt,arg...) \
+ printk(KERN_INFO fmt,##arg)
+
+/*
+ * "suser()" checks against the effective user id, while "fsuser()"
+ * is used for file permission checking and checks against the fsuid..
+ */
+#ifdef MACH
+#define fsuser() 1
+#else
+#define fsuser() (current->fsuid == 0)
+#endif
+
+/*
+ * Display an IP address in readable format.
+ */
+
+#define NIPQUAD(addr) \
+ (((addr) >> 0) & 0xff), \
+ (((addr) >> 8) & 0xff), \
+ (((addr) >> 16) & 0xff), \
+ (((addr) >> 24) & 0xff)
+
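
The KERN_* strings are plain prefixes pasted onto the format string, and NIPQUAD() expands to four comma-separated byte values, so both drop straight into printk() calls. For example, with hypothetical names:

static void report_link(int unit, int state, unsigned long my_ip_addr)
{
    printk(KERN_INFO "eth%d: link up\n", unit);
    pr_debug("state = %d\n", state);    /* compiled out unless DEBUG */

    /* NIPQUAD supplies the four arguments for a %d.%d.%d.%d format. */
    printk(KERN_DEBUG "eth%d: my address is %d.%d.%d.%d\n",
           unit, NIPQUAD(my_ip_addr));
}
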
+#endif /* __KERNEL__ */
+
+#define SI_LOAD_SHIFT 16
+struct sysinfo {
+ long uptime; /* Seconds since boot */
+ unsigned long loads[3]; /* 1, 5, and 15 minute load averages */
+ unsigned long totalram; /* Total usable main memory size */
+ unsigned long freeram; /* Available memory size */
+ unsigned long sharedram; /* Amount of shared memory */
+ unsigned long bufferram; /* Memory used by buffers */
+ unsigned long totalswap; /* Total swap space size */
+ unsigned long freeswap; /* swap space still available */
+ unsigned short procs; /* Number of current processes */
+ char _f[22]; /* Pads structure to 64 bytes */
+};
+
+#endif
diff --git a/linux/dev/include/linux/locks.h b/linux/dev/include/linux/locks.h
new file mode 100644
index 0000000..72cf108
--- /dev/null
+++ b/linux/dev/include/linux/locks.h
@@ -0,0 +1,66 @@
+#ifndef _LINUX_LOCKS_H
+#define _LINUX_LOCKS_H
+
+#ifndef _LINUX_MM_H
+#include <linux/mm.h>
+#endif
+#ifndef _LINUX_PAGEMAP_H
+#include <linux/pagemap.h>
+#endif
+
+/*
+ * Unlocked, temporary IO buffer_heads get moved to the reuse_list
+ * once their page becomes unlocked.
+ */
+extern struct buffer_head *reuse_list;
+
+/*
+ * Buffer cache locking - note that interrupts may only unlock, not
+ * lock buffers.
+ */
+extern void __wait_on_buffer(struct buffer_head *);
+
+extern inline void wait_on_buffer(struct buffer_head * bh)
+{
+ if (test_bit(BH_Lock, &bh->b_state))
+ __wait_on_buffer(bh);
+}
+
+extern inline void lock_buffer(struct buffer_head * bh)
+{
+ while (set_bit(BH_Lock, &bh->b_state))
+ __wait_on_buffer(bh);
+}
+
+void unlock_buffer(struct buffer_head *);
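
These primitives carry the submit-then-wait I/O pattern: the submitter ends up holding the buffer lock, the completion path (here usually end_request()) calls unlock_buffer(), and anyone who needs the data sleeps in wait_on_buffer() until then. A minimal sketch for a buffer obtained from getblk(), using ll_rw_block() and buffer_uptodate() from <linux/fs.h>:

static int read_block_sync(struct buffer_head *bh)
{
    ll_rw_block(READ, 1, &bh);      /* queue the request; locks bh */
    wait_on_buffer(bh);             /* sleep until unlock_buffer() */
    return buffer_uptodate(bh) ? 0 : -EIO;  /* did the I/O succeed? */
}
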
+
+#ifndef MACH
+/*
+ * super-block locking. Again, interrupts may only unlock
+ * a super-block (although even this isn't done right now;
+ * nfs may need it).
+ */
+extern void __wait_on_super(struct super_block *);
+
+extern inline void wait_on_super(struct super_block * sb)
+{
+ if (sb->s_lock)
+ __wait_on_super(sb);
+}
+
+extern inline void lock_super(struct super_block * sb)
+{
+ if (sb->s_lock)
+ __wait_on_super(sb);
+ sb->s_lock = 1;
+}
+
+extern inline void unlock_super(struct super_block * sb)
+{
+ sb->s_lock = 0;
+ wake_up(&sb->s_wait);
+}
+#endif /* !MACH */
+
+#endif /* _LINUX_LOCKS_H */
+
diff --git a/linux/dev/include/linux/malloc.h b/linux/dev/include/linux/malloc.h
new file mode 100644
index 0000000..084aa05
--- /dev/null
+++ b/linux/dev/include/linux/malloc.h
@@ -0,0 +1,17 @@
+#ifndef _LINUX_MALLOC_H
+#define _LINUX_MALLOC_H
+
+#include <linux/mm.h>
+
+#ifndef MACH_INCLUDE
+#define kmalloc linux_kmalloc
+#define kfree linux_kfree
+#define kfree_s linux_kfree_s
+#endif
+
+extern void *linux_kmalloc (unsigned int size, int priority);
+extern void linux_kfree (void *obj);
+
+#define linux_kfree_s(a,b) linux_kfree(a)
+
+#endif /* _LINUX_MALLOC_H */
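
Outside MACH_INCLUDE translation units the familiar kmalloc()/kfree() spellings still work, but they resolve to the linux_* wrappers so they cannot collide with Mach's own allocator; the calling convention is unchanged from stock Linux (the GFP_* priorities live in <linux/mm.h>, outside the part quoted here). A small sketch with a hypothetical caller:

static int my_probe(void)
{
    char *buf = kmalloc(256, GFP_KERNEL);   /* really linux_kmalloc() */

    if (!buf)
        return -ENOMEM;
    /* ... use buf ... */
    kfree(buf);                             /* really linux_kfree() */
    return 0;
}
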
diff --git a/linux/dev/include/linux/mm.h b/linux/dev/include/linux/mm.h
new file mode 100644
index 0000000..0500e0c
--- /dev/null
+++ b/linux/dev/include/linux/mm.h
@@ -0,0 +1,377 @@
+#ifndef _LINUX_MM_H
+#define _LINUX_MM_H
+
+#include <linux/sched.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+
+#ifdef __KERNEL__
+
+#include <linux/string.h>
+
+extern unsigned long high_memory;
+
+#include <asm/page.h>
+#include <asm/atomic.h>
+
+#define VERIFY_READ 0
+#define VERIFY_WRITE 1
+
+extern int verify_area(int, const void *, unsigned long);
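
verify_area() is the check a driver must make before touching user memory with the get_fs_long()/put_fs_long() accessors from <asm/segment.h>; the BLKROSET/BLKROGET handling in <linux/blk.h> above is exactly this pattern. A minimal sketch of returning a long to user space, with a hypothetical helper name:

static int put_long_to_user(long value, unsigned long arg)
{
    int err = verify_area(VERIFY_WRITE, (void *) arg, sizeof(long));

    if (err)
        return err;                     /* typically -EFAULT */
    put_fs_long(value, (long *) arg);   /* raw copy, already verified */
    return 0;
}
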
+
+/*
+ * Linux kernel virtual memory manager primitives.
+ * The idea being to have a "virtual" mm in the same way
+ * we have a virtual fs - giving a cleaner interface to the
+ * mm details, and allowing different kinds of memory mappings
+ * (from shared memory to executable loading to arbitrary
+ * mmap() functions).
+ */
+
+/*
+ * This struct defines a VM memory area. There is one of these
+ * per VM-area/task. A VM area is any part of the process virtual memory
+ * space that has a special rule for the page-fault handlers (ie a shared
+ * library, the executable area etc).
+ */
+struct vm_area_struct {
+ struct mm_struct * vm_mm; /* VM area parameters */
+ unsigned long vm_start;
+ unsigned long vm_end;
+ pgprot_t vm_page_prot;
+ unsigned short vm_flags;
+/* AVL tree of VM areas per task, sorted by address */
+ short vm_avl_height;
+ struct vm_area_struct * vm_avl_left;
+ struct vm_area_struct * vm_avl_right;
+/* linked list of VM areas per task, sorted by address */
+ struct vm_area_struct * vm_next;
+/* for areas with inode, the circular list inode->i_mmap */
+/* for shm areas, the circular list of attaches */
+/* otherwise unused */
+ struct vm_area_struct * vm_next_share;
+ struct vm_area_struct * vm_prev_share;
+/* more */
+ struct vm_operations_struct * vm_ops;
+ unsigned long vm_offset;
+ struct inode * vm_inode;
+ unsigned long vm_pte; /* shared mem */
+};
+
+/*
+ * vm_flags..
+ */
+#define VM_READ 0x0001 /* currently active flags */
+#define VM_WRITE 0x0002
+#define VM_EXEC 0x0004
+#define VM_SHARED 0x0008
+
+#define VM_MAYREAD 0x0010 /* limits for mprotect() etc */
+#define VM_MAYWRITE 0x0020
+#define VM_MAYEXEC 0x0040
+#define VM_MAYSHARE 0x0080
+
+#define VM_GROWSDOWN 0x0100 /* general info on the segment */
+#define VM_GROWSUP 0x0200
+#define VM_SHM 0x0400 /* shared memory area, don't swap out */
+#define VM_DENYWRITE 0x0800 /* ETXTBSY on write attempts.. */
+
+#define VM_EXECUTABLE 0x1000
+#define VM_LOCKED 0x2000
+
+#define VM_STACK_FLAGS 0x0177
+
+/*
+ * mapping from the currently active vm_flags protection bits (the
+ * low four bits) to a page protection mask..
+ */
+extern pgprot_t protection_map[16];
+
+
+/*
+ * These are the virtual MM functions - opening of an area, closing and
+ * unmapping it (needed to keep files on disk up-to-date etc), pointer
+ * to the functions called when a no-page or a wp-page exception occurs.
+ */
+struct vm_operations_struct {
+ void (*open)(struct vm_area_struct * area);
+ void (*close)(struct vm_area_struct * area);
+ void (*unmap)(struct vm_area_struct *area, unsigned long, size_t);
+ void (*protect)(struct vm_area_struct *area, unsigned long, size_t, unsigned int newprot);
+ int (*sync)(struct vm_area_struct *area, unsigned long, size_t, unsigned int flags);
+ void (*advise)(struct vm_area_struct *area, unsigned long, size_t, unsigned int advise);
+ unsigned long (*nopage)(struct vm_area_struct * area, unsigned long address, int write_access);
+ unsigned long (*wppage)(struct vm_area_struct * area, unsigned long address,
+ unsigned long page);
+ int (*swapout)(struct vm_area_struct *, unsigned long, pte_t *);
+ pte_t (*swapin)(struct vm_area_struct *, unsigned long, unsigned long);
+};
+
+/*
+ * Try to keep the most commonly accessed fields in single cache lines
+ * here (16 bytes or greater). This ordering should be particularly
+ * beneficial on 32-bit processors.
+ *
+ * The first line is data used in page cache lookup, the second line
+ * is used for linear searches (eg. clock algorithm scans).
+ */
+typedef struct page {
+ /* these must be first (free area handling) */
+ struct page *next;
+ struct page *prev;
+ struct inode *inode;
+ unsigned long offset;
+ struct page *next_hash;
+ atomic_t count;
+ unsigned flags; /* atomic flags, some possibly updated asynchronously */
+ unsigned dirty:16,
+ age:8;
+ struct wait_queue *wait;
+ struct page *prev_hash;
+ struct buffer_head * buffers;
+ unsigned long swap_unlock_entry;
+ unsigned long map_nr; /* page->map_nr == page - mem_map */
+} mem_map_t;
+
+/* Page flag bit values */
+#define PG_locked 0
+#define PG_error 1
+#define PG_referenced 2
+#define PG_uptodate 3
+#define PG_free_after 4
+#define PG_decr_after 5
+#define PG_swap_unlock_after 6
+#define PG_DMA 7
+#define PG_reserved 31
+
+/* Make it prettier to test the above... */
+#define PageLocked(page) (test_bit(PG_locked, &(page)->flags))
+#define PageError(page) (test_bit(PG_error, &(page)->flags))
+#define PageReferenced(page) (test_bit(PG_referenced, &(page)->flags))
+#define PageDirty(page) (test_bit(PG_dirty, &(page)->flags))
+#define PageUptodate(page) (test_bit(PG_uptodate, &(page)->flags))
+#define PageFreeAfter(page) (test_bit(PG_free_after, &(page)->flags))
+#define PageDecrAfter(page) (test_bit(PG_decr_after, &(page)->flags))
+#define PageSwapUnlockAfter(page) (test_bit(PG_swap_unlock_after, &(page)->flags))
+#define PageDMA(page) (test_bit(PG_DMA, &(page)->flags))
+#define PageReserved(page) (test_bit(PG_reserved, &(page)->flags))
+
+/*
+ * page->reserved denotes a page which must never be accessed (which
+ * may not even be present).
+ *
+ * page->dma is set for those pages which lie in the range of
+ * physical addresses capable of carrying DMA transfers.
+ *
+ * Multiple processes may "see" the same page. E.g. for untouched
+ * mappings of /dev/null, all processes see the same page full of
+ * zeroes, and text pages of executables and shared libraries normally
+ * have at most one copy in memory.
+ *
+ * For the non-reserved pages, page->count denotes a reference count.
+ * page->count == 0 means the page is free.
+ * page->count == 1 means the page is used for exactly one purpose
+ * (e.g. a private data page of one process).
+ *
+ * A page may be used by kmalloc() or by anyone else who does a
+ * get_free_page(). In this case the page->count is at least 1, and
+ * all other fields are unused but should be 0 or NULL. The
+ * management of this page is the responsibility of the one who uses
+ * it.
+ *
+ * The other pages (we may call them "process pages") are completely
+ * managed by the Linux memory manager: I/O, buffers, swapping etc.
+ * The following discussion applies only to them.
+ *
+ * A page may belong to an inode's memory mapping. In this case,
+ * page->inode is the inode, and page->offset is the file offset
+ * of the page (not necessarily a multiple of PAGE_SIZE).
+ *
+ * A page may have buffers allocated to it. In this case,
+ * page->buffers is a circular list of these buffer heads. Else,
+ * page->buffers == NULL.
+ *
+ * For pages belonging to inodes, the page->count is the number of
+ * attaches, plus 1 if buffers are allocated to the page.
+ *
+ * All pages belonging to an inode make up a doubly linked list
+ * inode->i_pages, using the fields page->next and page->prev. (These
+ * fields are also used for freelist management when page->count==0.)
+ * There is also a hash table mapping (inode,offset) to the page
+ * in memory if present. The lists for this hash table use the fields
+ * page->next_hash and page->prev_hash.
+ *
+ * All process pages can do I/O:
+ * - inode pages may need to be read from disk,
+ * - inode pages which have been modified and are MAP_SHARED may need
+ * to be written to disk,
+ * - private pages which have been modified may need to be swapped out
+ * to swap space and (later) to be read back into memory.
+ * During disk I/O, page->locked is true. This bit is set before I/O
+ * and reset when I/O completes. page->wait is a wait queue of all
+ * tasks waiting for the I/O on this page to complete.
+ * page->uptodate tells whether the page's contents are valid.
+ * When a read completes, the page becomes uptodate, unless a disk I/O
+ * error happened.
+ * When a write completes, and page->free_after is true, the page is
+ * freed without any further delay.
+ *
+ * For choosing which pages to swap out, inode pages carry a
+ * page->referenced bit, which is set any time the system accesses
+ * that page through the (inode,offset) hash table.
+ * There is also the page->age counter, which implements a linear
+ * decay (why not an exponential decay?), see swapctl.h.
+ */
+
+extern mem_map_t * mem_map;
+
+/*
+ * This is timing-critical - most of the time in getting a new page
+ * goes to clearing the page. If you want a page without the clearing
+ * overhead, just use __get_free_page() directly..
+ */
+#define __get_free_page(priority) __get_free_pages((priority),0,0)
+#define __get_dma_pages(priority, order) __get_free_pages((priority),(order),1)
+extern unsigned long __get_free_pages(int priority, unsigned long gfporder, int dma);
+
+extern inline unsigned long get_free_page(int priority)
+{
+ unsigned long page;
+
+ page = __get_free_page(priority);
+ if (page)
+ memset((void *) page, 0, PAGE_SIZE);
+ return page;
+}
+
+/* memory.c & swap.c*/
+
+#define free_page(addr) free_pages((addr),0)
+extern void free_pages(unsigned long addr, unsigned long order);
+extern void __free_page(struct page *);
+
+extern void show_free_areas(void);
+extern unsigned long put_dirty_page(struct task_struct * tsk,unsigned long page,
+ unsigned long address);
+
+extern void free_page_tables(struct mm_struct * mm);
+extern void clear_page_tables(struct task_struct * tsk);
+extern int new_page_tables(struct task_struct * tsk);
+extern int copy_page_tables(struct task_struct * to);
+
+extern int zap_page_range(struct mm_struct *mm, unsigned long address, unsigned long size);
+extern int copy_page_range(struct mm_struct *dst, struct mm_struct *src, struct vm_area_struct *vma);
+extern int remap_page_range(unsigned long from, unsigned long to, unsigned long size, pgprot_t prot);
+extern int zeromap_page_range(unsigned long from, unsigned long size, pgprot_t prot);
+
+extern void vmtruncate(struct inode * inode, unsigned long offset);
+extern void handle_mm_fault(struct vm_area_struct *vma, unsigned long address, int write_access);
+extern void do_wp_page(struct task_struct * tsk, struct vm_area_struct * vma, unsigned long address, int write_access);
+extern void do_no_page(struct task_struct * tsk, struct vm_area_struct * vma, unsigned long address, int write_access);
+
+extern unsigned long paging_init(unsigned long start_mem, unsigned long end_mem);
+extern void mem_init(unsigned long start_mem, unsigned long end_mem);
+extern void show_mem(void);
+extern void oom(struct task_struct * tsk);
+extern void si_meminfo(struct sysinfo * val);
+
+/* vmalloc.c */
+
+extern void * vmalloc(unsigned long size);
+extern void * vremap(unsigned long offset, unsigned long size);
+extern void vfree(void * addr);
+extern int vread(char *buf, char *addr, int count);
+
+/* mmap.c */
+extern unsigned long do_mmap(struct file * file, unsigned long addr, unsigned long len,
+ unsigned long prot, unsigned long flags, unsigned long off);
+extern void merge_segments(struct mm_struct *, unsigned long, unsigned long);
+extern void insert_vm_struct(struct mm_struct *, struct vm_area_struct *);
+extern void remove_shared_vm_struct(struct vm_area_struct *);
+extern void build_mmap_avl(struct mm_struct *);
+extern void exit_mmap(struct mm_struct *);
+extern int do_munmap(unsigned long, size_t);
+extern unsigned long get_unmapped_area(unsigned long, unsigned long);
+
+/* filemap.c */
+extern unsigned long page_unuse(unsigned long);
+extern int shrink_mmap(int, int, int);
+extern void truncate_inode_pages(struct inode *, unsigned long);
+
+#define GFP_BUFFER 0x00
+#define GFP_ATOMIC 0x01
+#define GFP_USER 0x02
+#define GFP_KERNEL 0x03
+#define GFP_NOBUFFER 0x04
+#define GFP_NFS 0x05
+#define GFP_IO 0x06
+
+/* Flag - indicates that the buffer will be suitable for DMA. Ignored on some
+ platforms, used as appropriate on others */
+
+#define GFP_DMA 0x80
+
+#define GFP_LEVEL_MASK 0xf
+
+#ifndef MACH
+/* vma is the first one with address < vma->vm_end,
+ * and even address < vma->vm_start. Have to extend vma. */
+static inline int expand_stack(struct vm_area_struct * vma, unsigned long address)
+{
+ unsigned long grow;
+
+ address &= PAGE_MASK;
+ grow = vma->vm_start - address;
+ if (vma->vm_end - address
+ > (unsigned long) current->rlim[RLIMIT_STACK].rlim_cur ||
+ (vma->vm_mm->total_vm << PAGE_SHIFT) + grow
+ > (unsigned long) current->rlim[RLIMIT_AS].rlim_cur)
+ return -ENOMEM;
+ vma->vm_start = address;
+ vma->vm_offset -= grow;
+ vma->vm_mm->total_vm += grow >> PAGE_SHIFT;
+ if (vma->vm_flags & VM_LOCKED)
+ vma->vm_mm->locked_vm += grow >> PAGE_SHIFT;
+ return 0;
+}
+
+#define avl_empty (struct vm_area_struct *) NULL
+
+/* Look up the first VMA which satisfies addr < vm_end, NULL if none. */
+static inline struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long addr)
+{
+ struct vm_area_struct * result = NULL;
+
+ if (mm) {
+ struct vm_area_struct * tree = mm->mmap_avl;
+ for (;;) {
+ if (tree == avl_empty)
+ break;
+ if (tree->vm_end > addr) {
+ result = tree;
+ if (tree->vm_start <= addr)
+ break;
+ tree = tree->vm_avl_left;
+ } else
+ tree = tree->vm_avl_right;
+ }
+ }
+ return result;
+}
+
+/* Look up the first VMA which intersects the interval start_addr..end_addr-1,
+ NULL if none. Assume start_addr < end_addr. */
+static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
+{
+ struct vm_area_struct * vma;
+
+ vma = find_vma(mm,start_addr);
+ if (vma && end_addr <= vma->vm_start)
+ vma = NULL;
+ return vma;
+}
+#endif /* !MACH */
+
+#endif /* __KERNEL__ */
+
+#endif
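
As the comments in mm.h explain, get_free_page() returns a zeroed page while __get_free_page() skips the clearing, and the GFP_* value passed as the priority selects how aggressively the allocator may behave. A brief sketch of the intended calling pattern, assuming kernel context; the function name is hypothetical.

#include <linux/mm.h>

/* Illustrative only: allocate one zeroed page at GFP_KERNEL priority and
   return it to the free lists again. */
static void foo_page_roundtrip(void)
{
	unsigned long page = get_free_page(GFP_KERNEL);	/* zeroed on success */

	if (!page)
		return;		/* out of memory */

	/* ... use (void *) page as PAGE_SIZE bytes of scratch space ... */

	free_page(page);	/* expands to free_pages(page, 0) */
}
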
diff --git a/linux/dev/include/linux/netdevice.h b/linux/dev/include/linux/netdevice.h
new file mode 100644
index 0000000..ff25df1
--- /dev/null
+++ b/linux/dev/include/linux/netdevice.h
@@ -0,0 +1,338 @@
+/*
+ * INET An implementation of the TCP/IP protocol suite for the LINUX
+ * operating system. INET is implemented using the BSD Socket
+ * interface as the means of communication with the user level.
+ *
+ * Definitions for the Interfaces handler.
+ *
+ * Version: @(#)dev.h 1.0.11 07/31/96
+ *
+ * Authors: Ross Biro, <bir7@leland.Stanford.Edu>
+ * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
+ * Corey Minyard <wf-rch!minyard@relay.EU.net>
+ * Donald J. Becker, <becker@super.org>
+ * Alan Cox, <A.Cox@swansea.ac.uk>
+ * Bjorn Ekwall. <bj0rn@blox.se>
+ * Lawrence V. Stefani, <stefani@lkg.dec.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * Moved to /usr/include/linux for NET3
+ * Added extern for fddi_setup()
+ */
+#ifndef _LINUX_NETDEVICE_H
+#define _LINUX_NETDEVICE_H
+
+#include <linux/config.h>
+#include <linux/if.h>
+#include <linux/if_ether.h>
+
+/* for future expansion when we will have different priorities. */
+#define DEV_NUMBUFFS 3
+#define MAX_ADDR_LEN 7
+#ifndef CONFIG_AX25
+#ifndef CONFIG_TR
+#if !defined(CONFIG_NET_IPIP) && !defined(CONFIG_NET_IPIP_MODULE)
+#define MAX_HEADER 32 /* We really need about 18 worst case .. so 32 is aligned */
+#else
+#define MAX_HEADER 80 /* We need to allow for having tunnel headers */
+#endif /* IPIP */
+#else
+#define MAX_HEADER 48 /* Token Ring header needs 40 bytes ... 48 is aligned */
+#endif /* TR */
+#else
+#define MAX_HEADER 96 /* AX.25 + NetROM */
+#endif /* AX25 */
+
+#define IS_MYADDR 1 /* address is (one of) our own */
+#define IS_LOOPBACK 2 /* address is for LOOPBACK */
+#define IS_BROADCAST 3 /* address is a valid broadcast */
+#define IS_INVBCAST 4 /* Wrong netmask bcast not for us (unused)*/
+#define IS_MULTICAST 5 /* Multicast IP address */
+
+#ifdef __KERNEL__
+
+#include <linux/skbuff.h>
+
+/*
+ * We tag multicasts with these structures.
+ */
+
+struct dev_mc_list
+{
+ struct dev_mc_list *next;
+ char dmi_addr[MAX_ADDR_LEN];
+ unsigned short dmi_addrlen;
+ unsigned short dmi_users;
+};
+
+struct hh_cache
+{
+ struct hh_cache *hh_next;
+ void *hh_arp; /* Opaque pointer, used by
+ * any address resolution module,
+ * not only ARP.
+ */
+ int hh_refcnt; /* number of users */
+ unsigned short hh_type; /* protocol identifier, e.g. ETH_P_IP */
+ char hh_uptodate; /* hh_data is valid */
+ char hh_data[16]; /* cached hardware header */
+};
+
+/*
+ * The DEVICE structure.
+ * Actually, this whole structure is a big mistake. It mixes I/O
+ * data with strictly "high-level" data, and it has to know about
+ * almost every data structure used in the INET module.
+ */
+
+#ifdef MACH
+
+#ifndef MACH_INCLUDE
+#define device linux_device
+#endif
+
+struct linux_device
+
+#else
+
+struct device
+
+#endif
+{
+
+ /*
+ * This is the first field of the "visible" part of this structure
+ * (i.e. as seen by users in the "Space.c" file). It is the name
+ * of the interface.
+ */
+ char *name;
+
+ /* I/O specific fields - FIXME: Merge these and struct ifmap into one */
+ unsigned long rmem_end; /* shmem "recv" end */
+ unsigned long rmem_start; /* shmem "recv" start */
+ unsigned long mem_end; /* shared mem end */
+ unsigned long mem_start; /* shared mem start */
+ unsigned long base_addr; /* device I/O address */
+ unsigned char irq; /* device IRQ number */
+
+ /* Low-level status flags. */
+ volatile unsigned char start, /* start an operation */
+ interrupt; /* interrupt arrived */
+ unsigned long tbusy; /* transmitter busy must be long for bitops */
+
+ struct linux_device *next;
+
+ /* The device initialization function. Called only once. */
+ int (*init)(struct linux_device *dev);
+
+ /* Some hardware also needs these fields, but they are not part of the
+ usual set specified in Space.c. */
+ unsigned char if_port; /* Selectable AUI, TP,..*/
+ unsigned char dma; /* DMA channel */
+
+ struct enet_statistics* (*get_stats)(struct linux_device *dev);
+
+ /*
+ * This marks the end of the "visible" part of the structure. All
+ * fields hereafter are internal to the system, and may change at
+ * will (read: may be cleaned up at will).
+ */
+
+ /* These may be needed for future network-power-down code. */
+ unsigned long trans_start; /* Time (in jiffies) of last Tx */
+ unsigned long last_rx; /* Time of last Rx */
+
+ unsigned short flags; /* interface flags (a la BSD) */
+ unsigned short family; /* address family ID (AF_INET) */
+ unsigned short metric; /* routing metric (not used) */
+ unsigned short mtu; /* interface MTU value */
+ unsigned short type; /* interface hardware type */
+ unsigned short hard_header_len; /* hardware hdr length */
+ void *priv; /* pointer to private data */
+
+ /* Interface address info. */
+ unsigned char broadcast[MAX_ADDR_LEN]; /* hw bcast add */
+ unsigned char pad; /* make dev_addr aligned to 8 bytes */
+ unsigned char dev_addr[MAX_ADDR_LEN]; /* hw address */
+ unsigned char addr_len; /* hardware address length */
+ unsigned long pa_addr; /* protocol address */
+ unsigned long pa_brdaddr; /* protocol broadcast addr */
+ unsigned long pa_dstaddr; /* protocol P-P other side addr */
+ unsigned long pa_mask; /* protocol netmask */
+ unsigned short pa_alen; /* protocol address length */
+
+ struct dev_mc_list *mc_list; /* Multicast mac addresses */
+ int mc_count; /* Number of installed mcasts */
+
+ struct ip_mc_list *ip_mc_list; /* IP multicast filter chain */
+ __u32 tx_queue_len; /* Max frames per queue allowed */
+
+ /* For load balancing driver pair support */
+
+ unsigned long pkt_queue; /* Packets queued */
+ struct linux_device *slave; /* Slave device */
+ struct net_alias_info *alias_info; /* main dev alias info */
+ struct net_alias *my_alias; /* alias devs */
+
+ /* Pointer to the interface buffers. */
+ struct sk_buff_head buffs[DEV_NUMBUFFS];
+
+ /* Pointers to interface service routines. */
+ int (*open)(struct linux_device *dev);
+ int (*stop)(struct linux_device *dev);
+ int (*hard_start_xmit) (struct sk_buff *skb,
+ struct linux_device *dev);
+ int (*hard_header) (struct sk_buff *skb,
+ struct linux_device *dev,
+ unsigned short type,
+ void *daddr,
+ void *saddr,
+ unsigned len);
+ int (*rebuild_header)(void *eth, struct linux_device *dev,
+ unsigned long raddr, struct sk_buff *skb);
+#define HAVE_MULTICAST
+ void (*set_multicast_list)(struct linux_device *dev);
+#define HAVE_SET_MAC_ADDR
+ int (*set_mac_address)(struct linux_device *dev, void *addr);
+#define HAVE_PRIVATE_IOCTL
+ int (*do_ioctl)(struct linux_device *dev, struct ifreq *ifr, int cmd);
+#define HAVE_SET_CONFIG
+ int (*set_config)(struct linux_device *dev, struct ifmap *map);
+#define HAVE_HEADER_CACHE
+ void (*header_cache_bind)(struct hh_cache **hhp, struct linux_device *dev, unsigned short htype, __u32 daddr);
+ void (*header_cache_update)(struct hh_cache *hh, struct linux_device *dev, unsigned char * haddr);
+#define HAVE_CHANGE_MTU
+ int (*change_mtu)(struct linux_device *dev, int new_mtu);
+
+ struct iw_statistics* (*get_wireless_stats)(struct linux_device *dev);
+
+#ifdef MACH
+
+#ifdef MACH_INCLUDE
+ struct net_data *net_data;
+#else
+ void *net_data;
+#endif
+
+#endif
+};
+
+
+struct packet_type {
+ unsigned short type; /* This is really htons(ether_type). */
+ struct linux_device * dev;
+ int (*func) (struct sk_buff *, struct linux_device *,
+ struct packet_type *);
+ void *data;
+ struct packet_type *next;
+};
+
+
+#include <linux/interrupt.h>
+#include <linux/notifier.h>
+
+/* Used by dev_rint */
+#define IN_SKBUFF 1
+
+extern volatile unsigned long in_bh;
+
+extern struct linux_device loopback_dev;
+extern struct linux_device *dev_base;
+extern struct packet_type *ptype_base[16];
+
+
+extern int ip_addr_match(unsigned long addr1, unsigned long addr2);
+extern int ip_chk_addr(unsigned long addr);
+extern struct linux_device *ip_dev_bynet(unsigned long daddr, unsigned long mask);
+extern unsigned long ip_my_addr(void);
+extern unsigned long ip_get_mask(unsigned long addr);
+extern struct linux_device *ip_dev_find(unsigned long addr);
+extern struct linux_device *dev_getbytype(unsigned short type);
+
+extern void dev_add_pack(struct packet_type *pt);
+extern void dev_remove_pack(struct packet_type *pt);
+extern struct linux_device *dev_get(const char *name);
+extern int dev_open(struct linux_device *dev);
+extern int dev_close(struct linux_device *dev);
+extern void dev_queue_xmit(struct sk_buff *skb, struct linux_device *dev,
+ int pri);
+
+#define HAVE_NETIF_RX 1
+extern void netif_rx(struct sk_buff *skb);
+extern void net_bh(void);
+
+#ifdef MACH
+#define dev_tint(dev)
+#else
+extern void dev_tint(struct linux_device *dev);
+#endif
+
+extern int dev_get_info(char *buffer, char **start, off_t offset, int length, int dummy);
+extern int dev_ioctl(unsigned int cmd, void *);
+
+extern void dev_init(void);
+
+/* Locking protection for page faults during outputs to devices unloaded during the fault */
+
+extern int dev_lockct;
+
+/*
+ * These two don't currently need to be interrupt-safe
+ * but they may do soon. Do it properly anyway.
+ */
+
+extern __inline__ void dev_lock_list(void)
+{
+ unsigned long flags;
+ save_flags(flags);
+ cli();
+ dev_lockct++;
+ restore_flags(flags);
+}
+
+extern __inline__ void dev_unlock_list(void)
+{
+ unsigned long flags;
+ save_flags(flags);
+ cli();
+ dev_lockct--;
+ restore_flags(flags);
+}
+
+/*
+ * This almost never occurs, isn't in performance critical paths
+ * and we can thus be relaxed about it
+ */
+
+extern __inline__ void dev_lock_wait(void)
+{
+ while(dev_lockct)
+ schedule();
+}
+
+
+/* These functions live elsewhere (drivers/net/net_init.c, but related) */
+
+extern void ether_setup(struct linux_device *dev);
+extern void tr_setup(struct linux_device *dev);
+extern void fddi_setup(struct linux_device *dev);
+extern int ether_config(struct linux_device *dev, struct ifmap *map);
+/* Support for loadable net-drivers */
+extern int register_netdev(struct linux_device *dev);
+extern void unregister_netdev(struct linux_device *dev);
+extern int register_netdevice_notifier(struct notifier_block *nb);
+extern int unregister_netdevice_notifier(struct notifier_block *nb);
+/* Functions used for multicast support */
+extern void dev_mc_upload(struct linux_device *dev);
+extern void dev_mc_delete(struct linux_device *dev, void *addr, int alen, int all);
+extern void dev_mc_add(struct linux_device *dev, void *addr, int alen, int newonly);
+extern void dev_mc_discard(struct linux_device *dev);
+/* This is the wrong place but it'll do for the moment */
+extern void ip_mc_allhost(struct linux_device *dev);
+#endif /* __KERNEL__ */
+
+#endif /* _LINUX_NETDEVICE_H */
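
The structure above is driven entirely through the function pointers a driver installs (init, open, stop, hard_start_xmit, ...) before calling register_netdev(). A heavily simplified skeleton of that wiring, assuming the Mach build where `device' is spelled linux_device and assuming kernel context; every foo_* name is invented for illustration, and the transmit routine simply discards the frame.

#include <linux/netdevice.h>	/* also pulls in <linux/skbuff.h> */

static int foo_open(struct linux_device *dev)
{
	dev->start = 1;
	return 0;
}

static int foo_stop(struct linux_device *dev)
{
	dev->start = 0;
	return 0;
}

static int foo_xmit(struct sk_buff *skb, struct linux_device *dev)
{
	dev_kfree_skb(skb, FREE_WRITE);	/* pretend the frame went out */
	return 0;
}

static int foo_init(struct linux_device *dev)
{
	ether_setup(dev);		/* generic Ethernet defaults */
	dev->open            = foo_open;
	dev->stop            = foo_stop;
	dev->hard_start_xmit = foo_xmit;
	return 0;
}

static struct linux_device foo_dev = { "foo0", };

/* At driver initialization:  foo_dev.init = foo_init;
   register_netdev(&foo_dev);  and unregister_netdev(&foo_dev) on removal. */
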
diff --git a/linux/dev/include/linux/nfs.h b/linux/dev/include/linux/nfs.h
new file mode 100644
index 0000000..ef37aa3
--- /dev/null
+++ b/linux/dev/include/linux/nfs.h
@@ -0,0 +1,175 @@
+#ifndef _LINUX_NFS_H
+#define _LINUX_NFS_H
+
+#ifndef MACH_INCLUDE
+
+#define NFS_PORT 2049
+#define NFS_MAXDATA 8192
+#define NFS_MAXPATHLEN 1024
+#define NFS_MAXNAMLEN 255
+#define NFS_MAXGROUPS 16
+#define NFS_FHSIZE 32
+#define NFS_COOKIESIZE 4
+#define NFS_FIFO_DEV (-1)
+#define NFSMODE_FMT 0170000
+#define NFSMODE_DIR 0040000
+#define NFSMODE_CHR 0020000
+#define NFSMODE_BLK 0060000
+#define NFSMODE_REG 0100000
+#define NFSMODE_LNK 0120000
+#define NFSMODE_SOCK 0140000
+#define NFSMODE_FIFO 0010000
+
+#ifdef __KERNEL__ /* user programs should get these from the rpc header files */
+
+#define RPC_VERSION 2
+
+enum rpc_auth_flavor {
+ RPC_AUTH_NULL = 0,
+ RPC_AUTH_UNIX = 1,
+ RPC_AUTH_SHORT = 2
+};
+
+enum rpc_msg_type {
+ RPC_CALL = 0,
+ RPC_REPLY = 1
+};
+
+enum rpc_reply_stat {
+ RPC_MSG_ACCEPTED = 0,
+ RPC_MSG_DENIED = 1
+};
+
+enum rpc_accept_stat {
+ RPC_SUCCESS = 0,
+ RPC_PROG_UNAVAIL = 1,
+ RPC_PROG_MISMATCH = 2,
+ RPC_PROC_UNAVAIL = 3,
+ RPC_GARBAGE_ARGS = 4
+};
+
+enum rpc_reject_stat {
+ RPC_MISMATCH = 0,
+ RPC_AUTH_ERROR = 1
+};
+
+enum rpc_auth_stat {
+ RPC_AUTH_BADCRED = 1,
+ RPC_AUTH_REJECTEDCRED = 2,
+ RPC_AUTH_BADVERF = 3,
+ RPC_AUTH_REJECTEDVERF = 4,
+ RPC_AUTH_TOOWEAK = 5
+};
+
+#endif /* __KERNEL__ */
+
+enum nfs_stat {
+ NFS_OK = 0,
+ NFSERR_PERM = 1,
+ NFSERR_NOENT = 2,
+ NFSERR_IO = 5,
+ NFSERR_NXIO = 6,
+ NFSERR_EAGAIN = 11,
+ NFSERR_ACCES = 13,
+ NFSERR_EXIST = 17,
+ NFSERR_XDEV = 18,
+ NFSERR_NODEV = 19,
+ NFSERR_NOTDIR = 20,
+ NFSERR_ISDIR = 21,
+ NFSERR_INVAL = 22, /* that Sun forgot */
+ NFSERR_FBIG = 27,
+ NFSERR_NOSPC = 28,
+ NFSERR_ROFS = 30,
+ NFSERR_NAMETOOLONG = 63,
+ NFSERR_NOTEMPTY = 66,
+ NFSERR_DQUOT = 69,
+ NFSERR_STALE = 70,
+ NFSERR_WFLUSH = 99
+};
+
+enum nfs_ftype {
+ NFNON = 0,
+ NFREG = 1,
+ NFDIR = 2,
+ NFBLK = 3,
+ NFCHR = 4,
+ NFLNK = 5,
+ NFSOCK = 6,
+ NFBAD = 7,
+ NFFIFO = 8
+};
+
+#define NFS_PROGRAM 100003
+#define NFS_VERSION 2
+#define NFSPROC_NULL 0
+#define NFSPROC_GETATTR 1
+#define NFSPROC_SETATTR 2
+#define NFSPROC_ROOT 3
+#define NFSPROC_LOOKUP 4
+#define NFSPROC_READLINK 5
+#define NFSPROC_READ 6
+#define NFSPROC_WRITECACHE 7
+#define NFSPROC_WRITE 8
+#define NFSPROC_CREATE 9
+#define NFSPROC_REMOVE 10
+#define NFSPROC_RENAME 11
+#define NFSPROC_LINK 12
+#define NFSPROC_SYMLINK 13
+#define NFSPROC_MKDIR 14
+#define NFSPROC_RMDIR 15
+#define NFSPROC_READDIR 16
+#define NFSPROC_STATFS 17
+
+struct nfs_fh {
+ char data[NFS_FHSIZE];
+};
+
+struct nfs_time {
+ u_int seconds;
+ u_int useconds;
+};
+
+struct nfs_fattr {
+ enum nfs_ftype type;
+ u_int mode;
+ u_int nlink;
+ u_int uid;
+ u_int gid;
+ u_int size;
+ u_int blocksize;
+ u_int rdev;
+ u_int blocks;
+ u_int fsid;
+ u_int fileid;
+ struct nfs_time atime;
+ struct nfs_time mtime;
+ struct nfs_time ctime;
+};
+
+struct nfs_sattr {
+ u_int mode;
+ u_int uid;
+ u_int gid;
+ u_int size;
+ struct nfs_time atime;
+ struct nfs_time mtime;
+};
+
+struct nfs_entry {
+ u_int fileid;
+ char *name;
+ int cookie;
+ int eof;
+};
+
+struct nfs_fsinfo {
+ u_int tsize;
+ u_int bsize;
+ u_int blocks;
+ u_int bfree;
+ u_int bavail;
+};
+
+#endif
+
+#endif
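
The NFSMODE_* masks above encode the file type in the upper bits of the mode word carried in struct nfs_fattr, in parallel with the enum nfs_ftype values. A small sketch (assuming the non-MACH_INCLUDE view of the header, and using its own u_int spelling) of how the two might be reconciled; the helper is not part of the patch.

#include <linux/nfs.h>

/* Hypothetical helper: derive the nfs_ftype from a fattr mode word. */
static enum nfs_ftype foo_type_from_mode(u_int mode)
{
	switch (mode & NFSMODE_FMT) {
	case NFSMODE_DIR:	return NFDIR;
	case NFSMODE_CHR:	return NFCHR;
	case NFSMODE_BLK:	return NFBLK;
	case NFSMODE_REG:	return NFREG;
	case NFSMODE_LNK:	return NFLNK;
	case NFSMODE_SOCK:	return NFSOCK;
	case NFSMODE_FIFO:	return NFFIFO;
	default:		return NFNON;
	}
}
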
diff --git a/linux/dev/include/linux/notifier.h b/linux/dev/include/linux/notifier.h
new file mode 100644
index 0000000..eede20f
--- /dev/null
+++ b/linux/dev/include/linux/notifier.h
@@ -0,0 +1,100 @@
+/*
+ * Routines to manage notifier chains for passing status changes to any
+ * interested routines. We need this instead of hard coded call lists so
+ * that modules can poke their nose into the innards. The network devices
+ * needed them so here they are for the rest of you.
+ *
+ * Alan Cox <Alan.Cox@linux.org>
+ */
+
+#ifndef _LINUX_NOTIFIER_H
+#define _LINUX_NOTIFIER_H
+#include <linux/errno.h>
+
+struct notifier_block
+{
+ int (*notifier_call)(struct notifier_block *this, unsigned long, void *);
+ struct notifier_block *next;
+ int priority;
+};
+
+
+#ifdef __KERNEL__
+
+#define NOTIFY_DONE 0x0000 /* Don't care */
+#define NOTIFY_OK 0x0001 /* Suits me */
+#define NOTIFY_STOP_MASK 0x8000 /* Don't call further */
+#define NOTIFY_BAD (NOTIFY_STOP_MASK|0x0002) /* Bad/Veto action */
+
+extern __inline__ int notifier_chain_register(struct notifier_block **list, struct notifier_block *n)
+{
+ while(*list)
+ {
+ if(n->priority > (*list)->priority)
+ break;
+ list= &((*list)->next);
+ }
+ n->next = *list;
+ *list=n;
+ return 0;
+}
+
+/*
+ * Warning to any non GPL module writers out there.. these functions are
+ * GPL'd
+ */
+
+extern __inline__ int notifier_chain_unregister(struct notifier_block **nl, struct notifier_block *n)
+{
+ while((*nl)!=NULL)
+ {
+ if((*nl)==n)
+ {
+ *nl=n->next;
+ return 0;
+ }
+ nl=&((*nl)->next);
+ }
+#ifdef MACH_INCLUDE
+ return -LINUX_ENOENT;
+#else
+ return -ENOENT;
+#endif
+}
+
+/*
+ * This is one of those things that is generally shorter inline
+ */
+
+extern __inline__ int notifier_call_chain(struct notifier_block **n, unsigned long val, void *v)
+{
+ int ret=NOTIFY_DONE;
+ struct notifier_block *nb = *n;
+ while(nb)
+ {
+ ret=nb->notifier_call(nb,val,v);
+ if(ret&NOTIFY_STOP_MASK)
+ return ret;
+ nb=nb->next;
+ }
+ return ret;
+}
+
+
+/*
+ * Declared notifiers so far. I can imagine quite a few more chains
+ * over time (eg laptop power reset chains, reboot chain (to clean
+ * device units up), device [un]mount chain, module load/unload chain,
+ * low memory chain, screenblank chain (for plug in modular screenblankers)
+ * VC switch chains (for loadable kernel svgalib VC switch helpers) etc...
+ */
+
+/* netdevice notifier chain */
+#define NETDEV_UP 0x0001 /* For now you can't veto a device up/down */
+#define NETDEV_DOWN 0x0002
+#define NETDEV_REBOOT 0x0003 /* Tell a protocol stack a network interface
+ detected a hardware crash and restarted
+ - we can use this eg to kick tcp sessions
+ once done */
+#endif
+#endif
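
The three inline routines above are the whole API: consumers hang a notifier_block on a chain, and producers walk the chain with notifier_call_chain() until a handler returns a value with NOTIFY_STOP_MASK set. A usage sketch; the chain pointer, the handler and the event code are illustrative (NETDEV_DOWN is just one of the values defined above).

#include <linux/notifier.h>

static struct notifier_block *foo_chain = NULL;	/* head of a private chain */

static int foo_event(struct notifier_block *self, unsigned long event, void *ptr)
{
	/* A real handler would inspect `event' (e.g. NETDEV_UP/NETDEV_DOWN)
	   and `ptr' (typically the device involved). */
	return NOTIFY_DONE;
}

static struct notifier_block foo_notifier = { foo_event, NULL, 0 };

static void foo_demo(void)
{
	notifier_chain_register(&foo_chain, &foo_notifier);
	notifier_call_chain(&foo_chain, NETDEV_DOWN, NULL);	/* run the chain */
	notifier_chain_unregister(&foo_chain, &foo_notifier);
}
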
diff --git a/linux/dev/include/linux/pagemap.h b/linux/dev/include/linux/pagemap.h
new file mode 100644
index 0000000..6e21f3d
--- /dev/null
+++ b/linux/dev/include/linux/pagemap.h
@@ -0,0 +1,150 @@
+#ifndef _LINUX_PAGEMAP_H
+#define _LINUX_PAGEMAP_H
+
+#include <asm/system.h>
+
+/*
+ * Page-mapping primitive inline functions
+ *
+ * Copyright 1995 Linus Torvalds
+ */
+
+#ifndef MACH
+
+#include <linux/mm.h>
+#include <linux/fs.h>
+#include <linux/swapctl.h>
+
+static inline unsigned long page_address(struct page * page)
+{
+ return PAGE_OFFSET + PAGE_SIZE * page->map_nr;
+}
+
+#define PAGE_HASH_BITS 11
+#define PAGE_HASH_SIZE (1 << PAGE_HASH_BITS)
+
+#define PAGE_AGE_VALUE ((PAGE_INITIAL_AGE)+(PAGE_ADVANCE))
+
+extern unsigned long page_cache_size; /* # of pages currently in the hash table */
+extern struct page * page_hash_table[PAGE_HASH_SIZE];
+
+/*
+ * We use a power-of-two hash table to avoid a modulus,
+ * and get a reasonable hash by knowing roughly how the
+ * inode pointer and offsets are distributed (ie, we
+ * roughly know which bits are "significant")
+ */
+static inline unsigned long _page_hashfn(struct inode * inode, unsigned long offset)
+{
+#define i (((unsigned long) inode)/(sizeof(struct inode) & ~ (sizeof(struct inode) - 1)))
+#define o (offset >> PAGE_SHIFT)
+#define s(x) ((x)+((x)>>PAGE_HASH_BITS))
+ return s(i+o) & (PAGE_HASH_SIZE-1);
+#undef i
+#undef o
+#undef s
+}
+
+#define page_hash(inode,offset) (page_hash_table+_page_hashfn(inode,offset))
+
+static inline struct page * __find_page(struct inode * inode, unsigned long offset, struct page *page)
+{
+ goto inside;
+ for (;;) {
+ page = page->next_hash;
+inside:
+ if (!page)
+ goto not_found;
+ if (page->inode != inode)
+ continue;
+ if (page->offset == offset)
+ break;
+ }
+ /* Found the page. */
+ atomic_inc(&page->count);
+ set_bit(PG_referenced, &page->flags);
+not_found:
+ return page;
+}
+
+static inline struct page *find_page(struct inode * inode, unsigned long offset)
+{
+ return __find_page(inode, offset, *page_hash(inode, offset));
+}
+
+static inline void remove_page_from_hash_queue(struct page * page)
+{
+ struct page **p;
+ struct page *next_hash, *prev_hash;
+
+ next_hash = page->next_hash;
+ prev_hash = page->prev_hash;
+ page->next_hash = NULL;
+ page->prev_hash = NULL;
+ if (next_hash)
+ next_hash->prev_hash = prev_hash;
+ if (prev_hash)
+ prev_hash->next_hash = next_hash;
+ p = page_hash(page->inode,page->offset);
+ if (*p == page)
+ *p = next_hash;
+ page_cache_size--;
+}
+
+static inline void __add_page_to_hash_queue(struct page * page, struct page **p)
+{
+ page_cache_size++;
+ set_bit(PG_referenced, &page->flags);
+ page->age = PAGE_AGE_VALUE;
+ page->prev_hash = NULL;
+ if ((page->next_hash = *p) != NULL)
+ page->next_hash->prev_hash = page;
+ *p = page;
+}
+
+static inline void add_page_to_hash_queue(struct page * page, struct inode * inode, unsigned long offset)
+{
+ __add_page_to_hash_queue(page, page_hash(inode,offset));
+}
+
+
+static inline void remove_page_from_inode_queue(struct page * page)
+{
+ struct inode * inode = page->inode;
+
+ page->inode = NULL;
+ inode->i_nrpages--;
+ if (inode->i_pages == page)
+ inode->i_pages = page->next;
+ if (page->next)
+ page->next->prev = page->prev;
+ if (page->prev)
+ page->prev->next = page->next;
+ page->next = NULL;
+ page->prev = NULL;
+}
+
+static inline void add_page_to_inode_queue(struct inode * inode, struct page * page)
+{
+ struct page **p = &inode->i_pages;
+
+ inode->i_nrpages++;
+ page->inode = inode;
+ page->prev = NULL;
+ if ((page->next = *p) != NULL)
+ page->next->prev = page;
+ *p = page;
+}
+
+extern void __wait_on_page(struct page *);
+static inline void wait_on_page(struct page * page)
+{
+ if (PageLocked(page))
+ __wait_on_page(page);
+}
+
+extern void update_vm_cache(struct inode *, unsigned long, const char *, int);
+
+#endif /* !MACH */
+
+#endif
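
Everything above is compiled out under MACH, but the intended use of the hash helpers is worth illustrating: find_page() looks up an (inode, offset) pair, takes a reference and marks the page referenced, and wait_on_page() sleeps until pending I/O on it completes. A sketch with a hypothetical caller; releasing the reference is left to that caller (e.g. via __free_page() from mm.h).

#include <linux/pagemap.h>

#ifndef MACH	/* the inline helpers above exist only in the non-Mach build */
/* Illustrative lookup in the page cache. */
static struct page *foo_lookup_cached(struct inode *inode, unsigned long offset)
{
	struct page *page = find_page(inode, offset);	/* takes a reference */

	if (page)
		wait_on_page(page);	/* block until pending I/O completes */
	return page;	/* caller drops the reference, e.g. __free_page(page) */
}
#endif
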
diff --git a/linux/dev/include/linux/proc_fs.h b/linux/dev/include/linux/proc_fs.h
new file mode 100644
index 0000000..8ce0bb2
--- /dev/null
+++ b/linux/dev/include/linux/proc_fs.h
@@ -0,0 +1,292 @@
+#ifndef _LINUX_PROC_FS_H
+#define _LINUX_PROC_FS_H
+
+#include <linux/fs.h>
+#include <linux/malloc.h>
+
+/*
+ * The proc filesystem constants/structures
+ */
+
+/*
+ * We always define these enumerators
+ */
+
+enum root_directory_inos {
+ PROC_ROOT_INO = 1,
+ PROC_LOADAVG,
+ PROC_UPTIME,
+ PROC_MEMINFO,
+ PROC_KMSG,
+ PROC_VERSION,
+ PROC_CPUINFO,
+ PROC_PCI,
+ PROC_SELF, /* will change inode # */
+ PROC_NET,
+ PROC_SCSI,
+ PROC_MALLOC,
+ PROC_KCORE,
+ PROC_MODULES,
+ PROC_STAT,
+ PROC_DEVICES,
+ PROC_INTERRUPTS,
+ PROC_FILESYSTEMS,
+ PROC_KSYMS,
+ PROC_DMA,
+ PROC_IOPORTS,
+#ifdef __SMP_PROF__
+ PROC_SMP_PROF,
+#endif
+ PROC_PROFILE, /* whether enabled or not */
+ PROC_CMDLINE,
+ PROC_SYS,
+ PROC_MTAB,
+ PROC_MD,
+ PROC_RTC,
+ PROC_LOCKS
+};
+
+enum pid_directory_inos {
+ PROC_PID_INO = 2,
+ PROC_PID_STATUS,
+ PROC_PID_MEM,
+ PROC_PID_CWD,
+ PROC_PID_ROOT,
+ PROC_PID_EXE,
+ PROC_PID_FD,
+ PROC_PID_ENVIRON,
+ PROC_PID_CMDLINE,
+ PROC_PID_STAT,
+ PROC_PID_STATM,
+ PROC_PID_MAPS
+};
+
+enum pid_subdirectory_inos {
+ PROC_PID_FD_DIR = 1
+};
+
+enum net_directory_inos {
+ PROC_NET_UNIX = 128,
+ PROC_NET_ARP,
+ PROC_NET_ROUTE,
+ PROC_NET_DEV,
+ PROC_NET_RAW,
+ PROC_NET_TCP,
+ PROC_NET_UDP,
+ PROC_NET_SNMP,
+ PROC_NET_RARP,
+ PROC_NET_IGMP,
+ PROC_NET_IPMR_VIF,
+ PROC_NET_IPMR_MFC,
+ PROC_NET_IPFWFWD,
+ PROC_NET_IPFWIN,
+ PROC_NET_IPFWOUT,
+ PROC_NET_IPACCT,
+ PROC_NET_IPMSQHST,
+ PROC_NET_WIRELESS,
+ PROC_NET_IPX_INTERFACE,
+ PROC_NET_IPX_ROUTE,
+ PROC_NET_IPX,
+ PROC_NET_ATALK,
+ PROC_NET_AT_ROUTE,
+ PROC_NET_ATIF,
+ PROC_NET_AX25_ROUTE,
+ PROC_NET_AX25,
+ PROC_NET_AX25_CALLS,
+ PROC_NET_NR_NODES,
+ PROC_NET_NR_NEIGH,
+ PROC_NET_NR,
+ PROC_NET_SOCKSTAT,
+ PROC_NET_RTCACHE,
+ PROC_NET_AX25_BPQETHER,
+ PROC_NET_ALIAS_TYPES,
+ PROC_NET_ALIASES,
+ PROC_NET_IP_MASQ_APP,
+ PROC_NET_STRIP_STATUS,
+ PROC_NET_STRIP_TRACE,
+ PROC_NET_IPAUTOFW,
+ PROC_NET_RS_NODES,
+ PROC_NET_RS_NEIGH,
+ PROC_NET_RS_ROUTES,
+ PROC_NET_RS,
+ PROC_NET_Z8530,
+ PROC_NET_LAST
+};
+
+enum scsi_directory_inos {
+ PROC_SCSI_SCSI = 256,
+ PROC_SCSI_ADVANSYS,
+ PROC_SCSI_EATA,
+ PROC_SCSI_EATA_PIO,
+ PROC_SCSI_AHA152X,
+ PROC_SCSI_AHA1542,
+ PROC_SCSI_AHA1740,
+ PROC_SCSI_AIC7XXX,
+ PROC_SCSI_BUSLOGIC,
+ PROC_SCSI_U14_34F,
+ PROC_SCSI_FDOMAIN,
+ PROC_SCSI_GENERIC_NCR5380,
+ PROC_SCSI_IN2000,
+ PROC_SCSI_PAS16,
+ PROC_SCSI_QLOGICFAS,
+ PROC_SCSI_QLOGICISP,
+ PROC_SCSI_SEAGATE,
+ PROC_SCSI_T128,
+ PROC_SCSI_DC390WUF,
+ PROC_SCSI_DC390T,
+ PROC_SCSI_NCR53C7xx,
+ PROC_SCSI_NCR53C8XX,
+ PROC_SCSI_ULTRASTOR,
+ PROC_SCSI_7000FASST,
+ PROC_SCSI_EATA2X,
+ PROC_SCSI_AM53C974,
+ PROC_SCSI_SSC,
+ PROC_SCSI_NCR53C406A,
+ PROC_SCSI_PPA,
+ PROC_SCSI_ESP,
+ PROC_SCSI_A3000,
+ PROC_SCSI_A2091,
+ PROC_SCSI_GVP11,
+ PROC_SCSI_ATARI,
+ PROC_SCSI_GDTH,
+ PROC_SCSI_IDESCSI,
+ PROC_SCSI_SCSI_DEBUG,
+ PROC_SCSI_NOT_PRESENT,
+ PROC_SCSI_FILE, /* I'm assuming here that we */
+ PROC_SCSI_LAST = (PROC_SCSI_FILE + 16) /* won't ever see more than */
+}; /* 16 HBAs in one machine */
+
+/* Finally, the dynamically allocatable proc entries are reserved: */
+
+#define PROC_DYNAMIC_FIRST 4096
+#define PROC_NDYNAMIC 4096
+
+#define PROC_SUPER_MAGIC 0x9fa0
+
+/*
+ * This is not completely implemented yet. The idea is to
+ * create a in-memory tree (like the actual /proc filesystem
+ * tree) of these proc_dir_entries, so that we can dynamically
+ * add new files to /proc.
+ *
+ * The "next" pointer creates a linked list of one /proc directory,
+ * while parent/subdir create the directory structure (every
+ * /proc file has a parent, but "subdir" is NULL for all
+ * non-directory entries).
+ *
+ * "get_info" is called at "read", while "fill_inode" is used to
+ * fill in file type/protection/owner information specific to the
+ * particular /proc file.
+ */
+struct proc_dir_entry {
+ unsigned short low_ino;
+ unsigned short namelen;
+ const char *name;
+ mode_t mode;
+ nlink_t nlink;
+ uid_t uid;
+ gid_t gid;
+ unsigned long size;
+ struct inode_operations * ops;
+ int (*get_info)(char *, char **, off_t, int, int);
+ void (*fill_inode)(struct inode *);
+ struct proc_dir_entry *next, *parent, *subdir;
+ void *data;
+};
+
+extern int (* dispatch_scsi_info_ptr) (int ino, char *buffer, char **start,
+ off_t offset, int length, int inout);
+
+extern struct proc_dir_entry proc_root;
+extern struct proc_dir_entry proc_net;
+extern struct proc_dir_entry proc_scsi;
+extern struct proc_dir_entry proc_sys;
+extern struct proc_dir_entry proc_pid;
+extern struct proc_dir_entry proc_pid_fd;
+
+extern struct inode_operations proc_scsi_inode_operations;
+
+extern void proc_root_init(void);
+extern void proc_base_init(void);
+extern void proc_net_init(void);
+
+extern int proc_register(struct proc_dir_entry *, struct proc_dir_entry *);
+extern int proc_register_dynamic(struct proc_dir_entry *,
+ struct proc_dir_entry *);
+extern int proc_unregister(struct proc_dir_entry *, int);
+
+static inline int proc_net_register(struct proc_dir_entry * x)
+{
+ return proc_register(&proc_net, x);
+}
+
+static inline int proc_net_unregister(int x)
+{
+ return proc_unregister(&proc_net, x);
+}
+
+static inline int proc_scsi_register(struct proc_dir_entry *driver,
+ struct proc_dir_entry *x)
+{
+ x->ops = &proc_scsi_inode_operations;
+ if(x->low_ino < PROC_SCSI_FILE){
+ return(proc_register(&proc_scsi, x));
+ }else{
+ return(proc_register(driver, x));
+ }
+}
+
+static inline int proc_scsi_unregister(struct proc_dir_entry *driver, int x)
+{
+ extern void scsi_init_free(char *ptr, unsigned int size);
+
+ if(x <= PROC_SCSI_FILE)
+ return(proc_unregister(&proc_scsi, x));
+ else {
+ struct proc_dir_entry **p = &driver->subdir, *dp;
+ int ret;
+
+ while ((dp = *p) != NULL) {
+ if (dp->low_ino == x)
+ break;
+ p = &dp->next;
+ }
+ ret = proc_unregister(driver, x);
+ scsi_init_free((char *) dp, sizeof(struct proc_dir_entry) + 4);
+ return(ret);
+ }
+}
+
+extern struct super_block *proc_read_super(struct super_block *,void *,int);
+extern int init_proc_fs(void);
+extern struct inode * proc_get_inode(struct super_block *, int, struct proc_dir_entry *);
+extern void proc_statfs(struct super_block *, struct statfs *, int);
+extern void proc_read_inode(struct inode *);
+extern void proc_write_inode(struct inode *);
+extern int proc_match(int, const char *, struct proc_dir_entry *);
+
+/*
+ * These are generic /proc routines that use the internal
+ * "struct proc_dir_entry" tree to traverse the filesystem.
+ *
+ * The /proc root directory has extended versions to take care
+ * of the /proc/<pid> subdirectories.
+ */
+extern int proc_readdir(struct inode *, struct file *, void *, filldir_t);
+extern int proc_lookup(struct inode *, const char *, int, struct inode **);
+
+extern struct inode_operations proc_dir_inode_operations;
+extern struct inode_operations proc_net_inode_operations;
+extern struct inode_operations proc_netdir_inode_operations;
+extern struct inode_operations proc_scsi_inode_operations;
+extern struct inode_operations proc_mem_inode_operations;
+extern struct inode_operations proc_sys_inode_operations;
+extern struct inode_operations proc_array_inode_operations;
+extern struct inode_operations proc_arraylong_inode_operations;
+extern struct inode_operations proc_kcore_inode_operations;
+extern struct inode_operations proc_profile_inode_operations;
+extern struct inode_operations proc_kmsg_inode_operations;
+extern struct inode_operations proc_link_inode_operations;
+extern struct inode_operations proc_fd_inode_operations;
+
+#endif
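
A proc_dir_entry mostly describes a file; the interesting field for simple status files is get_info, which is called at read time. A hedged sketch of registering such an entry under /proc/net: the entry name, the callback and the use of proc_register_dynamic() to have an inode number assigned from the reserved dynamic range are illustrative assumptions, not part of the patch.

#include <linux/proc_fs.h>

/* Hypothetical read callback: format status text into `buffer' and return
   the number of bytes produced. */
static int foo_get_info(char *buffer, char **start, off_t offset,
			int length, int dummy)
{
	return 0;	/* nothing to report in this sketch */
}

static struct proc_dir_entry foo_proc_entry = {
	0,			/* low_ino: filled in at registration */
	3, "foo",		/* namelen, name */
	0100444,		/* mode: regular file, world readable */
	1, 0, 0,		/* nlink, uid, gid */
	0, NULL,		/* size, ops */
	foo_get_info,		/* get_info */
};

/* At init:     proc_register_dynamic(&proc_net, &foo_proc_entry);
   At teardown: proc_unregister(&proc_net, foo_proc_entry.low_ino);  */
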
diff --git a/linux/dev/include/linux/sched.h b/linux/dev/include/linux/sched.h
new file mode 100644
index 0000000..3e7bcd4
--- /dev/null
+++ b/linux/dev/include/linux/sched.h
@@ -0,0 +1,521 @@
+#ifndef _LINUX_SCHED_H
+#define _LINUX_SCHED_H
+
+/*
+ * define DEBUG if you want the wait-queues to have some extra
+ * debugging code. It's not normally used, but might catch some
+ * wait-queue coding errors.
+ *
+ * #define DEBUG
+ */
+
+#include <asm/param.h> /* for HZ */
+
+extern unsigned long event;
+
+#include <linux/binfmts.h>
+#include <linux/personality.h>
+#include <linux/tasks.h>
+#include <linux/kernel.h>
+
+#include <asm/system.h>
+#include <asm/semaphore.h>
+#include <asm/page.h>
+
+#include <linux/smp.h>
+#include <linux/tty.h>
+#include <linux/sem.h>
+
+/*
+ * cloning flags:
+ */
+#define CSIGNAL 0x000000ff /* signal mask to be sent at exit */
+#define CLONE_VM 0x00000100 /* set if VM shared between processes */
+#define CLONE_FS 0x00000200 /* set if fs info shared between processes */
+#define CLONE_FILES 0x00000400 /* set if open files shared between processes */
+#define CLONE_SIGHAND 0x00000800 /* set if signal handlers shared */
+#define CLONE_PID 0x00001000 /* set if pid shared */
+
+/*
+ * These are the constants used to fake the fixed-point load-average
+ * counting. Some notes:
+ * - 11 bit fractions expand to 22 bits by the multiplies: this gives
+ * a load-average precision of 10 bits integer + 11 bits fractional
+ * - if you want to count load-averages more often, you need more
+ * precision, or rounding will get you. With 2-second counting freq,
+ * the EXP_n values would be 1981, 2034 and 2043 if still using only
+ * 11 bit fractions.
+ */
+extern unsigned long avenrun[]; /* Load averages */
+
+#define FSHIFT 11 /* nr of bits of precision */
+#define FIXED_1 (1<<FSHIFT) /* 1.0 as fixed-point */
+#define LOAD_FREQ (5*HZ) /* 5 sec intervals */
+#define EXP_1 1884 /* 1/exp(5sec/1min) as fixed-point */
+#define EXP_5 2014 /* 1/exp(5sec/5min) */
+#define EXP_15 2037 /* 1/exp(5sec/15min) */
+
+#define CALC_LOAD(load,exp,n) \
+ load *= exp; \
+ load += n*(FIXED_1-exp); \
+ load >>= FSHIFT;
+
+#define CT_TO_SECS(x) ((x) / HZ)
+#define CT_TO_USECS(x) (((x) % HZ) * 1000000/HZ)
+
+extern int nr_running, nr_tasks;
+extern int last_pid;
+
+#define FIRST_TASK task[0]
+#define LAST_TASK task[NR_TASKS-1]
+
+#include <linux/head.h>
+#include <linux/fs.h>
+#include <linux/signal.h>
+#include <linux/time.h>
+#include <linux/param.h>
+#include <linux/resource.h>
+#include <linux/ptrace.h>
+#include <linux/timer.h>
+
+#include <asm/processor.h>
+
+#define TASK_RUNNING 0
+#define TASK_INTERRUPTIBLE 1
+#define TASK_UNINTERRUPTIBLE 2
+#define TASK_ZOMBIE 3
+#define TASK_STOPPED 4
+#define TASK_SWAPPING 5
+
+/*
+ * Scheduling policies
+ */
+#define SCHED_OTHER 0
+#define SCHED_FIFO 1
+#define SCHED_RR 2
+
+struct sched_param {
+ int sched_priority;
+};
+
+#ifndef NULL
+#define NULL ((void *) 0)
+#endif
+
+#ifdef __KERNEL__
+
+extern void sched_init(void);
+extern void show_state(void);
+extern void trap_init(void);
+
+asmlinkage void schedule(void);
+
+/* Open file table structure */
+struct files_struct {
+ int count;
+ fd_set close_on_exec;
+ fd_set open_fds;
+ struct file * fd[NR_OPEN];
+};
+
+#define INIT_FILES { \
+ 1, \
+ { { 0, } }, \
+ { { 0, } }, \
+ { NULL, } \
+}
+
+struct fs_struct {
+ int count;
+ unsigned short umask;
+ struct inode * root, * pwd;
+};
+
+#define INIT_FS { \
+ 1, \
+ 0022, \
+ NULL, NULL \
+}
+
+struct mm_struct {
+ int count;
+ pgd_t * pgd;
+ unsigned long context;
+ unsigned long start_code, end_code, start_data, end_data;
+ unsigned long start_brk, brk, start_stack, start_mmap;
+ unsigned long arg_start, arg_end, env_start, env_end;
+ unsigned long rss, total_vm, locked_vm;
+ unsigned long def_flags;
+ struct vm_area_struct * mmap;
+ struct vm_area_struct * mmap_avl;
+ struct semaphore mmap_sem;
+};
+
+#define INIT_MM { \
+ 1, \
+ swapper_pg_dir, \
+ 0, \
+ 0, 0, 0, 0, \
+ 0, 0, 0, 0, \
+ 0, 0, 0, 0, \
+ 0, 0, 0, \
+ 0, \
+ &init_mmap, &init_mmap, MUTEX }
+
+struct signal_struct {
+ int count;
+ struct sigaction action[32];
+};
+
+#define INIT_SIGNALS { \
+ 1, \
+ { {0,}, } }
+
+struct task_struct {
+/* these are hardcoded - don't touch */
+ volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */
+ long counter;
+ long priority;
+ unsigned long signal;
+ unsigned long blocked; /* bitmap of masked signals */
+ unsigned long flags; /* per process flags, defined below */
+ int errno;
+ long debugreg[8]; /* Hardware debugging registers */
+ struct exec_domain *exec_domain;
+/* various fields */
+ struct linux_binfmt *binfmt;
+ struct task_struct *next_task, *prev_task;
+ struct task_struct *next_run, *prev_run;
+ unsigned long saved_kernel_stack;
+ unsigned long kernel_stack_page;
+ int exit_code, exit_signal;
+ /* ??? */
+ unsigned long personality;
+ int dumpable:1;
+ int did_exec:1;
+ /* shouldn't this be pid_t? */
+ int pid;
+ int pgrp;
+ int tty_old_pgrp;
+ int session;
+ /* boolean value for session group leader */
+ int leader;
+ int groups[NGROUPS];
+ /*
+ * pointers to (original) parent process, youngest child, younger sibling,
+ * older sibling, respectively. (p->father can be replaced with
+ * p->p_pptr->pid)
+ */
+ struct task_struct *p_opptr, *p_pptr, *p_cptr, *p_ysptr, *p_osptr;
+ struct wait_queue *wait_chldexit; /* for wait4() */
+ unsigned short uid,euid,suid,fsuid;
+ unsigned short gid,egid,sgid,fsgid;
+ unsigned long timeout, policy, rt_priority;
+ unsigned long it_real_value, it_prof_value, it_virt_value;
+ unsigned long it_real_incr, it_prof_incr, it_virt_incr;
+ struct timer_list real_timer;
+ long utime, stime, cutime, cstime, start_time;
+/* mm fault and swap info: this can arguably be seen as either mm-specific or thread-specific */
+ unsigned long min_flt, maj_flt, nswap, cmin_flt, cmaj_flt, cnswap;
+ int swappable:1;
+ unsigned long swap_address;
+ unsigned long old_maj_flt; /* old value of maj_flt */
+ unsigned long dec_flt; /* page fault count of the last time */
+ unsigned long swap_cnt; /* number of pages to swap on next pass */
+/* limits */
+ struct rlimit rlim[RLIM_NLIMITS];
+ unsigned short used_math;
+ char comm[16];
+/* file system info */
+ int link_count;
+ struct tty_struct *tty; /* NULL if no tty */
+/* ipc stuff */
+ struct sem_undo *semundo;
+ struct sem_queue *semsleeping;
+/* ldt for this task - used by Wine. If NULL, default_ldt is used */
+ struct desc_struct *ldt;
+/* tss for this task */
+ struct thread_struct tss;
+/* filesystem information */
+ struct fs_struct *fs;
+/* open file information */
+ struct files_struct *files;
+/* memory management info */
+ struct mm_struct *mm;
+/* signal handlers */
+ struct signal_struct *sig;
+#ifdef __SMP__
+ int processor;
+ int last_processor;
+ int lock_depth; /* Lock depth. We can context switch in and out of holding a syscall kernel lock... */
+#endif
+};
+
+/*
+ * Per process flags
+ */
+#define PF_ALIGNWARN 0x00000001 /* Print alignment warning msgs */
+ /* Not implemented yet, only for 486*/
+#define PF_PTRACED 0x00000010 /* set if ptrace (0) has been called. */
+#define PF_TRACESYS 0x00000020 /* tracing system calls */
+#define PF_FORKNOEXEC 0x00000040 /* forked but didn't exec */
+#define PF_SUPERPRIV 0x00000100 /* used super-user privileges */
+#define PF_DUMPCORE 0x00000200 /* dumped core */
+#define PF_SIGNALED 0x00000400 /* killed by a signal */
+
+#define PF_STARTING 0x00000002 /* being created */
+#define PF_EXITING 0x00000004 /* getting shut down */
+
+#define PF_USEDFPU 0x00100000 /* Process used the FPU this quantum (SMP only) */
+#define PF_DTRACE 0x00200000 /* delayed trace (used on m68k) */
+
+/*
+ * Limit the stack to some sane default: root can always
+ * increase this limit if needed.. 8MB seems reasonable.
+ */
+#define _STK_LIM (8*1024*1024)
+
+#define DEF_PRIORITY (20*HZ/100) /* 200 ms time slices */
+
+/*
+ * INIT_TASK is used to set up the first task table; touch at
+ * your own risk! Base=0, limit=0x1fffff (=2MB)
+ */
+#define INIT_TASK \
+/* state etc */ { 0,DEF_PRIORITY,DEF_PRIORITY,0,0,0,0, \
+/* debugregs */ { 0, }, \
+/* exec domain */&default_exec_domain, \
+/* binfmt */ NULL, \
+/* schedlink */ &init_task,&init_task, &init_task, &init_task, \
+/* stack */ 0,(unsigned long) &init_kernel_stack, \
+/* ec,brk... */ 0,0,0,0,0, \
+/* pid etc.. */ 0,0,0,0,0, \
+/* suppl grps*/ {NOGROUP,}, \
+/* proc links*/ &init_task,&init_task,NULL,NULL,NULL,NULL, \
+/* uid etc */ 0,0,0,0,0,0,0,0, \
+/* timeout */ 0,SCHED_OTHER,0,0,0,0,0,0,0, \
+/* timer */ { NULL, NULL, 0, 0, it_real_fn }, \
+/* utime */ 0,0,0,0,0, \
+/* flt */ 0,0,0,0,0,0, \
+/* swp */ 0,0,0,0,0, \
+/* rlimits */ INIT_RLIMITS, \
+/* math */ 0, \
+/* comm */ "swapper", \
+/* fs info */ 0,NULL, \
+/* ipc */ NULL, NULL, \
+/* ldt */ NULL, \
+/* tss */ INIT_TSS, \
+/* fs */ &init_fs, \
+/* files */ &init_files, \
+/* mm */ &init_mm, \
+/* signals */ &init_signals, \
+}
+
+extern struct mm_struct init_mm;
+extern struct task_struct init_task;
+extern struct task_struct *task[NR_TASKS];
+extern struct task_struct *last_task_used_math;
+extern struct task_struct *current_set[NR_CPUS];
+/*
+ * On a single processor system this comes out as current_set[0] when cpp
+ * has finished with it, which gcc will optimise away.
+ */
+#define current (0+current_set[smp_processor_id()]) /* Current on this processor */
+extern unsigned long volatile jiffies;
+extern unsigned long itimer_ticks;
+extern unsigned long itimer_next;
+extern struct timeval xtime;
+extern int need_resched;
+extern void do_timer(struct pt_regs *);
+
+extern unsigned int * prof_buffer;
+extern unsigned long prof_len;
+extern unsigned long prof_shift;
+
+extern int securelevel; /* system security level */
+
+#define CURRENT_TIME (xtime.tv_sec)
+
+extern void sleep_on(struct wait_queue ** p);
+extern void interruptible_sleep_on(struct wait_queue ** p);
+extern void wake_up(struct wait_queue ** p);
+extern void wake_up_interruptible(struct wait_queue ** p);
+extern void wake_up_process(struct task_struct * tsk);
+
+extern void notify_parent(struct task_struct * tsk, int signal);
+extern void force_sig(unsigned long sig,struct task_struct * p);
+extern int send_sig(unsigned long sig,struct task_struct * p,int priv);
+extern int in_group_p(gid_t grp);
+
+extern int request_irq(unsigned int irq,
+ void (*handler)(int, void *, struct pt_regs *),
+ unsigned long flags,
+ const char *device,
+ void *dev_id);
+extern void free_irq(unsigned int irq, void *dev_id);
+
+/*
+ * This has now become a routine instead of a macro; it sets a flag if
+ * it returns true (to do BSD-style accounting where the process is flagged
+ * if it uses root privs). The implication of this is that you should do
+ * normal permissions checks first, and check suser() last.
+ */
+#ifdef MACH
+
+extern inline int
+suser(void)
+{
+ return 1;
+}
+
+#else
+
+extern inline int suser(void)
+{
+ if (current->euid == 0) {
+ current->flags |= PF_SUPERPRIV;
+ return 1;
+ }
+ return 0;
+}
+#endif
+
+extern void copy_thread(int, unsigned long, unsigned long, struct task_struct *, struct pt_regs *);
+extern void flush_thread(void);
+extern void exit_thread(void);
+
+extern void exit_mm(struct task_struct *);
+extern void exit_fs(struct task_struct *);
+extern void exit_files(struct task_struct *);
+extern void exit_sighand(struct task_struct *);
+extern void release_thread(struct task_struct *);
+
+extern int do_execve(char *, char **, char **, struct pt_regs *);
+extern int do_fork(unsigned long, unsigned long, struct pt_regs *);
+
+extern void add_wait_queue(struct wait_queue **p, struct wait_queue *wait);
+extern void remove_wait_queue(struct wait_queue **p, struct wait_queue *wait);
+
+/* See if we have a valid user level fd.
+ * If it makes sense, return the file structure it references.
+ * Otherwise return NULL.
+ */
+
+#ifdef MACH
+
+extern void __add_wait_queue (struct wait_queue **q, struct wait_queue *wait);
+extern void add_wait_queue (struct wait_queue **q, struct wait_queue *wait);
+extern void __remove_wait_queue (struct wait_queue **q, struct wait_queue *wait);
+extern void remove_wait_queue (struct wait_queue **q, struct wait_queue *wait);
+
+#else /* !MACH */
+
+extern inline struct file *file_from_fd(const unsigned int fd)
+{
+
+ if (fd >= NR_OPEN)
+ return NULL;
+ /* either valid or null */
+ return current->files->fd[fd];
+}
+
+/*
+ * The wait-queues are circular lists, and you have to be *very* sure
+ * to keep them correct. Use only these two functions to add/remove
+ * entries in the queues.
+ */
+extern inline void __add_wait_queue(struct wait_queue ** p, struct wait_queue * wait)
+{
+ struct wait_queue *head = *p;
+ struct wait_queue *next = WAIT_QUEUE_HEAD(p);
+
+ if (head)
+ next = head;
+ *p = wait;
+ wait->next = next;
+}
+
+extern inline void add_wait_queue(struct wait_queue ** p, struct wait_queue * wait)
+{
+ unsigned long flags;
+
+ save_flags(flags);
+ cli();
+ __add_wait_queue(p, wait);
+ restore_flags(flags);
+}
+
+extern inline void __remove_wait_queue(struct wait_queue ** p, struct wait_queue * wait)
+{
+ struct wait_queue * next = wait->next;
+ struct wait_queue * head = next;
+
+ for (;;) {
+ struct wait_queue * nextlist = head->next;
+ if (nextlist == wait)
+ break;
+ head = nextlist;
+ }
+ head->next = next;
+}
+
+extern inline void remove_wait_queue(struct wait_queue ** p, struct wait_queue * wait)
+{
+ unsigned long flags;
+
+ save_flags(flags);
+ cli();
+ __remove_wait_queue(p, wait);
+ restore_flags(flags);
+}
+
+extern inline void select_wait(struct wait_queue ** wait_address, select_table * p)
+{
+ struct select_table_entry * entry;
+
+ if (!p || !wait_address)
+ return;
+ if (p->nr >= __MAX_SELECT_TABLE_ENTRIES)
+ return;
+ entry = p->entry + p->nr;
+ entry->wait_address = wait_address;
+ entry->wait.task = current;
+ entry->wait.next = NULL;
+ add_wait_queue(wait_address,&entry->wait);
+ p->nr++;
+}
+
+#endif /* !MACH */
+
+#define REMOVE_LINKS(p) do { unsigned long flags; \
+ save_flags(flags) ; cli(); \
+ (p)->next_task->prev_task = (p)->prev_task; \
+ (p)->prev_task->next_task = (p)->next_task; \
+ restore_flags(flags); \
+ if ((p)->p_osptr) \
+ (p)->p_osptr->p_ysptr = (p)->p_ysptr; \
+ if ((p)->p_ysptr) \
+ (p)->p_ysptr->p_osptr = (p)->p_osptr; \
+ else \
+ (p)->p_pptr->p_cptr = (p)->p_osptr; \
+ } while (0)
+
+#define SET_LINKS(p) do { unsigned long flags; \
+ save_flags(flags); cli(); \
+ (p)->next_task = &init_task; \
+ (p)->prev_task = init_task.prev_task; \
+ init_task.prev_task->next_task = (p); \
+ init_task.prev_task = (p); \
+ restore_flags(flags); \
+ (p)->p_ysptr = NULL; \
+ if (((p)->p_osptr = (p)->p_pptr->p_cptr) != NULL) \
+ (p)->p_osptr->p_ysptr = p; \
+ (p)->p_pptr->p_cptr = p; \
+ } while (0)
+
+#define for_each_task(p) \
+ for (p = &init_task ; (p = p->next_task) != &init_task ; )
+
+#endif /* __KERNEL__ */
+
+#endif
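
The wait-queue declarations above support the classic sleep/wake pattern: a sleeper calls interruptible_sleep_on() on a queue and a producer later calls wake_up_interruptible() on the same queue. A sketch with a hypothetical queue and condition flag; note that the test-then-sleep window is the well-known sleep_on race, which callers of this era typically close by bracketing the check with cli()/restore_flags().

#include <linux/sched.h>

static struct wait_queue *foo_wait = NULL;	/* hypothetical queue */
static volatile int foo_ready = 0;

static void foo_wait_for_ready(void)
{
	while (!foo_ready)
		interruptible_sleep_on(&foo_wait);	/* sleep until woken */
}

static void foo_mark_ready(void)
{
	foo_ready = 1;
	wake_up_interruptible(&foo_wait);	/* wake everyone on the queue */
}
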
diff --git a/linux/dev/include/linux/skbuff.h b/linux/dev/include/linux/skbuff.h
new file mode 100644
index 0000000..3f3128e
--- /dev/null
+++ b/linux/dev/include/linux/skbuff.h
@@ -0,0 +1,470 @@
+/*
+ * Definitions for the 'struct sk_buff' memory handlers.
+ *
+ * Authors:
+ * Alan Cox, <gw4pts@gw4pts.ampr.org>
+ * Florian La Roche, <rzsfl@rz.uni-sb.de>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _LINUX_SKBUFF_H
+#define _LINUX_SKBUFF_H
+
+#include <linux/config.h>
+#include <linux/time.h>
+
+#include <asm/atomic.h>
+#include <asm/types.h>
+
+#define CONFIG_SKB_CHECK 0
+
+#define HAVE_ALLOC_SKB /* For the drivers to know */
+#define HAVE_ALIGNABLE_SKB /* Ditto 8) */
+
+
+#define FREE_READ 1
+#define FREE_WRITE 0
+
+#define CHECKSUM_NONE 0
+#define CHECKSUM_HW 1
+#define CHECKSUM_UNNECESSARY 2
+
+struct sk_buff_head
+{
+ struct sk_buff * next;
+ struct sk_buff * prev;
+ __u32 qlen; /* Must be same length as a pointer
+ for debugging */
+#if CONFIG_SKB_CHECK
+ int magic_debug_cookie;
+#endif
+};
+
+
+struct sk_buff
+{
+ struct sk_buff * next; /* Next buffer in list */
+ struct sk_buff * prev; /* Previous buffer in list */
+ struct sk_buff_head * list; /* List we are on */
+#if CONFIG_SKB_CHECK
+ int magic_debug_cookie;
+#endif
+ struct sk_buff *link3; /* Link for IP protocol level buffer chains */
+ struct sock *sk; /* Socket we are owned by */
+ unsigned long when; /* used to compute rtt's */
+ struct timeval stamp; /* Time we arrived */
+ struct linux_device *dev; /* Device we arrived on/are leaving by */
+ union
+ {
+ struct tcphdr *th;
+ struct ethhdr *eth;
+ struct iphdr *iph;
+ struct udphdr *uh;
+ unsigned char *raw;
+ /* for passing file handles in a unix domain socket */
+ void *filp;
+ } h;
+
+ union
+ {
+ /* As yet incomplete physical layer views */
+ unsigned char *raw;
+ struct ethhdr *ethernet;
+ } mac;
+
+ struct iphdr *ip_hdr; /* For IPPROTO_RAW */
+ unsigned long len; /* Length of actual data */
+ unsigned long csum; /* Checksum */
+ __u32 saddr; /* IP source address */
+ __u32 daddr; /* IP target address */
+ __u32 raddr; /* IP next hop address */
+ __u32 seq; /* TCP sequence number */
+ __u32 end_seq; /* seq [+ fin] [+ syn] + datalen */
+ __u32 ack_seq; /* TCP ack sequence number */
+ unsigned char proto_priv[16]; /* Protocol private data */
+ volatile char acked, /* Are we acked ? */
+ used, /* Are we in use ? */
+ free, /* How to free this buffer */
+ arp; /* Has IP/ARP resolution finished */
+ unsigned char tries, /* Times tried */
+ lock, /* Are we locked ? */
+ localroute, /* Local routing asserted for this frame */
+ pkt_type, /* Packet class */
+ pkt_bridged, /* Tracker for bridging */
+ ip_summed; /* Driver fed us an IP checksum */
+#define PACKET_HOST 0 /* To us */
+#define PACKET_BROADCAST 1 /* To all */
+#define PACKET_MULTICAST 2 /* To group */
+#define PACKET_OTHERHOST 3 /* To someone else */
+ unsigned short users; /* User count - see datagram.c,tcp.c */
+ unsigned short protocol; /* Packet protocol from driver. */
+ unsigned int truesize; /* Buffer size */
+
+ atomic_t count; /* reference count */
+ struct sk_buff *data_skb; /* Link to the actual data skb */
+ unsigned char *head; /* Head of buffer */
+ unsigned char *data; /* Data head pointer */
+ unsigned char *tail; /* Tail pointer */
+ unsigned char *end; /* End pointer */
+ void (*destructor)(struct sk_buff *); /* Destruct function */
+ __u16 redirport; /* Redirect port */
+#ifdef MACH
+#ifdef MACH_INCLUDE
+ ipc_port_t reply;
+ mach_msg_type_name_t reply_type;
+ vm_map_copy_t copy;
+#else
+ void *reply;
+ unsigned reply_type;
+ void *copy;
+#endif
+#endif
+};
+
+#ifdef CONFIG_SKB_LARGE
+#define SK_WMEM_MAX 65535
+#define SK_RMEM_MAX 65535
+#else
+#define SK_WMEM_MAX 32767
+#define SK_RMEM_MAX 32767
+#endif
+
+#if CONFIG_SKB_CHECK
+#define SK_FREED_SKB 0x0DE2C0DE
+#define SK_GOOD_SKB 0xDEC0DED1
+#define SK_HEAD_SKB 0x12231298
+#endif
+
+#ifdef __KERNEL__
+/*
+ * Handling routines are only of interest to the kernel
+ */
+#include <linux/malloc.h>
+
+#include <asm/system.h>
+
+#if 0
+extern void print_skb(struct sk_buff *);
+#endif
+extern void kfree_skb(struct sk_buff *skb, int rw);
+extern void skb_queue_head_init(struct sk_buff_head *list);
+extern void skb_queue_head(struct sk_buff_head *list,struct sk_buff *buf);
+extern void skb_queue_tail(struct sk_buff_head *list,struct sk_buff *buf);
+extern struct sk_buff * skb_dequeue(struct sk_buff_head *list);
+extern void skb_insert(struct sk_buff *old,struct sk_buff *newsk);
+extern void skb_append(struct sk_buff *old,struct sk_buff *newsk);
+extern void skb_unlink(struct sk_buff *buf);
+extern __u32 skb_queue_len(struct sk_buff_head *list);
+extern struct sk_buff * skb_peek_copy(struct sk_buff_head *list);
+extern struct sk_buff * alloc_skb(unsigned int size, int priority);
+extern struct sk_buff * dev_alloc_skb(unsigned int size);
+extern void kfree_skbmem(struct sk_buff *skb);
+extern struct sk_buff * skb_clone(struct sk_buff *skb, int priority);
+extern struct sk_buff * skb_copy(struct sk_buff *skb, int priority);
+extern void skb_device_lock(struct sk_buff *skb);
+extern void skb_device_unlock(struct sk_buff *skb);
+extern void dev_kfree_skb(struct sk_buff *skb, int mode);
+extern int skb_device_locked(struct sk_buff *skb);
+extern unsigned char * skb_put(struct sk_buff *skb, int len);
+extern unsigned char * skb_push(struct sk_buff *skb, int len);
+extern unsigned char * skb_pull(struct sk_buff *skb, int len);
+extern int skb_headroom(struct sk_buff *skb);
+extern int skb_tailroom(struct sk_buff *skb);
+extern void skb_reserve(struct sk_buff *skb, int len);
+extern void skb_trim(struct sk_buff *skb, int len);
+
+extern __inline__ int skb_queue_empty(struct sk_buff_head *list)
+{
+ return (list->next == (struct sk_buff *) list);
+}
+
+/*
+ * Peek an sk_buff. Unlike most other operations you _MUST_
+ * be careful with this one. A peek leaves the buffer on the
+ * list and someone else may run off with it. On an interrupt-driven
+ * system, cli(), peek the buffer, copy the data, then sti().
+ */
+extern __inline__ struct sk_buff *skb_peek(struct sk_buff_head *list_)
+{
+ struct sk_buff *list = ((struct sk_buff *)list_)->next;
+ if (list == (struct sk_buff *)list_)
+ list = NULL;
+ return list;
+}
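
As the comment above warns, a peek leaves the buffer on the queue, so anything examining it has to stop interrupt handlers from dequeueing it underneath. A sketch of that pattern; the queue pointer and the destination buffer (assumed large enough) are hypothetical.

	static void
	peek_and_copy (struct sk_buff_head *queue, unsigned char *out)
	{
	  unsigned long flags;
	  struct sk_buff *skb;

	  save_flags (flags);
	  cli ();			/* nothing may unlink the head while we look at it */
	  skb = skb_peek (queue);
	  if (skb != NULL)
	    memcpy (out, skb->data, skb->len);
	  restore_flags (flags);
	}
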
+
+/*
+ * Return the length of an sk_buff queue
+ */
+
+extern __inline__ __u32 skb_queue_len(struct sk_buff_head *list_)
+{
+ return(list_->qlen);
+}
+
+#if CONFIG_SKB_CHECK
+extern int skb_check(struct sk_buff *skb,int,int, char *);
+#define IS_SKB(skb) skb_check((skb), 0, __LINE__,__FILE__)
+#define IS_SKB_HEAD(skb) skb_check((skb), 1, __LINE__,__FILE__)
+#else
+#define IS_SKB(skb)
+#define IS_SKB_HEAD(skb)
+
+extern __inline__ void skb_queue_head_init(struct sk_buff_head *list)
+{
+ list->prev = (struct sk_buff *)list;
+ list->next = (struct sk_buff *)list;
+ list->qlen = 0;
+}
+
+/*
+ * Insert an sk_buff at the start of a list.
+ *
+ * The "__skb_xxxx()" functions are the non-atomic ones that
+ * can only be called with interrupts disabled.
+ */
+
+extern __inline__ void __skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk)
+{
+ struct sk_buff *prev, *next;
+
+ newsk->list = list;
+ list->qlen++;
+ prev = (struct sk_buff *)list;
+ next = prev->next;
+ newsk->next = next;
+ newsk->prev = prev;
+ next->prev = newsk;
+ prev->next = newsk;
+}
+
+extern __inline__ void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk)
+{
+ unsigned long flags;
+
+ save_flags(flags);
+ cli();
+ __skb_queue_head(list, newsk);
+ restore_flags(flags);
+}
+
+/*
+ * Insert an sk_buff at the end of a list.
+ */
+
+extern __inline__ void __skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk)
+{
+ struct sk_buff *prev, *next;
+
+ newsk->list = list;
+ list->qlen++;
+ next = (struct sk_buff *)list;
+ prev = next->prev;
+ newsk->next = next;
+ newsk->prev = prev;
+ next->prev = newsk;
+ prev->next = newsk;
+}
+
+extern __inline__ void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk)
+{
+ unsigned long flags;
+
+ save_flags(flags);
+ cli();
+ __skb_queue_tail(list, newsk);
+ restore_flags(flags);
+}
+
+/*
+ * Remove an sk_buff from a list.
+ */
+
+extern __inline__ struct sk_buff *__skb_dequeue(struct sk_buff_head *list)
+{
+ struct sk_buff *next, *prev, *result;
+
+ prev = (struct sk_buff *) list;
+ next = prev->next;
+ result = NULL;
+ if (next != prev) {
+ result = next;
+ next = next->next;
+ list->qlen--;
+ next->prev = prev;
+ prev->next = next;
+ result->next = NULL;
+ result->prev = NULL;
+ result->list = NULL;
+ }
+ return result;
+}
+
+extern __inline__ struct sk_buff *skb_dequeue(struct sk_buff_head *list)
+{
+ long flags;
+ struct sk_buff *result;
+
+ save_flags(flags);
+ cli();
+ result = __skb_dequeue(list);
+ restore_flags(flags);
+ return result;
+}
+
+/*
+ * Insert a packet on a list.
+ */
+
+extern __inline__ void __skb_insert(struct sk_buff *newsk,
+ struct sk_buff * prev, struct sk_buff *next,
+ struct sk_buff_head * list)
+{
+ newsk->next = next;
+ newsk->prev = prev;
+ next->prev = newsk;
+ prev->next = newsk;
+ newsk->list = list;
+ list->qlen++;
+}
+
+/*
+ * Place a packet before a given packet in a list
+ */
+extern __inline__ void skb_insert(struct sk_buff *old, struct sk_buff *newsk)
+{
+ unsigned long flags;
+
+ save_flags(flags);
+ cli();
+ __skb_insert(newsk, old->prev, old, old->list);
+ restore_flags(flags);
+}
+
+/*
+ * Place a packet after a given packet in a list.
+ */
+
+extern __inline__ void skb_append(struct sk_buff *old, struct sk_buff *newsk)
+{
+ unsigned long flags;
+
+ save_flags(flags);
+ cli();
+ __skb_insert(newsk, old, old->next, old->list);
+ restore_flags(flags);
+}
+
+/*
+ * remove sk_buff from list. _Must_ be called atomically, and with
+ * the list known..
+ */
+extern __inline__ void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
+{
+ struct sk_buff * next, * prev;
+
+ list->qlen--;
+ next = skb->next;
+ prev = skb->prev;
+ skb->next = NULL;
+ skb->prev = NULL;
+ skb->list = NULL;
+ next->prev = prev;
+ prev->next = next;
+}
+
+/*
+ * Remove an sk_buff from its list. Works even without knowing the list it
+ * is sitting on, which can be handy at times. It also means that THE LIST
+ * MUST EXIST when you unlink. Thus a list must have its contents unlinked
+ * _FIRST_, before the list itself is destroyed.
+ */
+
+extern __inline__ void skb_unlink(struct sk_buff *skb)
+{
+ unsigned long flags;
+
+ save_flags(flags);
+ cli();
+ if(skb->list)
+ __skb_unlink(skb, skb->list);
+ restore_flags(flags);
+}
+
+/*
+ * Add data to an sk_buff
+ */
+extern __inline__ unsigned char *skb_put(struct sk_buff *skb, int len)
+{
+ unsigned char *tmp=skb->tail;
+ skb->tail+=len;
+ skb->len+=len;
+ if(skb->tail>skb->end)
+ {
+ __label__ here;
+ panic("skput:over: %p:%d", &&here,len);
+here:
+ }
+ return tmp;
+}
+
+extern __inline__ unsigned char *skb_push(struct sk_buff *skb, int len)
+{
+ skb->data-=len;
+ skb->len+=len;
+ if(skb->data<skb->head)
+ {
+ __label__ here;
+ panic("skpush:under: %p:%d", &&here,len);
+here:
+ }
+ return skb->data;
+}
+
+extern __inline__ unsigned char * skb_pull(struct sk_buff *skb, int len)
+{
+ if(len > skb->len)
+ return NULL;
+ skb->data+=len;
+ skb->len-=len;
+ return skb->data;
+}
+
+extern __inline__ int skb_headroom(struct sk_buff *skb)
+{
+ return skb->data-skb->head;
+}
+
+extern __inline__ int skb_tailroom(struct sk_buff *skb)
+{
+ return skb->end-skb->tail;
+}
+
+extern __inline__ void skb_reserve(struct sk_buff *skb, int len)
+{
+ skb->data+=len;
+ skb->tail+=len;
+}
+
+extern __inline__ void skb_trim(struct sk_buff *skb, int len)
+{
+ if(skb->len>len)
+ {
+ skb->len=len;
+ skb->tail=skb->data+len;
+ }
+}
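
Taken together, these helpers only move the data and tail pointers around inside the fixed head..end buffer: skb_reserve opens headroom, skb_put appends at the tail, skb_push prepends, skb_pull strips. A sketch of the usual receive-side sequence, where rx_buf and pkt_len stand in for a real driver's state:

	static struct sk_buff *
	build_rx_skb (unsigned char *rx_buf, int pkt_len)
	{
	  struct sk_buff *skb = dev_alloc_skb (pkt_len + 2);

	  if (skb == NULL)
	    return NULL;
	  skb_reserve (skb, 2);	/* 14-byte Ethernet header, so the IP header ends up word aligned */
	  memcpy (skb_put (skb, pkt_len), rx_buf, pkt_len);
	  return skb;
	}
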
+
+#endif
+
+extern struct sk_buff * skb_recv_datagram(struct sock *sk,unsigned flags,int noblock, int *err);
+extern int datagram_select(struct sock *sk, int sel_type, select_table *wait);
+extern void skb_copy_datagram(struct sk_buff *from, int offset, char *to,int size);
+extern void skb_copy_datagram_iovec(struct sk_buff *from, int offset, struct iovec *to,int size);
+extern void skb_free_datagram(struct sock * sk, struct sk_buff *skb);
+
+#endif /* __KERNEL__ */
+#endif /* _LINUX_SKBUFF_H */
diff --git a/linux/dev/include/linux/types.h b/linux/dev/include/linux/types.h
new file mode 100644
index 0000000..ef0f2fc
--- /dev/null
+++ b/linux/dev/include/linux/types.h
@@ -0,0 +1,112 @@
+#ifndef _LINUX_TYPES_H
+#define _LINUX_TYPES_H
+
+#include <linux/posix_types.h>
+#include <asm/types.h>
+
+#ifndef __KERNEL_STRICT_NAMES
+
+typedef __kernel_fd_set fd_set;
+
+#ifndef MACH_INCLUDE
+typedef __kernel_dev_t dev_t;
+typedef __kernel_ino_t ino_t;
+typedef __kernel_mode_t mode_t;
+typedef __kernel_nlink_t nlink_t;
+#endif
+
+#ifdef MACH_INCLUDE
+#define off_t long
+#else
+typedef __kernel_off_t off_t;
+#endif
+
+typedef __kernel_pid_t pid_t;
+
+#ifdef MACH_INCLUDE
+#define uid_t unsigned short
+#define gid_t unsigned short
+#define daddr_t int
+#else
+typedef __kernel_uid_t uid_t;
+typedef __kernel_gid_t gid_t;
+typedef __kernel_daddr_t daddr_t;
+#endif
+
+#if defined(__GNUC__) && !defined(__STRICT_ANSI__)
+typedef __kernel_loff_t loff_t;
+#endif
+
+/*
+ * The following typedefs are also protected by individual ifdefs for
+ * historical reasons:
+ */
+#ifndef _SIZE_T
+#define _SIZE_T
+#ifndef MACH_INCLUDE
+typedef __kernel_size_t size_t;
+#endif
+#endif
+
+#ifndef _SSIZE_T
+#define _SSIZE_T
+#ifndef MACH_INCLUDE
+typedef __kernel_ssize_t ssize_t;
+#endif
+#endif
+
+#ifndef _PTRDIFF_T
+#define _PTRDIFF_T
+typedef __kernel_ptrdiff_t ptrdiff_t;
+#endif
+
+#ifndef _TIME_T
+#define _TIME_T
+#ifdef MACH_INCLUDE
+#define time_t long
+#else
+typedef __kernel_time_t time_t;
+#endif
+#endif
+
+#ifndef _CLOCK_T
+#define _CLOCK_T
+typedef __kernel_clock_t clock_t;
+#endif
+
+#ifndef _CADDR_T
+#define _CADDR_T
+#ifndef MACH_INCLUDE
+typedef __kernel_caddr_t caddr_t;
+#endif
+#endif
+
+#ifndef MACH_INCLUDE
+/* bsd */
+typedef unsigned char u_char;
+typedef unsigned short u_short;
+typedef unsigned int u_int;
+typedef unsigned long u_long;
+#endif
+
+/* sysv */
+typedef unsigned char unchar;
+typedef unsigned short ushort;
+typedef unsigned int uint;
+typedef unsigned long ulong;
+
+#endif /* __KERNEL_STRICT_NAMES */
+
+/*
+ * Below are truly Linux-specific types that should never collide with
+ * any application/library that wants linux/types.h.
+ */
+
+struct ustat {
+ __kernel_daddr_t f_tfree;
+ __kernel_ino_t f_tinode;
+ char f_fname[6];
+ char f_fpack[6];
+};
+
+#endif /* _LINUX_TYPES_H */
diff --git a/linux/dev/lib/vsprintf.c b/linux/dev/lib/vsprintf.c
new file mode 100644
index 0000000..096e2ec
--- /dev/null
+++ b/linux/dev/lib/vsprintf.c
@@ -0,0 +1,350 @@
+/*
+ * linux/lib/vsprintf.c
+ *
+ * Copyright (C) 1991, 1992 Linus Torvalds
+ */
+
+/* vsprintf.c -- Lars Wirzenius & Linus Torvalds. */
+/*
+ * Wirzenius wrote this portably, Torvalds fucked it up :-)
+ */
+
+#include <sys/types.h>
+
+#define MACH_INCLUDE
+#include <stdarg.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/ctype.h>
+
+unsigned long
+simple_strtoul (const char *cp, char **endp, unsigned int base)
+{
+ unsigned long result = 0, value;
+
+ if (!base)
+ {
+ base = 10;
+ if (*cp == '0')
+ {
+ base = 8;
+ cp++;
+ if ((*cp == 'x') && isxdigit (cp[1]))
+ {
+ cp++;
+ base = 16;
+ }
+ }
+ }
+ while (isxdigit (*cp)
+ && (value = isdigit (*cp) ? *cp - '0'
+ : (islower (*cp) ? toupper (*cp) : *cp) - 'A' + 10) < base)
+ {
+ result = result * base + value;
+ cp++;
+ }
+ if (endp)
+ *endp = (char *) cp;
+ return result;
+}
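
When base is 0, simple_strtoul picks the base from the prefix: a leading '0' means octal and '0x' means hex, and parsing stops at the first character that is not a digit of the chosen base. A few illustrative calls, with the expected results in the comments:

	char *end;
	unsigned long a = simple_strtoul ("0x1f", &end, 0);	/* a == 31, *end == '\0' */
	unsigned long b = simple_strtoul ("0755", &end, 0);	/* b == 493 (octal) */
	unsigned long c = simple_strtoul ("42abc", &end, 10);	/* c == 42, end points at "abc" */
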
+
+/* we use this so that we can do without the ctype library */
+#define is_digit(c) ((c) >= '0' && (c) <= '9')
+
+static int
+skip_atoi (const char **s)
+{
+ int i = 0;
+
+ while (is_digit (**s))
+ i = i * 10 + *((*s)++) - '0';
+ return i;
+}
+
+#define ZEROPAD 1 /* pad with zero */
+#define SIGN 2 /* unsigned/signed long */
+#define PLUS 4 /* show plus */
+#define SPACE 8 /* space if plus */
+#define LEFT 16 /* left justified */
+#define SPECIAL 32 /* 0x */
+#define LARGE 64 /* use 'ABCDEF' instead of 'abcdef' */
+
+#define do_div(n,base) ({ \
+int __res; \
+__res = ((unsigned long) n) % (unsigned) base; \
+n = ((unsigned long) n) / (unsigned) base; \
+__res; })
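
do_div() divides n in place and evaluates to the remainder, which is exactly what the digit loop in number() below needs. For instance:

	unsigned long n = 1234;
	int r = do_div (n, 10);	/* r == 4, and n is now 123 */
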
+
+static char *
+number (char *str, long num, int base, int size, int precision, int type)
+{
+ char c, sign, tmp[66];
+ const char *digits = "0123456789abcdefghijklmnopqrstuvwxyz";
+ int i;
+
+ if (type & LARGE)
+ digits = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ";
+ if (type & LEFT)
+ type &= ~ZEROPAD;
+ if (base < 2 || base > 36)
+ return 0;
+ c = (type & ZEROPAD) ? '0' : ' ';
+ sign = 0;
+ if (type & SIGN)
+ {
+ if (num < 0)
+ {
+ sign = '-';
+ num = -num;
+ size--;
+ }
+ else if (type & PLUS)
+ {
+ sign = '+';
+ size--;
+ }
+ else if (type & SPACE)
+ {
+ sign = ' ';
+ size--;
+ }
+ }
+ if (type & SPECIAL)
+ {
+ if (base == 16)
+ size -= 2;
+ else if (base == 8)
+ size--;
+ }
+ i = 0;
+ if (num == 0)
+ tmp[i++] = '0';
+ else
+ while (num != 0)
+ tmp[i++] = digits[do_div (num, base)];
+ if (i > precision)
+ precision = i;
+ size -= precision;
+ if (!(type & (ZEROPAD + LEFT)))
+ while (size-- > 0)
+ *str++ = ' ';
+ if (sign)
+ *str++ = sign;
+ if (type & SPECIAL)
+ if (base == 8)
+ *str++ = '0';
+ else if (base == 16)
+ {
+ *str++ = '0';
+ *str++ = digits[33];
+ }
+ if (!(type & LEFT))
+ while (size-- > 0)
+ *str++ = c;
+ while (i < precision--)
+ *str++ = '0';
+ while (i-- > 0)
+ *str++ = tmp[i];
+ while (size-- > 0)
+ *str++ = ' ';
+ return str;
+}
+
+int
+linux_vsprintf (char *buf, const char *fmt, va_list args)
+{
+ int len;
+ unsigned long num;
+ int i, base;
+ char *str;
+ const char *s;
+
+ int flags; /* flags to number() */
+
+ int field_width; /* width of output field */
+ int precision; /* min. # of digits for integers; max
+				 * number of chars for a string
+ */
+ int qualifier; /* 'h', 'l', or 'L' for integer fields */
+
+ for (str = buf; *fmt; ++fmt)
+ {
+ if (*fmt != '%')
+ {
+ *str++ = *fmt;
+ continue;
+ }
+
+ /* process flags */
+ flags = 0;
+ repeat:
+ ++fmt; /* this also skips first '%' */
+ switch (*fmt)
+ {
+ case '-':
+ flags |= LEFT;
+ goto repeat;
+ case '+':
+ flags |= PLUS;
+ goto repeat;
+ case ' ':
+ flags |= SPACE;
+ goto repeat;
+ case '#':
+ flags |= SPECIAL;
+ goto repeat;
+ case '0':
+ flags |= ZEROPAD;
+ goto repeat;
+ }
+
+ /* get field width */
+ field_width = -1;
+ if (is_digit (*fmt))
+ field_width = skip_atoi (&fmt);
+ else if (*fmt == '*')
+ {
+ ++fmt;
+ /* it's the next argument */
+ field_width = va_arg (args, int);
+ if (field_width < 0)
+ {
+ field_width = -field_width;
+ flags |= LEFT;
+ }
+ }
+
+ /* get the precision */
+ precision = -1;
+ if (*fmt == '.')
+ {
+ ++fmt;
+ if (is_digit (*fmt))
+ precision = skip_atoi (&fmt);
+ else if (*fmt == '*')
+ {
+ ++fmt;
+ /* it's the next argument */
+ precision = va_arg (args, int);
+ }
+ if (precision < 0)
+ precision = 0;
+ }
+
+ /* get the conversion qualifier */
+ qualifier = -1;
+ if (*fmt == 'h' || *fmt == 'l' || *fmt == 'L')
+ {
+ qualifier = *fmt;
+ ++fmt;
+ }
+
+ /* default base */
+ base = 10;
+
+ switch (*fmt)
+ {
+ case 'c':
+ if (!(flags & LEFT))
+ while (--field_width > 0)
+ *str++ = ' ';
+ *str++ = (unsigned char) va_arg (args, int);
+ while (--field_width > 0)
+ *str++ = ' ';
+ continue;
+
+ case 's':
+ s = va_arg (args, char *);
+ if (!s)
+ s = "<NULL>";
+
+ len = strnlen (s, precision);
+
+ if (!(flags & LEFT))
+ while (len < field_width--)
+ *str++ = ' ';
+ for (i = 0; i < len; ++i)
+ *str++ = *s++;
+ while (len < field_width--)
+ *str++ = ' ';
+ continue;
+
+ case 'p':
+ if (field_width == -1)
+ {
+ field_width = 2 * sizeof (void *);
+ flags |= ZEROPAD;
+ }
+ str = number (str,
+ (unsigned long) va_arg (args, void *), 16,
+ field_width, precision, flags);
+ continue;
+
+
+ case 'n':
+ if (qualifier == 'l')
+ {
+ long *ip = va_arg (args, long *);
+ *ip = (str - buf);
+ }
+ else
+ {
+ int *ip = va_arg (args, int *);
+ *ip = (str - buf);
+ }
+ continue;
+
+ /* integer number formats - set up the flags and "break" */
+ case 'o':
+ base = 8;
+ break;
+
+ case 'X':
+ flags |= LARGE;
+ case 'x':
+ base = 16;
+ break;
+
+ case 'd':
+ case 'i':
+ flags |= SIGN;
+ case 'u':
+ break;
+
+ default:
+ if (*fmt != '%')
+ *str++ = '%';
+ if (*fmt)
+ *str++ = *fmt;
+ else
+ --fmt;
+ continue;
+ }
+ if (qualifier == 'l')
+ num = va_arg (args, unsigned long);
+ else if (qualifier == 'h')
+ if (flags & SIGN)
+ num = va_arg (args, short);
+ else
+ num = va_arg (args, unsigned short);
+ else if (flags & SIGN)
+ num = va_arg (args, int);
+ else
+ num = va_arg (args, unsigned int);
+ str = number (str, num, base, field_width, precision, flags);
+ }
+ *str = '\0';
+ return str - buf;
+}
+
+int
+linux_sprintf (char *buf, const char *fmt,...)
+{
+ va_list args;
+ int i;
+
+ va_start (args, fmt);
+ i = linux_vsprintf (buf, fmt, args);
+ va_end (args);
+ return i;
+}
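
These two functions are what the glue code calls in place of libc's sprintf/vsprintf. A typical call, assuming the buffer is large enough for the formatted result:

	char buf[64];
	int n;

	n = linux_sprintf (buf, "%s: irq %d, base 0x%04lx\n", "eth0", 11, 0x300UL);
	/* buf now holds "eth0: irq 11, base 0x0300\n"; n is its length. */
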
diff --git a/linux/dev/net/core/dev.c b/linux/dev/net/core/dev.c
new file mode 100644
index 0000000..efe0246
--- /dev/null
+++ b/linux/dev/net/core/dev.c
@@ -0,0 +1,1620 @@
+/*
+ * NET3 Protocol independent device support routines.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * Derived from the non IP parts of dev.c 1.0.19
+ * Authors: Ross Biro, <bir7@leland.Stanford.Edu>
+ * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
+ * Mark Evans, <evansmp@uhura.aston.ac.uk>
+ *
+ * Additional Authors:
+ * Florian la Roche <rzsfl@rz.uni-sb.de>
+ * Alan Cox <gw4pts@gw4pts.ampr.org>
+ * David Hinds <dhinds@allegro.stanford.edu>
+ *
+ * Changes:
+ * Alan Cox : device private ioctl copies fields back.
+ * Alan Cox : Transmit queue code does relevant stunts to
+ * keep the queue safe.
+ * Alan Cox : Fixed double lock.
+ * Alan Cox : Fixed promisc NULL pointer trap
+ * ???????? : Support the full private ioctl range
+ * Alan Cox : Moved ioctl permission check into drivers
+ * Tim Kordas : SIOCADDMULTI/SIOCDELMULTI
+ * Alan Cox : 100 backlog just doesn't cut it when
+ * you start doing multicast video 8)
+ * Alan Cox : Rewrote net_bh and list manager.
+ * Alan Cox : Fix ETH_P_ALL echoback lengths.
+ * Alan Cox : Took out transmit every packet pass
+ * Saved a few bytes in the ioctl handler
+ * Alan Cox : Network driver sets packet type before calling netif_rx. Saves
+ * a function call a packet.
+ * Alan Cox : Hashed net_bh()
+ * Richard Kooijman: Timestamp fixes.
+ * Alan Cox : Wrong field in SIOCGIFDSTADDR
+ * Alan Cox : Device lock protection.
+ * Alan Cox : Fixed nasty side effect of device close changes.
+ * Rudi Cilibrasi : Pass the right thing to set_mac_address()
+ * Dave Miller : 32bit quantity for the device lock to make it work out
+ * on a Sparc.
+ * Bjorn Ekwall : Added KERNELD hack.
+ * Alan Cox : Cleaned up the backlog initialise.
+ * Craig Metz : SIOCGIFCONF fix if space for under
+ * 1 device.
+ * Thomas Bogendoerfer : Return ENODEV for dev_open, if there
+ * is no device open function.
+ * Lawrence V. Stefani : Changed set MTU ioctl to not assume
+ * min MTU of 68 bytes for devices
+ * that have change MTU functions.
+ *
+ */
+
+#include <asm/segment.h>
+#include <asm/system.h>
+#include <asm/bitops.h>
+#include <linux/config.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/socket.h>
+#include <linux/sockios.h>
+#include <linux/in.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/if_ether.h>
+#include <linux/inet.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/notifier.h>
+#include <net/ip.h>
+#include <net/route.h>
+#include <linux/skbuff.h>
+#include <net/sock.h>
+#include <net/arp.h>
+#include <net/slhc.h>
+#include <linux/proc_fs.h>
+#include <linux/stat.h>
+#include <net/br.h>
+#ifdef CONFIG_NET_ALIAS
+#include <linux/net_alias.h>
+#endif
+#ifdef CONFIG_KERNELD
+#include <linux/kerneld.h>
+#endif
+#ifdef CONFIG_NET_RADIO
+#include <linux/wireless.h>
+#endif /* CONFIG_NET_RADIO */
+
+#ifndef MACH
+/*
+ * The list of packet types we will receive (as opposed to discard)
+ * and the routines to invoke.
+ */
+
+struct packet_type *ptype_base[16];
+struct packet_type *ptype_all = NULL; /* Taps */
+
+/*
+ * Device list lock
+ */
+
+int dev_lockct=0;
+
+/*
+ * Our notifier list
+ */
+
+struct notifier_block *netdev_chain=NULL;
+
+/*
+ * Device drivers call our routines to queue packets here. We empty the
+ * queue in the bottom half handler.
+ */
+
+static struct sk_buff_head backlog;
+
+/*
+ * We don't overdo the queue or we will thrash memory badly.
+ */
+
+static int backlog_size = 0;
+
+/*
+ * Return the lesser of the two values.
+ */
+
+static __inline__ unsigned long min(unsigned long a, unsigned long b)
+{
+ return (a < b)? a : b;
+}
+
+
+/******************************************************************************************
+
+ Protocol management and registration routines
+
+*******************************************************************************************/
+
+/*
+ * For efficiency
+ */
+
+static int dev_nit=0;
+
+/*
+ * Add a protocol ID to the list. Now that the input handler is
+ * smarter we can dispense with all the messy stuff that used to be
+ * here.
+ */
+
+void dev_add_pack(struct packet_type *pt)
+{
+ int hash;
+ if(pt->type==htons(ETH_P_ALL))
+ {
+ dev_nit++;
+ pt->next=ptype_all;
+ ptype_all=pt;
+ }
+ else
+ {
+ hash=ntohs(pt->type)&15;
+ pt->next = ptype_base[hash];
+ ptype_base[hash] = pt;
+ }
+}
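
Protocol layers hook into the demultiplexer by registering a struct packet_type: ETH_P_ALL handlers go on the ptype_all tap list, everything else into the 16-bucket hash keyed on the low bits of the type. A hypothetical registration sketch; the handler and init function are not part of this file, and the field names are assumed to follow the 2.0-era struct packet_type from <linux/netdevice.h>.

	static int
	my_rcv (struct sk_buff *skb, struct device *dev, struct packet_type *pt)
	{
	  kfree_skb (skb, FREE_READ);	/* this sketch just discards the frame */
	  return 0;
	}

	static struct packet_type my_ptype;

	static void
	my_proto_init (void)
	{
	  my_ptype.type = htons (ETH_P_IP);
	  my_ptype.dev = NULL;		/* match frames from any device */
	  my_ptype.func = my_rcv;
	  my_ptype.data = NULL;
	  dev_add_pack (&my_ptype);
	}
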
+
+
+/*
+ * Remove a protocol ID from the list.
+ */
+
+void dev_remove_pack(struct packet_type *pt)
+{
+ struct packet_type **pt1;
+ if(pt->type==htons(ETH_P_ALL))
+ {
+ dev_nit--;
+ pt1=&ptype_all;
+ }
+ else
+ pt1=&ptype_base[ntohs(pt->type)&15];
+ for(; (*pt1)!=NULL; pt1=&((*pt1)->next))
+ {
+ if(pt==(*pt1))
+ {
+ *pt1=pt->next;
+ return;
+ }
+ }
+ printk(KERN_WARNING "dev_remove_pack: %p not found.\n", pt);
+}
+
+/*****************************************************************************************
+
+ Device Interface Subroutines
+
+******************************************************************************************/
+
+/*
+ * Find an interface by name.
+ */
+
+struct device *dev_get(const char *name)
+{
+ struct device *dev;
+
+ for (dev = dev_base; dev != NULL; dev = dev->next)
+ {
+ if (strcmp(dev->name, name) == 0)
+ return(dev);
+ }
+ return NULL;
+}
+
+/*
+ * Find and possibly load an interface.
+ */
+
+#ifdef CONFIG_KERNELD
+
+extern __inline__ void dev_load(const char *name)
+{
+ if(!dev_get(name) && suser()) {
+#ifdef CONFIG_NET_ALIAS
+ const char *sptr;
+
+ for (sptr=name ; *sptr ; sptr++) if(*sptr==':') break;
+ if (!(*sptr && *(sptr+1)))
+#endif
+ request_module(name);
+ }
+}
+
+#endif
+
+/*
+ * Prepare an interface for use.
+ */
+
+int dev_open(struct device *dev)
+{
+ int ret = -ENODEV;
+
+ /*
+ * Call device private open method
+ */
+ if (dev->open)
+ ret = dev->open(dev);
+
+ /*
+ * If it went open OK then set the flags
+ */
+
+ if (ret == 0)
+ {
+ dev->flags |= (IFF_UP | IFF_RUNNING);
+ /*
+ * Initialise multicasting status
+ */
+ dev_mc_upload(dev);
+ notifier_call_chain(&netdev_chain, NETDEV_UP, dev);
+ }
+ return(ret);
+}
+
+
+/*
+ * Completely shutdown an interface.
+ */
+
+int dev_close(struct device *dev)
+{
+ int ct=0;
+
+ /*
+ * Call the device specific close. This cannot fail.
+ * Only if device is UP
+ */
+
+ if ((dev->flags & IFF_UP) && dev->stop)
+ dev->stop(dev);
+
+ /*
+ * Device is now down.
+ */
+
+ dev->flags&=~(IFF_UP|IFF_RUNNING);
+
+ /*
+ * Tell people we are going down
+ */
+ notifier_call_chain(&netdev_chain, NETDEV_DOWN, dev);
+ /*
+ * Flush the multicast chain
+ */
+ dev_mc_discard(dev);
+
+ /*
+ * Purge any queued packets when we down the link
+ */
+ while(ct<DEV_NUMBUFFS)
+ {
+ struct sk_buff *skb;
+ while((skb=skb_dequeue(&dev->buffs[ct]))!=NULL)
+ if(skb->free)
+ kfree_skb(skb,FREE_WRITE);
+ ct++;
+ }
+ return(0);
+}
+
+
+/*
+ * Device change register/unregister. These are not inline or static
+ * as we export them to the world.
+ */
+
+int register_netdevice_notifier(struct notifier_block *nb)
+{
+ return notifier_chain_register(&netdev_chain, nb);
+}
+
+int unregister_netdevice_notifier(struct notifier_block *nb)
+{
+ return notifier_chain_unregister(&netdev_chain,nb);
+}
+
+/*
+ * Send (or queue for sending) a packet.
+ *
+ * IMPORTANT: When this is called to resend frames, the caller MUST
+ * already have locked the sk_buff. Apart from that we do the
+ * rest of the magic.
+ */
+
+static void do_dev_queue_xmit(struct sk_buff *skb, struct device *dev, int pri)
+{
+ unsigned long flags;
+ struct sk_buff_head *list;
+ int retransmission = 0; /* used to say if the packet should go */
+ /* at the front or the back of the */
+ /* queue - front is a retransmit try */
+
+ if(pri>=0 && !skb_device_locked(skb))
+ skb_device_lock(skb); /* Shove a lock on the frame */
+#if CONFIG_SKB_CHECK
+ IS_SKB(skb);
+#endif
+ skb->dev = dev;
+
+ /*
+ * Negative priority is used to flag a frame that is being pulled from the
+ * queue front as a retransmit attempt. It therefore goes back on the queue
+ * start on a failure.
+ */
+
+ if (pri < 0)
+ {
+ pri = -pri-1;
+ retransmission = 1;
+ }
+
+#ifdef CONFIG_NET_DEBUG
+ if (pri >= DEV_NUMBUFFS)
+ {
+ printk(KERN_WARNING "bad priority in dev_queue_xmit.\n");
+ pri = 1;
+ }
+#endif
+
+ /*
+	 * If the address has not been resolved, call the device header rebuilder.
+ * This can cover all protocols and technically not just ARP either.
+ */
+
+ if (!skb->arp && dev->rebuild_header(skb->data, dev, skb->raddr, skb)) {
+ return;
+ }
+
+ /*
+ *
+ * If dev is an alias, switch to its main device.
+ * "arp" resolution has been made with alias device, so
+ * arp entries refer to alias, not main.
+ *
+ */
+
+#ifdef CONFIG_NET_ALIAS
+ if (net_alias_is(dev))
+ skb->dev = dev = net_alias_dev_tx(dev);
+#endif
+
+ /*
+ * If we are bridging and this is directly generated output
+ * pass the frame via the bridge.
+ */
+
+#ifdef CONFIG_BRIDGE
+ if(skb->pkt_bridged!=IS_BRIDGED && br_stats.flags & BR_UP)
+ {
+ if(br_tx_frame(skb))
+ return;
+ }
+#endif
+
+ list = dev->buffs + pri;
+
+ save_flags(flags);
+ /* if this isn't a retransmission, use the first packet instead... */
+ if (!retransmission) {
+ if (skb_queue_len(list)) {
+ /* avoid overrunning the device queue.. */
+ if (skb_queue_len(list) > dev->tx_queue_len) {
+ dev_kfree_skb(skb, FREE_WRITE);
+ return;
+ }
+ }
+
+ /* copy outgoing packets to any sniffer packet handlers */
+ if (dev_nit) {
+ struct packet_type *ptype;
+ skb->stamp=xtime;
+ for (ptype = ptype_all; ptype!=NULL; ptype = ptype->next)
+ {
+ /* Never send packets back to the socket
+ * they originated from - MvS (miquels@drinkel.ow.org)
+ */
+ if ((ptype->dev == dev || !ptype->dev) &&
+ ((struct sock *)ptype->data != skb->sk))
+ {
+ struct sk_buff *skb2;
+ if ((skb2 = skb_clone(skb, GFP_ATOMIC)) == NULL)
+ break;
+ /* FIXME?: Wrong when the hard_header_len
+ * is an upper bound. Is this even
+ * used anywhere?
+ */
+ skb2->h.raw = skb2->data + dev->hard_header_len;
+ /* On soft header devices we
+ * yank the header before mac.raw
+ * back off. This is set by
+ * dev->hard_header().
+ */
+ if (dev->flags&IFF_SOFTHEADERS)
+ skb_pull(skb2,skb2->mac.raw-skb2->data);
+ skb2->mac.raw = skb2->data;
+ ptype->func(skb2, skb->dev, ptype);
+ }
+ }
+ }
+
+ if (skb_queue_len(list)) {
+ cli();
+ skb_device_unlock(skb); /* Buffer is on the device queue and can be freed safely */
+ __skb_queue_tail(list, skb);
+ skb = __skb_dequeue(list);
+ skb_device_lock(skb); /* New buffer needs locking down */
+ restore_flags(flags);
+ }
+ }
+ if (dev->hard_start_xmit(skb, dev) == 0) {
+ /*
+ * Packet is now solely the responsibility of the driver
+ */
+ return;
+ }
+
+ /*
+ * Transmission failed, put skb back into a list. Once on the list it's safe and
+ * no longer device locked (it can be freed safely from the device queue)
+ */
+ cli();
+ skb_device_unlock(skb);
+ __skb_queue_head(list,skb);
+ restore_flags(flags);
+}
+
+void dev_queue_xmit(struct sk_buff *skb, struct device *dev, int pri)
+{
+ start_bh_atomic();
+ do_dev_queue_xmit(skb, dev, pri);
+ end_bh_atomic();
+}
+
+/*
+ * Receive a packet from a device driver and queue it for the upper
+ * (protocol) levels. It always succeeds. This is the recommended
+ * interface to use.
+ */
+
+void netif_rx(struct sk_buff *skb)
+{
+ static int dropping = 0;
+
+ /*
+ * Any received buffers are un-owned and should be discarded
+ * when freed. These will be updated later as the frames get
+ * owners.
+ */
+
+ skb->sk = NULL;
+ skb->free = 1;
+ if(skb->stamp.tv_sec==0)
+ skb->stamp = xtime;
+
+ /*
+ * Check that we aren't overdoing things.
+ */
+
+ if (!backlog_size)
+ dropping = 0;
+ else if (backlog_size > 300)
+ dropping = 1;
+
+ if (dropping)
+ {
+ kfree_skb(skb, FREE_READ);
+ return;
+ }
+
+ /*
+ * Add it to the "backlog" queue.
+ */
+#if CONFIG_SKB_CHECK
+ IS_SKB(skb);
+#endif
+ skb_queue_tail(&backlog,skb);
+ backlog_size++;
+
+ /*
+ * If any packet arrived, mark it for processing after the
+ * hardware interrupt returns.
+ */
+
+ mark_bh(NET_BH);
+ return;
+}
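
netif_rx() is the driver-side hand-off point: the frame is queued on the backlog and the real dispatch is deferred to net_bh(). A sketch of what a receive interrupt handler typically does before calling it; dev, rx_buf and pkt_len are assumed to come from the driver, and eth_type_trans is the helper declared in <linux/etherdevice.h>.

	static void
	my_rx (struct device *dev, unsigned char *rx_buf, int pkt_len)
	{
	  struct sk_buff *skb = dev_alloc_skb (pkt_len + 2);

	  if (skb == NULL)
	    return;			/* out of buffers: drop the frame */
	  skb->dev = dev;
	  skb_reserve (skb, 2);
	  memcpy (skb_put (skb, pkt_len), rx_buf, pkt_len);
	  skb->protocol = eth_type_trans (skb, dev);
	  netif_rx (skb);
	}
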
+
+/*
+ * This routine causes all interfaces to try to send some data.
+ */
+
+static void dev_transmit(void)
+{
+ struct device *dev;
+
+ for (dev = dev_base; dev != NULL; dev = dev->next)
+ {
+ if (dev->flags != 0 && !dev->tbusy) {
+ /*
+ * Kick the device
+ */
+ dev_tint(dev);
+ }
+ }
+}
+
+
+/**********************************************************************************
+
+ Receive Queue Processor
+
+***********************************************************************************/
+
+/*
+ * When we are called the queue is ready to grab, the interrupts are
+ * on and hardware can interrupt and queue to the receive queue as we
+ * run with no problems.
+ * This is run as a bottom half after an interrupt handler that does
+ * mark_bh(NET_BH);
+ */
+
+void net_bh(void)
+{
+ struct packet_type *ptype;
+ struct packet_type *pt_prev;
+ unsigned short type;
+
+ /*
+ * Can we send anything now? We want to clear the
+ * decks for any more sends that get done as we
+ * process the input. This also minimises the
+ * latency on a transmit interrupt bh.
+ */
+
+ dev_transmit();
+
+ /*
+	 * Any data left to process? This may occur because a
+	 * mark_bh() is done after we empty the queue, including
+	 * one from a device that does a mark_bh() just after we finish.
+ */
+
+ /*
+ * While the queue is not empty..
+ *
+ * Note that the queue never shrinks due to
+ * an interrupt, so we can do this test without
+ * disabling interrupts.
+ */
+
+ while (!skb_queue_empty(&backlog)) {
+ struct sk_buff * skb = backlog.next;
+
+ /*
+ * We have a packet. Therefore the queue has shrunk
+ */
+ cli();
+ __skb_unlink(skb, &backlog);
+ backlog_size--;
+ sti();
+
+
+#ifdef CONFIG_BRIDGE
+
+ /*
+ * If we are bridging then pass the frame up to the
+ * bridging code. If it is bridged then move on
+ */
+
+ if (br_stats.flags & BR_UP)
+ {
+ /*
+ * We pass the bridge a complete frame. This means
+ * recovering the MAC header first.
+ */
+
+ int offset=skb->data-skb->mac.raw;
+ cli();
+ skb_push(skb,offset); /* Put header back on for bridge */
+ if(br_receive_frame(skb))
+ {
+ sti();
+ continue;
+ }
+ /*
+ * Pull the MAC header off for the copy going to
+ * the upper layers.
+ */
+ skb_pull(skb,offset);
+ sti();
+ }
+#endif
+
+ /*
+ * Bump the pointer to the next structure.
+ *
+		 * On entry to the protocol layer, skb->data and
+		 * skb->h.raw point to the MAC and encapsulated data.
+ */
+
+ skb->h.raw = skb->data;
+
+ /*
+ * Fetch the packet protocol ID.
+ */
+
+ type = skb->protocol;
+
+ /*
+ * We got a packet ID. Now loop over the "known protocols"
+ * list. There are two lists. The ptype_all list of taps (normally empty)
+ * and the main protocol list which is hashed perfectly for normal protocols.
+ */
+
+ pt_prev = NULL;
+ for (ptype = ptype_all; ptype!=NULL; ptype=ptype->next)
+ {
+ if(!ptype->dev || ptype->dev == skb->dev) {
+ if(pt_prev) {
+ struct sk_buff *skb2=skb_clone(skb, GFP_ATOMIC);
+ if(skb2)
+ pt_prev->func(skb2,skb->dev, pt_prev);
+ }
+ pt_prev=ptype;
+ }
+ }
+
+ for (ptype = ptype_base[ntohs(type)&15]; ptype != NULL; ptype = ptype->next)
+ {
+ if (ptype->type == type && (!ptype->dev || ptype->dev==skb->dev))
+ {
+ /*
+ * We already have a match queued. Deliver
+ * to it and then remember the new match
+ */
+ if(pt_prev)
+ {
+ struct sk_buff *skb2;
+
+ skb2=skb_clone(skb, GFP_ATOMIC);
+
+ /*
+ * Kick the protocol handler. This should be fast
+ * and efficient code.
+ */
+
+ if(skb2)
+ pt_prev->func(skb2, skb->dev, pt_prev);
+ }
+ /* Remember the current last to do */
+ pt_prev=ptype;
+ }
+ } /* End of protocol list loop */
+
+ /*
+ * Is there a last item to send to ?
+ */
+
+ if(pt_prev)
+ pt_prev->func(skb, skb->dev, pt_prev);
+ /*
+		 * Has an unknown packet been received?
+ */
+
+ else
+ kfree_skb(skb, FREE_WRITE);
+ /*
+ * Again, see if we can transmit anything now.
+ * [Ought to take this out judging by tests it slows
+ * us down not speeds us up]
+ */
+#ifdef XMIT_EVERY
+ dev_transmit();
+#endif
+ } /* End of queue loop */
+
+ /*
+ * We have emptied the queue
+ */
+
+ /*
+ * One last output flush.
+ */
+
+#ifdef XMIT_AFTER
+ dev_transmit();
+#endif
+}
+
+
+/*
+ * This routine is called when a device driver (i.e. an
+ * interface) is ready to transmit a packet.
+ */
+
+void dev_tint(struct device *dev)
+{
+ int i;
+ unsigned long flags;
+ struct sk_buff_head * head;
+
+ /*
+ * aliases do not transmit (for now :) )
+ */
+
+#ifdef CONFIG_NET_ALIAS
+ if (net_alias_is(dev)) return;
+#endif
+ head = dev->buffs;
+ save_flags(flags);
+ cli();
+
+ /*
+ * Work the queues in priority order
+ */
+ for(i = 0;i < DEV_NUMBUFFS; i++,head++)
+ {
+
+ while (!skb_queue_empty(head)) {
+ struct sk_buff *skb;
+
+ skb = head->next;
+ __skb_unlink(skb, head);
+ /*
+ * Stop anyone freeing the buffer while we retransmit it
+ */
+ skb_device_lock(skb);
+ restore_flags(flags);
+ /*
+ * Feed them to the output stage and if it fails
+ * indicate they re-queue at the front.
+ */
+ do_dev_queue_xmit(skb,dev,-i - 1);
+ /*
+ * If we can take no more then stop here.
+ */
+ if (dev->tbusy)
+ return;
+ cli();
+ }
+ }
+ restore_flags(flags);
+}
+
+
+/*
+ * Perform a SIOCGIFCONF call. This structure will change
+ * size shortly, and there is nothing I can do about it.
+ * Thus we will need a 'compatibility mode'.
+ */
+
+static int dev_ifconf(char *arg)
+{
+ struct ifconf ifc;
+ struct ifreq ifr;
+ struct device *dev;
+ char *pos;
+ int len;
+ int err;
+
+ /*
+ * Fetch the caller's info block.
+ */
+
+ err=verify_area(VERIFY_WRITE, arg, sizeof(struct ifconf));
+ if(err)
+ return err;
+ memcpy_fromfs(&ifc, arg, sizeof(struct ifconf));
+ len = ifc.ifc_len;
+ pos = ifc.ifc_buf;
+
+ /*
+ * We now walk the device list filling each active device
+ * into the array.
+ */
+
+ err=verify_area(VERIFY_WRITE,pos,len);
+ if(err)
+ return err;
+
+ /*
+ * Loop over the interfaces, and write an info block for each.
+ */
+
+ for (dev = dev_base; dev != NULL; dev = dev->next)
+ {
+ if(!(dev->flags & IFF_UP)) /* Downed devices don't count */
+ continue;
+ /*
+ * Have we run out of space here ?
+ */
+
+ if (len < sizeof(struct ifreq))
+ break;
+
+ memset(&ifr, 0, sizeof(struct ifreq));
+ strcpy(ifr.ifr_name, dev->name);
+ (*(struct sockaddr_in *) &ifr.ifr_addr).sin_family = dev->family;
+ (*(struct sockaddr_in *) &ifr.ifr_addr).sin_addr.s_addr = dev->pa_addr;
+
+
+ /*
+ * Write this block to the caller's space.
+ */
+
+ memcpy_tofs(pos, &ifr, sizeof(struct ifreq));
+ pos += sizeof(struct ifreq);
+ len -= sizeof(struct ifreq);
+ }
+
+ /*
+ * All done. Write the updated control block back to the caller.
+ */
+
+ ifc.ifc_len = (pos - ifc.ifc_buf);
+ ifc.ifc_req = (struct ifreq *) ifc.ifc_buf;
+ memcpy_tofs(arg, &ifc, sizeof(struct ifconf));
+
+ /*
+ * Report how much was filled in
+ */
+
+ return(pos - arg);
+}
+
+
+/*
+ * This is invoked by the /proc filesystem handler to display a device
+ * in detail.
+ */
+
+#ifdef CONFIG_PROC_FS
+static int sprintf_stats(char *buffer, struct device *dev)
+{
+ struct enet_statistics *stats = (dev->get_stats ? dev->get_stats(dev): NULL);
+ int size;
+
+ if (stats)
+ size = sprintf(buffer, "%6s:%7d %4d %4d %4d %4d %8d %4d %4d %4d %5d %4d\n",
+ dev->name,
+ stats->rx_packets, stats->rx_errors,
+ stats->rx_dropped + stats->rx_missed_errors,
+ stats->rx_fifo_errors,
+ stats->rx_length_errors + stats->rx_over_errors
+ + stats->rx_crc_errors + stats->rx_frame_errors,
+ stats->tx_packets, stats->tx_errors, stats->tx_dropped,
+ stats->tx_fifo_errors, stats->collisions,
+ stats->tx_carrier_errors + stats->tx_aborted_errors
+ + stats->tx_window_errors + stats->tx_heartbeat_errors);
+ else
+ size = sprintf(buffer, "%6s: No statistics available.\n", dev->name);
+
+ return size;
+}
+
+/*
+ * Called from the PROCfs module. This now uses the new arbitrary sized /proc/net interface
+ * to create /proc/net/dev
+ */
+
+int dev_get_info(char *buffer, char **start, off_t offset, int length, int dummy)
+{
+ int len=0;
+ off_t begin=0;
+ off_t pos=0;
+ int size;
+
+ struct device *dev;
+
+
+ size = sprintf(buffer, "Inter-| Receive | Transmit\n"
+ " face |packets errs drop fifo frame|packets errs drop fifo colls carrier\n");
+
+ pos+=size;
+ len+=size;
+
+
+ for (dev = dev_base; dev != NULL; dev = dev->next)
+ {
+ size = sprintf_stats(buffer+len, dev);
+ len+=size;
+ pos=begin+len;
+
+ if(pos<offset)
+ {
+ len=0;
+ begin=pos;
+ }
+ if(pos>offset+length)
+ break;
+ }
+
+ *start=buffer+(offset-begin); /* Start of wanted data */
+ len-=(offset-begin); /* Start slop */
+ if(len>length)
+ len=length; /* Ending slop */
+ return len;
+}
+#endif /* CONFIG_PROC_FS */
+
+
+#ifdef CONFIG_NET_RADIO
+#ifdef CONFIG_PROC_FS
+
+/*
+ * Print one entry of /proc/net/wireless
+ * This is a clone of /proc/net/dev (just above)
+ */
+static int
+sprintf_wireless_stats(char * buffer,
+ struct device * dev)
+{
+ /* Get stats from the driver */
+ struct iw_statistics *stats = (dev->get_wireless_stats ?
+ dev->get_wireless_stats(dev) :
+ (struct iw_statistics *) NULL);
+ int size;
+
+ if(stats != (struct iw_statistics *) NULL)
+ size = sprintf(buffer,
+ "%6s: %02x %3d%c %3d%c %3d%c %5d %5d %5d\n",
+ dev->name,
+ stats->status,
+ stats->qual.qual,
+ stats->qual.updated & 1 ? '.' : ' ',
+ stats->qual.level,
+ stats->qual.updated & 2 ? '.' : ' ',
+ stats->qual.noise,
+ stats->qual.updated & 3 ? '.' : ' ',
+ stats->discard.nwid,
+ stats->discard.code,
+ stats->discard.misc);
+ else
+ size = 0;
+
+ return size;
+}
+
+/*
+ * Print info for /proc/net/wireless (print all entries)
+ * This is a clone of /proc/net/dev (just above)
+ */
+int
+dev_get_wireless_info(char * buffer,
+ char ** start,
+ off_t offset,
+ int length,
+ int dummy)
+{
+ int len = 0;
+ off_t begin = 0;
+ off_t pos = 0;
+ int size;
+
+ struct device * dev;
+
+ size = sprintf(buffer,
+ "Inter-|sta| Quality | Discarded packets\n"
+ " face |tus|link level noise| nwid crypt misc\n");
+
+ pos+=size;
+ len+=size;
+
+
+ for(dev = dev_base; dev != NULL; dev = dev->next)
+ {
+ size = sprintf_wireless_stats(buffer+len, dev);
+ len+=size;
+ pos=begin+len;
+
+ if(pos < offset)
+ {
+ len=0;
+ begin=pos;
+ }
+ if(pos > offset + length)
+ break;
+ }
+
+ *start = buffer + (offset - begin); /* Start of wanted data */
+ len -= (offset - begin); /* Start slop */
+ if(len > length)
+ len = length; /* Ending slop */
+
+ return len;
+}
+#endif /* CONFIG_PROC_FS */
+#endif /* CONFIG_NET_RADIO */
+
+
+/*
+ * This checks bitmasks for the ioctl calls for devices.
+ */
+
+static inline int bad_mask(unsigned long mask, unsigned long addr)
+{
+ if (addr & (mask = ~mask))
+ return 1;
+ mask = ntohl(mask);
+ if (mask & (mask+1))
+ return 1;
+ return 0;
+}
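
The check works because, after mask = ~mask, a contiguous netmask leaves a value of the form 2^k - 1, and x & (x + 1) is zero exactly for such values (the first test merely rejects addresses with host bits set). Two worked cases:

	bad_mask (htonl (0xffffff00UL), 0);	/* 255.255.255.0: ~mask = 0x000000ff, 0xff & 0x100 == 0, returns 0 (valid) */
	bad_mask (htonl (0xffff00ffUL), 0);	/* 255.255.0.255: ~mask = 0x0000ff00, 0xff00 & 0xff01 != 0, returns 1 (bad) */
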
+
+/*
+ * Perform the SIOCxIFxxx calls.
+ *
+ * The socket layer has seen an ioctl the address family thinks is
+ * for the device. At this point we get invoked to make a decision
+ */
+
+static int dev_ifsioc(void *arg, unsigned int getset)
+{
+ struct ifreq ifr;
+ struct device *dev;
+ int ret;
+
+ /*
+ * Fetch the caller's info block into kernel space
+ */
+
+ int err=verify_area(VERIFY_WRITE, arg, sizeof(struct ifreq));
+ if(err)
+ return err;
+
+ memcpy_fromfs(&ifr, arg, sizeof(struct ifreq));
+
+ /*
+ * See which interface the caller is talking about.
+ */
+
+ /*
+ *
+ * net_alias_dev_get(): dev_get() with added alias naming magic.
+ * only allow alias creation/deletion if (getset==SIOCSIFADDR)
+ *
+ */
+
+#ifdef CONFIG_KERNELD
+ dev_load(ifr.ifr_name);
+#endif
+
+#ifdef CONFIG_NET_ALIAS
+ if ((dev = net_alias_dev_get(ifr.ifr_name, getset == SIOCSIFADDR, &err, NULL, NULL)) == NULL)
+ return(err);
+#else
+ if ((dev = dev_get(ifr.ifr_name)) == NULL)
+ return(-ENODEV);
+#endif
+ switch(getset)
+ {
+ case SIOCGIFFLAGS: /* Get interface flags */
+ ifr.ifr_flags = (dev->flags & ~IFF_SOFTHEADERS);
+ goto rarok;
+
+ case SIOCSIFFLAGS: /* Set interface flags */
+ {
+ int old_flags = dev->flags;
+
+ if(securelevel>0)
+ ifr.ifr_flags&=~IFF_PROMISC;
+ /*
+ * We are not allowed to potentially close/unload
+ * a device until we get this lock.
+ */
+
+ dev_lock_wait();
+
+ /*
+ * Set the flags on our device.
+ */
+
+ dev->flags = (ifr.ifr_flags & (
+ IFF_BROADCAST | IFF_DEBUG | IFF_LOOPBACK |
+ IFF_POINTOPOINT | IFF_NOTRAILERS | IFF_RUNNING |
+ IFF_NOARP | IFF_PROMISC | IFF_ALLMULTI | IFF_SLAVE | IFF_MASTER
+ | IFF_MULTICAST)) | (dev->flags & (IFF_SOFTHEADERS|IFF_UP));
+ /*
+ * Load in the correct multicast list now the flags have changed.
+ */
+
+ dev_mc_upload(dev);
+
+ /*
+			 * Have we downed the interface? We handle IFF_UP ourselves
+ * according to user attempts to set it, rather than blindly
+ * setting it.
+ */
+
+ if ((old_flags^ifr.ifr_flags)&IFF_UP) /* Bit is different ? */
+ {
+ if(old_flags&IFF_UP) /* Gone down */
+ ret=dev_close(dev);
+ else /* Come up */
+ {
+ ret=dev_open(dev);
+ if(ret<0)
+ dev->flags&=~IFF_UP; /* Open failed */
+ }
+ }
+ else
+ ret=0;
+ /*
+ * Load in the correct multicast list now the flags have changed.
+ */
+
+ dev_mc_upload(dev);
+ }
+ break;
+
+ case SIOCGIFADDR: /* Get interface address (and family) */
+ if(ifr.ifr_addr.sa_family==AF_UNSPEC)
+ {
+ memcpy(ifr.ifr_hwaddr.sa_data,dev->dev_addr, MAX_ADDR_LEN);
+ ifr.ifr_hwaddr.sa_family=dev->type;
+ goto rarok;
+ }
+ else
+ {
+ (*(struct sockaddr_in *)
+ &ifr.ifr_addr).sin_addr.s_addr = dev->pa_addr;
+ (*(struct sockaddr_in *)
+ &ifr.ifr_addr).sin_family = dev->family;
+ (*(struct sockaddr_in *)
+ &ifr.ifr_addr).sin_port = 0;
+ }
+ goto rarok;
+
+ case SIOCSIFADDR: /* Set interface address (and family) */
+
+ /*
+ * BSDism. SIOCSIFADDR family=AF_UNSPEC sets the
+ * physical address. We can cope with this now.
+ */
+
+ if(ifr.ifr_addr.sa_family==AF_UNSPEC)
+ {
+ if(dev->set_mac_address==NULL)
+ return -EOPNOTSUPP;
+ if(securelevel>0)
+ return -EPERM;
+ ret=dev->set_mac_address(dev,&ifr.ifr_addr);
+ }
+ else
+ {
+ u32 new_pa_addr = (*(struct sockaddr_in *)
+ &ifr.ifr_addr).sin_addr.s_addr;
+ u16 new_family = ifr.ifr_addr.sa_family;
+
+ if (new_family == dev->family &&
+ new_pa_addr == dev->pa_addr) {
+ ret =0;
+ break;
+ }
+ if (dev->flags & IFF_UP)
+ notifier_call_chain(&netdev_chain, NETDEV_DOWN, dev);
+
+ /*
+ * if dev is an alias, must rehash to update
+ * address change
+ */
+
+#ifdef CONFIG_NET_ALIAS
+ if (net_alias_is(dev))
+ net_alias_dev_rehash(dev ,&ifr.ifr_addr);
+#endif
+ dev->pa_addr = new_pa_addr;
+ dev->family = new_family;
+
+#ifdef CONFIG_INET
+			/* This is naughty. When net-032e comes out it wants moving into the net032
+			   code, not the kernel. Till then it can sit here (SIGH). */
+ if (!dev->pa_mask)
+ dev->pa_mask = ip_get_mask(dev->pa_addr);
+#endif
+ if (!dev->pa_brdaddr)
+ dev->pa_brdaddr = dev->pa_addr | ~dev->pa_mask;
+ if (dev->flags & IFF_UP)
+ notifier_call_chain(&netdev_chain, NETDEV_UP, dev);
+ ret = 0;
+ }
+ break;
+
+ case SIOCGIFBRDADDR: /* Get the broadcast address */
+ (*(struct sockaddr_in *)
+ &ifr.ifr_broadaddr).sin_addr.s_addr = dev->pa_brdaddr;
+ (*(struct sockaddr_in *)
+ &ifr.ifr_broadaddr).sin_family = dev->family;
+ (*(struct sockaddr_in *)
+ &ifr.ifr_broadaddr).sin_port = 0;
+ goto rarok;
+
+ case SIOCSIFBRDADDR: /* Set the broadcast address */
+ dev->pa_brdaddr = (*(struct sockaddr_in *)
+ &ifr.ifr_broadaddr).sin_addr.s_addr;
+ ret = 0;
+ break;
+
+ case SIOCGIFDSTADDR: /* Get the destination address (for point-to-point links) */
+ (*(struct sockaddr_in *)
+ &ifr.ifr_dstaddr).sin_addr.s_addr = dev->pa_dstaddr;
+ (*(struct sockaddr_in *)
+ &ifr.ifr_dstaddr).sin_family = dev->family;
+ (*(struct sockaddr_in *)
+ &ifr.ifr_dstaddr).sin_port = 0;
+ goto rarok;
+
+ case SIOCSIFDSTADDR: /* Set the destination address (for point-to-point links) */
+ dev->pa_dstaddr = (*(struct sockaddr_in *)
+ &ifr.ifr_dstaddr).sin_addr.s_addr;
+ ret = 0;
+ break;
+
+ case SIOCGIFNETMASK: /* Get the netmask for the interface */
+ (*(struct sockaddr_in *)
+ &ifr.ifr_netmask).sin_addr.s_addr = dev->pa_mask;
+ (*(struct sockaddr_in *)
+ &ifr.ifr_netmask).sin_family = dev->family;
+ (*(struct sockaddr_in *)
+ &ifr.ifr_netmask).sin_port = 0;
+ goto rarok;
+
+ case SIOCSIFNETMASK: /* Set the netmask for the interface */
+ {
+ unsigned long mask = (*(struct sockaddr_in *)
+ &ifr.ifr_netmask).sin_addr.s_addr;
+ ret = -EINVAL;
+ /*
+ * The mask we set must be legal.
+ */
+ if (bad_mask(mask,0))
+ break;
+ dev->pa_mask = mask;
+ ret = 0;
+ }
+ break;
+
+ case SIOCGIFMETRIC: /* Get the metric on the interface (currently unused) */
+
+ ifr.ifr_metric = dev->metric;
+ goto rarok;
+
+ case SIOCSIFMETRIC: /* Set the metric on the interface (currently unused) */
+ dev->metric = ifr.ifr_metric;
+ ret=0;
+ break;
+
+ case SIOCGIFMTU: /* Get the MTU of a device */
+ ifr.ifr_mtu = dev->mtu;
+ goto rarok;
+
+ case SIOCSIFMTU: /* Set the MTU of a device */
+
+ if (dev->change_mtu)
+ ret = dev->change_mtu(dev, ifr.ifr_mtu);
+ else
+ {
+ /*
+ * MTU must be positive.
+ */
+
+ if(ifr.ifr_mtu<68)
+ return -EINVAL;
+
+ dev->mtu = ifr.ifr_mtu;
+ ret = 0;
+ }
+ break;
+
+ case SIOCGIFMEM: /* Get the per device memory space. We can add this but currently
+ do not support it */
+ ret = -EINVAL;
+ break;
+
+ case SIOCSIFMEM: /* Set the per device memory buffer space. Not applicable in our case */
+ ret = -EINVAL;
+ break;
+
+ case SIOCGIFHWADDR:
+ memcpy(ifr.ifr_hwaddr.sa_data,dev->dev_addr, MAX_ADDR_LEN);
+ ifr.ifr_hwaddr.sa_family=dev->type;
+ goto rarok;
+
+ case SIOCSIFHWADDR:
+ if(dev->set_mac_address==NULL)
+ return -EOPNOTSUPP;
+ if(securelevel > 0)
+ return -EPERM;
+ if(ifr.ifr_hwaddr.sa_family!=dev->type)
+ return -EINVAL;
+ ret=dev->set_mac_address(dev,&ifr.ifr_hwaddr);
+ break;
+
+ case SIOCGIFMAP:
+ ifr.ifr_map.mem_start=dev->mem_start;
+ ifr.ifr_map.mem_end=dev->mem_end;
+ ifr.ifr_map.base_addr=dev->base_addr;
+ ifr.ifr_map.irq=dev->irq;
+ ifr.ifr_map.dma=dev->dma;
+ ifr.ifr_map.port=dev->if_port;
+ goto rarok;
+
+ case SIOCSIFMAP:
+ if(dev->set_config==NULL)
+ return -EOPNOTSUPP;
+ return dev->set_config(dev,&ifr.ifr_map);
+
+ case SIOCADDMULTI:
+ if(dev->set_multicast_list==NULL)
+ return -EINVAL;
+ if(ifr.ifr_hwaddr.sa_family!=AF_UNSPEC)
+ return -EINVAL;
+ dev_mc_add(dev,ifr.ifr_hwaddr.sa_data, dev->addr_len, 1);
+ return 0;
+
+ case SIOCDELMULTI:
+ if(dev->set_multicast_list==NULL)
+ return -EINVAL;
+ if(ifr.ifr_hwaddr.sa_family!=AF_UNSPEC)
+ return -EINVAL;
+ dev_mc_delete(dev,ifr.ifr_hwaddr.sa_data,dev->addr_len, 1);
+ return 0;
+ /*
+ * Unknown or private ioctl
+ */
+
+ default:
+ if((getset >= SIOCDEVPRIVATE) &&
+ (getset <= (SIOCDEVPRIVATE + 15))) {
+ if(dev->do_ioctl==NULL)
+ return -EOPNOTSUPP;
+ ret=dev->do_ioctl(dev, &ifr, getset);
+ memcpy_tofs(arg,&ifr,sizeof(struct ifreq));
+ break;
+ }
+
+#ifdef CONFIG_NET_RADIO
+ if((getset >= SIOCIWFIRST) &&
+ (getset <= SIOCIWLAST))
+ {
+ if(dev->do_ioctl==NULL)
+ return -EOPNOTSUPP;
+ /* Perform the ioctl */
+ ret=dev->do_ioctl(dev, &ifr, getset);
+ /* If return args... */
+ if(IW_IS_GET(getset))
+ memcpy_tofs(arg, &ifr,
+ sizeof(struct ifreq));
+ break;
+ }
+#endif /* CONFIG_NET_RADIO */
+
+ ret = -EINVAL;
+ }
+ return(ret);
+/*
+ * The load of calls that return an ifreq and ok (saves memory).
+ */
+rarok:
+ memcpy_tofs(arg, &ifr, sizeof(struct ifreq));
+ return 0;
+}
+
+
+/*
+ * This function handles all "interface"-type I/O control requests. The actual
+ * 'doing' part of this is dev_ifsioc above.
+ */
+
+int dev_ioctl(unsigned int cmd, void *arg)
+{
+ switch(cmd)
+ {
+ case SIOCGIFCONF:
+ (void) dev_ifconf((char *) arg);
+ return 0;
+
+ /*
+ * Ioctl calls that can be done by all.
+ */
+
+ case SIOCGIFFLAGS:
+ case SIOCGIFADDR:
+ case SIOCGIFDSTADDR:
+ case SIOCGIFBRDADDR:
+ case SIOCGIFNETMASK:
+ case SIOCGIFMETRIC:
+ case SIOCGIFMTU:
+ case SIOCGIFMEM:
+ case SIOCGIFHWADDR:
+ case SIOCGIFSLAVE:
+ case SIOCGIFMAP:
+ return dev_ifsioc(arg, cmd);
+
+ /*
+ * Ioctl calls requiring the power of a superuser
+ */
+
+ case SIOCSIFFLAGS:
+ case SIOCSIFADDR:
+ case SIOCSIFDSTADDR:
+ case SIOCSIFBRDADDR:
+ case SIOCSIFNETMASK:
+ case SIOCSIFMETRIC:
+ case SIOCSIFMTU:
+ case SIOCSIFMEM:
+ case SIOCSIFHWADDR:
+ case SIOCSIFMAP:
+ case SIOCSIFSLAVE:
+ case SIOCADDMULTI:
+ case SIOCDELMULTI:
+ if (!suser())
+ return -EPERM;
+ return dev_ifsioc(arg, cmd);
+
+ case SIOCSIFLINK:
+ return -EINVAL;
+
+ /*
+ * Unknown or private ioctl.
+ */
+
+ default:
+ if((cmd >= SIOCDEVPRIVATE) &&
+ (cmd <= (SIOCDEVPRIVATE + 15))) {
+ return dev_ifsioc(arg, cmd);
+ }
+#ifdef CONFIG_NET_RADIO
+ if((cmd >= SIOCIWFIRST) &&
+ (cmd <= SIOCIWLAST))
+ {
+ if((IW_IS_SET(cmd)) && (!suser()))
+ return -EPERM;
+ return dev_ifsioc(arg, cmd);
+ }
+#endif /* CONFIG_NET_RADIO */
+ return -EINVAL;
+ }
+}
+#endif /* !MACH */
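
For reference only: on a plain (non-Mach) Linux build, the SIOCxIFxxx path above is what an ordinary BSD-socket program exercises from user space. A hypothetical illustration of a call that would arrive via dev_ioctl() and dev_ifsioc(); error handling is omitted.

	#include <sys/ioctl.h>
	#include <sys/socket.h>
	#include <net/if.h>
	#include <string.h>
	#include <unistd.h>

	static short
	get_eth0_flags (void)
	{
	  struct ifreq ifr;
	  int s = socket (AF_INET, SOCK_DGRAM, 0);

	  strcpy (ifr.ifr_name, "eth0");
	  ioctl (s, SIOCGIFFLAGS, &ifr);	/* routed through dev_ioctl() to dev_ifsioc() */
	  close (s);
	  return ifr.ifr_flags;
	}
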
+
+
+/*
+ * Initialize the DEV module. At boot time this walks the device list and
+ * unhooks any devices that fail to initialise (normally hardware not
+ * present) and leaves us with a valid list of present and active devices.
+ *
+ */
+extern int lance_init(void);
+extern int pi_init(void);
+extern void sdla_setup(void);
+extern int dlci_setup(void);
+
+int net_dev_init(void)
+{
+ struct device *dev, **dp;
+
+ /*
+ * Initialise the packet receive queue.
+ */
+
+#ifndef MACH
+ skb_queue_head_init(&backlog);
+#endif
+
+ /*
+ * The bridge has to be up before the devices
+ */
+
+#ifdef CONFIG_BRIDGE
+ br_init();
+#endif
+
+ /*
+ * This is Very Ugly(tm).
+ *
+ * Some devices want to be initialized early..
+ */
+#if defined(CONFIG_PI)
+ pi_init();
+#endif
+#if defined(CONFIG_PT)
+ pt_init();
+#endif
+#if defined(CONFIG_DLCI)
+ dlci_setup();
+#endif
+#if defined(CONFIG_SDLA)
+ sdla_setup();
+#endif
+ /*
+ * SLHC if present needs attaching so other people see it
+ * even if not opened.
+ */
+#if (defined(CONFIG_SLIP) && defined(CONFIG_SLIP_COMPRESSED)) \
+ || defined(CONFIG_PPP) \
+ || (defined(CONFIG_ISDN) && defined(CONFIG_ISDN_PPP))
+ slhc_install();
+#endif
+
+ /*
+ * Add the devices.
+ * If the call to dev->init fails, the dev is removed
+ * from the chain disconnecting the device until the
+ * next reboot.
+ */
+
+ dp = &dev_base;
+ while ((dev = *dp) != NULL)
+ {
+ int i;
+ for (i = 0; i < DEV_NUMBUFFS; i++) {
+ skb_queue_head_init(dev->buffs + i);
+ }
+
+ if (dev->init && dev->init(dev))
+ {
+ /*
+ * It failed to come up. Unhook it.
+ */
+ *dp = dev->next;
+ }
+ else
+ {
+ dp = &dev->next;
+ }
+ }
+
+#ifdef CONFIG_PROC_FS
+ proc_net_register(&(struct proc_dir_entry) {
+ PROC_NET_DEV, 3, "dev",
+ S_IFREG | S_IRUGO, 1, 0, 0,
+ 0, &proc_net_inode_operations,
+ dev_get_info
+ });
+#endif
+
+#ifdef CONFIG_NET_RADIO
+#ifdef CONFIG_PROC_FS
+ proc_net_register(&(struct proc_dir_entry) {
+ PROC_NET_WIRELESS, 8, "wireless",
+ S_IFREG | S_IRUGO, 1, 0, 0,
+ 0, &proc_net_inode_operations,
+ dev_get_wireless_info
+ });
+#endif /* CONFIG_PROC_FS */
+#endif /* CONFIG_NET_RADIO */
+
+ /*
+ * Initialise net_alias engine
+ *
+ * - register net_alias device notifier
+ * - register proc entries: /proc/net/alias_types
+ * /proc/net/aliases
+ */
+
+#ifdef CONFIG_NET_ALIAS
+ net_alias_init();
+#endif
+
+ init_bh(NET_BH, net_bh);
+ return 0;
+}