summaryrefslogtreecommitdiff
path: root/debian
diff options
context:
space:
mode:
authorGuillem Jover <guillem@debian.org>2004-09-13 18:12:56 +0000
committerGuillem Jover <guillem@debian.org>2004-09-13 18:12:56 +0000
commit0192c15bb6c6360c0288ef3507253e8fcdb51b30 (patch)
treeccd1d57af4adc6222ad994981fd3880cd2e5ffa6 /debian
parent32ea8f1e5ffc37e7737a9ab03c6e44a99fcf2544 (diff)
Update NICs from Linux.
Thanks to Alfred M. Szmidt <ams@kemisten.nu> and Arief M. Utama <arief_mulya@yahoo.com>.
Diffstat (limited to 'debian')
-rw-r--r--debian/changelog3
-rw-r--r--debian/patches/nic-update.patch34693
2 files changed, 34696 insertions, 0 deletions
diff --git a/debian/changelog b/debian/changelog
index 4216aa6..e04c475 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -3,6 +3,9 @@ gnumach (1:20040229-2) unstable; urgency=low
* Split patches into debian/patches/.
* Allow to build natively on non GNU/Hurd systems.
* Enable i386, kfreebsd-i386 and knetbsd-i386 on all packages.
+ * Update NICs from Linux.
+ Thanks to Alfred M. Szmidt <ams@kemisten.nu> and
+ Arief M. Utama <arief_mulya@yahoo.com>.
-- Guillem Jover <guillem@debian.org> Mon, 13 Sep 2004 19:19:30 +0200
diff --git a/debian/patches/nic-update.patch b/debian/patches/nic-update.patch
new file mode 100644
index 0000000..569a22b
--- /dev/null
+++ b/debian/patches/nic-update.patch
@@ -0,0 +1,34693 @@
+2004-08-05 Arief M. Utama <arief_mulya@yahoo.com>
+
+ * linux/dev/drivers/net/eepro100.c (pci_tbl): Add PCI ID's from
+ linux-2.4.23.
+
+2004-02-29 Alfred M. Szmidt <ams@kemisten.nu>
+
+ * i386/linux/Makefile.in (linux-net-files): Added `cb_shim.c',
+ `hamachi.c', `intel-gige.c', `myson803.c', `natsemi.c', `ns820.c',
+ `pci-scan.c', `pci-serial.c', `pci-skeleton.c', `starfire.c',
+	`sundance.c' and `winbond-840.c'.
+ * i386/linux/Drivers.in (ns820, myson803, sundance,
+ winbond-840, hamachi): New drivers. Added `pci-scan' and
+ `pci-skeleton.o' to the network driver class.
+ * linux/dev/include/linux/modversions.h: New file.
+
+ * linux/src/drivers/net/3c59x.c, linux/src/drivers/net/eepro100.c,
+ linux/src/drivers/net/epic100.c, linux/src/drivers/net/ne2k-pci.c,
+ linux/src/drivers/net/rtl8139.c, linux/src/drivers/net/tulip.c,
+ linux/src/drivers/net/via-rhine.c,
+ linux/src/drivers/net/yellowfin.c: Updated files from netdrivers
+ 3.5 (http://www.scyld.com/network).
+ * linux/src/drivers/net/winbond-840.c,
+ linux/src/drivers/net/starfire.c,
+ linux/src/drivers/net/sundance.c, linux/src/drivers/net/ns820.c,
+ linux/src/drivers/net/pci-scan.c,
+ linux/src/drivers/net/pci-scan.h,
+ linux/src/drivers/net/pci-serial.c,
+ linux/src/drivers/net/pci-skeleton.c,
+ linux/src/drivers/net/hamachi.c,
+ linux/src/drivers/net/intel-gige.c,
+ linux/src/drivers/net/kern_compat.h,
+ linux/src/drivers/net/myson803.c, linux/src/drivers/net/natsemi.c,
+ linux/src/drivers/net/cb_shim.c: New files from netdrivers 3.5
+ package (http://www.scyld.com/network).
+
+
+Index: i386/linux/Drivers.in
+===================================================================
+RCS file: /cvsroot/hurd/gnumach/i386/linux/Attic/Drivers.in,v
+retrieving revision 1.4
+diff -u -r1.4 Drivers.in
+--- i386/linux/Drivers.in 27 May 2001 12:44:22 -0000 1.4
++++ i386/linux/Drivers.in 20 Aug 2004 10:32:51 -0000
+@@ -1,5 +1,5 @@
+ dnl Device driver options for i386
+-dnl Copyright 1997, 1999 Free Software Foundation, Inc.
++dnl Copyright 1997, 1999, 2004 Free Software Foundation, Inc.
+
+ dnl Permission to use, copy, modify and distribute this software and its
+ dnl documentation is hereby granted, provided that both the copyright
+@@ -99,7 +99,7 @@
+
+ AC_DRIVER_CLASS(scsi, CONFIG_SCSI, constants.o hosts.o scsi.o scsi_ioctl.o scsi_proc.o scsicam.o sd.o sd_ioctl.o sr.o sr_ioctl.o)
+
+-AC_DRIVER_CLASS(net, CONFIG_INET, auto_irq.o net.o Space.o dev.o net_init.o)
++AC_DRIVER_CLASS(net, CONFIG_INET, auto_irq.o net.o Space.o dev.o net_init.o pci-scan.o pci-skeleton.o)
+
+ dnl Strictly speaking, we could have a `linux' option too, but it's
+ dnl not possible to built a useful kernel without at least one Linux
+@@ -159,6 +159,36 @@
+
+ dnl Ethernet controllers
+
++dnl FIXME: Can't be enabled since it is a pcmcia driver, and we don't
++dnl have that kind of fluff.
++dnl
++dnl linux_DRIVER(cb_shim, CONFIG_CB_SHIM, cb_shim, net)
++
++dnl FIXME: Depends on L1_CACHE_BYTES being defined, usually in
++dnl asm-*/cache.h.
++dnl
++dnl linux_DRIVER(starfire, CONFIG_STARFIRE, starfire, net)
++
++linux_DRIVER(sundance, CONFIG_SUNDANCE, sundance, net)
++linux_DRIVER(winbond-840, CONFIG_WINBOND840, winbond-840, net)
++linux_DRIVER(hamachi, CONFIG_HAMACHI, hamachi, net)
++
++dnl FIXME: Getting the following while linking:
++dnl
++dnl ../../../gnumach/linux/src/drivers/net/ns820.c:463: multiple definition of `natsemi_drv_id'
++dnl natsemi.o(.data+0xe0):../../../gnumach/linux/src/drivers/net/natsemi.c:462: first defined here
++dnl pci-skeleton.o(.text+0x0): In function `skel_netdev_probe':
++dnl ../../../gnumach/linux/src/drivers/net/pci-skeleton.c:505: multiple definition of `skel_netdev_probe'
++dnl intel-gige.o(.text+0x0):../../../gnumach/linux/src/drivers/net/intel-gige.c:463: first defined here
++dnl
++dnl So these two are disabled because of that...
++dnl
++dnl linux_DRIVER(intel_gige, CONFIG_INTEL_GIGE, intel-gige, net)
++dnl linux_DRIVER(natsemi, CONFIG_NATSEMI, natsemi, net)
++
++linux_DRIVER(myson803, CONFIG_MYSON803, myson803, net)
++linux_DRIVER(ns820, CONFIG_NS820, ns820, net)
++
+ AC_DRIVER(ne2000, CONFIG_NE2000, ne.o 8390.o, net)
+ AC_DRIVER(el2, CONFIG_EL2, 3c503.o 8390.o, net)
+ linux_DRIVER(el3, EL3, 3c509, net)
+@@ -204,3 +234,7 @@
+ linux_DRIVER(tlan, TLAN, tlan, net)
+ linux_DRIVER(viarhine, VIA_RHINE, via-rhine, net)
+ AC_OUTPUT(Makefile)
++
++dnl Local Variables:
++dnl mode: autoconf
++dnl End:
+Index: i386/linux/Makefile.in
+===================================================================
+RCS file: /cvsroot/hurd/gnumach/i386/linux/Attic/Makefile.in,v
+retrieving revision 1.4
+diff -u -r1.4 Makefile.in
+--- i386/linux/Makefile.in 27 May 2001 12:44:22 -0000 1.4
++++ i386/linux/Makefile.in 20 Aug 2004 10:32:51 -0000
+@@ -1,5 +1,5 @@
+ # Makefile for Linux device drivers and the glue codes.
+-# Copyright 1998, 1999 Free Software Foundation, Inc.
++# Copyright 1998, 1999, 2004 Free Software Foundation, Inc.
+ #
+ # Permission to use, copy, modify and distribute this software and its
+ # documentation is hereby granted, provided that both the copyright
+@@ -62,13 +62,18 @@
+ vpath %.c $(linuxsrcdir)/dev/drivers/block
+ vpath %.c $(linuxsrcdir)/src/drivers/block
+
+-linux-net-files = auto_irq.c 3c501.c 3c503.c 3c505.c 3c507.c 3c509.c \
+- 3c59x.c 3c515.c 8390.c Space.c ac3200.c apricot.c at1700.c atp.c \
+- de4x5.c de600.c de620.c depca.c dev.c e2100.c eepro.c eepro100.c \
+- eexpress.c epic100.c eth16i.c ewrk3.c fmv18x.c hp-plus.c hp.c hp100.c \
+- lance.c ne.c ne2k-pci.c net_init.c ni52.c ni65.c pcnet32.c rtl8139.c \
+- seeq8005.c sk_g16.c smc-ultra.c smc-ultra32.c tlan.c tulip.c \
+- via-rhine.c wavelan.c wd.c yellowfin.c znet.c net.c
++linux-net-files = auto_irq.c 3c501.c 3c503.c 3c505.c 3c507.c 3c509.c \
++ 3c59x.c 3c515.c 8390.c Space.c ac3200.c apricot.c at1700.c \
++ atp.c de4x5.c de600.c de620.c depca.c dev.c e2100.c eepro.c \
++ eepro100.c eexpress.c epic100.c eth16i.c ewrk3.c fmv18x.c \
++ hp-plus.c hp.c hp100.c lance.c ne.c ne2k-pci.c net_init.c \
++ ni52.c ni65.c pcnet32.c rtl8139.c seeq8005.c sk_g16.c \
++ smc-ultra.c smc-ultra32.c tlan.c tulip.c via-rhine.c wavelan.c \
++	wd.c yellowfin.c znet.c net.c cb_shim.c hamachi.c \
++ intel-gige.c myson803.c natsemi.c ns820.c pci-scan.c \
++ pci-serial.c pci-skeleton.c starfire.c sundance.c \
++ winbond-840.c
++
+ vpath %.c $(linuxsrcdir)/dev/drivers/net
+ vpath %.c $(linuxsrcdir)/dev/net/core
+ vpath %.c $(linuxsrcdir)/src/drivers/net
+Index: linux/dev/drivers/net/eepro100.c
+===================================================================
+RCS file: /cvsroot/hurd/gnumach/linux/dev/drivers/net/Attic/eepro100.c,v
+retrieving revision 1.1
+diff -u -r1.1 eepro100.c
+--- linux/dev/drivers/net/eepro100.c 17 Aug 2001 23:33:35 -0000 1.1
++++ linux/dev/drivers/net/eepro100.c 20 Aug 2004 10:32:52 -0000
+@@ -358,6 +358,94 @@
+ u16 vendor_id, device_id;
+ int pci_index;
+ } static pci_tbl[] = {
++ { "Intel PCI EtherExpress Pro100 VE 82801CAM",
++ PCI_VENDOR_ID_INTEL, 0x1031,
++ 0
++ },
++ { "Intel PCI EtherExpress Pro100 VE 82801CAM",
++ PCI_VENDOR_ID_INTEL, 0x1032,
++ 0
++ },
++ { "Intel PCI EtherExpress Pro100 VM 82801CAM",
++ PCI_VENDOR_ID_INTEL, 0x1033,
++ 0
++ },
++ { "Intel PCI EtherExpress Pro100 VM 82801CAM",
++ PCI_VENDOR_ID_INTEL, 0x1034,
++ 0
++ },
++ { "Intel PCI Phoneline Network Connection 82562EH",
++ PCI_VENDOR_ID_INTEL, 0x1035,
++ 0
++ },
++ { "Intel PCI Phoneline Network Connection 82562EH",
++ PCI_VENDOR_ID_INTEL, 0x1036,
++ 0
++ },
++ { "Intel PCI LAN Controller 82801CAM",
++ PCI_VENDOR_ID_INTEL, 0x1037,
++ 0
++ },
++ { "Intel PCI EtherExpress Pro100 VM/KM 82801CAM",
++ PCI_VENDOR_ID_INTEL, 0x1038,
++ 0
++ },
++ { "Intel PCI LAN Controller with 82562ET/EZ PHY 82801DB",
++ PCI_VENDOR_ID_INTEL, 0x1039,
++ 0
++ },
++ { "Intel PCI LAN Controller with 82562ET/EZ (CNR) PHY 82801DB",
++ PCI_VENDOR_ID_INTEL, 0x103A,
++ 0
++ },
++ { "Intel PCI LAN Controller with 82562EM/EX PHY",
++ PCI_VENDOR_ID_INTEL, 0x103B,
++ 0
++ },
++ { "Intel PCI LAN Controller with 82562EM/EX (CNR) PHY 82801DB",
++ PCI_VENDOR_ID_INTEL, 0x103C,
++ 0
++ },
++ { "Intel PCI EtherExpress Pro100 VE 82801DB",
++ PCI_VENDOR_ID_INTEL, 0x103D,
++ 0
++ },
++ { "Intel PCI EtherExpress Pro100 VM 82801DB",
++ PCI_VENDOR_ID_INTEL, 0x103E,
++ 0
++ },
++ { "Intel PCI EtherExpress Pro100 VE 82801EB/ER",
++ PCI_VENDOR_ID_INTEL, 0x1050,
++ 0
++ },
++ { "Intel PCI Fast Ethernet/CardBus Controller 82551QM",
++ PCI_VENDOR_ID_INTEL, 0x1059,
++ 0
++ },
++ { "Intel PCI EtherExpress Pro100 82865",
++ PCI_VENDOR_ID_INTEL, 0x1227,
++ 0
++ },
++ { "Intel PCI EtherExpress Pro100 Smart (i960RP/RD)",
++ PCI_VENDOR_ID_INTEL, 0x1228,
++ 0
++ },
++ { "Intel PCI LAN0 Controller 82801E",
++ PCI_VENDOR_ID_INTEL, 0x2459,
++ 0
++ },
++ { "Intel PCI LAN1 Controller 82801E",
++ PCI_VENDOR_ID_INTEL, 0x245D,
++ 0
++ },
++ { "Intel PCI to PCI Bridge EtherExpress Pro100 Server Adapter",
++ PCI_VENDOR_ID_INTEL, 0x5200,
++ 0
++ },
++ { "Intel PCI EtherExpress Pro100 Server Adapter",
++ PCI_VENDOR_ID_INTEL, 0x5201,
++ 0
++ },
+ { "Intel PCI EtherExpress Pro100 82557",
+ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82557,
+ 0
+Index: linux/dev/include/linux/modversions.h
+===================================================================
+RCS file: linux/dev/include/linux/modversions.h
+diff -N linux/dev/include/linux/modversions.h
+--- /dev/null 1 Jan 1970 00:00:00 -0000
++++ linux/dev/include/linux/modversions.h 20 Aug 2004 10:32:52 -0000
+@@ -0,0 +1 @@
++/* Dummy file. */
+Index: linux/src/drivers/net/3c59x.c
+===================================================================
+RCS file: /cvsroot/hurd/gnumach/linux/src/drivers/net/Attic/3c59x.c,v
+retrieving revision 1.3
+diff -u -r1.3 3c59x.c
+--- linux/src/drivers/net/3c59x.c 8 Oct 1999 13:50:16 -0000 1.3
++++ linux/src/drivers/net/3c59x.c 20 Aug 2004 10:32:53 -0000
+@@ -1,157 +1,181 @@
+ /* EtherLinkXL.c: A 3Com EtherLink PCI III/XL ethernet driver for linux. */
+ /*
+- Written 1996-1999 by Donald Becker.
++ Written 1996-2003 by Donald Becker.
+
+- This software may be used and distributed according to the terms
+- of the GNU Public License, incorporated herein by reference.
++ This software may be used and distributed according to the terms of
++ the GNU General Public License (GPL), incorporated herein by reference.
++ Drivers based on or derived from this code fall under the GPL and must
++ retain the authorship, copyright and license notice. This file is not
++ a complete program and may only be used when the entire operating
++ system is licensed under the GPL.
+
+ This driver is for the 3Com "Vortex" and "Boomerang" series ethercards.
+ Members of the series include Fast EtherLink 3c590/3c592/3c595/3c597
+ and the EtherLink XL 3c900 and 3c905 cards.
+
+- The author may be reached as becker@CESDIS.gsfc.nasa.gov, or C/O
+- Center of Excellence in Space Data and Information Sciences
+- Code 930.5, Goddard Space Flight Center, Greenbelt MD 20771
++ The original author may be reached as becker@scyld.com, or C/O
++ Scyld Computing Corporation
++ 410 Severn Ave., Suite 210
++ Annapolis MD 21403
++
++ Support information and updates are available at
++ http://www.scyld.com/network/vortex.html
+ */
+
+-static char *version =
+-"3c59x.c:v0.99L 5/28/99 Donald Becker http://cesdis.gsfc.nasa.gov/linux/drivers/vortex.html\n";
++static const char versionA[] =
++"3c59x.c:v0.99Za 4/17/2003 Donald Becker, becker@scyld.com\n";
++static const char versionB[] =
++" http://www.scyld.com/network/vortex.html\n";
++
++/* The user-configurable values.
++ These may be modified when a driver module is loaded.*/
++
++/* Message enable level: 0..31 = no..all messages. See NETIF_MSG docs. */
++static int debug = 2;
++
++/* This driver uses 'options' to pass the media type, full-duplex flag, etc.
++ See media_tbl[] and the web page for the possible types.
++ There is no limit on card count, MAX_UNITS limits only module options. */
++#define MAX_UNITS 8
++static int options[MAX_UNITS] = { -1, -1, -1, -1, -1, -1, -1, -1,};
++static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
++
++/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
++static int max_interrupt_work = 20;
+
+-/* "Knobs" that adjust features and parameters. */
+ /* Set the copy breakpoint for the copy-only-tiny-frames scheme.
+ Setting to > 1512 effectively disables this feature. */
+ static const int rx_copybreak = 200;
+-/* Allow setting MTU to a larger size, bypassing the normal ethernet setup. */
+-static const int mtu = 1500;
+-/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
+-static int max_interrupt_work = 20;
+
+-/* Put out somewhat more debugging messages. (0: no msg, 1 minimal .. 6). */
+-#define vortex_debug debug
+-#ifdef VORTEX_DEBUG
+-static int vortex_debug = VORTEX_DEBUG;
+-#else
+-static int vortex_debug = 1;
+-#endif
++/* Allow setting MTU to a larger size, bypassing the normal Ethernet setup. */
++static const int mtu = 1500;
+
+-/* Some values here only for performance evaluation and path-coverage
+- debugging. */
+-static int rx_nocopy = 0, rx_copy = 0, queued_packet = 0, rx_csumhits;
++/* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
++ Cyclones and later have a 64 or 256 element hash table based on the
++ Ethernet CRC. */
++static int multicast_filter_limit = 64;
++
++/* Operational parameters that are set at compile time. */
++
++/* Keep the ring sizes a power of two for compile efficiency.
++ The compiler will convert <unsigned>'%'<2^N> into a bit mask.
++ Making the Tx ring too large decreases the effectiveness of channel
++ bonding and packet priority.
++ Do not increase the Tx ring beyond 256.
++ Large receive rings waste memory and confound network buffer limits.
++ These values have been carefully studied: changing these might mask a
++ problem, it won't fix it.
++ */
++#define TX_RING_SIZE 16
++#define TX_QUEUE_LEN 10 /* Limit ring entries actually used. */
++#define RX_RING_SIZE 32
+
+-/* A few values that may be tweaked. */
++/* Operational parameters that usually are not changed. */
+ /* Time in jiffies before concluding the transmitter is hung. */
+-#define TX_TIMEOUT (2*HZ)
++#define TX_TIMEOUT (6*HZ)
+
+-/* Keep the ring sizes a power of two for efficiency. */
+-#define TX_RING_SIZE 16
+-#define RX_RING_SIZE 32
+-#define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/
++/* Allocation size of Rx buffers with normal sized Ethernet frames.
++ Do not change this value without good reason. The 1536 value is not
++ a limit, or directly related to MTU, but rather a way to keep a
++ consistent allocation size among drivers.
++ */
++#define PKT_BUF_SZ 1536
+
+-#ifndef __OPTIMIZE__
++#ifndef __KERNEL__
++#define __KERNEL__
++#endif
++#if !defined(__OPTIMIZE__)
+ #warning You must compile this file with the correct options!
+ #warning See the last lines of the source file.
+ #error You must compile this driver with "-O".
+ #endif
+
+ #include <linux/config.h>
+-#include <linux/version.h>
+-#ifdef MODULE
+-#ifdef MODVERSIONS
+-#include <linux/modversions.h>
++#if defined(CONFIG_SMP) && ! defined(__SMP__)
++#define __SMP__
+ #endif
++#if defined(MODULE) && defined(CONFIG_MODVERSIONS) && ! defined(MODVERSIONS)
++#define MODVERSIONS
++#endif
++
++#include <linux/version.h>
++#if LINUX_VERSION_CODE < 0x20300 && defined(MODVERSIONS)
+ #include <linux/module.h>
++#include <linux/modversions.h>
+ #else
+-#define MOD_INC_USE_COUNT
+-#define MOD_DEC_USE_COUNT
++#include <linux/modversions.h>
++#include <linux/module.h>
+ #endif
+
+ #include <linux/kernel.h>
+-#include <linux/sched.h>
+ #include <linux/string.h>
+ #include <linux/timer.h>
+ #include <linux/errno.h>
+-#include <linux/in.h>
+ #include <linux/ioport.h>
++#if LINUX_VERSION_CODE >= 0x20400
++#include <linux/slab.h>
++#else
+ #include <linux/malloc.h>
++#endif
+ #include <linux/interrupt.h>
+ #include <linux/pci.h>
+ #include <linux/netdevice.h>
+ #include <linux/etherdevice.h>
+ #include <linux/skbuff.h>
+-#if LINUX_VERSION_CODE < 0x20155
+-#include <linux/bios32.h>
+-#endif
+-#include <asm/irq.h> /* For NR_IRQS only. */
++#include <asm/irq.h>
++#include <asm/byteorder.h>
+ #include <asm/bitops.h>
+ #include <asm/io.h>
+
+-/* Kernel compatibility defines, some common to David Hinds' PCMCIA package.
+- This is only in the support-all-kernels source code. */
++#ifdef INLINE_PCISCAN
++#include "k_compat.h"
++#else
++#include "pci-scan.h"
++#include "kern_compat.h"
++#endif
+
+-#define RUN_AT(x) (jiffies + (x))
++/* Condensed operations for readability.
++ Compatibility defines are now in kern_compat.h */
+
+-#include <linux/delay.h>
++#define virt_to_le32desc(addr) cpu_to_le32(virt_to_bus(addr))
++#define le32desc_to_virt(addr) bus_to_virt(le32_to_cpu(addr))
+
+-#if (LINUX_VERSION_CODE >= 0x20100)
++#if (LINUX_VERSION_CODE >= 0x20100) && defined(MODULE)
+ char kernel_version[] = UTS_RELEASE;
+-#else
+-#ifndef __alpha__
+-#define ioremap(a,b) \
+- (((a)<0x100000) ? (void *)((u_long)(a)) : vremap(a,b))
+-#define iounmap(v) \
+- do { if ((u_long)(v) > 0x100000) vfree(v); } while (0)
+-#endif
+-#endif
+-#if LINUX_VERSION_CODE <= 0x20139
+-#define net_device_stats enet_statistics
+-#define NETSTATS_VER2
+-#endif
+-#if LINUX_VERSION_CODE < 0x20138
+-#define test_and_set_bit(val, addr) set_bit(val, addr)
+-#define le32_to_cpu(val) (val)
+-#define cpu_to_le32(val) (val)
+-#endif
+-#if LINUX_VERSION_CODE < 0x20155
+-#define PCI_SUPPORT_VER1
+-#else
+-#define PCI_SUPPORT_VER2
+-#endif
+-#if LINUX_VERSION_CODE < 0x20159
+-#define DEV_FREE_SKB(skb) dev_kfree_skb (skb, FREE_WRITE);
+-#else /* Grrr, incompatible changes should change the name. */
+-#define DEV_FREE_SKB(skb) dev_kfree_skb(skb);
+-#endif
+-#if ! defined(CAP_NET_ADMIN)
+-#define capable(CAP_XXX) (suser())
+ #endif
+
+-#if defined(MODULE) && LINUX_VERSION_CODE > 0x20115
+-MODULE_AUTHOR("Donald Becker <becker@cesdis.gsfc.nasa.gov>");
+-MODULE_DESCRIPTION("3Com 3c590/3c900 series Vortex/Boomerang driver");
++MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
++MODULE_DESCRIPTION("3Com EtherLink XL (3c590/3c900 series) driver");
++MODULE_LICENSE("GPL");
+ MODULE_PARM(debug, "i");
+-MODULE_PARM(options, "1-" __MODULE_STRING(8) "i");
+-MODULE_PARM(full_duplex, "1-" __MODULE_STRING(8) "i");
++MODULE_PARM(options, "1-" __MODULE_STRING(MAX_UNITS) "i");
++MODULE_PARM(full_duplex, "1-" __MODULE_STRING(MAX_UNITS) "i");
+ MODULE_PARM(rx_copybreak, "i");
+ MODULE_PARM(max_interrupt_work, "i");
+-MODULE_PARM(compaq_ioaddr, "i");
+-MODULE_PARM(compaq_irq, "i");
+-MODULE_PARM(compaq_device_id, "i");
++MODULE_PARM(multicast_filter_limit, "i");
++#ifdef MODULE_PARM_DESC
++MODULE_PARM_DESC(debug, "3c59x message level (0-31)");
++MODULE_PARM_DESC(options, "3c59x force fixed media type");
++MODULE_PARM_DESC(full_duplex,
++ "3c59x set to 1 to force full duplex (deprecated)");
++MODULE_PARM_DESC(rx_copybreak,
++ "3c59x copy breakpoint for copy-only-tiny-frames");
++MODULE_PARM_DESC(max_interrupt_work,
++ "3c59x maximum events handled per interrupt");
++MODULE_PARM_DESC(multicast_filter_limit,
++ "Multicast address count before switching to Rx-all-multicast");
+ #endif
+
+ /* Operational parameter that usually are not changed. */
+
+-/* The Vortex size is twice that of the original EtherLinkIII series: the
+- runtime register window, window 1, is now always mapped in.
+- The Boomerang size is twice as large as the Vortex -- it has additional
+- bus master control registers. */
+-#define VORTEX_TOTAL_SIZE 0x20
+-#define BOOMERANG_TOTAL_SIZE 0x40
+-
+ /* Set iff a MII transceiver on any interface requires mdio preamble.
+ This only set with the original DP83840 on older 3c905 boards, so the extra
+ code size of a per-interface flag is not worthwhile. */
+ static char mii_preamble_required = 0;
+
++/* Performance and path-coverage information. */
++static int rx_nocopy = 0, rx_copy = 0, queued_packet = 0, rx_csumhits;
++
+ /*
+ Theory of Operation
+
+@@ -160,11 +184,10 @@
+ This device driver is designed for the 3Com FastEtherLink and FastEtherLink
+ XL, 3Com's PCI to 10/100baseT adapters. It also works with the 10Mbs
+ versions of the FastEtherLink cards. The supported product IDs are
+- 3c590, 3c592, 3c595, 3c597, 3c900, 3c905
++in the pci_tbl[] list.
+
+ The related ISA 3c515 is supported with a separate driver, 3c515.c, included
+-with the kernel source or available from
+- cesdis.gsfc.nasa.gov:/pub/linux/drivers/3c515.html
++with the kernel source.
+
+ II. Board-specific settings
+
+@@ -224,83 +247,152 @@
+ the EISA version is called "Demon". According to Terry these names come
+ from rides at the local amusement park.
+
+-The new chips support both ethernet (1.5K) and FDDI (4.5K) packet sizes!
+-This driver only supports ethernet packets because of the skbuff allocation
+-limit of 4K.
++The new chips support both ethernet (1.5K) and FDDI (4.5K) packet sizes.
++This driver only supports ethernet packets on some kernels because of the
++skbuff allocation limit of 4K.
+ */
+
+-/* This table drives the PCI probe routines. It's mostly boilerplate in all
+- of the drivers, and will likely be provided by some future kernel.
+-*/
+-enum pci_flags_bit {
+- PCI_USES_IO=1, PCI_USES_MEM=2, PCI_USES_MASTER=4,
+- PCI_ADDR0=0x10<<0, PCI_ADDR1=0x10<<1, PCI_ADDR2=0x10<<2, PCI_ADDR3=0x10<<3,
+-};
+-struct pci_id_info {
+- const char *name;
+- u16 vendor_id, device_id, device_id_mask, flags;
+- int drv_flags, io_size;
+- struct device *(*probe1)(int pci_bus, int pci_devfn, struct device *dev,
+- long ioaddr, int irq, int chip_idx, int fnd_cnt);
+-};
+-
+-enum { IS_VORTEX=1, IS_BOOMERANG=2, IS_CYCLONE=4,
+- HAS_PWR_CTRL=0x10, HAS_MII=0x20, HAS_NWAY=0x40, HAS_CB_FNS=0x80, };
+-static struct device *vortex_probe1(int pci_bus, int pci_devfn,
+- struct device *dev, long ioaddr,
+- int irq, int dev_id, int card_idx);
++/* The Vortex size is twice that of the original EtherLinkIII series: the
++ runtime register window, window 1, is now always mapped in.
++ The Boomerang size is twice as large as the Vortex -- it has additional
++ bus master control registers. */
++#define VORTEX_SIZE 0x20
++#define BOOMERANG_SIZE 0x40
++#define CYCLONE_SIZE 0x80
++enum { IS_VORTEX=1, IS_BOOMERANG=2, IS_CYCLONE=0x804, IS_TORNADO=0x08,
++ HAS_PWR_CTRL=0x10, HAS_MII=0x20, HAS_NWAY=0x40, HAS_CB_FNS=0x80,
++ EEPROM_8BIT=0x200, INVERT_LED_PWR=0x400, MII_XCVR_PWR=0x4000,
++ HAS_V2_TX=0x800, WN0_XCVR_PWR=0x1000,
++};
++/* Base feature sets for the generations. */
++#define FEATURE_BOOMERANG (HAS_MII) /* 905 */
++#define FEATURE_CYCLONE (IS_CYCLONE|HAS_V2_TX) /* 905B */
++#define FEATURE_TORNADO (IS_TORNADO|HAS_NWAY|HAS_V2_TX) /* 905C */
++
++static void *vortex_probe1(struct pci_dev *pdev, void *init_dev,
++ long ioaddr, int irq, int chip_idx, int find_cnt);
++static int pwr_event(void *dev_instance, int event);
++#ifdef USE_MEM_OPS
++#define PCI_IOTYPE (PCI_USES_MASTER | PCI_USES_MEM | PCI_ADDR1)
++#else
++#define PCI_IOTYPE (PCI_USES_MASTER | PCI_USES_IO | PCI_ADDR0)
++#endif
++
+ static struct pci_id_info pci_tbl[] = {
+- {"3c590 Vortex 10Mbps", 0x10B7, 0x5900, 0xffff,
+- PCI_USES_IO|PCI_USES_MASTER, IS_VORTEX, 32, vortex_probe1},
+- {"3c595 Vortex 100baseTx", 0x10B7, 0x5950, 0xffff,
+- PCI_USES_IO|PCI_USES_MASTER, IS_VORTEX, 32, vortex_probe1},
+- {"3c595 Vortex 100baseT4", 0x10B7, 0x5951, 0xffff,
+- PCI_USES_IO|PCI_USES_MASTER, IS_VORTEX, 32, vortex_probe1},
+- {"3c595 Vortex 100base-MII", 0x10B7, 0x5952, 0xffff,
+- PCI_USES_IO|PCI_USES_MASTER, IS_VORTEX, 32, vortex_probe1},
+- {"3Com Vortex", 0x10B7, 0x5900, 0xff00,
+- PCI_USES_IO|PCI_USES_MASTER, IS_BOOMERANG, 64, vortex_probe1},
+- {"3c900 Boomerang 10baseT", 0x10B7, 0x9000, 0xffff,
+- PCI_USES_IO|PCI_USES_MASTER, IS_BOOMERANG, 64, vortex_probe1},
+- {"3c900 Boomerang 10Mbps Combo", 0x10B7, 0x9001, 0xffff,
+- PCI_USES_IO|PCI_USES_MASTER, IS_BOOMERANG, 64, vortex_probe1},
+- {"3c900 Cyclone 10Mbps Combo", 0x10B7, 0x9005, 0xffff,
+- PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE, 128, vortex_probe1},
+- {"3c900B-FL Cyclone 10base-FL", 0x10B7, 0x900A, 0xffff,
+- PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE, 128, vortex_probe1},
+- {"3c905 Boomerang 100baseTx", 0x10B7, 0x9050, 0xffff,
+- PCI_USES_IO|PCI_USES_MASTER, IS_BOOMERANG|HAS_MII, 64, vortex_probe1},
+- {"3c905 Boomerang 100baseT4", 0x10B7, 0x9051, 0xffff,
+- PCI_USES_IO|PCI_USES_MASTER, IS_BOOMERANG|HAS_MII, 64, vortex_probe1},
+- {"3c905B Cyclone 100baseTx", 0x10B7, 0x9055, 0xffff,
+- PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY, 128, vortex_probe1},
+- {"3c905B Cyclone 10/100/BNC", 0x10B7, 0x9058, 0xffff,
+- PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY, 128, vortex_probe1},
+- {"3c905B-FX Cyclone 100baseFx", 0x10B7, 0x905A, 0xffff,
+- PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE, 128, vortex_probe1},
+- {"3c905C Tornado", 0x10B7, 0x9200, 0xffff,
+- PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE, 128, vortex_probe1},
+- {"3c980 Cyclone", 0x10B7, 0x9800, 0xfff0,
+- PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE, 128, vortex_probe1},
+- {"3cSOHO100-TX Hurricane", 0x10B7, 0x7646, 0xffff,
+- PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE, 128, vortex_probe1},
+- {"3c555 Laptop Hurricane", 0x10B7, 0x5055, 0xffff,
+- PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE, 128, vortex_probe1},
+- {"3c575 Boomerang CardBus", 0x10B7, 0x5057, 0xffff,
+- PCI_USES_IO|PCI_USES_MASTER, IS_BOOMERANG|HAS_MII, 64, vortex_probe1},
+- {"3CCFE575 Cyclone CardBus", 0x10B7, 0x5157, 0xffff,
+- PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_CB_FNS,
+- 128, vortex_probe1},
+- {"3CCFE656 Cyclone CardBus", 0x10B7, 0x6560, 0xffff,
+- PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_CB_FNS,
+- 128, vortex_probe1},
+- {"3c575 series CardBus (unknown version)", 0x10B7, 0x5057, 0xf0ff,
+- PCI_USES_IO|PCI_USES_MASTER, IS_BOOMERANG|HAS_MII, 64, vortex_probe1},
+- {"3Com Boomerang (unknown version)", 0x10B7, 0x9000, 0xff00,
+- PCI_USES_IO|PCI_USES_MASTER, IS_BOOMERANG, 64, vortex_probe1},
++ {"3c590 Vortex 10Mbps", { 0x590010B7, 0xffffffff },
++ PCI_IOTYPE, VORTEX_SIZE, IS_VORTEX, },
++ {"3c595 Vortex 100baseTx", { 0x595010B7, 0xffffffff },
++ PCI_IOTYPE, VORTEX_SIZE, IS_VORTEX, },
++ {"3c595 Vortex 100baseT4", { 0x595110B7, 0xffffffff },
++ PCI_IOTYPE, VORTEX_SIZE, IS_VORTEX, },
++ {"3c595 Vortex 100base-MII",{ 0x595210B7, 0xffffffff },
++ PCI_IOTYPE, VORTEX_SIZE, IS_VORTEX, },
++ /* Change EISA_scan if these move from index 4 and 5. */
++ {"3c592 EISA Vortex", { 0x592010B7, 0xffffffff },
++ PCI_IOTYPE, VORTEX_SIZE, IS_VORTEX, },
++ {"3c597 EISA Vortex", { 0x597010B7, 0xffffffff },
++ PCI_IOTYPE, VORTEX_SIZE, IS_VORTEX, },
++ {"Vortex (unknown)", { 0x590010B7, 0xff00ffff },
++ PCI_IOTYPE, VORTEX_SIZE, IS_VORTEX, },
++ {"3c900 Boomerang 10baseT", { 0x900010B7, 0xffffffff },
++ PCI_IOTYPE, BOOMERANG_SIZE, IS_BOOMERANG, },
++ {"3c900 Boomerang 10Mbps Combo", { 0x900110B7, 0xffffffff },
++ PCI_IOTYPE,BOOMERANG_SIZE, IS_BOOMERANG, },
++ {"3c900 Cyclone 10Mbps TPO", { 0x900410B7, 0xffffffff },
++ PCI_IOTYPE, CYCLONE_SIZE, IS_CYCLONE, },
++ {"3c900 Cyclone 10Mbps Combo", { 0x900510B7, 0xffffffff },
++ PCI_IOTYPE, CYCLONE_SIZE, IS_CYCLONE, },
++ {"3c900 Cyclone 10Mbps TPC", { 0x900610B7, 0xffffffff },
++ PCI_IOTYPE, CYCLONE_SIZE, IS_CYCLONE, },
++ {"3c900B-FL Cyclone 10base-FL",{ 0x900A10B7, 0xffffffff },
++ PCI_IOTYPE, CYCLONE_SIZE, IS_CYCLONE, },
++ {"3c905 Boomerang 100baseTx",{ 0x905010B7, 0xffffffff },
++ PCI_IOTYPE,BOOMERANG_SIZE, IS_BOOMERANG|HAS_MII, },
++ {"3c905 Boomerang 100baseT4",{ 0x905110B7, 0xffffffff },
++ PCI_IOTYPE,BOOMERANG_SIZE, IS_BOOMERANG|HAS_MII, },
++ {"3c905B Cyclone 100baseTx",{ 0x905510B7, 0xffffffff },
++ PCI_IOTYPE, CYCLONE_SIZE, IS_CYCLONE|HAS_NWAY, },
++ {"3c905B Cyclone 10/100/BNC",{ 0x905810B7, 0xffffffff },
++ PCI_IOTYPE, CYCLONE_SIZE, IS_CYCLONE|HAS_NWAY, },
++ {"3c905B-FX Cyclone 100baseFx",{ 0x905A10B7, 0xffffffff },
++ PCI_IOTYPE, CYCLONE_SIZE, IS_CYCLONE, },
++ {"3c905C Tornado",{ 0x920010B7, 0xffffffff },
++ PCI_IOTYPE, CYCLONE_SIZE, FEATURE_TORNADO, },
++ {"3c920 Tornado",{ 0x920110B7, 0xffffffff },
++ PCI_IOTYPE, CYCLONE_SIZE, FEATURE_TORNADO, },
++ {"3c920 series Tornado",{ 0x920010B7, 0xfff0ffff },
++ PCI_IOTYPE, CYCLONE_SIZE, FEATURE_TORNADO, },
++ {"3c982 Server Tornado",{ 0x980510B7, 0xffffffff },
++ PCI_IOTYPE, CYCLONE_SIZE, FEATURE_TORNADO, },
++ {"3c980 Cyclone",{ 0x980010B7, 0xfff0ffff },
++ PCI_IOTYPE, CYCLONE_SIZE, FEATURE_CYCLONE|HAS_NWAY, },
++ {"3cSOHO100-TX Hurricane", { 0x764610B7, 0xffffffff },
++ PCI_IOTYPE, CYCLONE_SIZE, FEATURE_CYCLONE, },
++ {"3c555 Laptop Hurricane", { 0x505510B7, 0xffffffff },
++ PCI_IOTYPE, CYCLONE_SIZE, FEATURE_CYCLONE, },
++ {"3c556 Laptop Tornado",{ 0x605510B7, 0xffffffff },
++ PCI_IOTYPE, CYCLONE_SIZE, FEATURE_TORNADO|EEPROM_8BIT, },
++ {"3c556 series Laptop Tornado",{ 0x605510B7, 0xf0ffffff },
++ PCI_IOTYPE, CYCLONE_SIZE, FEATURE_TORNADO|EEPROM_8BIT, },
++ {"3c1556B-5 mini-PCI",{ 0x605610B7, 0xffffffff, 0x655610b7, 0xffffffff, },
++ PCI_IOTYPE, CYCLONE_SIZE,
++ FEATURE_TORNADO|EEPROM_8BIT|INVERT_LED_PWR|WN0_XCVR_PWR, },
++ {"3c1556B mini-PCI",{ 0x605610B7, 0xffffffff },
++ PCI_IOTYPE, CYCLONE_SIZE,
++ FEATURE_TORNADO|EEPROM_8BIT|HAS_CB_FNS|INVERT_LED_PWR|MII_XCVR_PWR, },
++ {"3c1556B series mini-PCI",{ 0x605610B7, 0xf0ffffff },
++ PCI_IOTYPE, CYCLONE_SIZE,
++ FEATURE_TORNADO|EEPROM_8BIT|HAS_CB_FNS|INVERT_LED_PWR|MII_XCVR_PWR, },
++ {"3c575 Boomerang CardBus", { 0x505710B7, 0xffffffff },
++ PCI_IOTYPE,BOOMERANG_SIZE, IS_BOOMERANG|HAS_MII|EEPROM_8BIT, },
++ {"3CCFE575BT Cyclone CardBus",{ 0x515710B7, 0xffffffff },
++ PCI_IOTYPE, CYCLONE_SIZE,
++ FEATURE_CYCLONE | HAS_CB_FNS | EEPROM_8BIT | INVERT_LED_PWR, },
++ {"3CCFE575CT Tornado CardBus",{ 0x525710B7, 0xffffffff },
++ PCI_IOTYPE, CYCLONE_SIZE,
++ FEATURE_TORNADO|HAS_CB_FNS|EEPROM_8BIT|MII_XCVR_PWR, },
++ {"3CCFE656 Cyclone CardBus",{ 0x656010B7, 0xffffffff },
++ PCI_IOTYPE, CYCLONE_SIZE,
++ IS_CYCLONE|HAS_NWAY|HAS_CB_FNS| INVERT_LED_PWR | MII_XCVR_PWR, },
++ {"3CCFE656B Cyclone+Winmodem CardBus",{ 0x656210B7, 0xffffffff },
++ PCI_IOTYPE, CYCLONE_SIZE,
++ FEATURE_CYCLONE/*|HAS_NWAY*/ |HAS_CB_FNS|EEPROM_8BIT|INVERT_LED_PWR|MII_XCVR_PWR, },
++ {"3CCFE656C Tornado+Winmodem CardBus",{ 0x656410B7, 0xffffffff },
++ PCI_IOTYPE, CYCLONE_SIZE,
++ (FEATURE_TORNADO & ~HAS_NWAY)|HAS_CB_FNS|EEPROM_8BIT | MII_XCVR_PWR, },
++ {"3c450 HomePNA Tornado",{ 0x450010B7, 0xffffffff },
++ PCI_IOTYPE, CYCLONE_SIZE, FEATURE_TORNADO, },
++ {"3c575 series CardBus (unknown version)", {0x505710B7, 0xf0ffffff },
++ PCI_IOTYPE, BOOMERANG_SIZE, IS_BOOMERANG|HAS_MII, },
++ {"3Com Boomerang (unknown version)",{ 0x900010B7, 0xff00ffff },
++ PCI_IOTYPE, BOOMERANG_SIZE, IS_BOOMERANG, },
+ {0,}, /* 0 terminated list. */
+ };
+
++struct drv_id_info vortex_drv_id = {
++ "vortex", PCI_HOTSWAP, PCI_CLASS_NETWORK_ETHERNET<<8, pci_tbl,
++ vortex_probe1, pwr_event };
++
++/* This driver was written to use I/O operations.
++ However there are performance benefits to using memory operations, so
++ that mode is now an options.
++ Compiling for memory ops turns off EISA support.
++*/
++#ifdef USE_MEM_OPS
++#undef inb
++#undef inw
++#undef inl
++#undef outb
++#undef outw
++#undef outl
++#define inb readb
++#define inw readw
++#define inl readl
++#define outb writeb
++#define outw writew
++#define outl writel
++#endif
++
+ /* Operational definitions.
+ These are not used by other compilation units and thus are not
+ exported in a ".h" file.
+@@ -332,7 +424,9 @@
+
+ /* The SetRxFilter command accepts the following classes: */
+ enum RxFilter {
+- RxStation = 1, RxMulticast = 2, RxBroadcast = 4, RxProm = 8 };
++ RxStation = 1, RxMulticast = 2, RxBroadcast = 4, RxProm = 8,
++ RxMulticastHash = 0x10,
++};
+
+ /* Bits in the general status register. */
+ enum vortex_status {
+@@ -356,11 +450,7 @@
+ Wn0EepromData = 12, /* Window 0: EEPROM results register. */
+ IntrStatus=0x0E, /* Valid in all windows. */
+ };
+-enum Win0_EEPROM_bits {
+- EEPROM_Read = 0x80, EEPROM_WRITE = 0x40, EEPROM_ERASE = 0xC0,
+- EEPROM_EWENB = 0x30, /* Enable erasing/writing for 10 msec. */
+- EEPROM_EWDIS = 0x00, /* Disable EWENB before 10 msec timeout. */
+-};
++
+ /* EEPROM locations. */
+ enum eeprom_offset {
+ PhysAddr01=0, PhysAddr23=1, PhysAddr45=2, ModelID=3,
+@@ -372,34 +462,35 @@
+ Wn2_ResetOptions=12,
+ };
+ enum Window3 { /* Window 3: MAC/config bits. */
+- Wn3_Config=0, Wn3_MAC_Ctrl=6, Wn3_Options=8,
+-};
+-union wn3_config {
+- int i;
+- struct w3_config_fields {
+- unsigned int ram_size:3, ram_width:1, ram_speed:2, rom_size:2;
+- int pad8:8;
+- unsigned int ram_split:2, pad18:2, xcvr:4, autoselect:1;
+- int pad24:7;
+- } u;
++ Wn3_Config=0, Wn3_MaxPktSize=4, Wn3_MAC_Ctrl=6, Wn3_Options=8,
+ };
+
+ enum Window4 { /* Window 4: Xcvr/media bits. */
+ Wn4_FIFODiag = 4, Wn4_NetDiag = 6, Wn4_PhysicalMgmt=8, Wn4_Media = 10,
+ };
++enum Window5 {
++ Wn5_TxThreshold = 0, Wn5_RxFilter = 8,
++};
+ enum Win4_Media_bits {
+ Media_SQE = 0x0008, /* Enable SQE error counting for AUI. */
+ Media_10TP = 0x00C0, /* Enable link beat and jabber for 10baseT. */
+ Media_Lnk = 0x0080, /* Enable just link beat for 100TX/100FX. */
+ Media_LnkBeat = 0x0800,
+ };
+-enum Window7 { /* Window 7: Bus Master control. */
++enum Window7 {
++ /* Bus Master control on Vortex. */
+ Wn7_MasterAddr = 0, Wn7_MasterLen = 6, Wn7_MasterStatus = 12,
++ /* On Cyclone and later, VLAN and PowerMgt control. */
++ Wn7_VLAN_Mask = 0, Wn7_VLAN_EtherType = 4, Wn7_PwrMgmtEvent = 12,
+ };
+-/* Boomerang bus master control registers. */
++
++/* Boomerang and Cyclone bus master control registers. */
+ enum MasterCtrl {
+ PktStatus = 0x20, DownListPtr = 0x24, FragAddr = 0x28, FragLen = 0x2c,
+- TxFreeThreshold = 0x2f, UpPktStatus = 0x30, UpListPtr = 0x38,
++ DownPollRate = 0x2d, TxFreeThreshold = 0x2f,
++ UpPktStatus = 0x30, UpListPtr = 0x38,
++ /* Cyclone+. */
++ TxPktID=0x18, RxPriorityThresh = 0x3c,
+ };
+
+ /* The Rx and Tx descriptor lists.
+@@ -429,14 +520,16 @@
+
+ /* Values for the Tx status entry. */
+ enum tx_desc_status {
+- CRCDisable=0x2000, TxDComplete=0x8000,
++ CRCDisable=0x2000, TxIntrDnComplete=0x8000, TxDownComplete=0x10000,
+ AddIPChksum=0x02000000, AddTCPChksum=0x04000000, AddUDPChksum=0x08000000,
++ TxNoRoundup=0x10000000, /* HAS_V2_TX should not word-pad packet. */
+ TxIntrUploaded=0x80000000, /* IRQ when in FIFO, but maybe not sent. */
+ };
+
+ /* Chip features we care about in vp->capabilities, read from the EEPROM. */
+-enum ChipCaps { CapBusMaster=0x20, CapPwrMgmt=0x2000 };
++enum ChipCaps { CapBusMaster=0x20, CapNoTxLength=0x0200, CapPwrMgmt=0x2000 };
+
++#define PRIV_ALIGN 15 /* Required alignment mask */
+ struct vortex_private {
+ /* The Rx and Tx rings should be quad-word-aligned. */
+ struct boom_rx_desc rx_ring[RX_RING_SIZE];
+@@ -444,32 +537,46 @@
+ /* The addresses of transmit- and receive-in-place skbuffs. */
+ struct sk_buff* rx_skbuff[RX_RING_SIZE];
+ struct sk_buff* tx_skbuff[TX_RING_SIZE];
+- struct device *next_module;
++ struct net_device *next_module;
+ void *priv_addr;
+- unsigned int cur_rx, cur_tx; /* The next free ring entry */
+- unsigned int dirty_rx, dirty_tx; /* The ring entries to be free()ed. */
+- struct net_device_stats stats;
++ /* Keep the Rx and Tx variables grouped on their own cache lines. */
++ struct boom_rx_desc *rx_head_desc;
++ unsigned int cur_rx, dirty_rx; /* Producer/consumer ring indices */
++ unsigned int rx_buf_sz; /* Based on MTU+slack. */
++ int rx_copybreak;
++
++ struct boom_tx_desc *tx_desc_tail;
+ struct sk_buff *tx_skb; /* Packet being eaten by bus master ctrl. */
++ unsigned int cur_tx, dirty_tx;
++ unsigned int tx_full:1, restart_tx:1;
+
+- /* PCI configuration space information. */
+- u8 pci_bus, pci_devfn; /* PCI bus location, for power management. */
++ long last_reset;
++ spinlock_t window_lock;
++ struct net_device_stats stats;
+ char *cb_fn_base; /* CardBus function status addr space. */
+- int chip_id;
++ int msg_level;
++ int chip_id, drv_flags;
++ struct pci_dev *pci_dev; /* PCI configuration space information. */
+
+ /* The remainder are related to chip state, mostly media selection. */
+- unsigned long in_interrupt;
++ int multicast_filter_limit;
++ u32 mc_filter[8];
++ int max_interrupt_work;
++ int rx_mode;
+ struct timer_list timer; /* Media selection timer. */
+ int options; /* User-settable misc. driver options. */
+ unsigned int media_override:4, /* Passed-in media type. */
+ default_media:4, /* Read from the EEPROM/Wn3_Config. */
+- full_duplex:1, force_fd:1, autoselect:1,
++ full_duplex:1, medialock:1, autoselect:1,
+ bus_master:1, /* Vortex can only do a fragment bus-m. */
+ full_bus_master_tx:1, full_bus_master_rx:2, /* Boomerang */
+ hw_csums:1, /* Has hardware checksums. */
+- tx_full:1;
++ restore_intr_mask:1,
++ polling:1;
+ u16 status_enable;
+ u16 intr_enable;
+ u16 available_media; /* From Wn3_Options. */
++ u16 wn3_mac_ctrl; /* Current settings. */
+ u16 capabilities, info1, info2; /* Various, from EEPROM. */
+ u16 advertising; /* NWay media advertisement */
+ unsigned char phys[2]; /* MII device addresses. */
+@@ -503,62 +610,60 @@
+ { "Default", 0, 0xFF, XCVR_10baseT, 10000},
+ };
+
+-#ifndef CARDBUS
+-static int vortex_scan(struct device *dev, struct pci_id_info pci_tbl[]);
++#if ! defined(CARDBUS) && ! defined(USE_MEM_OPS)
++static int eisa_scan(struct net_device *dev);
+ #endif
+-static int vortex_open(struct device *dev);
++static int vortex_open(struct net_device *dev);
++static void set_media_type(struct net_device *dev);
++static void activate_xcvr(struct net_device *dev);
++static void start_operation(struct net_device *dev);
++static void start_operation1(struct net_device *dev);
+ static void mdio_sync(long ioaddr, int bits);
+ static int mdio_read(long ioaddr, int phy_id, int location);
+ static void mdio_write(long ioaddr, int phy_id, int location, int value);
+ static void vortex_timer(unsigned long arg);
+-static int vortex_start_xmit(struct sk_buff *skb, struct device *dev);
+-static int boomerang_start_xmit(struct sk_buff *skb, struct device *dev);
+-static int vortex_rx(struct device *dev);
+-static int boomerang_rx(struct device *dev);
++static void vortex_tx_timeout(struct net_device *dev);
++static int vortex_start_xmit(struct sk_buff *skb, struct net_device *dev);
++static int boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev);
++static int vortex_rx(struct net_device *dev);
++static int boomerang_rx(struct net_device *dev);
+ static void vortex_interrupt(int irq, void *dev_id, struct pt_regs *regs);
+-static int vortex_close(struct device *dev);
+-static void update_stats(long ioaddr, struct device *dev);
+-static struct net_device_stats *vortex_get_stats(struct device *dev);
+-static void set_rx_mode(struct device *dev);
+-static int vortex_ioctl(struct device *dev, struct ifreq *rq, int cmd);
+-static void acpi_wake(int pci_bus, int pci_devfn);
+-static void acpi_set_WOL(struct device *dev);
++static int vortex_close(struct net_device *dev);
++static void update_stats(long ioaddr, struct net_device *dev);
++static struct net_device_stats *vortex_get_stats(struct net_device *dev);
++static void set_rx_mode(struct net_device *dev);
++static int vortex_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
++#if defined(NO_PCI)
++#define acpi_set_WOL(dev) do {} while(0);
++#define acpi_wake(pci_dev) do {} while(0);
++#define acpi_set_pwr_state(pci_dev, state) do {} while(0);
++#else
++static void acpi_set_WOL(struct net_device *dev);
++#endif
+
+
+-/* This driver uses 'options' to pass the media type, full-duplex flag, etc. */
+-/* Option count limit only -- unlimited interfaces are supported. */
+-#define MAX_UNITS 8
+-static int options[MAX_UNITS] = { -1, -1, -1, -1, -1, -1, -1, -1,};
+-static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
+ /* A list of all installed Vortex devices, for removing the driver module. */
+-static struct device *root_vortex_dev = NULL;
++static struct net_device *root_vortex_dev = NULL;
+
+-#ifdef MODULE
+-#ifndef CARDBUS
+-/* Variables to work-around the Compaq PCI BIOS32 problem. */
+-static int compaq_ioaddr = 0, compaq_irq = 0, compaq_device_id = 0x5900;
+-#endif
+
+-#ifdef CARDBUS
++#if defined(MODULE) && defined(CARDBUS)
+
+ #include <pcmcia/driver_ops.h>
+
+ static dev_node_t *vortex_attach(dev_locator_t *loc)
+ {
+- u16 dev_id, vendor_id;
+- u32 io;
++ u32 io, pci_id;
+ u8 bus, devfn, irq;
+- struct device *dev;
++ struct net_device *dev;
+ int chip_idx;
+
+ if (loc->bus != LOC_PCI) return NULL;
+ bus = loc->b.pci.bus; devfn = loc->b.pci.devfn;
+ pcibios_read_config_dword(bus, devfn, PCI_BASE_ADDRESS_0, &io);
+ pcibios_read_config_byte(bus, devfn, PCI_INTERRUPT_LINE, &irq);
+- pcibios_read_config_word(bus, devfn, PCI_VENDOR_ID, &vendor_id);
+- pcibios_read_config_word(bus, devfn, PCI_DEVICE_ID, &dev_id);
+- printk(KERN_INFO "vortex_attach(bus %d, function %d, device %4.4x)\n",
+- bus, devfn, dev_id);
++ pcibios_read_config_dword(bus, devfn, PCI_VENDOR_ID, &pci_id);
++ printk(KERN_INFO "vortex_attach(bus %d, function %d, device %8.8x)\n",
++ bus, devfn, pci_id);
+ io &= ~3;
+ if (io == 0 || irq == 0) {
+ printk(KERN_ERR "The 3Com CardBus Ethernet interface was not "
+@@ -566,17 +671,16 @@
+ io == 0 ? "I/O address" : "IRQ");
+ return NULL;
+ }
+- for (chip_idx = 0; pci_tbl[chip_idx].vendor_id; chip_idx++)
+- if (vendor_id == pci_tbl[chip_idx].vendor_id
+- && (dev_id & pci_tbl[chip_idx].device_id_mask) ==
+- pci_tbl[chip_idx].device_id)
++ for (chip_idx = 0; pci_tbl[chip_idx].id.pci; chip_idx++)
++ if ((pci_id & pci_tbl[chip_idx].id.pci_mask) ==
++ pci_tbl[chip_idx].id.pci)
+ break;
+- if (pci_tbl[chip_idx].vendor_id == 0) { /* Compiled out! */
+- printk(KERN_INFO "Unable to match chip type %4.4x %4.4x in "
+- "vortex_attach().\n", vendor_id, dev_id);
++ if (pci_tbl[chip_idx].id.pci == 0) { /* Compiled out! */
++ printk(KERN_INFO "Unable to match chip type %8.8x in "
++ "vortex_attach().\n", pci_id);
+ return NULL;
+ }
+- dev = vortex_probe1(bus, devfn, NULL, io, irq, chip_idx, MAX_UNITS+1);
++ dev = vortex_probe1(pci_find_slot(bus, devfn), NULL, io, irq, chip_idx, MAX_UNITS+1);
+ if (dev) {
+ dev_node_t *node = kmalloc(sizeof(dev_node_t), GFP_KERNEL);
+ strcpy(node->dev_name, dev->name);
+@@ -590,17 +694,17 @@
+
+ static void vortex_detach(dev_node_t *node)
+ {
+- struct device **devp, **next;
+- printk(KERN_INFO "vortex_detach(%s)\n", node->dev_name);
++ struct net_device **devp, **next;
++ printk(KERN_DEBUG "vortex_detach(%s)\n", node->dev_name);
+ for (devp = &root_vortex_dev; *devp; devp = next) {
+ next = &((struct vortex_private *)(*devp)->priv)->next_module;
+ if (strcmp((*devp)->name, node->dev_name) == 0) break;
+ }
+ if (*devp) {
+- struct device *dev = *devp;
++ struct net_device *dev = *devp;
+ struct vortex_private *vp = dev->priv;
+ if (dev->flags & IFF_UP)
+- vortex_close(dev);
++ dev_close(dev);
+ dev->flags &= ~(IFF_UP|IFF_RUNNING);
+ unregister_netdev(dev);
+ if (vp->cb_fn_base) iounmap(vp->cb_fn_base);
+@@ -616,143 +720,68 @@
+ "3c575_cb", vortex_attach, NULL, NULL, vortex_detach
+ };
+
+-#endif /* Cardbus support */
++#endif /* Old-style Cardbus module support */
++
++#if defined(MODULE) || (LINUX_VERSION_CODE >= 0x020400)
+
++#if ! defined(MODULE) /* Must be a 2.4 kernel */
++module_init(init_module);
++module_exit(cleanup_module);
++#endif
+
+ int init_module(void)
+ {
+- if (vortex_debug)
+- printk(KERN_INFO "%s", version);
++ printk(KERN_INFO "%s" KERN_INFO "%s", versionA, versionB);
+ #ifdef CARDBUS
+ register_driver(&vortex_ops);
+ return 0;
+ #else
+- return vortex_scan(0, pci_tbl);
++#ifndef USE_MEM_OPS
++ /* This is not quite correct, but both EISA and PCI cards is unlikely. */
++ if (eisa_scan(0) >= 0)
++ return 0;
++#if defined(NO_PCI)
++ return 0;
++#endif
++#endif
++
++ return pci_drv_register(&vortex_drv_id, NULL);
+ #endif
+ }
+
+ #else
+-int tc59x_probe(struct device *dev)
++int tc59x_probe(struct net_device *dev)
+ {
+- static int did_version = -1;
+- if (++did_version <= 0)
+- printk(KERN_INFO "%s", version);
+- return vortex_scan(dev, pci_tbl);
++ int retval = -ENODEV;
++
++ /* Allow an EISA-only driver. */
++#if ! defined(NO_PCI)
++ if (pci_drv_register(&vortex_drv_id, dev) >= 0) {
++ retval = 0;
++ dev = 0;
++ }
++#endif
++#ifndef USE_MEM_OPS
++ if (eisa_scan(dev) >= 0)
++ retval = 0;
++#endif
++ if (retval >= 0)
++ printk(KERN_INFO "%s" KERN_INFO "%s", versionA, versionB);
++ return retval;
+ }
+ #endif /* not MODULE */
+
+-#ifndef CARDBUS
+-static int vortex_scan(struct device *dev, struct pci_id_info pci_tbl[])
++#if ! defined(CARDBUS) && ! defined(USE_MEM_OPS)
++static int eisa_scan(struct net_device *dev)
+ {
+ int cards_found = 0;
+
+- /* Allow an EISA-only driver. */
+-#if defined(CONFIG_PCI) || (defined(MODULE) && !defined(NO_PCI))
+- /* Ideally we would detect all cards in slot order. That would
+- be best done a central PCI probe dispatch, which wouldn't work
+- well with the current structure. So instead we detect 3Com cards
+- in slot order. */
+- if (pcibios_present()) {
+- static int pci_index = 0;
+- unsigned char pci_bus, pci_device_fn;
+-
+- for (;pci_index < 0xff; pci_index++) {
+- u16 vendor, device, pci_command, new_command;
+- int chip_idx, irq;
+- long ioaddr;
+-
+- if (pcibios_find_class (PCI_CLASS_NETWORK_ETHERNET << 8, pci_index,
+- &pci_bus, &pci_device_fn)
+- != PCIBIOS_SUCCESSFUL)
+- break;
+- pcibios_read_config_word(pci_bus, pci_device_fn,
+- PCI_VENDOR_ID, &vendor);
+- pcibios_read_config_word(pci_bus, pci_device_fn,
+- PCI_DEVICE_ID, &device);
+- for (chip_idx = 0; pci_tbl[chip_idx].vendor_id; chip_idx++)
+- if (vendor == pci_tbl[chip_idx].vendor_id
+- && (device & pci_tbl[chip_idx].device_id_mask) ==
+- pci_tbl[chip_idx].device_id)
+- break;
+- if (pci_tbl[chip_idx].vendor_id == 0) /* Compiled out! */
+- continue;
+-
+- /* The Cyclone requires config space re-write if powered down. */
+- acpi_wake(pci_bus, pci_device_fn);
+-
+- {
+-#if LINUX_VERSION_CODE >= 0x20155
+- struct pci_dev *pdev = pci_find_slot(pci_bus, pci_device_fn);
+- ioaddr = pdev->base_address[0] & ~3;
+- irq = pdev->irq;
+-#else
+- u32 pci_ioaddr;
+- u8 pci_irq_line;
+- pcibios_read_config_byte(pci_bus, pci_device_fn,
+- PCI_INTERRUPT_LINE, &pci_irq_line);
+- pcibios_read_config_dword(pci_bus, pci_device_fn,
+- PCI_BASE_ADDRESS_0, &pci_ioaddr);
+- ioaddr = pci_ioaddr & ~3;;
+- irq = pci_irq_line;
+-#endif
+- }
+-
+- if (ioaddr == 0) {
+- printk(KERN_WARNING " A 3Com network adapter has been found, "
+- "however it has not been assigned an I/O address.\n"
+- " You may need to power-cycle the machine for this "
+- "device to work!\n");
+- continue;
+- }
+-
+- if (check_region(ioaddr, pci_tbl[chip_idx].io_size))
+- continue;
+-
+- /* Activate the card. */
+- pcibios_read_config_word(pci_bus, pci_device_fn,
+- PCI_COMMAND, &pci_command);
+- new_command = pci_command | PCI_COMMAND_MASTER|PCI_COMMAND_IO;
+- if (pci_command != new_command) {
+- printk(KERN_INFO " The PCI BIOS has not enabled the device "
+- "at %d/%d. Updating PCI command %4.4x->%4.4x.\n",
+- pci_bus, pci_device_fn, pci_command, new_command);
+- pcibios_write_config_word(pci_bus, pci_device_fn,
+- PCI_COMMAND, new_command);
+- }
+-
+- dev = vortex_probe1(pci_bus, pci_device_fn, dev, ioaddr, irq,
+- chip_idx, cards_found);
+-
+- if (dev) {
+- /* Get and check the latency values. On the 3c590 series
+- the latency timer must be set to the maximum value to avoid
+- data corruption that occurs when the timer expires during
+- a transfer -- a bug in the Vortex chip only. */
+- u8 pci_latency;
+- u8 new_latency = (device & 0xff00) == 0x5900 ? 248 : 32;
+-
+- pcibios_read_config_byte(pci_bus, pci_device_fn,
+- PCI_LATENCY_TIMER, &pci_latency);
+- if (pci_latency < new_latency) {
+- printk(KERN_INFO "%s: Overriding PCI latency"
+- " timer (CFLT) setting of %d, new value is %d.\n",
+- dev->name, pci_latency, new_latency);
+- pcibios_write_config_byte(pci_bus, pci_device_fn,
+- PCI_LATENCY_TIMER, new_latency);
+- }
+- dev = 0;
+- cards_found++;
+- }
+- }
+- }
+-#endif /* NO_PCI */
+-
+- /* Now check all slots of the EISA bus. */
++ /* Check the slots of the EISA bus. */
+ if (EISA_bus) {
+ static long ioaddr = 0x1000;
+ for ( ; ioaddr < 0x9000; ioaddr += 0x1000) {
+ int device_id;
+- if (check_region(ioaddr, VORTEX_TOTAL_SIZE))
++ if (check_region(ioaddr, VORTEX_SIZE))
+ continue;
+ /* Check the standard EISA ID register for an encoded '3Com'. */
+ if (inw(ioaddr + 0xC80) != 0x6d50)
+@@ -761,65 +790,100 @@
+ device_id = (inb(ioaddr + 0xC82)<<8) + inb(ioaddr + 0xC83);
+ if ((device_id & 0xFF00) != 0x5900)
+ continue;
+- vortex_probe1(0, 0, dev, ioaddr, inw(ioaddr + 0xC88) >> 12,
+- 4, cards_found);
++ vortex_probe1(0, dev, ioaddr, inw(ioaddr + 0xC88) >> 12,
++ (device_id & 0xfff0) == 0x5970 ? 5 : 4, cards_found);
+ dev = 0;
+ cards_found++;
+ }
+ }
+
+-#ifdef MODULE
+- /* Special code to work-around the Compaq PCI BIOS32 problem. */
+- if (compaq_ioaddr) {
+- vortex_probe1(0, 0, dev, compaq_ioaddr, compaq_irq,
+- compaq_device_id, cards_found++);
+- dev = 0;
+- }
+-#endif
+-
+ return cards_found ? 0 : -ENODEV;
+ }
+ #endif /* ! Cardbus */
+
+-static struct device *vortex_probe1(int pci_bus, int pci_devfn,
+- struct device *dev, long ioaddr,
+- int irq, int chip_idx, int card_idx)
++static int do_eeprom_op(long ioaddr, int ee_cmd)
+ {
++ int timer;
++
++ outw(ee_cmd, ioaddr + Wn0EepromCmd);
++ /* Wait for the read to take place, worst-case 162 us. */
++ for (timer = 1620; timer >= 0; timer--) {
++ if ((inw(ioaddr + Wn0EepromCmd) & 0x8000) == 0)
++ break;
++ }
++ return inw(ioaddr + Wn0EepromData);
++}
++
++static void *vortex_probe1(struct pci_dev *pdev, void *init_dev,
++ long ioaddr, int irq, int chip_idx, int find_cnt)
++{
++ struct net_device *dev;
+ struct vortex_private *vp;
++ void *priv_mem;
+ int option;
+ unsigned int eeprom[0x40], checksum = 0; /* EEPROM contents */
++ int ee_read_cmd;
++ int drv_flags = pci_tbl[chip_idx].drv_flags;
+ int i;
+
+- dev = init_etherdev(dev, 0);
++ dev = init_etherdev(init_dev, 0);
++ if (!dev)
++ return NULL;
++
++#if ! defined(NO_PCI)
++ /* Check the PCI latency value. On the 3c590 series the latency timer
++ must be set to the maximum value to avoid data corruption that occurs
++ when the timer expires during a transfer. This bug exists the Vortex
++ chip only. */
++ if (pdev) {
++ u8 pci_latency;
++ u8 new_latency = (drv_flags & IS_VORTEX) ? 248 : 32;
++
++ pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &pci_latency);
++ if (pci_latency < new_latency) {
++ printk(KERN_INFO "%s: Overriding PCI latency"
++ " timer (CFLT) setting of %d, new value is %d.\n",
++ dev->name, pci_latency, new_latency);
++ pci_write_config_byte(pdev, PCI_LATENCY_TIMER, new_latency);
++ }
++ }
++#endif
+
+ printk(KERN_INFO "%s: 3Com %s at 0x%lx, ",
+ dev->name, pci_tbl[chip_idx].name, ioaddr);
+
++ /* Make certain elements e.g. descriptor lists are aligned. */
++ priv_mem = kmalloc(sizeof(*vp) + PRIV_ALIGN, GFP_KERNEL);
++ /* Check for the very unlikely case of no memory. */
++ if (priv_mem == NULL) {
++ printk(" INTERFACE MEMORY ALLOCATION FAILURE.\n");
++ return NULL;
++ }
++
+ dev->base_addr = ioaddr;
+ dev->irq = irq;
+ dev->mtu = mtu;
+
+- /* Make certain the descriptor lists are aligned. */
+- {
+- void *mem = kmalloc(sizeof(*vp) + 15, GFP_KERNEL);
+- vp = (void *)(((long)mem + 15) & ~15);
+- vp->priv_addr = mem;
+- }
++ dev->priv = vp = (void *)(((long)priv_mem + PRIV_ALIGN) & ~PRIV_ALIGN);
+ memset(vp, 0, sizeof(*vp));
+- dev->priv = vp;
++ vp->priv_addr = priv_mem;
+
+ vp->next_module = root_vortex_dev;
+ root_vortex_dev = dev;
+
+ vp->chip_id = chip_idx;
+- vp->pci_bus = pci_bus;
+- vp->pci_devfn = pci_devfn;
++ vp->pci_dev = pdev;
++ vp->drv_flags = drv_flags;
++ vp->msg_level = (1 << debug) - 1;
++ vp->rx_copybreak = rx_copybreak;
++ vp->max_interrupt_work = max_interrupt_work;
++ vp->multicast_filter_limit = multicast_filter_limit;
+
+ /* The lower four bits are the media type. */
+ if (dev->mem_start)
+ option = dev->mem_start;
+- else if (card_idx < MAX_UNITS)
+- option = options[card_idx];
++ else if (find_cnt < MAX_UNITS)
++ option = options[find_cnt];
+ else
+ option = -1;
+
+@@ -832,28 +896,30 @@
+ vp->full_duplex = 0;
+ vp->bus_master = 0;
+ }
+- if (card_idx < MAX_UNITS && full_duplex[card_idx] > 0)
++ if (find_cnt < MAX_UNITS && full_duplex[find_cnt] > 0)
+ vp->full_duplex = 1;
+
+- vp->force_fd = vp->full_duplex;
+ vp->options = option;
+
+ /* Read the station address from the EEPROM. */
+ EL3WINDOW(0);
++ /* Figure out the size and offset of the EEPROM table.
++ This is complicated by potential discontiguous address bits. */
++
++ /* Locate the opcode bits, 0xC0 or 0x300. */
++ outw(0x5555, ioaddr + Wn0EepromData);
++ ee_read_cmd = do_eeprom_op(ioaddr, 0x80) == 0x5555 ? 0x200 : 0x80;
++ /* Locate the table base for CardBus cards. */
++ if (do_eeprom_op(ioaddr, ee_read_cmd + 0x37) == 0x6d50)
++ ee_read_cmd += 0x30;
++
+ for (i = 0; i < 0x40; i++) {
+- int timer;
+-#ifdef CARDBUS
+- outw(0x230 + i, ioaddr + Wn0EepromCmd);
+-#else
+- outw(EEPROM_Read + i, ioaddr + Wn0EepromCmd);
+-#endif
+- /* Pause for at least 162 us. for the read to take place. */
+- for (timer = 10; timer >= 0; timer--) {
+- udelay(162);
+- if ((inw(ioaddr + Wn0EepromCmd) & 0x8000) == 0)
+- break;
++ int cmd_and_addr = ee_read_cmd + i;
++ if (ee_read_cmd == 0xB0) { /* Correct for discontinuity. */
++ int offset = 0x30 + i;
++ cmd_and_addr = 0x80 + (offset & 0x3f) + ((offset<<2) & 0x0f00);
+ }
+- eeprom[i] = inw(ioaddr + Wn0EepromData);
++ eeprom[i] = do_eeprom_op(ioaddr, cmd_and_addr);
+ }
+ for (i = 0; i < 0x18; i++)
+ checksum ^= eeprom[i];
+@@ -863,7 +929,7 @@
+ checksum ^= eeprom[i++];
+ checksum = (checksum ^ (checksum >> 8)) & 0xff;
+ }
+- if (checksum != 0x00)
++ if (checksum != 0x00 && !(drv_flags & IS_TORNADO))
+ printk(" ***INVALID CHECKSUM %4.4x*** ", checksum);
+
+ for (i = 0; i < 3; i++)
+@@ -874,27 +940,22 @@
+ for (i = 0; i < 6; i++)
+ outb(dev->dev_addr[i], ioaddr + i);
+
+-#ifdef __sparc__
+- printk(", IRQ %s\n", __irq_itoa(dev->irq));
+-#else
+ printk(", IRQ %d\n", dev->irq);
+ /* Tell them about an invalid IRQ. */
+- if (vortex_debug && (dev->irq <= 0 || dev->irq >= NR_IRQS))
++ if (dev->irq <= 0)
+ printk(KERN_WARNING " *** Warning: IRQ %d is unlikely to work! ***\n",
+ dev->irq);
+-#endif
+
+- if (pci_tbl[vp->chip_id].drv_flags & HAS_CB_FNS) {
++#if ! defined(NO_PCI)
++ if (drv_flags & HAS_CB_FNS) {
+ u32 fn_st_addr; /* Cardbus function status space */
+- pcibios_read_config_dword(pci_bus, pci_devfn, PCI_BASE_ADDRESS_2,
+- &fn_st_addr);
++ pci_read_config_dword(pdev, PCI_BASE_ADDRESS_2, &fn_st_addr);
+ if (fn_st_addr)
+ vp->cb_fn_base = ioremap(fn_st_addr & ~3, 128);
+- printk("%s: CardBus functions mapped %8.8x->%p (PCMCIA committee"
+- " brain-damage).\n", dev->name, fn_st_addr, vp->cb_fn_base);
+- EL3WINDOW(2);
+- outw(0x10 | inw(ioaddr + Wn2_ResetOptions), ioaddr + Wn2_ResetOptions);
++ printk(KERN_INFO "%s: CardBus functions mapped %8.8x->%p.\n",
++ dev->name, fn_st_addr, vp->cb_fn_base);
+ }
++#endif
+
+ /* Extract our information from the EEPROM data. */
+ vp->info1 = eeprom[13];
+@@ -903,27 +964,31 @@
+
+ if (vp->info1 & 0x8000)
+ vp->full_duplex = 1;
++ if (vp->full_duplex)
++ vp->medialock = 1;
++
++ /* Turn on the transceiver. */
++ activate_xcvr(dev);
+
+ {
+ char *ram_split[] = {"5:3", "3:1", "1:1", "3:5"};
+- union wn3_config config;
++ int i_cfg;
+ EL3WINDOW(3);
+ vp->available_media = inw(ioaddr + Wn3_Options);
+ if ((vp->available_media & 0xff) == 0) /* Broken 3c916 */
+ vp->available_media = 0x40;
+- config.i = inl(ioaddr + Wn3_Config);
+- if (vortex_debug > 1)
+- printk(KERN_DEBUG " Internal config register is %4.4x, "
+- "transceivers %#x.\n", config.i, inw(ioaddr + Wn3_Options));
+- printk(KERN_INFO " %dK %s-wide RAM %s Rx:Tx split, %s%s interface.\n",
+- 8 << config.u.ram_size,
+- config.u.ram_width ? "word" : "byte",
+- ram_split[config.u.ram_split],
+- config.u.autoselect ? "autoselect/" : "",
+- config.u.xcvr > XCVR_ExtMII ? "<invalid transceiver>" :
+- media_tbl[config.u.xcvr].name);
+- vp->default_media = config.u.xcvr;
+- vp->autoselect = config.u.autoselect;
++ i_cfg = inl(ioaddr + Wn3_Config); /* Internal Configuration */
++ vp->default_media = (i_cfg >> 20) & 15;
++ if (vp->msg_level & NETIF_MSG_LINK)
++ printk(KERN_DEBUG " Internal config register is %8.8x, "
++ "transceivers %#x.\n", i_cfg, inw(ioaddr + Wn3_Options));
++ printk(KERN_INFO " %dK buffer %s Rx:Tx split, %s%s interface.\n",
++ 8 << (i_cfg & 7),
++ ram_split[(i_cfg >> 16) & 3],
++ i_cfg & 0x01000000 ? "autoselect/" : "",
++ vp->default_media > XCVR_ExtMII ? "<invalid transceiver>" :
++ media_tbl[vp->default_media].name);
++ vp->autoselect = i_cfg & 0x01000000 ? 1 : 0;
+ }
+
+ if (vp->media_override != 7) {
+@@ -933,16 +998,17 @@
+ } else
+ dev->if_port = vp->default_media;
+
+- if (dev->if_port == XCVR_MII || dev->if_port == XCVR_NWAY) {
++ if ((vp->available_media & 0x41) || (drv_flags & HAS_NWAY) ||
++ dev->if_port == XCVR_MII || dev->if_port == XCVR_NWAY) {
+ int phy, phy_idx = 0;
+ EL3WINDOW(4);
+ mii_preamble_required++;
+- mii_preamble_required++;
++ mdio_sync(ioaddr, 32);
+ mdio_read(ioaddr, 24, 1);
+ for (phy = 1; phy <= 32 && phy_idx < sizeof(vp->phys); phy++) {
+ int mii_status, phyx = phy & 0x1f;
+ mii_status = mdio_read(ioaddr, phyx, 1);
+- if (mii_status && mii_status != 0xffff) {
++ if ((mii_status & 0xf800) && mii_status != 0xffff) {
+ vp->phys[phy_idx++] = phyx;
+ printk(KERN_INFO " MII transceiver found at address %d,"
+ " status %4x.\n", phyx, mii_status);
+@@ -955,6 +1021,12 @@
+ printk(KERN_WARNING" ***WARNING*** No MII transceivers found!\n");
+ vp->phys[0] = 24;
+ } else {
++ if (mii_preamble_required == 0 &&
++ mdio_read(ioaddr, vp->phys[0], 1) == 0) {
++ printk(KERN_INFO "%s: MII transceiver has preamble bug.\n",
++ dev->name);
++ mii_preamble_required = 1;
++ }
+ vp->advertising = mdio_read(ioaddr, vp->phys[0], 4);
+ if (vp->full_duplex) {
+ /* Only advertise the FD media types. */
+@@ -962,14 +1034,14 @@
+ mdio_write(ioaddr, vp->phys[0], 4, vp->advertising);
+ }
+ }
++ } else {
++ /* We will emulate MII management. */
++ vp->phys[0] = 32;
+ }
+
+- if (vp->capabilities & CapPwrMgmt)
+- acpi_set_WOL(dev);
+-
+ if (vp->capabilities & CapBusMaster) {
+ vp->full_bus_master_tx = 1;
+- printk(KERN_INFO" Enabling bus-master transmits and %s receives.\n",
++ printk(KERN_INFO" Using bus-master transmits and %s receives.\n",
+ (vp->info2 & 1) ? "early" : "whole-frame" );
+ vp->full_bus_master_rx = (vp->info2 & 1) ? 1 : 2;
+ }
+@@ -989,29 +1061,27 @@
+ }
+
+
+-static int
+-vortex_open(struct device *dev)
++static int vortex_open(struct net_device *dev)
+ {
+- long ioaddr = dev->base_addr;
+ struct vortex_private *vp = (struct vortex_private *)dev->priv;
+- union wn3_config config;
++ long ioaddr = dev->base_addr;
+ int i;
+
+- /* Should be if(HAS_ACPI) */
+- acpi_wake(vp->pci_bus, vp->pci_devfn);
++ MOD_INC_USE_COUNT;
+
+- /* Before initializing select the active media port. */
+- EL3WINDOW(3);
+- config.i = inl(ioaddr + Wn3_Config);
++ acpi_wake(vp->pci_dev);
++ vp->window_lock = SPIN_LOCK_UNLOCKED;
++ activate_xcvr(dev);
+
++ /* Before initializing select the active media port. */
+ if (vp->media_override != 7) {
+- if (vortex_debug > 1)
++ if (vp->msg_level & NETIF_MSG_LINK)
+ printk(KERN_INFO "%s: Media override to transceiver %d (%s).\n",
+ dev->name, vp->media_override,
+ media_tbl[vp->media_override].name);
+ dev->if_port = vp->media_override;
+ } else if (vp->autoselect) {
+- if (pci_tbl[vp->chip_id].drv_flags & HAS_NWAY)
++ if (vp->drv_flags & HAS_NWAY)
+ dev->if_port = XCVR_NWAY;
+ else {
+ /* Find first available media type, starting with 100baseTx. */
+@@ -1022,88 +1092,39 @@
+ } else
+ dev->if_port = vp->default_media;
+
+- init_timer(&vp->timer);
+- vp->timer.expires = RUN_AT(media_tbl[dev->if_port].wait);
+- vp->timer.data = (unsigned long)dev;
+- vp->timer.function = &vortex_timer; /* timer handler */
+- add_timer(&vp->timer);
+-
+- if (vortex_debug > 1)
+- printk(KERN_DEBUG "%s: Initial media type %s.\n",
+- dev->name, media_tbl[dev->if_port].name);
+-
+- vp->full_duplex = vp->force_fd;
+- config.u.xcvr = dev->if_port;
+- if ( ! (pci_tbl[vp->chip_id].drv_flags & HAS_NWAY))
+- outl(config.i, ioaddr + Wn3_Config);
+-
+- if (dev->if_port == XCVR_MII || dev->if_port == XCVR_NWAY) {
+- int mii_reg1, mii_reg5;
+- EL3WINDOW(4);
+- /* Read BMSR (reg1) only to clear old status. */
+- mii_reg1 = mdio_read(ioaddr, vp->phys[0], 1);
+- mii_reg5 = mdio_read(ioaddr, vp->phys[0], 5);
+- if (mii_reg5 == 0xffff || mii_reg5 == 0x0000)
+- ; /* No MII device or no link partner report */
+- else if ((mii_reg5 & 0x0100) != 0 /* 100baseTx-FD */
+- || (mii_reg5 & 0x00C0) == 0x0040) /* 10T-FD, but not 100-HD */
+- vp->full_duplex = 1;
+- if (vortex_debug > 1)
+- printk(KERN_INFO "%s: MII #%d status %4.4x, link partner capability %4.4x,"
+- " setting %s-duplex.\n", dev->name, vp->phys[0],
+- mii_reg1, mii_reg5, vp->full_duplex ? "full" : "half");
+- EL3WINDOW(3);
+- }
+-
+- /* Set the full-duplex bit. */
+- outb(((vp->info1 & 0x8000) || vp->full_duplex ? 0x20 : 0) |
+- (dev->mtu > 1500 ? 0x40 : 0), ioaddr + Wn3_MAC_Ctrl);
+-
+- if (vortex_debug > 1) {
+- printk(KERN_DEBUG "%s: vortex_open() InternalConfig %8.8x.\n",
+- dev->name, config.i);
+- }
++ if (! vp->medialock)
++ vp->full_duplex = 0;
+
+- outw(TxReset, ioaddr + EL3_CMD);
+- for (i = 2000; i >= 0 ; i--)
+- if ( ! (inw(ioaddr + EL3_STATUS) & CmdInProgress))
+- break;
++ vp->status_enable = SetStatusEnb | HostError|IntReq|StatsFull|TxComplete|
++ (vp->full_bus_master_tx ? DownComplete : TxAvailable) |
++ (vp->full_bus_master_rx ? UpComplete : RxComplete) |
++ (vp->bus_master ? DMADone : 0);
++ vp->intr_enable = SetIntrEnb | IntLatch | TxAvailable | RxComplete |
++ StatsFull | HostError | TxComplete | IntReq
++ | (vp->bus_master ? DMADone : 0) | UpComplete | DownComplete;
+
+- outw(RxReset, ioaddr + EL3_CMD);
+- /* Wait a few ticks for the RxReset command to complete. */
+- for (i = 2000; i >= 0 ; i--)
+- if ( ! (inw(ioaddr + EL3_STATUS) & CmdInProgress))
+- break;
++ if (vp->msg_level & NETIF_MSG_LINK)
++ printk(KERN_DEBUG "%s: Initial media type %s %s-duplex.\n",
++ dev->name, media_tbl[dev->if_port].name,
++ vp->full_duplex ? "full":"half");
+
+- outw(SetStatusEnb | 0x00, ioaddr + EL3_CMD);
++ set_media_type(dev);
++ start_operation(dev);
+
+ /* Use the now-standard shared IRQ implementation. */
+ if (request_irq(dev->irq, &vortex_interrupt, SA_SHIRQ, dev->name, dev)) {
++ MOD_DEC_USE_COUNT;
+ return -EAGAIN;
+ }
+
+- if (vortex_debug > 1) {
++ spin_lock(&vp->window_lock);
++
++ if (vp->msg_level & NETIF_MSG_IFUP) {
+ EL3WINDOW(4);
+ printk(KERN_DEBUG "%s: vortex_open() irq %d media status %4.4x.\n",
+ dev->name, dev->irq, inw(ioaddr + Wn4_Media));
+ }
+
+- /* Set the station address and mask in window 2 each time opened. */
+- EL3WINDOW(2);
+- for (i = 0; i < 6; i++)
+- outb(dev->dev_addr[i], ioaddr + i);
+- for (; i < 12; i+=2)
+- outw(0, ioaddr + i);
+-
+- if (dev->if_port == XCVR_10base2)
+- /* Start the thinnet transceiver. We should really wait 50ms...*/
+- outw(StartCoax, ioaddr + EL3_CMD);
+- if (dev->if_port != XCVR_NWAY) {
+- EL3WINDOW(4);
+- outw((inw(ioaddr + Wn4_Media) & ~(Media_10TP|Media_SQE)) |
+- media_tbl[dev->if_port].media_bits, ioaddr + Wn4_Media);
+- }
+-
+ /* Switch to the stats window, and clear all stats by reading. */
+ outw(StatsDisable, ioaddr + EL3_CMD);
+ EL3WINDOW(6);
+@@ -1119,63 +1140,226 @@
+
+ /* Switch to register set 7 for normal use. */
+ EL3WINDOW(7);
++#if defined(CONFIG_VLAN)
++ /* If this value is set no MTU adjustment is needed for 802.1Q. */
++ outw(0x8100, ioaddr + Wn7_VLAN_EtherType);
++#endif
++ spin_unlock(&vp->window_lock);
+
+ if (vp->full_bus_master_rx) { /* Boomerang bus master. */
+ vp->cur_rx = vp->dirty_rx = 0;
++ /* Use 1518/+18 if the CRC is transferred. */
++ vp->rx_buf_sz = dev->mtu + 14;
++ if (vp->rx_buf_sz < PKT_BUF_SZ)
++ vp->rx_buf_sz = PKT_BUF_SZ;
++
+ /* Initialize the RxEarly register as recommended. */
+ outw(SetRxThreshold + (1536>>2), ioaddr + EL3_CMD);
+ outl(0x0020, ioaddr + PktStatus);
+- if (vortex_debug > 2)
+- printk(KERN_DEBUG "%s: Filling in the Rx ring.\n", dev->name);
+ for (i = 0; i < RX_RING_SIZE; i++) {
+- struct sk_buff *skb;
+- vp->rx_ring[i].next = cpu_to_le32(virt_to_bus(&vp->rx_ring[i+1]));
+- vp->rx_ring[i].status = 0; /* Clear complete bit. */
+- vp->rx_ring[i].length = cpu_to_le32(PKT_BUF_SZ | LAST_FRAG);
+- skb = dev_alloc_skb(PKT_BUF_SZ);
++ vp->rx_ring[i].length = cpu_to_le32(vp->rx_buf_sz | LAST_FRAG);
++ vp->rx_ring[i].status = 0;
++ vp->rx_ring[i].next = virt_to_le32desc(&vp->rx_ring[i+1]);
++ vp->rx_skbuff[i] = 0;
++ }
++ /* Wrap the ring. */
++ vp->rx_head_desc = &vp->rx_ring[0];
++ vp->rx_ring[i-1].next = virt_to_le32desc(&vp->rx_ring[0]);
++
++ for (i = 0; i < RX_RING_SIZE; i++) {
++ struct sk_buff *skb = dev_alloc_skb(vp->rx_buf_sz);
+ vp->rx_skbuff[i] = skb;
+ if (skb == NULL)
+ break; /* Bad news! */
+ skb->dev = dev; /* Mark as being used by this device. */
+-#if LINUX_VERSION_CODE >= 0x10300
+ skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
+- vp->rx_ring[i].addr = cpu_to_le32(virt_to_bus(skb->tail));
+-#else
+- vp->rx_ring[i].addr = virt_to_bus(skb->data);
+-#endif
++ vp->rx_ring[i].addr = virt_to_le32desc(skb->tail);
+ }
+- /* Wrap the ring. */
+- vp->rx_ring[i-1].next = cpu_to_le32(virt_to_bus(&vp->rx_ring[0]));
+- outl(virt_to_bus(&vp->rx_ring[0]), ioaddr + UpListPtr);
++ outl(virt_to_bus(vp->rx_head_desc), ioaddr + UpListPtr);
+ }
+ if (vp->full_bus_master_tx) { /* Boomerang bus master Tx. */
+ dev->hard_start_xmit = &boomerang_start_xmit;
+ vp->cur_tx = vp->dirty_tx = 0;
+- outb(PKT_BUF_SZ>>8, ioaddr + TxFreeThreshold); /* Room for a packet. */
++ vp->tx_desc_tail = &vp->tx_ring[TX_RING_SIZE - 1];
++ if (vp->drv_flags & IS_BOOMERANG) {
++ /* Room for a packet, to avoid long DownStall delays. */
++ outb(PKT_BUF_SZ>>8, ioaddr + TxFreeThreshold);
++ } else if (vp->drv_flags & HAS_V2_TX)
++ outb(20, ioaddr + DownPollRate);
++
+ /* Clear the Tx ring. */
+ for (i = 0; i < TX_RING_SIZE; i++)
+ vp->tx_skbuff[i] = 0;
+ outl(0, ioaddr + DownListPtr);
++ vp->tx_full = 0;
++ vp->restart_tx = 1;
+ }
+- /* Set reciever mode: presumably accept b-case and phys addr only. */
++ /* The multicast filter is an ill-considered, write-only design.
++ The semantics are not documented, so we assume but do not rely
++ on the table being cleared with an RxReset.
++ Here we do an explicit clear of the largest known table.
++ */
++ if (vp->drv_flags & HAS_V2_TX)
++ for (i = 0; i < 0x100; i++)
++ outw(SetFilterBit | i, ioaddr + EL3_CMD);
++ memset(vp->mc_filter, 0, sizeof vp->mc_filter);
++
++ /* Set receiver mode: presumably accept b-case and phys addr only. */
++ vp->rx_mode = 0;
+ set_rx_mode(dev);
+- outw(StatsEnable, ioaddr + EL3_CMD); /* Turn on statistics. */
+
+- vp->in_interrupt = 0;
+- dev->tbusy = 0;
+- dev->interrupt = 0;
+- dev->start = 1;
++ start_operation1(dev);
++
++ init_timer(&vp->timer);
++ vp->timer.expires = jiffies + media_tbl[dev->if_port].wait;
++ vp->timer.data = (unsigned long)dev;
++ vp->timer.function = &vortex_timer; /* timer handler */
++ add_timer(&vp->timer);
++
++ return 0;
++}
++
++static void set_media_type(struct net_device *dev)
++{
++ struct vortex_private *vp = (struct vortex_private *)dev->priv;
++ long ioaddr = dev->base_addr;
++ int i_cfg;
++
++ EL3WINDOW(3);
++ i_cfg = inl(ioaddr + Wn3_Config);
++ i_cfg &= ~0x00f00000;
++ if (vp->drv_flags & HAS_NWAY)
++ outl(i_cfg | 0x00800000, ioaddr + Wn3_Config);
++ else
++ outl(i_cfg | (dev->if_port << 20), ioaddr + Wn3_Config);
++
++ if (dev->if_port == XCVR_MII || dev->if_port == XCVR_NWAY) {
++ int mii_reg1, mii_reg5;
++ EL3WINDOW(4);
++ /* Read BMSR (reg1) only to clear old status. */
++ mii_reg1 = mdio_read(ioaddr, vp->phys[0], 1);
++ mii_reg5 = mdio_read(ioaddr, vp->phys[0], 5);
++ if (mii_reg5 == 0xffff || mii_reg5 == 0x0000)
++ ; /* No MII device or no link partner report */
++ else if ((mii_reg5 & 0x0100) != 0 /* 100baseTx-FD */
++ || (mii_reg5 & 0x00C0) == 0x0040) /* 10T-FD, but not 100-HD */
++ vp->full_duplex = 1;
++ if (vp->msg_level & NETIF_MSG_LINK)
++ printk(KERN_INFO "%s: MII #%d status %4.4x, link partner capability %4.4x,"
++ " setting %s-duplex.\n", dev->name, vp->phys[0],
++ mii_reg1, mii_reg5, vp->full_duplex ? "full" : "half");
++ EL3WINDOW(3);
++ }
++ if (dev->if_port == XCVR_10base2)
++ /* Start the thinnet transceiver. We should really wait 50ms...*/
++ outw(StartCoax, ioaddr + EL3_CMD);
++ EL3WINDOW(4);
++ if (dev->if_port != XCVR_NWAY) {
++ outw((inw(ioaddr + Wn4_Media) & ~(Media_10TP|Media_SQE)) |
++ media_tbl[dev->if_port].media_bits, ioaddr + Wn4_Media);
++ }
++ /* Do we require link beat to transmit? */
++ if (vp->info1 & 0x4000)
++ outw(inw(ioaddr + Wn4_Media) & ~Media_Lnk, ioaddr + Wn4_Media);
++
++ /* Set the full-duplex and oversized frame bits. */
++ EL3WINDOW(3);
++
++ vp->wn3_mac_ctrl = vp->full_duplex ? 0x0120 : 0;
++ if (dev->mtu > 1500)
++ vp->wn3_mac_ctrl |= (dev->mtu == 1504 ? 0x0400 : 0x0040);
++ outb(vp->wn3_mac_ctrl, ioaddr + Wn3_MAC_Ctrl);
++
++ if (vp->drv_flags & HAS_V2_TX)
++ outw(dev->mtu + 14, ioaddr + Wn3_MaxPktSize);
++}
++
++static void activate_xcvr(struct net_device *dev)
++{
++ struct vortex_private *vp = (struct vortex_private *)dev->priv;
++ long ioaddr = dev->base_addr;
++ int reset_opts;
++
++ /* Correct some magic bits. */
++ EL3WINDOW(2);
++ reset_opts = inw(ioaddr + Wn2_ResetOptions);
++ if (vp->drv_flags & INVERT_LED_PWR)
++ reset_opts |= 0x0010;
++ if (vp->drv_flags & MII_XCVR_PWR)
++ reset_opts |= 0x4000;
++ outw(reset_opts, ioaddr + Wn2_ResetOptions);
++ if (vp->drv_flags & WN0_XCVR_PWR) {
++ EL3WINDOW(0);
++ outw(0x0900, ioaddr);
++ }
++}
+
++static void start_operation(struct net_device *dev)
++{
++ struct vortex_private *vp = (struct vortex_private *)dev->priv;
++ long ioaddr = dev->base_addr;
++ int i;
++
++ outw(TxReset, ioaddr + EL3_CMD);
++ for (i = 2000; i >= 0 ; i--)
++ if ( ! (inw(ioaddr + EL3_STATUS) & CmdInProgress))
++ break;
++
++ outw(RxReset | 0x04, ioaddr + EL3_CMD);
++ /* Assume this cleared the filter. */
++ memset(vp->mc_filter, 0, sizeof vp->mc_filter);
++
++ /* Wait a few ticks for the RxReset command to complete. */
++ for (i = 0; i < 200000; i++)
++ if ( ! (inw(ioaddr + EL3_STATUS) & CmdInProgress))
++ break;
++ if (i >= 200 && (vp->msg_level & NETIF_MSG_DRV))
++ printk(KERN_DEBUG "%s: Rx Reset took an unexpectedly long time"
++ " to finish, %d ticks.\n",
++ dev->name, i);
++
++ outw(SetStatusEnb | 0x00, ioaddr + EL3_CMD);
++ /* Handle VLANs and jumbo frames. */
++ if ((vp->drv_flags & HAS_V2_TX) && dev->mtu > 1500) {
++ EL3WINDOW(3);
++ outw(dev->mtu + 14, ioaddr + Wn3_MaxPktSize);
++ if (dev->mtu > 2033) {
++ outl(inl(ioaddr + Wn3_Config) | 0x0000C000, ioaddr + Wn3_Config);
++ outw(SetTxStart + (2000>>2), ioaddr + EL3_CMD);
++ }
++ }
++ /* Reset the station address and mask. */
++ EL3WINDOW(2);
++ for (i = 0; i < 6; i++)
++ outb(dev->dev_addr[i], ioaddr + i);
++ for (; i < 12; i+=2)
++ outw(0, ioaddr + i);
++ if (vp->drv_flags & IS_BOOMERANG) {
++ /* Room for a packet, to avoid long DownStall delays. */
++ outb(PKT_BUF_SZ>>8, ioaddr + TxFreeThreshold);
++ } else if (vp->drv_flags & HAS_V2_TX) {
++ outb(20, ioaddr + DownPollRate);
++ vp->restart_tx = 1;
++ }
++}
++
++static void start_operation1(struct net_device *dev)
++{
++ struct vortex_private *vp = (struct vortex_private *)dev->priv;
++ long ioaddr = dev->base_addr;
++
++ if (vp->full_bus_master_rx) { /* post-Vortex bus master. */
++ /* Initialize the RxEarly register as recommended. */
++ outw(SetRxThreshold + (1536>>2), ioaddr + EL3_CMD);
++ outl(0x0020, ioaddr + PktStatus);
++ outl(virt_to_bus(&vp->rx_ring[vp->cur_rx % RX_RING_SIZE]),
++ ioaddr + UpListPtr);
++ }
++
++ outw(StatsEnable, ioaddr + EL3_CMD); /* Turn on statistics. */
+ outw(RxEnable, ioaddr + EL3_CMD); /* Enable the receiver. */
+ outw(TxEnable, ioaddr + EL3_CMD); /* Enable transmitter. */
+ /* Allow status bits to be seen. */
+- vp->status_enable = SetStatusEnb | HostError|IntReq|StatsFull|TxComplete|
+- (vp->full_bus_master_tx ? DownComplete : TxAvailable) |
+- (vp->full_bus_master_rx ? UpComplete : RxComplete) |
+- (vp->bus_master ? DMADone : 0);
+- vp->intr_enable = SetIntrEnb | IntLatch | TxAvailable | RxComplete |
+- StatsFull | HostError | TxComplete | IntReq
+- | (vp->bus_master ? DMADone : 0) | UpComplete | DownComplete;
+ outw(vp->status_enable, ioaddr + EL3_CMD);
+ /* Ack all pending events, and set active indicator mask. */
+ outw(AckIntr | IntLatch | TxAvailable | RxEarly | IntReq,
+@@ -1183,24 +1367,47 @@
+ outw(vp->intr_enable, ioaddr + EL3_CMD);
+ if (vp->cb_fn_base) /* The PCMCIA people are idiots. */
+ writel(0x8000, vp->cb_fn_base + 4);
+-
+- MOD_INC_USE_COUNT;
+-
+- return 0;
++ netif_start_tx_queue(dev);
+ }
+
+ static void vortex_timer(unsigned long data)
+ {
+- struct device *dev = (struct device *)data;
++ struct net_device *dev = (struct net_device *)data;
+ struct vortex_private *vp = (struct vortex_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+ int next_tick = 60*HZ;
+ int ok = 0;
+- int media_status, mii_status, old_window;
++ int media_status, old_window;
+
+- if (vortex_debug > 1)
+- printk(KERN_DEBUG "%s: Media selection timer tick happened, %s.\n",
+- dev->name, media_tbl[dev->if_port].name);
++ if (vp->msg_level & NETIF_MSG_TIMER)
++ printk(KERN_DEBUG "%s: Media selection timer tick happened, "
++ "%s %s duplex.\n",
++ dev->name, media_tbl[dev->if_port].name,
++ vp->full_duplex ? "full" : "half");
++
++ /* This only works with bus-master (non-3c590) chips. */
++ if (vp->cur_tx - vp->dirty_tx > 1 &&
++ (jiffies - dev->trans_start) > TX_TIMEOUT) {
++ /* Check for blocked interrupts. */
++ if (inw(ioaddr + EL3_STATUS) & IntLatch) {
++ /* We have a blocked IRQ line. This should never happen, but
++ we recover as best we can.*/
++ if ( ! vp->polling) {
++ if (jiffies - vp->last_reset > 10*HZ) {
++ printk(KERN_ERR "%s: IRQ %d is physically blocked! "
++ "Failing back to low-rate polling.\n",
++ dev->name, dev->irq);
++ vp->last_reset = jiffies;
++ }
++ vp->polling = 1;
++ }
++ vortex_interrupt(dev->irq, dev, 0);
++ next_tick = jiffies + 2;
++ } else {
++ vortex_tx_timeout(dev);
++ vp->last_reset = jiffies;
++ }
++ }
+
+ disable_irq(dev->irq);
+ old_window = inw(ioaddr + EL3_CMD) >> 13;
+@@ -1210,59 +1417,66 @@
+ case XCVR_10baseT: case XCVR_100baseTx: case XCVR_100baseFx:
+ if (media_status & Media_LnkBeat) {
+ ok = 1;
+- if (vortex_debug > 1)
++ if (vp->msg_level & NETIF_MSG_LINK)
+ printk(KERN_DEBUG "%s: Media %s has link beat, %x.\n",
+ dev->name, media_tbl[dev->if_port].name, media_status);
+- } else if (vortex_debug > 1)
++ } else if (vp->msg_level & NETIF_MSG_LINK)
+ printk(KERN_DEBUG "%s: Media %s is has no link beat, %x.\n",
+ dev->name, media_tbl[dev->if_port].name, media_status);
+ break;
+- case XCVR_MII: case XCVR_NWAY:
+- mii_status = mdio_read(ioaddr, vp->phys[0], 1);
+- ok = 1;
+- if (debug > 1)
+- printk(KERN_DEBUG "%s: MII transceiver has status %4.4x.\n",
+- dev->name, mii_status);
+- if (mii_status & 0x0004) {
+- int mii_reg5 = mdio_read(ioaddr, vp->phys[0], 5);
+- if (! vp->force_fd && mii_reg5 != 0xffff) {
+- int duplex = (mii_reg5&0x0100) ||
+- (mii_reg5 & 0x01C0) == 0x0040;
+- if (vp->full_duplex != duplex) {
+- vp->full_duplex = duplex;
+- printk(KERN_INFO "%s: Setting %s-duplex based on MII "
+- "#%d link partner capability of %4.4x.\n",
+- dev->name, vp->full_duplex ? "full" : "half",
+- vp->phys[0], mii_reg5);
+- /* Set the full-duplex bit. */
+- outb((vp->full_duplex ? 0x20 : 0) |
+- (dev->mtu > 1500 ? 0x40 : 0),
+- ioaddr + Wn3_MAC_Ctrl);
+- }
+- next_tick = 60*HZ;
+- }
+- }
+- break;
+- default: /* Other media types handled by Tx timeouts. */
+- if (vortex_debug > 1)
+- printk(KERN_DEBUG "%s: Media %s is has no indication, %x.\n",
+- dev->name, media_tbl[dev->if_port].name, media_status);
++ case XCVR_MII: case XCVR_NWAY: {
++ int mii_status = mdio_read(ioaddr, vp->phys[0], 1);
++ int mii_reg5, negotiated, duplex;
++ ok = 1;
++ if (vp->msg_level & NETIF_MSG_LINK)
++ printk(KERN_DEBUG "%s: MII transceiver has status %4.4x.\n",
++ dev->name, mii_status);
++ if (vp->medialock)
++ break;
++ if ((mii_status & 0x0004) == 0) {
++ next_tick = 5*HZ;
++ break;
++ }
++ mii_reg5 = mdio_read(ioaddr, vp->phys[0], 5);
++ negotiated = mii_reg5 & vp->advertising;
++ duplex = (negotiated & 0x0100) || (negotiated & 0x03C0) == 0x0040;
++ if (mii_reg5 == 0xffff || vp->full_duplex == duplex)
++ break;
++ if (vp->msg_level & NETIF_MSG_LINK)
++ printk(KERN_INFO "%s: Setting %s-duplex based on "
++ "MII #%d link partner capability of %4.4x.\n",
++ dev->name, vp->full_duplex ? "full" : "half",
++ vp->phys[0], mii_reg5);
++ vp->full_duplex = duplex;
++ /* Set the full-duplex bit. */
++ EL3WINDOW(3);
++ if (duplex)
++ vp->wn3_mac_ctrl |= 0x120;
++ else
++ vp->wn3_mac_ctrl &= ~0x120;
++ outb(vp->wn3_mac_ctrl, ioaddr + Wn3_MAC_Ctrl);
++ break;
++ }
++ default: /* Other media types handled by Tx timeouts. */
++ if (vp->msg_level & NETIF_MSG_LINK)
++ printk(KERN_DEBUG "%s: Media %s is has no indication, %x.\n",
++ dev->name, media_tbl[dev->if_port].name, media_status);
+ ok = 1;
+ }
+ if ( ! ok) {
+- union wn3_config config;
++ int i_cfg;
+
+ do {
+ dev->if_port = media_tbl[dev->if_port].next;
+ } while ( ! (vp->available_media & media_tbl[dev->if_port].mask));
+ if (dev->if_port == XCVR_Default) { /* Go back to default. */
+ dev->if_port = vp->default_media;
+- if (vortex_debug > 1)
++ if (vp->msg_level & NETIF_MSG_LINK)
+ printk(KERN_DEBUG "%s: Media selection failing, using default "
+ "%s port.\n",
+ dev->name, media_tbl[dev->if_port].name);
+ } else {
+- if (vortex_debug > 1)
++ if (vp->msg_level & NETIF_MSG_LINK)
+ printk(KERN_DEBUG "%s: Media selection failed, now trying "
+ "%s port.\n",
+ dev->name, media_tbl[dev->if_port].name);
+@@ -1272,54 +1486,60 @@
+ media_tbl[dev->if_port].media_bits, ioaddr + Wn4_Media);
+
+ EL3WINDOW(3);
+- config.i = inl(ioaddr + Wn3_Config);
+- config.u.xcvr = dev->if_port;
+- outl(config.i, ioaddr + Wn3_Config);
++ i_cfg = inl(ioaddr + Wn3_Config);
++ i_cfg &= ~0x00f00000;
++ i_cfg |= (dev->if_port << 20);
++ outl(i_cfg, ioaddr + Wn3_Config);
+
+ outw(dev->if_port == XCVR_10base2 ? StartCoax : StopCoax,
+ ioaddr + EL3_CMD);
+ }
+ EL3WINDOW(old_window);
+ enable_irq(dev->irq);
++ if (vp->restore_intr_mask)
++ outw(FakeIntr, ioaddr + EL3_CMD);
+
+- if (vortex_debug > 2)
++ if (vp->msg_level & NETIF_MSG_TIMER)
+ printk(KERN_DEBUG "%s: Media selection timer finished, %s.\n",
+ dev->name, media_tbl[dev->if_port].name);
+
+- vp->timer.expires = RUN_AT(next_tick);
++ vp->timer.expires = jiffies + next_tick;
+ add_timer(&vp->timer);
+ return;
+ }
+
+-static void vortex_tx_timeout(struct device *dev)
++static void vortex_tx_timeout(struct net_device *dev)
+ {
+ struct vortex_private *vp = (struct vortex_private *)dev->priv;
+ long ioaddr = dev->base_addr;
++ int tx_status = inb(ioaddr + TxStatus);
++ int intr_status = inw(ioaddr + EL3_STATUS);
+ int j;
+
+ printk(KERN_ERR "%s: transmit timed out, tx_status %2.2x status %4.4x.\n",
+- dev->name, inb(ioaddr + TxStatus),
+- inw(ioaddr + EL3_STATUS));
++ dev->name, tx_status, intr_status);
+ /* Slight code bloat to be user friendly. */
+- if ((inb(ioaddr + TxStatus) & 0x88) == 0x88)
++ if ((tx_status & 0x88) == 0x88)
+ printk(KERN_ERR "%s: Transmitter encountered 16 collisions --"
+ " network cable problem?\n", dev->name);
+- if (inw(ioaddr + EL3_STATUS) & IntLatch) {
++ if (intr_status & IntLatch) {
+ printk(KERN_ERR "%s: Interrupt posted but not delivered --"
+ " IRQ blocked by another device?\n", dev->name);
+- /* Bad idea here.. but we might as well handle a few events. */
++ /* Race condition possible, but we handle a few events. */
+ vortex_interrupt(dev->irq, dev, 0);
+ }
+
+ #if ! defined(final_version) && LINUX_VERSION_CODE >= 0x10300
+ if (vp->full_bus_master_tx) {
+ int i;
+- printk(KERN_DEBUG " Flags; bus-master %d, full %d; dirty %d "
+- "current %d.\n",
+- vp->full_bus_master_tx, vp->tx_full, vp->dirty_tx, vp->cur_tx);
+- printk(KERN_DEBUG " Transmit list %8.8x vs. %p.\n",
+- inl(ioaddr + DownListPtr),
+- &vp->tx_ring[vp->dirty_tx % TX_RING_SIZE]);
++ printk(KERN_DEBUG " Flags: bus-master %d full %d dirty %d "
++ "current %d restart_tx %d.\n",
++ vp->full_bus_master_tx, vp->tx_full, vp->dirty_tx, vp->cur_tx,
++ vp->restart_tx);
++ printk(KERN_DEBUG " Transmit list %8.8x vs. %p, packet ID %2.2x.\n",
++ (int)inl(ioaddr + DownListPtr),
++ &vp->tx_ring[vp->dirty_tx % TX_RING_SIZE],
++ inb(ioaddr + TxPktID));
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ printk(KERN_DEBUG " %d: @%p length %8.8x status %8.8x\n", i,
+ &vp->tx_ring[i],
+@@ -1334,26 +1554,40 @@
+ break;
+
+ vp->stats.tx_errors++;
++
+ if (vp->full_bus_master_tx) {
+- if (vortex_debug > 0)
++ if (vp->drv_flags & HAS_V2_TX)
++ outb(20, ioaddr + DownPollRate);
++ if (vp->msg_level & NETIF_MSG_TX_ERR)
+ printk(KERN_DEBUG "%s: Resetting the Tx ring pointer.\n",
+ dev->name);
+ if (vp->cur_tx - vp->dirty_tx > 0 && inl(ioaddr + DownListPtr) == 0)
+ outl(virt_to_bus(&vp->tx_ring[vp->dirty_tx % TX_RING_SIZE]),
+ ioaddr + DownListPtr);
+- if (vp->tx_full && (vp->cur_tx - vp->dirty_tx <= TX_RING_SIZE - 1)) {
++ else
++ vp->restart_tx = 1;
++ if (vp->drv_flags & IS_BOOMERANG) {
++ /* Room for a packet, to avoid long DownStall delays. */
++ outb(PKT_BUF_SZ>>8, ioaddr + TxFreeThreshold);
++ outw(DownUnstall, ioaddr + EL3_CMD);
++ } else {
++ if (dev->mtu > 2033)
++ outw(SetTxStart + (2000>>2), ioaddr + EL3_CMD);
++ }
++
++ if (vp->tx_full && (vp->cur_tx - vp->dirty_tx <= TX_QUEUE_LEN - 1)) {
+ vp->tx_full = 0;
+- clear_bit(0, (void*)&dev->tbusy);
++ netif_unpause_tx_queue(dev);
+ }
+- outb(PKT_BUF_SZ>>8, ioaddr + TxFreeThreshold);
+- outw(DownUnstall, ioaddr + EL3_CMD);
+- } else
++ } else {
++ netif_unpause_tx_queue(dev);
+ vp->stats.tx_dropped++;
+-
++ }
++
+ /* Issue Tx Enable */
+ outw(TxEnable, ioaddr + EL3_CMD);
+ dev->trans_start = jiffies;
+-
++
+ /* Switch to register set 7 for normal use. */
+ EL3WINDOW(7);
+ }
+@@ -1363,7 +1597,7 @@
+ * the cache impact.
+ */
+ static void
+-vortex_error(struct device *dev, int status)
++vortex_error(struct net_device *dev, int status)
+ {
+ struct vortex_private *vp = (struct vortex_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+@@ -1373,8 +1607,7 @@
+ if (status & TxComplete) { /* Really "TxError" for us. */
+ unsigned char tx_status = inb(ioaddr + TxStatus);
+ /* Presumably a tx-timeout. We must merely re-enable. */
+- if (vortex_debug > 2
+- || (tx_status != 0x88 && vortex_debug > 0))
++ if (vp->msg_level & NETIF_MSG_TX_ERR)
+ printk(KERN_DEBUG"%s: Transmit error, Tx status register %2.2x.\n",
+ dev->name, tx_status);
+ if (tx_status & 0x14) vp->stats.tx_fifo_errors++;
+@@ -1382,8 +1615,10 @@
+ outb(0, ioaddr + TxStatus);
+ if (tx_status & 0x30)
+ do_tx_reset = 1;
+- else /* Merely re-enable the transmitter. */
++ else { /* Merely re-enable the transmitter. */
+ outw(TxEnable, ioaddr + EL3_CMD);
++ vp->restart_tx = 1;
++ }
+ }
+ if (status & RxEarly) { /* Rx early is unused. */
+ vortex_rx(dev);
+@@ -1391,7 +1626,7 @@
+ }
+ if (status & StatsFull) { /* Empty statistics. */
+ static int DoneDidThat = 0;
+- if (vortex_debug > 4)
++ if (vp->msg_level & NETIF_MSG_MISC)
+ printk(KERN_DEBUG "%s: Updating stats.\n", dev->name);
+ update_stats(ioaddr, dev);
+ /* HACK: Disable statistics as an interrupt source. */
+@@ -1409,31 +1644,47 @@
+ if (status & IntReq) { /* Restore all interrupt sources. */
+ outw(vp->status_enable, ioaddr + EL3_CMD);
+ outw(vp->intr_enable, ioaddr + EL3_CMD);
++ vp->restore_intr_mask = 0;
+ }
+ if (status & HostError) {
+ u16 fifo_diag;
+ EL3WINDOW(4);
+ fifo_diag = inw(ioaddr + Wn4_FIFODiag);
+- if (vortex_debug > 0)
+- printk(KERN_ERR "%s: Host error, FIFO diagnostic register %4.4x.\n",
+- dev->name, fifo_diag);
++ if (vp->msg_level & NETIF_MSG_DRV)
++ printk(KERN_ERR "%s: Host error, status %x, FIFO diagnostic "
++ "register %4.4x.\n",
++ dev->name, status, fifo_diag);
+ /* Adapter failure requires Tx/Rx reset and reinit. */
+ if (vp->full_bus_master_tx) {
++ int bus_status = inl(ioaddr + PktStatus);
++ /* 0x80000000 PCI master abort. */
++ /* 0x40000000 PCI target abort. */
+ outw(TotalReset | 0xff, ioaddr + EL3_CMD);
+ for (i = 2000; i >= 0 ; i--)
+ if ( ! (inw(ioaddr + EL3_STATUS) & CmdInProgress))
+ break;
++ if (vp->msg_level & NETIF_MSG_DRV)
++ printk(KERN_ERR "%s: PCI bus error, bus status %8.8x, reset "
++ "had %d tick left.\n",
++ dev->name, bus_status, i);
+ /* Re-enable the receiver. */
+ outw(RxEnable, ioaddr + EL3_CMD);
+ outw(TxEnable, ioaddr + EL3_CMD);
++ vp->restart_tx = 1;
+ } else if (fifo_diag & 0x0400)
+ do_tx_reset = 1;
+ if (fifo_diag & 0x3000) {
+- outw(RxReset, ioaddr + EL3_CMD);
+- for (i = 2000; i >= 0 ; i--)
++ outw(RxReset | 7, ioaddr + EL3_CMD);
++ for (i = 200000; i >= 0 ; i--)
+ if ( ! (inw(ioaddr + EL3_STATUS) & CmdInProgress))
+ break;
++ if ((vp->drv_flags & HAS_V2_TX) && dev->mtu > 1500) {
++ EL3WINDOW(3);
++ outw(dev->mtu + 14, ioaddr + Wn3_MaxPktSize);
++ }
+ /* Set the Rx filter to the current state. */
++ memset(vp->mc_filter, 0, sizeof vp->mc_filter);
++ vp->rx_mode = 0;
+ set_rx_mode(dev);
+ outw(RxEnable, ioaddr + EL3_CMD); /* Re-enable the receiver. */
+ outw(AckIntr | HostError, ioaddr + EL3_CMD);
+@@ -1446,19 +1697,23 @@
+ if ( ! (inw(ioaddr + EL3_STATUS) & CmdInProgress))
+ break;
+ outw(TxEnable, ioaddr + EL3_CMD);
++ vp->restart_tx = 1;
+ }
+
+ }
+
+
+ static int
+-vortex_start_xmit(struct sk_buff *skb, struct device *dev)
++vortex_start_xmit(struct sk_buff *skb, struct net_device *dev)
+ {
+ struct vortex_private *vp = (struct vortex_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+
+- if (test_and_set_bit(0, (void*)&dev->tbusy) != 0) {
+- if (jiffies - dev->trans_start >= TX_TIMEOUT)
++ /* Block a timer-based transmit from overlapping. This happens when
++ packets are presumed lost, and we use this to check the Tx status. */
++ if (netif_pause_tx_queue(dev) != 0) {
++ /* This watchdog code is redundant with the media monitor timer. */
++ if (jiffies - dev->trans_start > TX_TIMEOUT)
+ vortex_tx_timeout(dev);
+ return 1;
+ }
+@@ -1471,16 +1726,18 @@
+ outw((skb->len + 3) & ~3, ioaddr + Wn7_MasterLen);
+ vp->tx_skb = skb;
+ outw(StartDMADown, ioaddr + EL3_CMD);
+- /* dev->tbusy will be cleared at the DMADone interrupt. */
++ netif_stop_tx_queue(dev);
++ /* Tx busy will be cleared at the DMADone interrupt. */
+ } else {
+ /* ... and the packet rounded to a doubleword. */
+ outsl(ioaddr + TX_FIFO, skb->data, (skb->len + 3) >> 2);
+- DEV_FREE_SKB(skb);
+- if (inw(ioaddr + TxFree) > 1536) {
+- clear_bit(0, (void*)&dev->tbusy);
+- } else
++ dev_free_skb(skb);
++ if (inw(ioaddr + TxFree) <= 1536) {
+ /* Interrupt us when the FIFO has room for max-sized packet. */
+ outw(SetTxThreshold + (1536>>2), ioaddr + EL3_CMD);
++ netif_stop_tx_queue(dev);
++ } else
++ netif_unpause_tx_queue(dev); /* Typical path */
+ }
+
+ dev->trans_start = jiffies;
+@@ -1492,7 +1749,7 @@
+
+ while (--i > 0 && (tx_status = inb(ioaddr + TxStatus)) > 0) {
+ if (tx_status & 0x3C) { /* A Tx-disabling error occurred. */
+- if (vortex_debug > 2)
++ if (vp->msg_level & NETIF_MSG_TX_ERR)
+ printk(KERN_DEBUG "%s: Tx error, status %2.2x.\n",
+ dev->name, tx_status);
+ if (tx_status & 0x04) vp->stats.tx_fifo_errors++;
+@@ -1505,6 +1762,7 @@
+ break;
+ }
+ outw(TxEnable, ioaddr + EL3_CMD);
++ vp->restart_tx = 1;
+ }
+ outb(0x00, ioaddr + TxStatus); /* Pop the status stack. */
+ }
+@@ -1513,38 +1771,46 @@
+ }
+
+ static int
+-boomerang_start_xmit(struct sk_buff *skb, struct device *dev)
++boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev)
+ {
+ struct vortex_private *vp = (struct vortex_private *)dev->priv;
+ long ioaddr = dev->base_addr;
++ int entry;
++ struct boom_tx_desc *prev_entry;
++ unsigned long flags;
++ int i;
+
+- if (test_and_set_bit(0, (void*)&dev->tbusy) != 0) {
+- if (jiffies - dev->trans_start >= TX_TIMEOUT)
++ if (netif_pause_tx_queue(dev) != 0) {
++ /* This watchdog code is redundant with the media monitor timer. */
++ if (jiffies - dev->trans_start > TX_TIMEOUT)
+ vortex_tx_timeout(dev);
+ return 1;
+- } else {
+- /* Calculate the next Tx descriptor entry. */
+- int entry = vp->cur_tx % TX_RING_SIZE;
+- struct boom_tx_desc *prev_entry =
+- &vp->tx_ring[(vp->cur_tx-1) % TX_RING_SIZE];
+- unsigned long flags;
+- int i;
++ }
+
+- if (vortex_debug > 3)
+- printk(KERN_DEBUG "%s: Trying to send a packet, Tx index %d.\n",
+- dev->name, vp->cur_tx);
+- if (vp->tx_full) {
+- if (vortex_debug >0)
+- printk(KERN_WARNING "%s: Tx Ring full, refusing to send buffer.\n",
+- dev->name);
+- return 1;
+- }
+- vp->tx_skbuff[entry] = skb;
+- vp->tx_ring[entry].next = 0;
+- vp->tx_ring[entry].addr = cpu_to_le32(virt_to_bus(skb->data));
+- vp->tx_ring[entry].length = cpu_to_le32(skb->len | LAST_FRAG);
++ /* Calculate the next Tx descriptor entry. */
++ entry = vp->cur_tx % TX_RING_SIZE;
++ prev_entry = &vp->tx_ring[(vp->cur_tx-1) % TX_RING_SIZE];
++
++ if (vp->msg_level & NETIF_MSG_TX_QUEUED)
++ printk(KERN_DEBUG "%s: Queuing Tx packet, index %d.\n",
++ dev->name, vp->cur_tx);
++ /* Impossible error. */
++ if (vp->tx_full) {
++ printk(KERN_WARNING "%s: Tx Ring full, refusing to send buffer.\n",
++ dev->name);
++ return 1;
++ }
++ vp->tx_skbuff[entry] = skb;
++ vp->tx_ring[entry].next = 0;
++ vp->tx_ring[entry].addr = virt_to_le32desc(skb->data);
++ vp->tx_ring[entry].length = cpu_to_le32(skb->len | LAST_FRAG);
++ if (vp->capabilities & CapNoTxLength)
++ vp->tx_ring[entry].status =
++ cpu_to_le32(TxNoRoundup | TxIntrUploaded | (entry << 2));
++ else
+ vp->tx_ring[entry].status = cpu_to_le32(skb->len | TxIntrUploaded);
+
++ if (vp->drv_flags & IS_BOOMERANG) {
+ save_flags(flags);
+ cli();
+ outw(DownStall, ioaddr + EL3_CMD);
+@@ -1552,66 +1818,64 @@
+ for (i = 600; i >= 0 ; i--)
+ if ( (inw(ioaddr + EL3_STATUS) & CmdInProgress) == 0)
+ break;
+- prev_entry->next = cpu_to_le32(virt_to_bus(&vp->tx_ring[entry]));
++ vp->tx_desc_tail->next = virt_to_le32desc(&vp->tx_ring[entry]);
++ vp->tx_desc_tail = &vp->tx_ring[entry];
+ if (inl(ioaddr + DownListPtr) == 0) {
+ outl(virt_to_bus(&vp->tx_ring[entry]), ioaddr + DownListPtr);
+ queued_packet++;
+ }
+ outw(DownUnstall, ioaddr + EL3_CMD);
+ restore_flags(flags);
+-
+- vp->cur_tx++;
+- if (vp->cur_tx - vp->dirty_tx > TX_RING_SIZE - 1)
+- vp->tx_full = 1;
+- else { /* Clear previous interrupt enable. */
++ } else {
++ vp->tx_desc_tail->next = virt_to_le32desc(&vp->tx_ring[entry]);
++ vp->tx_desc_tail = &vp->tx_ring[entry];
++ if (vp->restart_tx) {
++ outl(virt_to_bus(vp->tx_desc_tail), ioaddr + DownListPtr);
++ vp->restart_tx = 0;
++ queued_packet++;
++ }
++ }
++ vp->cur_tx++;
++ if (vp->cur_tx - vp->dirty_tx >= TX_QUEUE_LEN) {
++ vp->tx_full = 1;
++ /* Check for a just-cleared queue. */
++ if (vp->cur_tx - (volatile unsigned int)vp->dirty_tx
++ < TX_QUEUE_LEN - 2) {
++ vp->tx_full = 0;
++ netif_unpause_tx_queue(dev);
++ } else
++ netif_stop_tx_queue(dev);
++ } else { /* Clear previous interrupt enable. */
+ #if defined(tx_interrupt_mitigation)
+- prev_entry->status &= cpu_to_le32(~TxIntrUploaded);
++ prev_entry->status &= cpu_to_le32(~TxIntrUploaded);
+ #endif
+- clear_bit(0, (void*)&dev->tbusy);
+- }
+- dev->trans_start = jiffies;
+- return 0;
++ netif_unpause_tx_queue(dev); /* Typical path */
+ }
++ dev->trans_start = jiffies;
++ return 0;
+ }
+
+ /* The interrupt handler does all of the Rx thread work and cleans up
+ after the Tx thread. */
+ static void vortex_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+ {
+- struct device *dev = dev_id;
++ struct net_device *dev = dev_id;
+ struct vortex_private *vp = (struct vortex_private *)dev->priv;
+ long ioaddr;
+ int latency, status;
+- int work_done = max_interrupt_work;
++ int work_done = vp->max_interrupt_work;
+
+-#if defined(__i386__)
+- /* A lock to prevent simultaneous entry bug on Intel SMP machines. */
+- if (test_and_set_bit(0, (void*)&dev->interrupt)) {
+- printk(KERN_ERR"%s: SMP simultaneous entry of an interrupt handler.\n",
+- dev->name);
+- dev->interrupt = 0; /* Avoid halting machine. */
+- return;
+- }
+-#else
+- if (dev->interrupt) {
+- printk(KERN_ERR "%s: Re-entering the interrupt handler.\n", dev->name);
+- return;
+- }
+- dev->interrupt = 1;
+-#endif
+-
+- dev->interrupt = 1;
+ ioaddr = dev->base_addr;
+ latency = inb(ioaddr + Timer);
+ status = inw(ioaddr + EL3_STATUS);
+
+ if (status == 0xffff)
+ goto handler_exit;
+- if (vortex_debug > 4)
++ if (vp->msg_level & NETIF_MSG_INTR)
+ printk(KERN_DEBUG "%s: interrupt, status %4.4x, latency %d ticks.\n",
+ dev->name, status, latency);
+ do {
+- if (vortex_debug > 5)
++ if (vp->msg_level & NETIF_MSG_INTR)
+ printk(KERN_DEBUG "%s: In interrupt loop, status %4.4x.\n",
+ dev->name, status);
+ if (status & RxComplete)
+@@ -1622,12 +1886,11 @@
+ }
+
+ if (status & TxAvailable) {
+- if (vortex_debug > 5)
++ if (vp->msg_level & NETIF_MSG_TX_DONE)
+ printk(KERN_DEBUG " TX room bit was handled.\n");
+ /* There's room in the FIFO for a full-sized packet. */
+ outw(AckIntr | TxAvailable, ioaddr + EL3_CMD);
+- clear_bit(0, (void*)&dev->tbusy);
+- mark_bh(NET_BH);
++ netif_resume_tx_queue(dev);
+ }
+
+ if (status & DownComplete) {
+@@ -1636,30 +1899,37 @@
+ outw(AckIntr | DownComplete, ioaddr + EL3_CMD);
+ while (vp->cur_tx - dirty_tx > 0) {
+ int entry = dirty_tx % TX_RING_SIZE;
+- if (inl(ioaddr + DownListPtr) ==
+- virt_to_bus(&vp->tx_ring[entry]))
++ int tx_status = le32_to_cpu(vp->tx_ring[entry].status);
++ if (vp->capabilities & CapNoTxLength) {
++ if ( ! (tx_status & TxDownComplete))
++ break;
++ } else if (inl(ioaddr + DownListPtr) ==
++ virt_to_bus(&vp->tx_ring[entry]))
+ break; /* It still hasn't been processed. */
++ if (vp->msg_level & NETIF_MSG_TX_DONE)
++ printk(KERN_DEBUG "%s: Transmit done, Tx status %8.8x.\n",
++ dev->name, tx_status);
+ if (vp->tx_skbuff[entry]) {
+- DEV_FREE_SKB(vp->tx_skbuff[entry]);
++ dev_free_skb_irq(vp->tx_skbuff[entry]);
+ vp->tx_skbuff[entry] = 0;
+ }
+ /* vp->stats.tx_packets++; Counted below. */
+ dirty_tx++;
+ }
+ vp->dirty_tx = dirty_tx;
+- if (vp->tx_full && (vp->cur_tx - dirty_tx <= TX_RING_SIZE - 1)) {
++ /* 4 entry hysteresis before marking the queue non-full. */
++ if (vp->tx_full && (vp->cur_tx - dirty_tx < TX_QUEUE_LEN - 4)) {
+ vp->tx_full = 0;
+- clear_bit(0, (void*)&dev->tbusy);
+- mark_bh(NET_BH);
++ netif_resume_tx_queue(dev);
+ }
+ }
+ if (status & DMADone) {
+ if (inw(ioaddr + Wn7_MasterStatus) & 0x1000) {
+ outw(0x1000, ioaddr + Wn7_MasterStatus); /* Ack the event. */
+- DEV_FREE_SKB(vp->tx_skb); /* Release the transfered buffer */
++ /* Release the transferred buffer */
++ dev_free_skb_irq(vp->tx_skb);
+ if (inw(ioaddr + TxFree) > 1536) {
+- clear_bit(0, (void*)&dev->tbusy);
+- mark_bh(NET_BH);
++ netif_resume_tx_queue(dev);
+ } else /* Interrupt when FIFO has room for max-sized packet. */
+ outw(SetTxThreshold + (1536>>2), ioaddr + EL3_CMD);
+ }
+@@ -1683,6 +1953,7 @@
+ outw(SetStatusEnb | ((~status) & 0x7FE), ioaddr + EL3_CMD);
+ outw(AckIntr | 0x7FF, ioaddr + EL3_CMD);
+ /* The timer will reenable interrupts. */
++ vp->restore_intr_mask = 1;
+ break;
+ }
+ }
+@@ -1693,32 +1964,27 @@
+
+ } while ((status = inw(ioaddr + EL3_STATUS)) & (IntLatch | RxComplete));
+
+- if (vortex_debug > 4)
++ if (vp->msg_level & NETIF_MSG_INTR)
+ printk(KERN_DEBUG "%s: exiting interrupt, status %4.4x.\n",
+ dev->name, status);
+ handler_exit:
+-#if defined(__i386__)
+- clear_bit(0, (void*)&dev->interrupt);
+-#else
+- dev->interrupt = 0;
+-#endif
+ return;
+ }
+
+-static int vortex_rx(struct device *dev)
++static int vortex_rx(struct net_device *dev)
+ {
+ struct vortex_private *vp = (struct vortex_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+ int i;
+ short rx_status;
+
+- if (vortex_debug > 5)
++ if (vp->msg_level & NETIF_MSG_RX_STATUS)
+ printk(KERN_DEBUG" In rx_packet(), status %4.4x, rx_status %4.4x.\n",
+ inw(ioaddr+EL3_STATUS), inw(ioaddr+RxStatus));
+ while ((rx_status = inw(ioaddr + RxStatus)) > 0) {
+ if (rx_status & 0x4000) { /* Error, update stats. */
+ unsigned char rx_error = inb(ioaddr + RxErrors);
+- if (vortex_debug > 2)
++ if (vp->msg_level & NETIF_MSG_RX_ERR)
+ printk(KERN_DEBUG " Rx error: status %2.2x.\n", rx_error);
+ vp->stats.rx_errors++;
+ if (rx_error & 0x01) vp->stats.rx_over_errors++;
+@@ -1732,7 +1998,7 @@
+ struct sk_buff *skb;
+
+ skb = dev_alloc_skb(pkt_len + 5);
+- if (vortex_debug > 4)
++ if (vp->msg_level & NETIF_MSG_RX_STATUS)
+ printk(KERN_DEBUG "Receiving packet size %d status %4.4x.\n",
+ pkt_len, rx_status);
+ if (skb != NULL) {
+@@ -1756,12 +2022,15 @@
+ netif_rx(skb);
+ dev->last_rx = jiffies;
+ vp->stats.rx_packets++;
++#if LINUX_VERSION_CODE > 0x20127
++ vp->stats.rx_bytes += pkt_len;
++#endif
+ /* Wait a limited time to go to next packet. */
+ for (i = 200; i >= 0; i--)
+ if ( ! (inw(ioaddr + EL3_STATUS) & CmdInProgress))
+ break;
+ continue;
+- } else if (vortex_debug)
++ } else if (vp->msg_level & NETIF_MSG_RX_ERR)
+ printk(KERN_NOTICE "%s: No memory to allocate a sk_buff of "
+ "size %d.\n", dev->name, pkt_len);
+ }
+@@ -1777,7 +2046,7 @@
+ }
+
+ static int
+-boomerang_rx(struct device *dev)
++boomerang_rx(struct net_device *dev)
+ {
+ struct vortex_private *vp = (struct vortex_private *)dev->priv;
+ int entry = vp->cur_rx % RX_RING_SIZE;
+@@ -1785,42 +2054,51 @@
+ int rx_status;
+ int rx_work_limit = vp->dirty_rx + RX_RING_SIZE - vp->cur_rx;
+
+- if (vortex_debug > 5)
++ if (vp->msg_level & NETIF_MSG_RX_STATUS)
+ printk(KERN_DEBUG " In boomerang_rx(), status %4.4x, rx_status "
+- "%4.4x.\n",
+- inw(ioaddr+EL3_STATUS), inw(ioaddr+RxStatus));
++ "%8.8x.\n",
++ inw(ioaddr+EL3_STATUS), (int)inl(ioaddr+UpPktStatus));
+ while ((rx_status = le32_to_cpu(vp->rx_ring[entry].status)) & RxDComplete){
+ if (--rx_work_limit < 0)
+ break;
+ if (rx_status & RxDError) { /* Error, update stats. */
+ unsigned char rx_error = rx_status >> 16;
+- if (vortex_debug > 2)
++ if (vp->msg_level & NETIF_MSG_RX_ERR)
+ printk(KERN_DEBUG " Rx error: status %2.2x.\n", rx_error);
+ vp->stats.rx_errors++;
+- if (rx_error & 0x01) vp->stats.rx_over_errors++;
+ if (rx_error & 0x02) vp->stats.rx_length_errors++;
++ if (rx_error & 0x10) vp->stats.rx_length_errors++;
+ if (rx_error & 0x04) vp->stats.rx_frame_errors++;
+ if (rx_error & 0x08) vp->stats.rx_crc_errors++;
+- if (rx_error & 0x10) vp->stats.rx_length_errors++;
++ if (rx_error & 0x01) {
++ vp->stats.rx_over_errors++;
++ if (vp->drv_flags & HAS_V2_TX) {
++ int cur_rx_thresh = inb(ioaddr + RxPriorityThresh);
++ if (cur_rx_thresh < 0x20)
++ outb(cur_rx_thresh + 1, ioaddr + RxPriorityThresh);
++ else
++ printk(KERN_WARNING "%s: Excessive PCI latency causing"
++ " packet corruption.\n", dev->name);
++ }
++ }
+ } else {
+ /* The packet length: up to 4.5K!. */
+ int pkt_len = rx_status & 0x1fff;
+ struct sk_buff *skb;
+
+- if (vortex_debug > 4)
++ if (vp->msg_level & NETIF_MSG_RX_STATUS)
+ printk(KERN_DEBUG "Receiving packet size %d status %4.4x.\n",
+ pkt_len, rx_status);
+
+ /* Check if the packet is long enough to just accept without
+ copying to a properly sized skbuff. */
+- if (pkt_len < rx_copybreak
++ if (pkt_len < vp->rx_copybreak
+ && (skb = dev_alloc_skb(pkt_len + 2)) != 0) {
+ skb->dev = dev;
+ skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
+ /* 'skb_put()' points to the start of sk_buff data area. */
+ memcpy(skb_put(skb, pkt_len),
+- bus_to_virt(le32_to_cpu(vp->rx_ring[entry].addr)),
+- pkt_len);
++ le32desc_to_virt(vp->rx_ring[entry].addr), pkt_len);
+ rx_copy++;
+ } else {
+ void *temp;
+@@ -1829,7 +2107,7 @@
+ vp->rx_skbuff[entry] = NULL;
+ temp = skb_put(skb, pkt_len);
+ /* Remove this checking code for final release. */
+- if (bus_to_virt(le32_to_cpu(vp->rx_ring[entry].addr)) != temp)
++ if (le32desc_to_virt(vp->rx_ring[entry].addr) != temp)
+ printk(KERN_ERR "%s: Warning -- the skbuff addresses do not match"
+ " in boomerang_rx: %p vs. %p.\n", dev->name,
+ bus_to_virt(le32_to_cpu(vp->rx_ring[entry].addr)),
+@@ -1849,20 +2127,23 @@
+ netif_rx(skb);
+ dev->last_rx = jiffies;
+ vp->stats.rx_packets++;
++#if LINUX_VERSION_CODE > 0x20127
++ vp->stats.rx_bytes += pkt_len;
++#endif
+ }
+ entry = (++vp->cur_rx) % RX_RING_SIZE;
+ }
+ /* Refill the Rx ring buffers. */
+- for (; vp->dirty_rx < vp->cur_rx; vp->dirty_rx++) {
++ for (; vp->cur_rx - vp->dirty_rx > 0; vp->dirty_rx++) {
+ struct sk_buff *skb;
+ entry = vp->dirty_rx % RX_RING_SIZE;
+ if (vp->rx_skbuff[entry] == NULL) {
+- skb = dev_alloc_skb(PKT_BUF_SZ);
++ skb = dev_alloc_skb(vp->rx_buf_sz);
+ if (skb == NULL)
+ break; /* Bad news! */
+ skb->dev = dev; /* Mark as being used by this device. */
+ skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
+- vp->rx_ring[entry].addr = cpu_to_le32(virt_to_bus(skb->tail));
++ vp->rx_ring[entry].addr = virt_to_le32desc(skb->tail);
+ vp->rx_skbuff[entry] = skb;
+ }
+ vp->rx_ring[entry].status = 0; /* Clear complete bit. */
+@@ -1871,25 +2152,11 @@
+ return 0;
+ }
+
+-static int
+-vortex_close(struct device *dev)
++static void
++vortex_down(struct net_device *dev)
+ {
+ struct vortex_private *vp = (struct vortex_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+- int i;
+-
+- dev->start = 0;
+- dev->tbusy = 1;
+-
+- if (vortex_debug > 1) {
+- printk(KERN_DEBUG"%s: vortex_close() status %4.4x, Tx status %2.2x.\n",
+- dev->name, inw(ioaddr + EL3_STATUS), inb(ioaddr + TxStatus));
+- printk(KERN_DEBUG "%s: vortex close stats: rx_nocopy %d rx_copy %d"
+- " tx_queued %d Rx pre-checksummed %d.\n",
+- dev->name, rx_nocopy, rx_copy, queued_packet, rx_csumhits);
+- }
+-
+- del_timer(&vp->timer);
+
+ /* Turn off statistics ASAP. We update vp->stats below. */
+ outw(StatsDisable, ioaddr + EL3_CMD);
+@@ -1902,44 +2169,66 @@
+ /* Turn off thinnet power. Green! */
+ outw(StopCoax, ioaddr + EL3_CMD);
+
+- free_irq(dev->irq, dev);
+-
+ outw(SetIntrEnb | 0x0000, ioaddr + EL3_CMD);
+
+ update_stats(ioaddr, dev);
+- if (vp->full_bus_master_rx) { /* Free Boomerang bus master Rx buffers. */
++ if (vp->full_bus_master_rx)
+ outl(0, ioaddr + UpListPtr);
++ if (vp->full_bus_master_tx)
++ outl(0, ioaddr + DownListPtr);
++}
++
++static int
++vortex_close(struct net_device *dev)
++{
++ struct vortex_private *vp = (struct vortex_private *)dev->priv;
++ long ioaddr = dev->base_addr;
++ int i;
++
++ netif_stop_tx_queue(dev);
++
++ if (vp->msg_level & NETIF_MSG_IFDOWN) {
++ printk(KERN_DEBUG"%s: vortex_close() status %4.4x, Tx status %2.2x.\n",
++ dev->name, inw(ioaddr + EL3_STATUS), inb(ioaddr + TxStatus));
++ printk(KERN_DEBUG "%s: vortex close stats: rx_nocopy %d rx_copy %d"
++ " tx_queued %d Rx pre-checksummed %d.\n",
++ dev->name, rx_nocopy, rx_copy, queued_packet, rx_csumhits);
++ }
++
++ del_timer(&vp->timer);
++ vortex_down(dev);
++ free_irq(dev->irq, dev);
++ outw(TotalReset | 0x34, ioaddr + EL3_CMD);
++
++ if (vp->full_bus_master_rx) { /* Free Boomerang bus master Rx buffers. */
+ for (i = 0; i < RX_RING_SIZE; i++)
+ if (vp->rx_skbuff[i]) {
+ #if LINUX_VERSION_CODE < 0x20100
+ vp->rx_skbuff[i]->free = 1;
+ #endif
+- DEV_FREE_SKB(vp->rx_skbuff[i]);
++ dev_free_skb(vp->rx_skbuff[i]);
+ vp->rx_skbuff[i] = 0;
+ }
+ }
+ if (vp->full_bus_master_tx) { /* Free Boomerang bus master Tx buffers. */
+- outl(0, ioaddr + DownListPtr);
+ for (i = 0; i < TX_RING_SIZE; i++)
+ if (vp->tx_skbuff[i]) {
+- DEV_FREE_SKB(vp->tx_skbuff[i]);
++ dev_free_skb(vp->tx_skbuff[i]);
+ vp->tx_skbuff[i] = 0;
+ }
+ }
+
+- if (vp->capabilities & CapPwrMgmt)
+- acpi_set_WOL(dev);
+ MOD_DEC_USE_COUNT;
+
+ return 0;
+ }
+
+-static struct net_device_stats *vortex_get_stats(struct device *dev)
++static struct net_device_stats *vortex_get_stats(struct net_device *dev)
+ {
+ struct vortex_private *vp = (struct vortex_private *)dev->priv;
+ unsigned long flags;
+
+- if (dev->start) {
++ if (netif_running(dev)) {
+ save_flags(flags);
+ cli();
+ update_stats(dev->base_addr, dev);
+@@ -1955,7 +2244,7 @@
+ table. This is done by checking that the ASM (!) code generated uses
+ atomic updates with '+='.
+ */
+-static void update_stats(long ioaddr, struct device *dev)
++static void update_stats(long ioaddr, struct net_device *dev)
+ {
+ struct vortex_private *vp = (struct vortex_private *)dev->priv;
+ int old_window = inw(ioaddr + EL3_CMD);
+@@ -1978,8 +2267,8 @@
+ /* Don't bother with register 9, an extension of registers 6&7.
+ If we do use the 6&7 values the atomic update assumption above
+ is invalid. */
++ /* Rx Bytes is unreliable */ inw(ioaddr + 10);
+ #if LINUX_VERSION_CODE > 0x020119
+- vp->stats.rx_bytes += inw(ioaddr + 10);
+ vp->stats.tx_bytes += inw(ioaddr + 12);
+ #else
+ inw(ioaddr + 10);
+@@ -1994,49 +2283,151 @@
+ return;
+ }
+
+-static int vortex_ioctl(struct device *dev, struct ifreq *rq, int cmd)
++static int vortex_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+ {
+ struct vortex_private *vp = (struct vortex_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+ u16 *data = (u16 *)&rq->ifr_data;
+- int phy = vp->phys[0] & 0x1f;
++ u32 *data32 = (void *)&rq->ifr_data;
++ int phy = vp->phys[0];
+
+ switch(cmd) {
+- case SIOCDEVPRIVATE: /* Get the address of the PHY in use. */
++ case 0x8947: case 0x89F0:
++ /* SIOCGMIIPHY: Get the address of the PHY in use. */
+ data[0] = phy;
+- case SIOCDEVPRIVATE+1: /* Read the specified MII register. */
++ /* Fall Through */
++ case 0x8948: case 0x89F1:
++ /* SIOCGMIIREG: Read the specified MII register. */
++ if (data[0] == 32) { /* Emulate MII for 3c59*, 3c900. */
++ data[3] = 0;
++ switch (data[1]) {
++ case 0:
++ if (dev->if_port == XCVR_100baseTx) data[3] |= 0x2000;
++ if (vp->full_duplex) data[3] |= 0x0100;
++ break;
++ case 1:
++ if (vp->available_media & 0x02) data[3] |= 0x6000;
++ if (vp->available_media & 0x08) data[3] |= 0x1800;
++ spin_lock(&vp->window_lock);
++ EL3WINDOW(4);
++ if (inw(ioaddr + Wn4_Media) & Media_LnkBeat) data[3] |= 0x0004;
++ spin_unlock(&vp->window_lock);
++ break;
++ case 2: data[3] = 0x0280; break; /* OUI 00:a0:24 */
++ case 3: data[3] = 0x9000; break;
++ default: break;
++ }
++ return 0;
++ }
++ spin_lock(&vp->window_lock);
+ EL3WINDOW(4);
+ data[3] = mdio_read(ioaddr, data[0] & 0x1f, data[1] & 0x1f);
++ spin_unlock(&vp->window_lock);
+ return 0;
+- case SIOCDEVPRIVATE+2: /* Write the specified MII register */
++ case 0x8949: case 0x89F2:
++ /* SIOCSMIIREG: Write the specified MII register */
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
++ if (data[0] == vp->phys[0]) {
++ u16 value = data[2];
++ if (vp->phys[0] == 32) {
++ if (data[1] == 0) {
++ vp->media_override = (value & 0x2000) ?
++ XCVR_100baseTx : XCVR_10baseT;
++ vp->full_duplex = (value & 0x0100) ? 1 : 0;
++ vp->medialock = 1;
++ }
++ return 0;
++ }
++ switch (data[1]) {
++ case 0:
++ /* Check for autonegotiation on or reset. */
++ vp->medialock = (value & 0x9000) ? 0 : 1;
++ if (vp->medialock)
++ vp->full_duplex = (value & 0x0100) ? 1 : 0;
++ break;
++ case 4: vp->advertising = value; break;
++ }
++ /* Perhaps check_duplex(dev), depending on chip semantics. */
++ }
++ spin_lock(&vp->window_lock);
+ EL3WINDOW(4);
+ mdio_write(ioaddr, data[0] & 0x1f, data[1] & 0x1f, data[2]);
++ spin_unlock(&vp->window_lock);
++ return 0;
++ case SIOCGPARAMS:
++ data32[0] = vp->msg_level;
++ data32[1] = vp->multicast_filter_limit;
++ data32[2] = vp->max_interrupt_work;
++ data32[3] = vp->rx_copybreak;
++ return 0;
++ case SIOCSPARAMS:
++ if (!capable(CAP_NET_ADMIN))
++ return -EPERM;
++ vp->msg_level = data32[0];
++ vp->multicast_filter_limit = data32[1];
++ vp->max_interrupt_work = data32[2];
++ vp->rx_copybreak = data32[3];
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+ }
+
++static unsigned const ethernet_polynomial = 0x04c11db7U;
++static inline u32 ether_crc(int length, unsigned char *data)
++{
++ int crc = -1;
++
++ while(--length >= 0) {
++ unsigned char current_octet = *data++;
++ int bit;
++ for (bit = 0; bit < 8; bit++, current_octet >>= 1)
++ crc = (crc << 1) ^
++ ((crc < 0) ^ (current_octet & 1) ? ethernet_polynomial : 0);
++ }
++ return crc;
++}
++
+ /* Pre-Cyclone chips have no documented multicast filter, so the only
+- multicast setting is to receive all multicast frames. At least
+- the chip has a very clean way to set the mode, unlike many others. */
+-static void set_rx_mode(struct device *dev)
++ multicast setting is to receive all multicast frames. Cyclone and later
++ chips have a write-only table of unknown size.
++ At least the chip has a very clean way to set the other filter modes. */
++static void set_rx_mode(struct net_device *dev)
+ {
++ struct vortex_private *vp = (void *)dev->priv;
+ long ioaddr = dev->base_addr;
+ int new_mode;
+
+ if (dev->flags & IFF_PROMISC) {
+- if (vortex_debug > 0)
+- printk(KERN_NOTICE "%s: Setting promiscuous mode.\n", dev->name);
++ /* Unconditionally log a net tap. */
++ printk(KERN_NOTICE "%s: Setting promiscuous mode.\n", dev->name);
+ new_mode = SetRxFilter|RxStation|RxMulticast|RxBroadcast|RxProm;
+- } else if ((dev->mc_list) || (dev->flags & IFF_ALLMULTI)) {
++ } else if (dev->flags & IFF_ALLMULTI) {
++ new_mode = SetRxFilter|RxStation|RxMulticast|RxBroadcast;
++ } else if ((vp->drv_flags & HAS_V2_TX) &&
++ dev->mc_count < vp->multicast_filter_limit) {
++ struct dev_mc_list *mclist;
++ int i;
++ for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
++ i++, mclist = mclist->next) {
++ int filter_bit = ether_crc(ETH_ALEN, mclist->dmi_addr) & 0xff;
++ if (test_bit(filter_bit, vp->mc_filter))
++ continue;
++ outw(SetFilterBit | 0x0400 | filter_bit, ioaddr + EL3_CMD);
++ set_bit(filter_bit, vp->mc_filter);
++ }
++
++ new_mode = SetRxFilter|RxStation|RxMulticastHash|RxBroadcast;
++ } else if (dev->mc_count) {
+ new_mode = SetRxFilter|RxStation|RxMulticast|RxBroadcast;
+ } else
+ new_mode = SetRxFilter | RxStation | RxBroadcast;
+
+- outw(new_mode, ioaddr + EL3_CMD);
++ if (vp->rx_mode != new_mode) {
++ vp->rx_mode = new_mode;
++ outw(new_mode, ioaddr + EL3_CMD);
++ }
+ }
+
+
+@@ -2090,19 +2481,15 @@
+ outw(dataval | MDIO_SHIFT_CLK, mdio_addr);
+ mdio_delay();
+ }
+- /* Read the two transition, 16 data, and wire-idle bits. */
+- for (i = 19; i > 0; i--) {
++ /* Read the two transition and 16 data bits. */
++ for (i = 18; i > 0; i--) {
+ outw(MDIO_ENB_IN, mdio_addr);
+ mdio_delay();
+ retval = (retval << 1) | ((inw(mdio_addr) & MDIO_DATA_READ) ? 1 : 0);
+ outw(MDIO_ENB_IN | MDIO_SHIFT_CLK, mdio_addr);
+ mdio_delay();
+ }
+-#if 0
+- return (retval>>1) & 0x1ffff;
+-#else
+- return retval & 0x20000 ? 0xffff : retval>>1 & 0xffff;
+-#endif
++ return retval & 0x10000 ? 0xffff : retval & 0xffff;
+ }
+
+ static void mdio_write(long ioaddr, int phy_id, int location, int value)
+@@ -2123,19 +2510,15 @@
+ mdio_delay();
+ }
+ /* Leave the interface idle. */
+- for (i = 1; i >= 0; i--) {
+- outw(MDIO_ENB_IN, mdio_addr);
+- mdio_delay();
+- outw(MDIO_ENB_IN | MDIO_SHIFT_CLK, mdio_addr);
+- mdio_delay();
+- }
++ mdio_sync(ioaddr, 32);
+
+ return;
+ }
+
++#if ! defined(NO_PCI)
+ /* ACPI: Advanced Configuration and Power Interface. */
+ /* Set Wake-On-LAN mode and put the board into D3 (power-down) state. */
+-static void acpi_set_WOL(struct device *dev)
++static void acpi_set_WOL(struct net_device *dev)
+ {
+ struct vortex_private *vp = (struct vortex_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+@@ -2147,57 +2530,105 @@
+ outw(SetRxFilter|RxStation|RxMulticast|RxBroadcast, ioaddr + EL3_CMD);
+ outw(RxEnable, ioaddr + EL3_CMD);
+ /* Change the power state to D3; RxEnable doesn't take effect. */
+- pcibios_write_config_word(vp->pci_bus, vp->pci_devfn, 0xe0, 0x8103);
++ pci_write_config_word(vp->pci_dev, 0xe0, 0x8103);
+ }
+-/* Change from D3 (sleep) to D0 (active).
+- Problem: The Cyclone forgets all PCI config info during the transition! */
+-static void acpi_wake(int bus, int devfn)
+-{
+- u32 base0, base1, romaddr;
+- u16 pci_command, pwr_command;
+- u8 pci_latency, pci_cacheline, irq;
++#endif
+
+- pcibios_read_config_word(bus, devfn, 0xe0, &pwr_command);
+- if ((pwr_command & 3) == 0)
+- return;
+- pcibios_read_config_word( bus, devfn, PCI_COMMAND, &pci_command);
+- pcibios_read_config_dword(bus, devfn, PCI_BASE_ADDRESS_0, &base0);
+- pcibios_read_config_dword(bus, devfn, PCI_BASE_ADDRESS_1, &base1);
+- pcibios_read_config_dword(bus, devfn, PCI_ROM_ADDRESS, &romaddr);
+- pcibios_read_config_byte( bus, devfn, PCI_LATENCY_TIMER, &pci_latency);
+- pcibios_read_config_byte( bus, devfn, PCI_CACHE_LINE_SIZE, &pci_cacheline);
+- pcibios_read_config_byte( bus, devfn, PCI_INTERRUPT_LINE, &irq);
+-
+- pcibios_write_config_word( bus, devfn, 0xe0, 0x0000);
+- pcibios_write_config_dword(bus, devfn, PCI_BASE_ADDRESS_0, base0);
+- pcibios_write_config_dword(bus, devfn, PCI_BASE_ADDRESS_1, base1);
+- pcibios_write_config_dword(bus, devfn, PCI_ROM_ADDRESS, romaddr);
+- pcibios_write_config_byte( bus, devfn, PCI_INTERRUPT_LINE, irq);
+- pcibios_write_config_byte( bus, devfn, PCI_LATENCY_TIMER, pci_latency);
+- pcibios_write_config_byte( bus, devfn, PCI_CACHE_LINE_SIZE, pci_cacheline);
+- pcibios_write_config_word( bus, devfn, PCI_COMMAND, pci_command | 5);
++static int pwr_event(void *dev_instance, int event)
++{
++ struct net_device *dev = dev_instance;
++ struct vortex_private *np = (struct vortex_private *)dev->priv;
++ long ioaddr = dev->base_addr;
++
++ if (np->msg_level & NETIF_MSG_LINK)
++ printk(KERN_DEBUG "%s: Handling power event %d.\n", dev->name, event);
++ switch(event) {
++ case DRV_ATTACH:
++ MOD_INC_USE_COUNT;
++ break;
++ case DRV_SUSPEND:
++ vortex_down(dev);
++ netif_stop_tx_queue(dev);
++ if (np->capabilities & CapPwrMgmt)
++ acpi_set_WOL(dev);
++ break;
++ case DRV_RESUME:
++ /* This is incomplete: the actions are very chip specific. */
++ activate_xcvr(dev);
++ set_media_type(dev);
++ start_operation(dev);
++ np->rx_mode = 0;
++ set_rx_mode(dev);
++ start_operation1(dev);
++ break;
++ case DRV_DETACH: {
++ struct net_device **devp, **next;
++ if (dev->flags & IFF_UP) {
++ dev_close(dev);
++ dev->flags &= ~(IFF_UP|IFF_RUNNING);
++ }
++ unregister_netdev(dev);
++ release_region(dev->base_addr, pci_tbl[np->chip_id].io_size);
++#ifndef USE_IO_OPS
++ iounmap((char *)dev->base_addr);
++#endif
++ for (devp = &root_vortex_dev; *devp; devp = next) {
++ next = &((struct vortex_private *)(*devp)->priv)->next_module;
++ if (*devp == dev) {
++ *devp = *next;
++ break;
++ }
++ }
++ if (np->priv_addr)
++ kfree(np->priv_addr);
++ kfree(dev);
++ MOD_DEC_USE_COUNT;
++ break;
++ }
++ case DRV_PWR_WakeOn:
++ if ( ! (np->capabilities & CapPwrMgmt))
++ return -1;
++ EL3WINDOW(7);
++ /* Power up on: 1=Downloaded Filter, 2=Magic Packets, 4=Link Status.*/
++ outw(2, ioaddr + 12);
++ /* This RxEnable doesn't take effect if we immediately change to D3. */
++ outw(SetRxFilter|RxStation|RxMulticast|RxBroadcast, ioaddr + EL3_CMD);
++ outw(RxEnable, ioaddr + EL3_CMD);
++ acpi_set_pwr_state(np->pci_dev, ACPI_D3);
++ break;
++ }
++ return 0;
+ }
+
+
+ #ifdef MODULE
+ void cleanup_module(void)
+ {
+- struct device *next_dev;
++ struct net_device *next_dev;
+
+ #ifdef CARDBUS
+ unregister_driver(&vortex_ops);
++#elif ! defined(NO_PCI)
++ pci_drv_unregister(&vortex_drv_id);
+ #endif
+
+ /* No need to check MOD_IN_USE, as sys_delete_module() checks. */
+ while (root_vortex_dev) {
+ struct vortex_private *vp=(void *)(root_vortex_dev->priv);
+- next_dev = vp->next_module;
+ unregister_netdev(root_vortex_dev);
+- outw(TotalReset, root_vortex_dev->base_addr + EL3_CMD);
++ outw(TotalReset | 0x14, root_vortex_dev->base_addr + EL3_CMD);
++ if (vp->capabilities & CapPwrMgmt)
++ acpi_set_WOL(root_vortex_dev);
++#ifdef USE_MEM_OPS
++ iounmap((char *)root_vortex_dev->base_addr);
++#else
+ release_region(root_vortex_dev->base_addr,
+ pci_tbl[vp->chip_id].io_size);
++#endif
++ next_dev = vp->next_module;
++ if (vp->priv_addr)
++ kfree(vp->priv_addr);
+ kfree(root_vortex_dev);
+- kfree(vp->priv_addr);
+ root_vortex_dev = next_dev;
+ }
+ }
+@@ -2206,9 +2637,10 @@
+
+ /*
+ * Local variables:
+- * compile-command: "gcc -DMODULE -D__KERNEL__ -Wall -Wstrict-prototypes -O6 -c 3c59x.c `[ -f /usr/include/linux/modversions.h ] && echo -DMODVERSIONS`"
+- * SMP-compile-command: "gcc -D__SMP__ -DMODULE -D__KERNEL__ -Wall -Wstrict-prototypes -O6 -c 3c59x.c"
+- * cardbus-compile-command: "gcc -DCARDBUS -DMODULE -D__KERNEL__ -Wall -Wstrict-prototypes -O6 -c 3c59x.c -o 3c575_cb.o -I/usr/src/linux/pcmcia-cs-3.0.9/include/"
++ * compile-command: "make KERNVER=`uname -r` 3c59x.o"
++ * compile-cmd: "gcc -DMODULE -Wall -Wstrict-prototypes -O6 -c 3c59x.c"
++ * cardbus-compile-command: "gcc -DCARDBUS -DMODULE -Wall -Wstrict-prototypes -O6 -c 3c59x.c -o 3c575_cb.o -I/usr/src/pcmcia/include/"
++ * eisa-only-compile: "gcc -DNO_PCI -DMODULE -O6 -c 3c59x.c -o 3c597.o"
+ * c-indent-level: 4
+ * c-basic-offset: 4
+ * tab-width: 4
+Index: linux/src/drivers/net/cb_shim.c
+===================================================================
+RCS file: linux/src/drivers/net/cb_shim.c
+diff -N linux/src/drivers/net/cb_shim.c
+--- /dev/null 1 Jan 1970 00:00:00 -0000
++++ linux/src/drivers/net/cb_shim.c 20 Aug 2004 10:32:53 -0000
+@@ -0,0 +1,296 @@
++/* cb_shim.c: Linux CardBus device support code. */
++/*
++ Written 1999-2002 by Donald Becker.
++
++ This software may be used and distributed according to the terms of
++ the GNU General Public License (GPL), incorporated herein by
++ reference. This is not a documented interface. Drivers incorporating
++ or interacting with these functions are derivative works and thus
++ are covered the GPL. They must include an explicit GPL notice.
++
++ This code provides a shim to allow newer drivers to interact with the
++ older Cardbus driver activation code. The functions supported are
++ attach, suspend, power-off, resume and eject.
++
++ The author may be reached as becker@scyld.com, or
++ Donald Becker
++ Scyld Computing Corporation
++ 410 Severn Ave., Suite 210
++ Annapolis MD 21403
++
++ Support and updates available at
++ http://www.scyld.com/network/drivers.html
++
++ Other contributers: (none yet)
++*/
++
++static const char version1[] =
++"cb_shim.c:v1.03 7/12/2002 Donald Becker <becker@scyld.com>\n";
++static const char version2[] =
++" http://www.scyld.com/linux/drivers.html\n";
++
++/* Module options. */
++static int debug = 1; /* 1 normal messages, 0 quiet .. 7 verbose. */
++
++#ifndef __KERNEL__
++#define __KERNEL__
++#endif
++#include <linux/config.h>
++#if defined(CONFIG_SMP) && ! defined(__SMP__)
++#define __SMP__
++#endif
++#if defined(CONFIG_MODVERSIONS) && ! defined(MODVERSIONS)
++#define MODVERSIONS
++#endif
++
++#include <linux/version.h>
++#if defined(MODVERSIONS)
++#include <linux/modversions.h>
++#endif
++#include <linux/module.h>
++
++#include <linux/kernel.h>
++#if LINUX_VERSION_CODE >= 0x20400
++#include <linux/slab.h>
++#else
++#include <linux/malloc.h>
++#endif
++#include <linux/netdevice.h>
++#include <linux/pci.h>
++#include <asm/io.h>
++
++/* These might be awkward to locate. */
++#include <pcmcia/driver_ops.h>
++#include "pci-scan.h"
++#include "kern_compat.h"
++
++MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
++MODULE_DESCRIPTION("Hot-swap-PCI and Cardbus event dispatch");
++MODULE_LICENSE("GPL");
++MODULE_PARM(debug, "i");
++MODULE_PARM_DESC(debug, "Enable additional status messages (0-7)");
++
++/* Note: this is used in a slightly sleazy manner: it is passed to routines
++ that expect and return just dev_node_t. However using the too-simple
++ dev_node_t complicates devices management -- older drivers had to
++ look up dev_node_t.name in their private list. */
++
++struct registered_pci_device {
++ struct dev_node_t node;
++ int magic;
++ struct registered_pci_device *next;
++ struct drv_id_info *drv_info;
++ struct pci_dev *pci_loc;
++ void *dev_instance;
++} static *root_pci_devs = 0;
++
++struct drv_shim {
++ struct drv_id_info *did;
++ struct driver_operations drv_ops;
++ int magic;
++ struct drv_shim *next;
++} static *root_drv_id = 0;
++
++static void drv_power_op(struct dev_node_t *node, enum drv_pwr_action action)
++{
++ struct registered_pci_device **devp, **next, *rpin = (void *)node, *rp;
++ if (debug > 1)
++ printk(KERN_DEBUG "power operation(%s, %d).\n",
++ rpin->drv_info->name, action);
++ /* With our wrapper structure we can almost do
++ rpin->drv_info->pwr_event(rpin->dev_instance, action);
++ But the detach operation requires us to remove the object from the
++ list, so we check for uncontrolled "ghost" devices. */
++ for (devp = &root_pci_devs; *devp; devp = next) {
++ rp = *devp;
++ next = &rp->next;
++ if (rp == rpin) {
++ if (rp->drv_info->pwr_event)
++ rp->drv_info->pwr_event((*devp)->dev_instance, action);
++ else
++ printk(KERN_ERR "No power event hander for driver %s.\n",
++ rpin->drv_info->name);
++ if (action == DRV_DETACH) {
++ kfree(rp);
++ *devp = *next;
++ MOD_DEC_USE_COUNT;
++ }
++ return;
++ }
++ }
++ if (debug)
++ printk(KERN_WARNING "power operation(%s, %d) for a ghost device.\n",
++ node->dev_name, action);
++}
++/* Wrappers / static lambdas. */
++static void drv_suspend(struct dev_node_t *node)
++{
++ drv_power_op(node, DRV_SUSPEND);
++}
++static void drv_resume(struct dev_node_t *node)
++{
++ drv_power_op(node, DRV_RESUME);
++}
++static void drv_detach(struct dev_node_t *node)
++{
++ drv_power_op(node, DRV_DETACH);
++}
++
++/* The CardBus interaction does not identify the driver the attach() is
++ for, thus we must search for the ID in all PCI device tables.
++ While ugly, we likely only have one driver loaded anyway.
++*/
++static dev_node_t *drv_attach(struct dev_locator_t *loc)
++{
++ struct drv_shim *dp;
++ struct drv_id_info *drv_id = NULL;
++ struct pci_id_info *pci_tbl = NULL;
++ u32 pci_id, subsys_id, pci_rev, pciaddr;
++ u8 irq;
++ int chip_idx = 0, pci_flags, bus, devfn;
++ long ioaddr;
++ void *newdev;
++
++ if (debug > 1)
++ printk(KERN_INFO "drv_attach()\n");
++ if (loc->bus != LOC_PCI) return NULL;
++ bus = loc->b.pci.bus; devfn = loc->b.pci.devfn;
++ if (debug > 1)
++ printk(KERN_DEBUG "drv_attach(bus %d, function %d)\n", bus, devfn);
++
++ pcibios_read_config_dword(bus, devfn, PCI_VENDOR_ID, &pci_id);
++ pcibios_read_config_dword(bus, devfn, PCI_SUBSYSTEM_ID, &subsys_id);
++ pcibios_read_config_dword(bus, devfn, PCI_REVISION_ID, &pci_rev);
++ pcibios_read_config_byte(bus, devfn, PCI_INTERRUPT_LINE, &irq);
++ for (dp = root_drv_id; dp; dp = dp->next) {
++ drv_id = dp->did;
++ pci_tbl = drv_id->pci_dev_tbl;
++ for (chip_idx = 0; pci_tbl[chip_idx].name; chip_idx++) {
++ struct pci_id_info *chip = &pci_tbl[chip_idx];
++ if ((pci_id & chip->id.pci_mask) == chip->id.pci
++ && (subsys_id & chip->id.subsystem_mask) == chip->id.subsystem
++ && (pci_rev & chip->id.revision_mask) == chip->id.revision)
++ break;
++ }
++ if (pci_tbl[chip_idx].name) /* Compiled out! */
++ break;
++ }
++ if (dp == 0) {
++ printk(KERN_WARNING "No driver match for device %8.8x at %d/%d.\n",
++ pci_id, bus, devfn);
++ return 0;
++ }
++ pci_flags = pci_tbl[chip_idx].pci_flags;
++ pcibios_read_config_dword(bus, devfn, ((pci_flags >> 2) & 0x1C) + 0x10,
++ &pciaddr);
++ if ((pciaddr & PCI_BASE_ADDRESS_SPACE_IO)) {
++ ioaddr = pciaddr & PCI_BASE_ADDRESS_IO_MASK;
++ } else
++ ioaddr = (long)ioremap(pciaddr & PCI_BASE_ADDRESS_MEM_MASK,
++ pci_tbl[chip_idx].io_size);
++ if (ioaddr == 0 || irq == 0) {
++ printk(KERN_ERR "The %s at %d/%d was not assigned an %s.\n"
++ KERN_ERR " It will not be activated.\n",
++ pci_tbl[chip_idx].name, bus, devfn,
++ ioaddr == 0 ? "address" : "IRQ");
++ return NULL;
++ }
++ printk(KERN_INFO "Found a %s at %d/%d address 0x%x->0x%lx IRQ %d.\n",
++ pci_tbl[chip_idx].name, bus, devfn, pciaddr, ioaddr, irq);
++ {
++ u16 pci_command;
++ pcibios_read_config_word(bus, devfn, PCI_COMMAND, &pci_command);
++ printk(KERN_INFO "%s at %d/%d command 0x%x.\n",
++ pci_tbl[chip_idx].name, bus, devfn, pci_command);
++ }
++
++ newdev = drv_id->probe1(pci_find_slot(bus, devfn), 0,
++ ioaddr, irq, chip_idx, 0);
++ if (newdev) {
++ struct registered_pci_device *hsdev =
++ kmalloc(sizeof(struct registered_pci_device), GFP_KERNEL);
++ if (drv_id->pci_class == PCI_CLASS_NETWORK_ETHERNET<<8)
++ strcpy(hsdev->node.dev_name, ((struct net_device *)newdev)->name);
++ hsdev->node.major = hsdev->node.minor = 0;
++ hsdev->node.next = NULL;
++ hsdev->drv_info = drv_id;
++ hsdev->dev_instance = newdev;
++ hsdev->next = root_pci_devs;
++ root_pci_devs = hsdev;
++ drv_id->pwr_event(newdev, DRV_ATTACH);
++ MOD_INC_USE_COUNT;
++ return &hsdev->node;
++ }
++ return NULL;
++}
++
++/* Add/remove a driver ID structure to our private list of known drivers. */
++int do_cb_register(struct drv_id_info *did)
++{
++ struct driver_operations *dop;
++ struct drv_shim *dshim = kmalloc(sizeof(*dshim), GFP_KERNEL);
++ if (dshim == 0)
++ return 0;
++ if (debug > 1)
++ printk(KERN_INFO "Registering driver support for '%s'.\n",
++ did->name);
++ MOD_INC_USE_COUNT;
++ dshim->did = did;
++ dop = &dshim->drv_ops;
++ dop->name = (char *)did->name;
++ dop->attach = drv_attach;
++ dop->suspend = drv_suspend;
++ dop->resume = drv_resume;
++ dop->detach = drv_detach;
++ dshim->next = root_drv_id;
++ root_drv_id = dshim;
++ return register_driver(dop);
++}
++
++void do_cb_unregister(struct drv_id_info *did)
++{
++ struct drv_shim **dp;
++ for (dp = &root_drv_id; *dp; dp = &(*dp)->next)
++ if ((*dp)->did == did) {
++ struct drv_shim *dshim = *dp;
++ unregister_driver(&dshim->drv_ops);
++ *dp = dshim->next;
++ kfree(dshim);
++ MOD_DEC_USE_COUNT;
++ return;
++ }
++}
++
++extern int (*register_hotswap_hook)(struct drv_id_info *did);
++extern void (*unregister_hotswap_hook)(struct drv_id_info *did);
++
++int (*old_cb_hook)(struct drv_id_info *did);
++void (*old_un_cb_hook)(struct drv_id_info *did);
++
++int init_module(void)
++{
++ if (debug)
++ printk(KERN_INFO "%s" KERN_INFO "%s", version1, version2);
++ old_cb_hook = register_hotswap_hook;
++ old_un_cb_hook = unregister_hotswap_hook;
++ register_hotswap_hook = do_cb_register;
++ unregister_hotswap_hook = do_cb_unregister;
++ return 0;
++}
++void cleanup_module(void)
++{
++ register_hotswap_hook = old_cb_hook;
++ unregister_hotswap_hook = old_un_cb_hook;
++ return;
++}
++
++
++/*
++ * Local variables:
++ * compile-command: "gcc -DMODULE -Wall -Wstrict-prototypes -O6 -c cb_shim.c -I/usr/include/ -I/usr/src/pcmcia/include/"
++ * c-indent-level: 4
++ * c-basic-offset: 4
++ * tab-width: 4
++ * End:
++ */
++
+Index: linux/src/drivers/net/eepro100.c
+===================================================================
+RCS file: /cvsroot/hurd/gnumach/linux/src/drivers/net/Attic/eepro100.c,v
+retrieving revision 1.2
+diff -u -r1.2 eepro100.c
+--- linux/src/drivers/net/eepro100.c 18 Aug 2001 00:56:42 -0000 1.2
++++ linux/src/drivers/net/eepro100.c 20 Aug 2004 10:32:53 -0000
+@@ -1,146 +1,166 @@
+ /* drivers/net/eepro100.c: An Intel i82557-559 Ethernet driver for Linux. */
+ /*
+- NOTICE: this version of the driver is supposed to work with 2.2 kernels.
+- Written 1996-1999 by Donald Becker.
++ Written 1998-2003 by Donald Becker.
+
+- This software may be used and distributed according to the terms
+- of the GNU Public License, incorporated herein by reference.
++ This software may be used and distributed according to the terms of
++ the GNU General Public License (GPL), incorporated herein by reference.
++ Drivers based on or derived from this code fall under the GPL and must
++ retain the authorship, copyright and license notice. This driver is not
++ a complete program and may only be used when the entire operating
++ system is licensed under the GPL.
+
+ This driver is for the Intel EtherExpress Pro100 (Speedo3) design.
+ It should work with all i82557/558/559 boards.
+
+ To use as a module, use the compile-command at the end of the file.
+
+- The author may be reached as becker@CESDIS.usra.edu, or C/O
+- Center of Excellence in Space Data and Information Sciences
+- Code 930.5, NASA Goddard Space Flight Center, Greenbelt MD 20771
++ The author may be reached as becker@scyld.com, or C/O
++ Scyld Computing Corporation
++ 914 Bay Ridge Road, Suite 220
++ Annapolis MD 21403
++
+ For updates see
+- http://cesdis.gsfc.nasa.gov/linux/drivers/eepro100.html
++ http://www.scyld.com/network/eepro100.html
+ For installation instructions
+- http://cesdis.gsfc.nasa.gov/linux/misc/modules.html
+- There is a Majordomo mailing list based at
+- linux-eepro100@cesdis.gsfc.nasa.gov
+-
+- The driver also contains updates by different kernel developers.
+- This driver clone is maintained by Andrey V. Savochkin <saw@saw.sw.com.sg>.
+- Please use this email address and linux-kernel mailing list for bug reports.
+-
+- Modification history:
+- 2000 Mar 24 Dragan Stancevic <visitor@valinux.com>
+- Disabled FC and ER, to avoid lockups when when we get FCP interrupts.
+- 2000 May 27 Andrey Moruga <moruga@sw.com.sg>
+- Code duplication for 82559ER support was removed.
+- Accurate handling of all supported chips was implemented.
+- Some fixes in 2.3 clone of the driver were ported.
+- 2000 May 30 Dragan Stancevic <visitor@valinux.com> and
+- Andrey Moruga <moruga@sw.com.sg>
+- Honor PortReset timing specification.
+- 2000 Jul 25 Dragan Stancevic <visitor@valinux.com>
+- Changed to MMIO, resized FIFOs, resized rings, changed ISR timeout
+- Problem reported by:
+- Marc MERLIN <merlin@valinux.com>
+- 2000 Nov 15 Dragan Stancevic <visitor@valinux.com>
+- Changed command completion time and added debug info as to which
+- CMD timed out. Problem reported by:
+- "Ulrich Windl" <Ulrich.Windl@rz.uni-regensburg.de>
++ http://www.scyld.com/network/modules.html
++ The information and support mailing lists are based at
++ http://www.scyld.com/mailman/listinfo/
+ */
+
+-/*#define USE_IO*/
+-static const char *version =
+-"eepro100.c:v1.09j-t 9/29/99 Donald Becker http://cesdis.gsfc.nasa.gov/linux/drivers/eepro100.html\n"
+-"eepro100.c: $Revision: 1.2 $ 2000/05/31 Modified by Andrey V. Savochkin <saw@saw.sw.com.sg> and others\n"
+-"eepro100.c: VA Linux custom, Dragan Stancevic <visitor@valinux.com> 2000/11/15\n";
++/* These identify the driver base version and may not be removed. */
++static const char version1[] =
++"eepro100.c:v1.28 7/22/2003 Donald Becker <becker@scyld.com>\n";
++static const char version2[] =
++" http://www.scyld.com/network/eepro100.html\n";
++
++
++/* The user-configurable values.
++ These may be modified when a driver module is loaded.
++ The first five are undocumented and spelled per Intel recommendations.
++*/
+
+-/* A few user-configurable values that apply to all boards.
+- First set is undocumented and spelled per Intel recommendations. */
++/* Message enable level: 0..31 = no..all messages. See NETIF_MSG docs. */
++static int debug = 2;
+
+ static int congenb = 0; /* Enable congestion control in the DP83840. */
+-static int txfifo = 0; /* Tx FIFO threshold in 4 byte units, 0-15 */
+-static int rxfifo = 0xF; /* Rx FIFO threshold, default 32 bytes. */
++static int txfifo = 8; /* Tx FIFO threshold in 4 byte units, 0-15 */
++static int rxfifo = 8; /* Rx FIFO threshold, default 32 bytes. */
+ /* Tx/Rx DMA burst length, 0-127, 0 == no preemption, tx==128 -> disabled. */
+ static int txdmacount = 128;
+ static int rxdmacount = 0;
+
+-/* Set the copy breakpoint for the copy-only-tiny-buffer Rx method.
+- Lower values use more memory, but are faster. */
+-#if defined(__alpha__) || defined(__sparc__)
+-/* force copying of all packets to avoid unaligned accesses on Alpha */
+-static int rx_copybreak = 1518;
+-#else
++/* Set the copy breakpoint for the copy-only-tiny-frame Rx method.
++ Lower values use more memory, but are faster.
++ Setting to > 1518 disables this feature. */
+ static int rx_copybreak = 200;
+-#endif
+
+ /* Maximum events (Rx packets, etc.) to handle at each interrupt. */
+-static int max_interrupt_work = 200;
++static int max_interrupt_work = 20;
+
+ /* Maximum number of multicast addresses to filter (vs. rx-all-multicast) */
+ static int multicast_filter_limit = 64;
+
+-/* 'options' is used to pass a transceiver override or full-duplex flag
+- e.g. "options=16" for FD, "options=32" for 100mbps-only. */
+-static int full_duplex[] = {-1, -1, -1, -1, -1, -1, -1, -1};
+-static int options[] = {-1, -1, -1, -1, -1, -1, -1, -1};
+-#ifdef MODULE
+-static int debug = -1; /* The debug level */
+-#endif
++/* Used to pass the media type, etc.
++ Both 'options[]' and 'full_duplex[]' should exist for driver
++ interoperability, however setting full_duplex[] is deprecated.
++ The media type is usually passed in 'options[]'.
++ Use option values 0x10/0x20 for 10Mbps, 0x100,0x200 for 100Mbps.
++ Use option values 0x10 and 0x100 for forcing half duplex fixed speed.
++ Use option values 0x20 and 0x200 for forcing full duplex operation.
++*/
++#define MAX_UNITS 8 /* More are supported, limit only on options */
++static int options[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
++static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
++
++/* Operational parameters that are set at compile time. */
+
+-/* A few values that may be tweaked. */
+ /* The ring sizes should be a power of two for efficiency. */
+-#define TX_RING_SIZE 64
+-#define RX_RING_SIZE 64
+-/* How much slots multicast filter setup may take.
+- Do not descrease without changing set_rx_mode() implementaion. */
+-#define TX_MULTICAST_SIZE 2
+-#define TX_MULTICAST_RESERV (TX_MULTICAST_SIZE*2)
+-/* Actual number of TX packets queued, must be
+- <= TX_RING_SIZE-TX_MULTICAST_RESERV. */
+-#define TX_QUEUE_LIMIT (TX_RING_SIZE-TX_MULTICAST_RESERV)
+-/* Hysteresis marking queue as no longer full. */
+-#define TX_QUEUE_UNFULL (TX_QUEUE_LIMIT-4)
++#define TX_RING_SIZE 32 /* Effectively 2 entries fewer. */
++#define RX_RING_SIZE 32
++/* Actual number of TX packets queued, must be <= TX_RING_SIZE-2. */
++#define TX_QUEUE_LIMIT 12
++#define TX_QUEUE_UNFULL 8 /* Hysteresis marking queue as no longer full. */
+
+ /* Operational parameters that usually are not changed. */
+
+ /* Time in jiffies before concluding the transmitter is hung. */
+-#define TX_TIMEOUT (2*HZ)
+-/* Size of an pre-allocated Rx buffer: <Ethernet MTU> + slack.*/
++#define TX_TIMEOUT (6*HZ)
++
++/* Allocation size of Rx buffers with normal sized Ethernet frames.
++ Do not change this value without good reason. This is not a limit,
++ but a way to keep a consistent allocation size among drivers.
++ */
+ #define PKT_BUF_SZ 1536
+
+-#if !defined(__OPTIMIZE__) || !defined(__KERNEL__)
++#ifndef __KERNEL__
++#define __KERNEL__
++#endif
++#if !defined(__OPTIMIZE__)
+ #warning You must compile this file with the correct options!
+ #warning See the last lines of the source file.
+ #error You must compile this driver with "-O".
+ #endif
+
++#include <linux/config.h>
++#if defined(CONFIG_SMP) && ! defined(__SMP__)
++#define __SMP__
++#endif
++#if defined(MODULE) && defined(CONFIG_MODVERSIONS) && ! defined(MODVERSIONS)
++#define MODVERSIONS
++#endif
++
+ #include <linux/version.h>
+-#include <linux/module.h>
+ #if defined(MODVERSIONS)
+ #include <linux/modversions.h>
+ #endif
++#include <linux/module.h>
+
+ #include <linux/kernel.h>
+ #include <linux/string.h>
+ #include <linux/timer.h>
+ #include <linux/errno.h>
+ #include <linux/ioport.h>
++#if LINUX_VERSION_CODE >= 0x20400
++#include <linux/slab.h>
++#else
+ #include <linux/malloc.h>
++#endif
+ #include <linux/interrupt.h>
+ #include <linux/pci.h>
+-#include <asm/spinlock.h>
+-
+-#include <asm/bitops.h>
+-#include <asm/io.h>
+-
+ #include <linux/netdevice.h>
+ #include <linux/etherdevice.h>
+ #include <linux/skbuff.h>
+ #include <linux/delay.h>
++#include <asm/bitops.h>
++#include <asm/io.h>
++
++#if LINUX_VERSION_CODE >= 0x20300
++#include <linux/spinlock.h>
++#elif LINUX_VERSION_CODE >= 0x20200
++#include <asm/spinlock.h>
++#endif
++
++#ifdef INLINE_PCISCAN
++#include "k_compat.h"
++#else
++#include "pci-scan.h"
++#include "kern_compat.h"
++#endif
++
++/* Condensed bus+endian portability operations. */
++#define virt_to_le32desc(addr) cpu_to_le32(virt_to_bus(addr))
++#define le32desc_to_virt(addr) bus_to_virt(le32_to_cpu(addr))
++
++#if (LINUX_VERSION_CODE >= 0x20100) && defined(MODULE)
++char kernel_version[] = UTS_RELEASE;
++#endif
+
+-#if defined(MODULE)
+-MODULE_AUTHOR("Maintainer: Andrey V. Savochkin <saw@saw.sw.com.sg>");
+-MODULE_DESCRIPTION("Intel i82557/i82558 PCI EtherExpressPro driver");
++MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
++MODULE_DESCRIPTION("Intel PCI EtherExpressPro 100 driver");
++MODULE_LICENSE("GPL");
+ MODULE_PARM(debug, "i");
+-MODULE_PARM(options, "1-" __MODULE_STRING(8) "i");
+-MODULE_PARM(full_duplex, "1-" __MODULE_STRING(8) "i");
++MODULE_PARM(options, "1-" __MODULE_STRING(MAX_UNITS) "i");
++MODULE_PARM(full_duplex, "1-" __MODULE_STRING(MAX_UNITS) "i");
+ MODULE_PARM(congenb, "i");
+ MODULE_PARM(txfifo, "i");
+ MODULE_PARM(rxfifo, "i");
+@@ -149,42 +169,21 @@
+ MODULE_PARM(rx_copybreak, "i");
+ MODULE_PARM(max_interrupt_work, "i");
+ MODULE_PARM(multicast_filter_limit, "i");
++#ifdef MODULE_PARM_DESC
++MODULE_PARM_DESC(debug, "EEPro100 message level (0-31)");
++MODULE_PARM_DESC(options,
++ "EEPro100: force fixed speed+duplex 0x10 0x20 0x100 0x200");
++MODULE_PARM_DESC(max_interrupt_work,
++ "EEPro100 maximum events handled per interrupt");
++MODULE_PARM_DESC(full_duplex, "EEPro100 set to forced full duplex when not 0"
++ " (deprecated)");
++MODULE_PARM_DESC(rx_copybreak,
++ "EEPro100 copy breakpoint for copy-only-tiny-frames");
++MODULE_PARM_DESC(multicast_filter_limit,
++ "EEPro100 breakpoint for switching to Rx-all-multicast");
++/* Other settings are undocumented per Intel recommendation. */
+ #endif
+
+-#define RUN_AT(x) (jiffies + (x))
+-/* Condensed bus+endian portability operations. */
+-#define virt_to_le32desc(addr) cpu_to_le32(virt_to_bus(addr))
+-#define le32desc_to_virt(addr) bus_to_virt(le32_to_cpu(addr))
+-
+-#define net_device device
+-#define pci_base_address(p, n) (p)->base_address[n]
+-
+-#define dev_free_skb(skb) dev_kfree_skb(skb);
+-#define netif_wake_queue(dev) do { \
+- clear_bit(0, (void*)&dev->tbusy); \
+- mark_bh(NET_BH); \
+- } while(0)
+-#define netif_start_queue(dev) clear_bit(0, (void*)&dev->tbusy)
+-#define netif_stop_queue(dev) set_bit(0, (void*)&dev->tbusy)
+-#ifndef PCI_DEVICE_ID_INTEL_82559ER
+-#define PCI_DEVICE_ID_INTEL_82559ER 0x1209
+-#endif
+-#ifndef PCI_DEVICE_ID_INTEL_ID1029
+-#define PCI_DEVICE_ID_INTEL_ID1029 0x1029
+-#endif
+-#ifndef PCI_DEVICE_ID_INTEL_ID1030
+-#define PCI_DEVICE_ID_INTEL_ID1030 0x1030
+-#endif
+-#ifndef PCI_DEVICE_ID_INTEL_ID2449
+-#define PCI_DEVICE_ID_INTEL_ID2449 0x2449
+-#endif
+-
+-/* The total I/O port extent of the board.
+- The registers beyond 0x18 only exist on the i82558. */
+-#define SPEEDO3_TOTAL_SIZE 0x20
+-
+-int speedo_debug = 1;
+-
+ /*
+ Theory of Operation
+
+@@ -234,7 +233,7 @@
+ (TxBD). A fixed ring of these TxCB+TxBD pairs are kept as part of the
+ speedo_private data structure for each adapter instance.
+
+-The newer i82558 explicitly supports this structure, and can read the two
++The i82558 and later explicitly supports this structure, and can read the two
+ TxBDs in the same PCI burst as the TxCB.
+
+ This ring structure is used for all normal transmit packets, but the
+@@ -245,7 +244,7 @@
+ that descriptor's link to the complex command.
+
+ An additional complexity of these non-transmit commands are that they may be
+-added asynchronous to the normal transmit queue, so we disable interrupts
++added asynchronous to the normal transmit queue, so we set a lock
+ whenever the Tx descriptor ring is manipulated.
+
+ A notable aspect of these special configure commands is that they do
+@@ -257,27 +256,16 @@
+ tx_skbuff[] entry is always empty for config_cmd and mc_setup frames.
+
+ Commands may have bits set e.g. CmdSuspend in the command word to either
+-suspend or stop the transmit/command unit. This driver always flags the last
+-command with CmdSuspend, erases the CmdSuspend in the previous command, and
+-then issues a CU_RESUME.
+-Note: Watch out for the potential race condition here: imagine
+- erasing the previous suspend
+- the chip processes the previous command
+- the chip processes the final command, and suspends
+- doing the CU_RESUME
+- the chip processes the next-yet-valid post-final-command.
+-So blindly sending a CU_RESUME is only safe if we do it immediately after
+-after erasing the previous CmdSuspend, without the possibility of an
+-intervening delay. Thus the resume command is always within the
+-interrupts-disabled region. This is a timing dependence, but handling this
+-condition in a timing-independent way would considerably complicate the code.
++suspend or stop the transmit/command unit. This driver always initializes
++the current command with CmdSuspend before erasing the CmdSuspend in the
++previous command, and only then issues a CU_RESUME.
+
+ Note: In previous generation Intel chips, restarting the command unit was a
+ notoriously slow process. This is presumably no longer true.
+
+ IIIC. Receive structure
+
+-Because of the bus-master support on the Speedo3 this driver uses the new
++Because of the bus-master support on the Speedo3 this driver uses the
+ SKBUFF_RX_COPYBREAK scheme, rather than a fixed intermediate receive buffer.
+ This scheme allocates full-sized skbuffs as receive buffers. The value
+ SKBUFF_RX_COPYBREAK is used as the copying breakpoint: it is chosen to
+@@ -291,6 +279,24 @@
+ is non-trivial, and the larger copy might flush the cache of useful data, so
+ we pass up the skbuff the packet was received into.
+
++IIID. Synchronization
++The driver runs as two independent, single-threaded flows of control. One
++is the send-packet routine, which enforces single-threaded use by the
++dev->tbusy flag. The other thread is the interrupt handler, which is single
++threaded by the hardware and other software.
++
++The send packet thread has partial control over the Tx ring and 'dev->tbusy'
++flag. It sets the tbusy flag whenever it's queuing a Tx packet. If the next
++queue slot is empty, it clears the tbusy flag when finished otherwise it sets
++the 'sp->tx_full' flag.
++
++The interrupt handler has exclusive control over the Rx ring and records stats
++from the Tx ring. (The Tx-done interrupt can't be selectively turned off, so
++we can't avoid the interrupt overhead by having the Tx routine reap the Tx
++stats.) After reaping the stats, it marks the queue entry as empty by setting
++the 'base' to zero. Iff the 'sp->tx_full' flag is set, it clears both the
++tx_full and tbusy flags.
++
+ IV. Notes
+
+ Thanks to Steve Williams of Intel for arranging the non-disclosure agreement
+@@ -300,11 +306,13 @@
+ */
+
+ /* This table drives the PCI probe routines. */
+-static struct net_device *speedo_found1(struct pci_dev *pdev, int pci_bus,
+- int pci_devfn, long ioaddr,
+- int chip_idx, int card_idx);
++static void *speedo_found1(struct pci_dev *pdev, void *init_dev,
++ long ioaddr, int irq, int chip_idx, int fnd_cnt);
++static int speedo_pwr_event(void *dev_instance, int event);
++enum chip_capability_flags { ResetMII=1, HasChksum=2};
+
+-#ifdef USE_IO
++/* I/O registers beyond 0x18 do not exist on the i82557. */
++#ifdef USE_IO_OPS
+ #define SPEEDO_IOTYPE PCI_USES_MASTER|PCI_USES_IO|PCI_ADDR1
+ #define SPEEDO_SIZE 32
+ #else
+@@ -312,48 +320,48 @@
+ #define SPEEDO_SIZE 0x1000
+ #endif
+
+-enum pci_flags_bit {
+- PCI_USES_IO=1, PCI_USES_MEM=2, PCI_USES_MASTER=4,
+- PCI_ADDR0=0x10<<0, PCI_ADDR1=0x10<<1, PCI_ADDR2=0x10<<2, PCI_ADDR3=0x10<<3,
+-};
+-struct pci_id_info {
+- const char *name;
+- u16 vendor_id, device_id;
+- int pci_index;
+-} static pci_tbl[] = {
+- { "Intel PCI EtherExpress Pro100 82557",
+- PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82557,
+- 0
+- },
+- { "Intel PCI EtherExpress Pro100 82559ER",
+- PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82559ER,
+- 0
+- },
+- { "Intel PCI EtherExpress Pro100 ID1029",
+- PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ID1029,
+- 0
+- },
+- { "Intel Corporation 82559 InBusiness 10/100",
+- PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ID1030,
+- 0
+- },
+- { "Intel PCI EtherExpress Pro100 82562EM",
+- PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ID2449,
+- 0
+- },
+- {0,} /* 0 terminated list. */
++struct pci_id_info static pci_id_tbl[] = {
++ {"Intel i82559 rev 8", { 0x12298086, ~0, 0,0, 8,0xff},
++ SPEEDO_IOTYPE, SPEEDO_SIZE, HasChksum, },
++ {"Intel PCI EtherExpress Pro100", { 0x12298086, 0xffffffff,},
++ SPEEDO_IOTYPE, SPEEDO_SIZE, 0, },
++ {"Intel EtherExpress Pro/100+ i82559ER", { 0x12098086, 0xffffffff,},
++ SPEEDO_IOTYPE, SPEEDO_SIZE, ResetMII, },
++ {"Intel EtherExpress Pro/100 type 1029", { 0x10298086, 0xffffffff,},
++ SPEEDO_IOTYPE, SPEEDO_SIZE, 0, },
++ {"Intel EtherExpress Pro/100 type 1030", { 0x10308086, 0xffffffff,},
++ SPEEDO_IOTYPE, SPEEDO_SIZE, 0, },
++ {"Intel Pro/100 V Network", { 0x24498086, 0xffffffff,},
++ SPEEDO_IOTYPE, SPEEDO_SIZE, 0, },
++ {"Intel Pro/100 VE (type 1031)", { 0x10318086, 0xffffffff,},
++ SPEEDO_IOTYPE, SPEEDO_SIZE, 0, },
++ {"Intel Pro/100 VM (type 1038)", { 0x10388086, 0xffffffff,},
++ SPEEDO_IOTYPE, SPEEDO_SIZE, 0, },
++ {"Intel Pro/100 VM (type 1039)", { 0x10398086, 0xffffffff,},
++ SPEEDO_IOTYPE, SPEEDO_SIZE, 0, },
++ {"Intel Pro/100 VM (type 103a)", { 0x103a8086, 0xffffffff,},
++ SPEEDO_IOTYPE, SPEEDO_SIZE, 0, },
++ {"HP/Compaq D510 Intel Pro/100 VM",
++ { 0x103b8086, 0xffffffff, 0x00120e11, 0xffffffff,},
++ SPEEDO_IOTYPE, SPEEDO_SIZE, 0, },
++ {"Intel Pro/100 VM (type 103b)", { 0x103b8086, 0xffffffff,},
++ SPEEDO_IOTYPE, SPEEDO_SIZE, 0, },
++ {"Intel Pro/100 VE (type 103D)", { 0x103d8086, 0xffffffff,},
++ SPEEDO_IOTYPE, SPEEDO_SIZE, 0, },
++ {"Intel EtherExpress Pro/100 865G Northbridge type 1051",
++ { 0x10518086, 0xffffffff,}, SPEEDO_IOTYPE, SPEEDO_SIZE, 0, },
++ {"Intel Pro/100 VM (unknown type series 1030)",
++ { 0x10308086, 0xfff0ffff,}, SPEEDO_IOTYPE, SPEEDO_SIZE, 0, },
++ {"Intel Pro/100 (unknown type series 1050)",
++ { 0x10508086, 0xfff0ffff,}, SPEEDO_IOTYPE, SPEEDO_SIZE, 0, },
++ {0,}, /* 0 terminated list. */
+ };
+
+-static inline unsigned int io_inw(unsigned long port)
+-{
+- return inw(port);
+-}
+-static inline void io_outw(unsigned int val, unsigned long port)
+-{
+- outw(val, port);
+-}
++struct drv_id_info eepro100_drv_id = {
++ "eepro100", PCI_HOTSWAP, PCI_CLASS_NETWORK_ETHERNET<<8, pci_id_tbl,
++ speedo_found1, speedo_pwr_event, };
+
+-#ifndef USE_IO
++#ifndef USE_IO_OPS
+ #undef inb
+ #undef inw
+ #undef inl
+@@ -368,27 +376,6 @@
+ #define outl writel
+ #endif
+
+-/* How to wait for the command unit to accept a command.
+- Typically this takes 0 ticks. */
+-static inline void wait_for_cmd_done(long cmd_ioaddr)
+-{
+- int wait = 20000;
+- char cmd_reg1, cmd_reg2;
+- do ;
+- while((cmd_reg1 = inb(cmd_ioaddr)) && (--wait >= 0));
+-
+- /* Last chance to change your mind --Dragan*/
+- if (wait < 0){
+- cmd_reg2 = inb(cmd_ioaddr);
+- if(cmd_reg2){
+- printk(KERN_ALERT "eepro100: cmd_wait for(%#2.2x) timedout with(%#2.2x)!\n",
+- cmd_reg1, cmd_reg2);
+-
+- }
+- }
+-
+-}
+-
+ /* Offsets to the various registers.
+ All accesses need not be longword aligned. */
+ enum speedo_offsets {
+@@ -408,28 +395,36 @@
+ CmdIntr = 0x20000000, /* Interrupt after completion. */
+ CmdTxFlex = 0x00080000, /* Use "Flexible mode" for CmdTx command. */
+ };
+-/* Clear CmdSuspend (1<<30) avoiding interference with the card access to the
+- status bits. Previous driver versions used separate 16 bit fields for
+- commands and statuses. --SAW
+- */
+-#if defined(__LITTLE_ENDIAN)
+-#define clear_suspend(cmd) ((__u16 *)&(cmd)->cmd_status)[1] &= ~0x4000
+-#elif defined(__BIG_ENDIAN)
+-#define clear_suspend(cmd) ((__u16 *)&(cmd)->cmd_status)[1] &= ~0x0040
++/* Do atomically if possible. */
++#if defined(__i386__)
++#define clear_suspend(cmd) ((char *)(&(cmd)->cmd_status))[3] &= ~0x40
++#elif defined(__alpha__) || defined(__x86_64) || defined(__ia64)
++#define clear_suspend(cmd) clear_bit(30, &(cmd)->cmd_status)
++#elif defined(__powerpc__) || defined(__sparc__) || (__BIG_ENDIAN)
++#define clear_suspend(cmd) clear_bit(6, &(cmd)->cmd_status)
+ #else
+-#error Unsupported byteorder
++#warning Undefined architecture.
++#define clear_suspend(cmd) (cmd)->cmd_status &= cpu_to_le32(~CmdSuspend)
+ #endif
+
+ enum SCBCmdBits {
+- SCBMaskCmdDone=0x8000, SCBMaskRxDone=0x4000, SCBMaskCmdIdle=0x2000,
+- SCBMaskRxSuspend=0x1000, SCBMaskEarlyRx=0x0800, SCBMaskFlowCtl=0x0400,
+- SCBTriggerIntr=0x0200, SCBMaskAll=0x0100,
+- /* The rest are Rx and Tx commands. */
+- CUStart=0x0010, CUResume=0x0020, CUStatsAddr=0x0040, CUShowStats=0x0050,
+- CUCmdBase=0x0060, /* CU Base address (set to zero) . */
+- CUDumpStats=0x0070, /* Dump then reset stats counters. */
+- RxStart=0x0001, RxResume=0x0002, RxAbort=0x0004, RxAddrLoad=0x0006,
+- RxResumeNoResources=0x0007,
++ SCBMaskCmdDone=0x8000, SCBMaskRxDone=0x4000, SCBMaskCmdIdle=0x2000,
++ SCBMaskRxSuspend=0x1000, SCBMaskEarlyRx=0x0800, SCBMaskFlowCtl=0x0400,
++ SCBTriggerIntr=0x0200, SCBMaskAll=0x0100,
++ /* The rest are Rx and Tx commands. */
++ CUStart=0x0010, CUResume=0x0020, CUHiPriStart=0x0030, CUStatsAddr=0x0040,
++ CUShowStats=0x0050,
++ CUCmdBase=0x0060, /* CU Base address (set to zero) . */
++ CUDumpStats=0x0070, /* Dump then reset stats counters. */
++ CUHiPriResume=0x00b0, /* Resume for the high priority Tx queue. */
++ RxStart=0x0001, RxResume=0x0002, RxAbort=0x0004, RxAddrLoad=0x0006,
++ RxResumeNoResources=0x0007,
++};
++
++enum intr_status_bits {
++ IntrCmdDone=0x8000, IntrRxDone=0x4000, IntrCmdIdle=0x2000,
++ IntrRxSuspend=0x1000, IntrMIIDone=0x0800, IntrDrvrIntr=0x0400,
++ IntrAllNormal=0xfc00,
+ };
+
+ enum SCBPort_cmds {
+@@ -437,9 +432,9 @@
+ };
+
+ /* The Speedo3 Rx and Tx frame/buffer descriptors. */
+-struct descriptor { /* A generic descriptor. */
+- s32 cmd_status; /* All command and status fields. */
+- u32 link; /* struct descriptor * */
++struct descriptor { /* A generic descriptor. */
++ s32 cmd_status; /* All command and status fields. */
++ u32 link; /* struct descriptor * */
+ unsigned char params[0];
+ };
+
+@@ -464,18 +459,11 @@
+ u32 link; /* void * */
+ u32 tx_desc_addr; /* Always points to the tx_buf_addr element. */
+ s32 count; /* # of TBD (=1), Tx start thresh., etc. */
+- /* This constitutes two "TBD" entries -- we only use one. */
++ /* This constitutes two "TBD" entries. Non-zero-copy uses only one. */
+ u32 tx_buf_addr0; /* void *, frame to be transmitted. */
+ s32 tx_buf_size0; /* Length of Tx frame. */
+- u32 tx_buf_addr1; /* void *, frame to be transmitted. */
+- s32 tx_buf_size1; /* Length of Tx frame. */
+-};
+-
+-/* Multicast filter setting block. --SAW */
+-struct speedo_mc_block {
+- struct speedo_mc_block *next;
+- unsigned int tx;
+- struct descriptor frame __attribute__ ((__aligned__(16)));
++ u32 tx_buf_addr1; /* Used only for zero-copy data section. */
++ s32 tx_buf_size1; /* Length of second data buffer (0). */
+ };
+
+ /* Elements of the dump_statistics block. This block must be lword aligned. */
+@@ -499,48 +487,70 @@
+ u32 done_marker;
+ };
+
+-enum Rx_ring_state_bits {
+- RrNoMem=1, RrPostponed=2, RrNoResources=4, RrOOMReported=8,
+-};
+-
+ /* Do not change the position (alignment) of the first few elements!
+ The later elements are grouped for cache locality. */
+ struct speedo_private {
+ struct TxFD tx_ring[TX_RING_SIZE]; /* Commands (usually CmdTxPacket). */
+ struct RxFD *rx_ringp[RX_RING_SIZE]; /* Rx descriptor, used as ring. */
++ struct speedo_stats lstats; /* Statistics and self-test region */
++
+ /* The addresses of a Tx/Rx-in-place packets/buffers. */
+ struct sk_buff* tx_skbuff[TX_RING_SIZE];
+ struct sk_buff* rx_skbuff[RX_RING_SIZE];
++
++ /* Transmit and other commands control. */
+ struct descriptor *last_cmd; /* Last command sent. */
+ unsigned int cur_tx, dirty_tx; /* The ring entries to be free()ed. */
+ spinlock_t lock; /* Group with Tx control cache line. */
+ u32 tx_threshold; /* The value for txdesc.count. */
+- struct RxFD *last_rxf; /* Last command sent. */
++ unsigned long last_cmd_time;
++
++ /* Rx control, one cache line. */
++ struct RxFD *last_rxf; /* Most recent Rx frame. */
+ unsigned int cur_rx, dirty_rx; /* The next free ring entry */
++ unsigned int rx_buf_sz; /* Based on MTU+slack. */
+ long last_rx_time; /* Last Rx, in jiffies, to handle Rx hang. */
+- const char *product_name;
++ int rx_copybreak;
++
++ int msg_level;
++ int max_interrupt_work;
+ struct net_device *next_module;
+ void *priv_addr; /* Unaligned address for kfree */
+- struct enet_statistics stats;
+- struct speedo_stats lstats;
+- int chip_id;
+- unsigned char pci_bus, pci_devfn, acpi_pwr;
++ struct net_device_stats stats;
++ int alloc_failures;
++ int chip_id, drv_flags;
++ struct pci_dev *pci_dev;
++ unsigned char acpi_pwr;
+ struct timer_list timer; /* Media selection timer. */
+- struct speedo_mc_block *mc_setup_head;/* Multicast setup frame list head. */
+- struct speedo_mc_block *mc_setup_tail;/* Multicast setup frame list tail. */
++ /* Multicast filter command. */
++ int mc_setup_frm_len; /* The length of an allocated.. */
++ struct descriptor *mc_setup_frm; /* ..multicast setup frame. */
++ int mc_setup_busy; /* Avoid double-use of setup frame. */
++ int multicast_filter_limit;
++
+ int in_interrupt; /* Word-aligned dev->interrupt */
+- char rx_mode; /* Current PROMISC/ALLMULTI setting. */
++ int rx_mode; /* Current PROMISC/ALLMULTI setting. */
+ unsigned int tx_full:1; /* The Tx queue is full. */
+ unsigned int full_duplex:1; /* Full-duplex operation requested. */
+ unsigned int flow_ctrl:1; /* Use 802.3x flow control. */
+ unsigned int rx_bug:1; /* Work around receiver hang errata. */
+ unsigned int rx_bug10:1; /* Receiver might hang at 10mbps. */
+ unsigned int rx_bug100:1; /* Receiver might hang at 100mbps. */
+- unsigned char default_port:8; /* Last dev->if_port value. */
+- unsigned char rx_ring_state; /* RX ring status flags. */
++ unsigned int polling:1; /* Hardware blocked interrupt line. */
++ unsigned int medialock:1; /* The media speed/duplex is fixed. */
++ unsigned char default_port; /* Last dev->if_port value. */
+ unsigned short phy[2]; /* PHY media interfaces available. */
+ unsigned short advertising; /* Current PHY advertised caps. */
+ unsigned short partner; /* Link partner caps. */
++ long last_reset;
++};
++
++/* Our internal RxMode state, not tied to the hardware bits. */
++enum rx_mode_bits {
++ AcceptAllMulticast=0x01, AcceptAllPhys=0x02,
++ AcceptErr=0x80, AcceptRunt=0x10,
++ AcceptBroadcast=0x08, AcceptMulticast=0x04,
++ AcceptMyPhys=0x01, RxInvalidMode=0x7f
+ };
+
+ /* The parameters for a CmdConfigure operation.
+@@ -554,10 +564,10 @@
+ const char i82558_config_cmd[22] = {
+ 22, 0x08, 0, 1, 0, 0, 0x22, 0x03, 1, /* 1=Use MII 0=Use AUI */
+ 0, 0x2E, 0, 0x60, 0x08, 0x88,
+- 0x68, 0, 0x40, 0xf2, 0x84, /* Disable FC */
++ 0x68, 0, 0x40, 0xf2, 0xBD, /* 0xBD->0xFD=Force full-duplex */
+ 0x31, 0x05, };
+
+-/* PHY media interface chips. */
++/* PHY media interface chips, defined by the databook. */
+ static const char *phys[] = {
+ "None", "i82553-A/B", "i82553-C", "i82503",
+ "DP83840", "80c240", "80c24", "i82555",
+@@ -566,10 +576,12 @@
+ enum phy_chips { NonSuchPhy=0, I82553AB, I82553C, I82503, DP83840, S80C240,
+ S80C24, I82555, DP83840A=10, };
+ static const char is_mii[] = { 0, 1, 1, 0, 1, 1, 0, 1 };
++
++/* Standard serial configuration EEPROM commands. */
+ #define EE_READ_CMD (6)
+
+ static int do_eeprom_cmd(long ioaddr, int cmd, int cmd_len);
+-static int mdio_read(long ioaddr, int phy_id, int location);
++static int mdio_read(struct net_device *dev, int phy_id, int location);
+ static int mdio_write(long ioaddr, int phy_id, int location, int value);
+ static int speedo_open(struct net_device *dev);
+ static void speedo_resume(struct net_device *dev);
+@@ -577,15 +589,12 @@
+ static void speedo_init_rx_ring(struct net_device *dev);
+ static void speedo_tx_timeout(struct net_device *dev);
+ static int speedo_start_xmit(struct sk_buff *skb, struct net_device *dev);
+-static void speedo_refill_rx_buffers(struct net_device *dev, int force);
+ static int speedo_rx(struct net_device *dev);
+-static void speedo_tx_buffer_gc(struct net_device *dev);
+ static void speedo_interrupt(int irq, void *dev_instance, struct pt_regs *regs);
+ static int speedo_close(struct net_device *dev);
+-static struct enet_statistics *speedo_get_stats(struct net_device *dev);
++static struct net_device_stats *speedo_get_stats(struct net_device *dev);
+ static int speedo_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+ static void set_rx_mode(struct net_device *dev);
+-static void speedo_show_state(struct net_device *dev);
+
+
+
+@@ -599,112 +608,28 @@
+ /* A list of all installed Speedo devices, for removing the driver module. */
+ static struct net_device *root_speedo_dev = NULL;
+
+-int eepro100_init(void)
+-{
+- int cards_found = 0;
+- int chip_idx;
+- struct pci_dev *pdev;
+-
+- if (! pcibios_present())
+- return cards_found;
+-
+- for (chip_idx = 0; pci_tbl[chip_idx].name; chip_idx++) {
+- for (; pci_tbl[chip_idx].pci_index < 8; pci_tbl[chip_idx].pci_index++) {
+- unsigned char pci_bus, pci_device_fn, pci_latency;
+- unsigned long pciaddr;
+- long ioaddr;
+- int irq;
+-
+- u16 pci_command, new_command;
+-
+- if (pcibios_find_device(pci_tbl[chip_idx].vendor_id,
+- pci_tbl[chip_idx].device_id,
+- pci_tbl[chip_idx].pci_index, &pci_bus,
+- &pci_device_fn))
+- break;
+- {
+- pdev = pci_find_slot(pci_bus, pci_device_fn);
+-#ifdef USE_IO
+- pciaddr = pci_base_address(pdev, 1); /* Use [0] to mem-map */
+-#else
+- pciaddr = pci_base_address(pdev, 0);
+-#endif
+- irq = pdev->irq;
+- }
+- /* Remove I/O space marker in bit 0. */
+- if (pciaddr & 1) {
+- ioaddr = pciaddr & ~3UL;
+- if (check_region(ioaddr, 32))
+- continue;
+- } else {
+-#ifdef __sparc__
+- /* ioremap is hosed in 2.2.x on Sparc. */
+- ioaddr = pciaddr & ~0xfUL;
+-#else
+- if ((ioaddr = (long)ioremap(pciaddr & ~0xfUL, 0x1000)) == 0) {
+- printk(KERN_INFO "Failed to map PCI address %#lx.\n",
+- pciaddr);
+- continue;
+- }
+-#endif
+- }
+- if (speedo_debug > 2)
+- printk("Found Intel i82557 PCI Speedo at I/O %#lx, IRQ %d.\n",
+- ioaddr, irq);
+-
+- /* Get and check the bus-master and latency values. */
+- pcibios_read_config_word(pci_bus, pci_device_fn,
+- PCI_COMMAND, &pci_command);
+- new_command = pci_command | PCI_COMMAND_MASTER|PCI_COMMAND_IO;
+- if (pci_command != new_command) {
+- printk(KERN_INFO " The PCI BIOS has not enabled this"
+- " device! Updating PCI command %4.4x->%4.4x.\n",
+- pci_command, new_command);
+- pcibios_write_config_word(pci_bus, pci_device_fn,
+- PCI_COMMAND, new_command);
+- }
+- pcibios_read_config_byte(pci_bus, pci_device_fn,
+- PCI_LATENCY_TIMER, &pci_latency);
+- if (pci_latency < 32) {
+- printk(" PCI latency timer (CFLT) is unreasonably low at %d."
+- " Setting to 32 clocks.\n", pci_latency);
+- pcibios_write_config_byte(pci_bus, pci_device_fn,
+- PCI_LATENCY_TIMER, 32);
+- } else if (speedo_debug > 1)
+- printk(" PCI latency timer (CFLT) is %#x.\n", pci_latency);
+-
+- if (speedo_found1(pdev, pci_bus, pci_device_fn, ioaddr, chip_idx, cards_found))
+- cards_found++;
+- }
+- }
+-
+- return cards_found;
+-}
+-
+-static struct net_device *speedo_found1(struct pci_dev *pdev, int pci_bus,
+- int pci_devfn, long ioaddr,
+- int chip_idx, int card_idx)
++static void *speedo_found1(struct pci_dev *pdev, void *init_dev,
++ long ioaddr, int irq, int chip_idx, int card_idx)
+ {
+ struct net_device *dev;
+ struct speedo_private *sp;
+- const char *product;
++ void *priv_mem;
+ int i, option;
+ u16 eeprom[0x100];
+ int acpi_idle_state = 0;
+-#ifndef MODULE
+- static int did_version = 0; /* Already printed version info. */
+- if (speedo_debug > 0 && did_version++ == 0)
+- printk(version);
+-#endif
+
+- dev = init_etherdev(NULL, sizeof(struct speedo_private));
++ dev = init_etherdev(init_dev, 0);
++ if (!dev)
++ return NULL;
+
+ if (dev->mem_start > 0)
+ option = dev->mem_start;
+ else if (card_idx >= 0 && options[card_idx] >= 0)
+ option = options[card_idx];
+ else
+- option = 0;
++ option = -1;
++
++ acpi_idle_state = acpi_set_pwr_state(pdev, ACPI_D0);
+
+ /* Read the station address EEPROM before doing the reset.
+ Nominally his should even be done before accepting the device, but
+@@ -712,15 +637,11 @@
+ The size test is for 6 bit vs. 8 bit address serial EEPROMs.
+ */
+ {
+- unsigned long iobase;
+- int read_cmd, ee_size;
+- u16 sum;
++ u16 sum = 0;
+ int j;
++ int read_cmd, ee_size;
+
+- /* Use IO only to avoid postponed writes and satisfy EEPROM timing
+- requirements. */
+- iobase = pci_base_address(pdev, 1) & ~3UL;
+- if ((do_eeprom_cmd(iobase, EE_READ_CMD << 24, 27) & 0xffe0000)
++ if ((do_eeprom_cmd(ioaddr, EE_READ_CMD << 24, 27) & 0xffe0000)
+ == 0xffe0000) {
+ ee_size = 0x100;
+ read_cmd = EE_READ_CMD << 24;
+@@ -729,8 +650,8 @@
+ read_cmd = EE_READ_CMD << 22;
+ }
+
+- for (j = 0, i = 0, sum = 0; i < ee_size; i++) {
+- u16 value = do_eeprom_cmd(iobase, read_cmd | (i << 16), 27);
++ for (j = 0, i = 0; i < ee_size; i++) {
++ u16 value = do_eeprom_cmd(ioaddr, read_cmd | (i << 16), 27);
+ eeprom[i] = value;
+ sum += value;
+ if (i < 3) {
+@@ -743,45 +664,41 @@
+ "check settings before activating this device!\n",
+ dev->name, sum);
+ /* Don't unregister_netdev(dev); as the EEPro may actually be
+- usable, especially if the MAC address is set later.
+- On the other hand, it may be unusable if MDI data is corrupted. */
++ usable, especially if the MAC address is set later. */
+ }
+
+ /* Reset the chip: stop Tx and Rx processes and clear counters.
+ This takes less than 10usec and will easily finish before the next
+ action. */
+ outl(PortReset, ioaddr + SCBPort);
+- inl(ioaddr + SCBPort);
+- /* Honor PortReset timing. */
+- udelay(10);
+
+- if (eeprom[3] & 0x0100)
+- product = "OEM i82557/i82558 10/100 Ethernet";
+- else
+- product = pci_tbl[chip_idx].name;
+-
+- printk(KERN_INFO "%s: %s, ", dev->name, product);
++ printk(KERN_INFO "%s: %s%s at %#3lx, ", dev->name,
++ eeprom[3] & 0x0100 ? "OEM " : "", pci_id_tbl[chip_idx].name,
++ ioaddr);
+
+ for (i = 0; i < 5; i++)
+ printk("%2.2X:", dev->dev_addr[i]);
+- printk("%2.2X, ", dev->dev_addr[i]);
+-#ifdef USE_IO
+- printk("I/O at %#3lx, ", ioaddr);
+-#endif
+- printk("IRQ %d.\n", pdev->irq);
++ printk("%2.2X, IRQ %d.\n", dev->dev_addr[i], irq);
+
+-#if 1 || defined(kernel_bloat)
++ /* We have decided to accept this device. */
++ /* Allocate cached private storage.
++ The PCI coherent descriptor rings are allocated at each open. */
++ sp = priv_mem = kmalloc(sizeof(*sp), GFP_KERNEL);
++ /* Check for the very unlikely case of no memory. */
++ if (priv_mem == NULL)
++ return NULL;
++ dev->base_addr = ioaddr;
++ dev->irq = irq;
++
++#ifndef kernel_bloat
+ /* OK, this is pure kernel bloat. I don't like it when other drivers
+ waste non-pageable kernel space to emit similar messages, but I need
+ them for bug reports. */
+ {
+ const char *connectors[] = {" RJ45", " BNC", " AUI", " MII"};
+ /* The self-test results must be paragraph aligned. */
+- s32 str[6], *volatile self_test_results;
++ s32 *volatile self_test_results;
+ int boguscnt = 16000; /* Timeout for set-test. */
+- if ((eeprom[3] & 0x03) != 0x03)
+- printk(KERN_INFO " Receiver lock-up bug exists -- enabling"
+- " work-around.\n");
+ printk(KERN_INFO " Board assembly %4.4x%2.2x-%3.3d, Physical"
+ " connectors present:",
+ eeprom[8], eeprom[9]>>8, eeprom[9] & 0xff);
+@@ -795,24 +712,42 @@
+ phys[(eeprom[7]>>8)&7]);
+ if (((eeprom[6]>>8) & 0x3f) == DP83840
+ || ((eeprom[6]>>8) & 0x3f) == DP83840A) {
+- int mdi_reg23 = mdio_read(ioaddr, eeprom[6] & 0x1f, 23) | 0x0422;
++ int mdi_reg23 = mdio_read(dev, eeprom[6] & 0x1f, 23) | 0x0422;
+ if (congenb)
+ mdi_reg23 |= 0x0100;
+ printk(KERN_INFO" DP83840 specific setup, setting register 23 to %4.4x.\n",
+ mdi_reg23);
+ mdio_write(ioaddr, eeprom[6] & 0x1f, 23, mdi_reg23);
+ }
+- if ((option >= 0) && (option & 0x70)) {
++ if ((option >= 0) && (option & 0x330)) {
+ printk(KERN_INFO " Forcing %dMbs %s-duplex operation.\n",
+- (option & 0x20 ? 100 : 10),
+- (option & 0x10 ? "full" : "half"));
++ (option & 0x300 ? 100 : 10),
++ (option & 0x220 ? "full" : "half"));
+ mdio_write(ioaddr, eeprom[6] & 0x1f, 0,
+- ((option & 0x20) ? 0x2000 : 0) | /* 100mbps? */
+- ((option & 0x10) ? 0x0100 : 0)); /* Full duplex? */
+- }
++ ((option & 0x300) ? 0x2000 : 0) | /* 100mbps? */
++ ((option & 0x220) ? 0x0100 : 0)); /* Full duplex? */
++ } else {
++ int mii_bmcrctrl = mdio_read(dev, eeprom[6] & 0x1f, 0);
++ /* Reset out of a transceiver left in 10baseT-fixed mode. */
++ if ((mii_bmcrctrl & 0x3100) == 0)
++ mdio_write(ioaddr, eeprom[6] & 0x1f, 0, 0x8000);
++ }
++ if (eeprom[10] & 0x0002)
++ printk(KERN_INFO "\n" KERN_INFO " ** The configuration "
++ "EEPROM enables Sleep Mode.\n" KERN_INFO "\n"
++ " ** This will cause PCI bus errors!\n"
++ KERN_INFO " ** Update the configuration EEPROM "
++ "with the eepro100-diag program.\n" );
++ if (eeprom[6] == 0)
++ printk(KERN_INFO " ** The configuration EEPROM does not have a "
++ "transceiver type set.\n" KERN_INFO "\n"
++ " ** This will cause configuration problems and prevent "
++ "monitoring the link!\n"
++ KERN_INFO " ** Update the configuration EEPROM "
++ "with the eepro100-diag program.\n" );
+
+ /* Perform a system self-test. */
+- self_test_results = (s32*) ((((long) str) + 15) & ~0xf);
++ self_test_results = (s32*)(&sp->lstats);
+ self_test_results[0] = 0;
+ self_test_results[1] = -1;
+ outl(virt_to_bus(self_test_results) | PortSelfTest, ioaddr + SCBPort);
+@@ -840,37 +775,36 @@
+ #endif /* kernel_bloat */
+
+ outl(PortReset, ioaddr + SCBPort);
+- inl(ioaddr + SCBPort);
+- /* Honor PortReset timing. */
+- udelay(10);
+
+- /* We do a request_region() only to register /proc/ioports info. */
+- request_region(ioaddr, SPEEDO3_TOTAL_SIZE, "Intel Speedo3 Ethernet");
++ /* Return the chip to its original power state. */
++ acpi_set_pwr_state(pdev, acpi_idle_state);
+
+- dev->base_addr = ioaddr;
+- dev->irq = pdev->irq;
++ /* We do a request_region() only to register /proc/ioports info. */
++ request_region(ioaddr, pci_id_tbl[chip_idx].io_size, dev->name);
+
+- sp = dev->priv;
+- if (dev->priv == NULL) {
+- void *mem = kmalloc(sizeof(*sp), GFP_KERNEL);
+- dev->priv = sp = mem; /* Cache align here if kmalloc does not. */
+- sp->priv_addr = mem;
+- }
++ dev->priv = sp; /* Allocated above. */
+ memset(sp, 0, sizeof(*sp));
+ sp->next_module = root_speedo_dev;
+ root_speedo_dev = dev;
+
+- sp->pci_bus = pci_bus;
+- sp->pci_devfn = pci_devfn;
++ sp->priv_addr = priv_mem;
++ sp->pci_dev = pdev;
+ sp->chip_id = chip_idx;
++ sp->drv_flags = pci_id_tbl[chip_idx].drv_flags;
+ sp->acpi_pwr = acpi_idle_state;
++ sp->msg_level = (1 << debug) - 1;
++ sp->rx_copybreak = rx_copybreak;
++ sp->max_interrupt_work = max_interrupt_work;
++ sp->multicast_filter_limit = multicast_filter_limit;
+
+- sp->full_duplex = option >= 0 && (option & 0x10) ? 1 : 0;
++ sp->full_duplex = option >= 0 && (option & 0x220) ? 1 : 0;
+ if (card_idx >= 0) {
+ if (full_duplex[card_idx] >= 0)
+ sp->full_duplex = full_duplex[card_idx];
+ }
+ sp->default_port = option >= 0 ? (option & 0x0f) : 0;
++ if (sp->full_duplex)
++ sp->medialock = 1;
+
+ sp->phy[0] = eeprom[6];
+ sp->phy[1] = eeprom[7];
+@@ -882,10 +816,6 @@
+ /* The Speedo-specific entries in the device structure. */
+ dev->open = &speedo_open;
+ dev->hard_start_xmit = &speedo_start_xmit;
+-#if defined(HAS_NETIF_QUEUE)
+- dev->tx_timeout = &speedo_tx_timeout;
+- dev->watchdog_timeo = TX_TIMEOUT;
+-#endif
+ dev->stop = &speedo_close;
+ dev->get_stats = &speedo_get_stats;
+ dev->set_multicast_list = &set_rx_mode;
+@@ -893,6 +823,50 @@
+
+ return dev;
+ }
++
++/* How to wait for the command unit to accept a command.
++ Typically this takes 0 ticks. */
++
++static inline void wait_for_cmd_done(struct net_device *dev)
++{
++ long cmd_ioaddr = dev->base_addr + SCBCmd;
++ int wait = 0;
++ int delayed_cmd;
++ do
++ if (inb(cmd_ioaddr) == 0) return;
++ while(++wait <= 100);
++ delayed_cmd = inb(cmd_ioaddr);
++ do
++ if (inb(cmd_ioaddr) == 0) break;
++ while(++wait <= 10000);
++ printk(KERN_ERR "%s: Command %2.2x was not immediately accepted, "
++ "%d ticks!\n",
++ dev->name, delayed_cmd, wait);
++}
++
++/* Perform a SCB command known to be slow.
++ This function checks the status both before and after command execution. */
++static void do_slow_command(struct net_device *dev, int cmd)
++{
++ long cmd_ioaddr = dev->base_addr + SCBCmd;
++ int wait = 0;
++ do
++ if (inb(cmd_ioaddr) == 0) break;
++ while(++wait <= 200);
++ if (wait > 100)
++ printk(KERN_ERR "%s: Command %4.4x was never accepted (%d polls)!\n",
++ dev->name, inb(cmd_ioaddr), wait);
++ outb(cmd, cmd_ioaddr);
++ for (wait = 0; wait <= 100; wait++)
++ if (inb(cmd_ioaddr) == 0) return;
++ for (; wait <= 20000; wait++)
++ if (inb(cmd_ioaddr) == 0) return;
++ else udelay(1);
++ printk(KERN_ERR "%s: Command %4.4x was not accepted after %d polls!"
++ " Current status %8.8x.\n",
++ dev->name, cmd, wait, (int)inl(dev->base_addr + SCBStatus));
++}
++
+
+ /* Serial EEPROM section.
+ A "bit" grungy, but we work our way through bit-by-bit :->. */
+@@ -906,43 +880,48 @@
+ #define EE_WRITE_1 0x4806
+ #define EE_OFFSET SCBeeprom
+
+-/* The fixes for the code were kindly provided by Dragan Stancevic
+- <visitor@valinux.com> to strictly follow Intel specifications of EEPROM
+- access timing.
+- The publicly available sheet 64486302 (sec. 3.1) specifies 1us access
+- interval for serial EEPROM. However, it looks like that there is an
+- additional requirement dictating larger udelay's in the code below.
+- 2000/05/24 SAW */
++/* Delay between EEPROM clock transitions.
++ The code works with no delay on 33Mhz PCI. */
++#ifndef USE_IO_OPS
++#define eeprom_delay(ee_addr) writew(readw(ee_addr), ee_addr)
++#else
++#define eeprom_delay(ee_addr) inw(ee_addr)
++#endif
++
+ static int do_eeprom_cmd(long ioaddr, int cmd, int cmd_len)
+ {
+ unsigned retval = 0;
+ long ee_addr = ioaddr + SCBeeprom;
+
+- io_outw(EE_ENB, ee_addr); udelay(2);
+- io_outw(EE_ENB | EE_SHIFT_CLK, ee_addr); udelay(2);
++ outw(EE_ENB | EE_SHIFT_CLK, ee_addr);
+
+ /* Shift the command bits out. */
+ do {
+ short dataval = (cmd & (1 << cmd_len)) ? EE_WRITE_1 : EE_WRITE_0;
+- io_outw(dataval, ee_addr); udelay(2);
+- io_outw(dataval | EE_SHIFT_CLK, ee_addr); udelay(2);
+- retval = (retval << 1) | ((io_inw(ee_addr) & EE_DATA_READ) ? 1 : 0);
++ outw(dataval, ee_addr);
++ eeprom_delay(ee_addr);
++ outw(dataval | EE_SHIFT_CLK, ee_addr);
++ eeprom_delay(ee_addr);
++ retval = (retval << 1) | ((inw(ee_addr) & EE_DATA_READ) ? 1 : 0);
+ } while (--cmd_len >= 0);
+- io_outw(EE_ENB, ee_addr); udelay(2);
++ outw(EE_ENB, ee_addr);
+
+ /* Terminate the EEPROM access. */
+- io_outw(EE_ENB & ~EE_CS, ee_addr);
++ outw(EE_ENB & ~EE_CS, ee_addr);
+ return retval;
+ }
+
+-static int mdio_read(long ioaddr, int phy_id, int location)
++static int mdio_read(struct net_device *dev, int phy_id, int location)
+ {
++ long ioaddr = dev->base_addr;
+ int val, boguscnt = 64*10; /* <64 usec. to complete, typ 27 ticks */
++
+ outl(0x08000000 | (location<<16) | (phy_id<<21), ioaddr + SCBCtrlMDI);
+ do {
+ val = inl(ioaddr + SCBCtrlMDI);
+ if (--boguscnt < 0) {
+- printk(KERN_ERR " mdio_read() timed out with val = %8.8x.\n", val);
++ printk(KERN_ERR "%s: mdio_read() timed out with val = %8.8x.\n",
++ dev->name, val);
+ break;
+ }
+ } while (! (val & 0x10000000));
+@@ -971,10 +950,11 @@
+ struct speedo_private *sp = (struct speedo_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+
+- if (speedo_debug > 1)
+- printk(KERN_DEBUG "%s: speedo_open() irq %d.\n", dev->name, dev->irq);
+-
+ MOD_INC_USE_COUNT;
++ acpi_set_pwr_state(sp->pci_dev, ACPI_D0);
++
++ if (sp->msg_level & NETIF_MSG_IFUP)
++ printk(KERN_DEBUG "%s: speedo_open() irq %d.\n", dev->name, dev->irq);
+
+ /* Set up the Tx queue early.. */
+ sp->cur_tx = 0;
+@@ -982,19 +962,16 @@
+ sp->last_cmd = 0;
+ sp->tx_full = 0;
+ sp->lock = (spinlock_t) SPIN_LOCK_UNLOCKED;
+- sp->in_interrupt = 0;
+-
+- /* .. we can safely take handler calls during init. */
+- if (request_irq(dev->irq, &speedo_interrupt, SA_SHIRQ, dev->name, dev)) {
+- MOD_DEC_USE_COUNT;
+- return -EAGAIN;
+- }
++ sp->polling = sp->in_interrupt = 0;
+
+ dev->if_port = sp->default_port;
+
+-#ifdef oh_no_you_dont_unless_you_honour_the_options_passed_in_to_us
+- /* Retrigger negotiation to reset previous errors. */
+- if ((sp->phy[0] & 0x8000) == 0) {
++ if ((sp->phy[0] & 0x8000) == 0)
++ sp->advertising = mdio_read(dev, sp->phy[0] & 0x1f, 4);
++ /* With some transceivers we must retrigger negotiation to reset
++ power-up errors. */
++ if ((sp->drv_flags & ResetMII) &&
++ (sp->phy[0] & 0x8000) == 0) {
+ int phy_addr = sp->phy[0] & 0x1f ;
+ /* Use 0x3300 for restarting NWay, other values to force xcvr:
+ 0x0000 10-HD
+@@ -1008,31 +985,31 @@
+ mdio_write(ioaddr, phy_addr, 0, 0x3300);
+ #endif
+ }
+-#endif
++
++ /* We can safely take handler calls during init.
++ Doing this after speedo_init_rx_ring() results in a memory leak. */
++ if (request_irq(dev->irq, &speedo_interrupt, SA_SHIRQ, dev->name, dev)) {
++ MOD_DEC_USE_COUNT;
++ return -EAGAIN;
++ }
+
+ speedo_init_rx_ring(dev);
+
+ /* Fire up the hardware. */
+- outw(SCBMaskAll, ioaddr + SCBCmd);
+ speedo_resume(dev);
+-
+- dev->interrupt = 0;
+- dev->start = 1;
+- netif_start_queue(dev);
++ netif_start_tx_queue(dev);
+
+ /* Setup the chip and configure the multicast list. */
+- sp->mc_setup_head = NULL;
+- sp->mc_setup_tail = NULL;
++ sp->mc_setup_frm = NULL;
++ sp->mc_setup_frm_len = 0;
++ sp->mc_setup_busy = 0;
++ sp->rx_mode = RxInvalidMode; /* Invalid -> always reset the mode. */
+ sp->flow_ctrl = sp->partner = 0;
+- sp->rx_mode = -1; /* Invalid -> always reset the mode. */
+ set_rx_mode(dev);
+- if ((sp->phy[0] & 0x8000) == 0)
+- sp->advertising = mdio_read(ioaddr, sp->phy[0] & 0x1f, 4);
+
+- if (speedo_debug > 2) {
++ if (sp->msg_level & NETIF_MSG_IFUP)
+ printk(KERN_DEBUG "%s: Done speedo_open(), status %8.8x.\n",
+- dev->name, inw(ioaddr + SCBStatus));
+- }
++ dev->name, (int)inw(ioaddr + SCBStatus));
+
+ /* Set the timer. The timer serves a dual purpose:
+ 1) to monitor the media interface (e.g. link beat) and perhaps switch
+@@ -1040,15 +1017,14 @@
+ 2) to monitor Rx activity, and restart the Rx process if the receiver
+ hangs. */
+ init_timer(&sp->timer);
+- sp->timer.expires = RUN_AT((24*HZ)/10); /* 2.4 sec. */
++ sp->timer.expires = jiffies + 3*HZ;
+ sp->timer.data = (unsigned long)dev;
+ sp->timer.function = &speedo_timer; /* timer handler */
+ add_timer(&sp->timer);
+
+ /* No need to wait for the command unit to accept here. */
+ if ((sp->phy[0] & 0x8000) == 0)
+- mdio_read(ioaddr, sp->phy[0] & 0x1f, 0);
+-
++ mdio_read(dev, sp->phy[0] & 0x1f, 0);
+ return 0;
+ }
+
+@@ -1058,60 +1034,57 @@
+ struct speedo_private *sp = (struct speedo_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+
++ outw(SCBMaskAll, ioaddr + SCBCmd);
++
+ /* Start with a Tx threshold of 256 (0x..20.... 8 byte units). */
+ sp->tx_threshold = 0x01208000;
+
+ /* Set the segment registers to '0'. */
+- wait_for_cmd_done(ioaddr + SCBCmd);
++ wait_for_cmd_done(dev);
++ if (inb(ioaddr + SCBCmd)) {
++ outl(PortPartialReset, ioaddr + SCBPort);
++ udelay(10);
++ }
+ outl(0, ioaddr + SCBPointer);
+- /* impose a delay to avoid a bug */
+- inl(ioaddr + SCBPointer);
+- udelay(10);
+- outb(RxAddrLoad, ioaddr + SCBCmd);
+- wait_for_cmd_done(ioaddr + SCBCmd);
+- outb(CUCmdBase, ioaddr + SCBCmd);
+- wait_for_cmd_done(ioaddr + SCBCmd);
++ inl(ioaddr + SCBPointer); /* Flush to PCI. */
++ udelay(10); /* Bogus, but it avoids the bug. */
++ /* Note: these next two operations can take a while. */
++ do_slow_command(dev, RxAddrLoad);
++ do_slow_command(dev, CUCmdBase);
+
+ /* Load the statistics block and rx ring addresses. */
+ outl(virt_to_bus(&sp->lstats), ioaddr + SCBPointer);
++ inl(ioaddr + SCBPointer); /* Flush to PCI. */
+ outb(CUStatsAddr, ioaddr + SCBCmd);
+ sp->lstats.done_marker = 0;
+- wait_for_cmd_done(ioaddr + SCBCmd);
++ wait_for_cmd_done(dev);
+
+- if (sp->rx_ringp[sp->cur_rx % RX_RING_SIZE] == NULL) {
+- if (speedo_debug > 2)
+- printk(KERN_DEBUG "%s: NULL cur_rx in speedo_resume().\n",
+- dev->name);
+- } else {
+- outl(virt_to_bus(sp->rx_ringp[sp->cur_rx % RX_RING_SIZE]),
+- ioaddr + SCBPointer);
+- outb(RxStart, ioaddr + SCBCmd);
+- wait_for_cmd_done(ioaddr + SCBCmd);
+- }
+-
+- outb(CUDumpStats, ioaddr + SCBCmd);
++ outl(virt_to_bus(sp->rx_ringp[sp->cur_rx % RX_RING_SIZE]),
++ ioaddr + SCBPointer);
++ inl(ioaddr + SCBPointer); /* Flush to PCI. */
++ /* Note: RxStart should complete instantly. */
++ do_slow_command(dev, RxStart);
++ do_slow_command(dev, CUDumpStats);
+
+ /* Fill the first command with our physical address. */
+ {
+- struct descriptor *ias_cmd;
++ int entry = sp->cur_tx++ % TX_RING_SIZE;
++ struct descriptor *cur_cmd = (struct descriptor *)&sp->tx_ring[entry];
+
+- ias_cmd =
+- (struct descriptor *)&sp->tx_ring[sp->cur_tx++ % TX_RING_SIZE];
+ /* Avoid a bug(?!) here by marking the command already completed. */
+- ias_cmd->cmd_status = cpu_to_le32((CmdSuspend | CmdIASetup) | 0xa000);
+- ias_cmd->link =
++ cur_cmd->cmd_status = cpu_to_le32((CmdSuspend | CmdIASetup) | 0xa000);
++ cur_cmd->link =
+ virt_to_le32desc(&sp->tx_ring[sp->cur_tx % TX_RING_SIZE]);
+- memcpy(ias_cmd->params, dev->dev_addr, 6);
+- sp->last_cmd = ias_cmd;
++ memcpy(cur_cmd->params, dev->dev_addr, 6);
++ if (sp->last_cmd)
++ clear_suspend(sp->last_cmd);
++ sp->last_cmd = cur_cmd;
+ }
+
+ /* Start the chip's Tx process and unmask interrupts. */
+- wait_for_cmd_done(ioaddr + SCBCmd);
+ outl(virt_to_bus(&sp->tx_ring[sp->dirty_tx % TX_RING_SIZE]),
+ ioaddr + SCBPointer);
+- /* We are not ACK-ing FCP and ER in the interrupt handler yet so they should
+- remain masked --Dragan */
+- outw(CUStart | SCBMaskEarlyRx | SCBMaskFlowCtl, ioaddr + SCBCmd);
++ outw(CUStart, ioaddr + SCBCmd);
+ }
+
+ /* Media monitoring and control. */
+@@ -1121,90 +1094,116 @@
+ struct speedo_private *sp = (struct speedo_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+ int phy_num = sp->phy[0] & 0x1f;
++ int status = inw(ioaddr + SCBStatus);
+
++ if (sp->msg_level & NETIF_MSG_TIMER)
++ printk(KERN_DEBUG "%s: Interface monitor tick, chip status %4.4x.\n",
++ dev->name, status);
++
++ /* Normally we check every two seconds. */
++ sp->timer.expires = jiffies + 2*HZ;
++
++ if (sp->polling) {
++ /* Continue to be annoying. */
++ if (status & 0xfc00) {
++ speedo_interrupt(dev->irq, dev, 0);
++ if (jiffies - sp->last_reset > 10*HZ) {
++ printk(KERN_ERR "%s: IRQ %d is still blocked!\n",
++ dev->name, dev->irq);
++ sp->last_reset = jiffies;
++ }
++ } else if (jiffies - sp->last_reset > 10*HZ)
++ sp->polling = 0;
++ sp->timer.expires = jiffies + 2;
++ }
+ /* We have MII and lost link beat. */
+ if ((sp->phy[0] & 0x8000) == 0) {
+- int partner = mdio_read(ioaddr, phy_num, 5);
++ int partner = mdio_read(dev, phy_num, 5);
+ if (partner != sp->partner) {
+ int flow_ctrl = sp->advertising & partner & 0x0400 ? 1 : 0;
+- if (speedo_debug > 2) {
+- printk(KERN_DEBUG "%s: Link status change.\n", dev->name);
+- printk(KERN_DEBUG "%s: Old partner %x, new %x, adv %x.\n",
+- dev->name, sp->partner, partner, sp->advertising);
+- }
+ sp->partner = partner;
+ if (flow_ctrl != sp->flow_ctrl) {
+ sp->flow_ctrl = flow_ctrl;
+- sp->rx_mode = -1; /* Trigger a reload. */
++ sp->rx_mode = RxInvalidMode; /* Trigger a reload. */
+ }
+ /* Clear sticky bit. */
+- mdio_read(ioaddr, phy_num, 1);
++ mdio_read(dev, phy_num, 1);
+ /* If link beat has returned... */
+- if (mdio_read(ioaddr, phy_num, 1) & 0x0004)
+- dev->flags |= IFF_RUNNING;
++ if (mdio_read(dev, phy_num, 1) & 0x0004)
++ netif_link_up(dev);
+ else
+- dev->flags &= ~IFF_RUNNING;
++ netif_link_down(dev);
+ }
+ }
+- if (speedo_debug > 3) {
+- printk(KERN_DEBUG "%s: Media control tick, status %4.4x.\n",
+- dev->name, inw(ioaddr + SCBStatus));
++
++ /* This no longer has a false-trigger window. */
++ if (sp->cur_tx - sp->dirty_tx > 1 &&
++ (jiffies - dev->trans_start) > TX_TIMEOUT &&
++ (jiffies - sp->last_cmd_time) > TX_TIMEOUT) {
++ if (status == 0xffff) {
++ if (jiffies - sp->last_reset > 10*HZ) {
++ sp->last_reset = jiffies;
++ printk(KERN_ERR "%s: The EEPro100 chip is missing!\n",
++ dev->name);
++ }
++ } else if (status & 0xfc00) {
++ /* We have a blocked IRQ line. This should never happen, but
++ we recover as best we can.*/
++ if ( ! sp->polling) {
++ if (jiffies - sp->last_reset > 10*HZ) {
++ printk(KERN_ERR "%s: IRQ %d is physically blocked! (%4.4x)"
++ "Failing back to low-rate polling.\n",
++ dev->name, dev->irq, status);
++ sp->last_reset = jiffies;
++ }
++ sp->polling = 1;
++ }
++ speedo_interrupt(dev->irq, dev, 0);
++ sp->timer.expires = jiffies + 2; /* Avoid */
++ } else {
++ speedo_tx_timeout(dev);
++ sp->last_reset = jiffies;
++ }
+ }
+- if (sp->rx_mode < 0 ||
++ if (sp->rx_mode == RxInvalidMode ||
+ (sp->rx_bug && jiffies - sp->last_rx_time > 2*HZ)) {
+ /* We haven't received a packet in a Long Time. We might have been
+ bitten by the receiver hang bug. This can be cleared by sending
+ a set multicast list command. */
+- if (speedo_debug > 2)
+- printk(KERN_DEBUG "%s: Sending a multicast list set command"
+- " from a timer routine.\n", dev->name);
+ set_rx_mode(dev);
+ }
+- /* We must continue to monitor the media. */
+- sp->timer.expires = RUN_AT(2*HZ); /* 2.0 sec. */
+ add_timer(&sp->timer);
+ }
+
+ static void speedo_show_state(struct net_device *dev)
+ {
+ struct speedo_private *sp = (struct speedo_private *)dev->priv;
+-#if 0
+- long ioaddr = dev->base_addr;
+ int phy_num = sp->phy[0] & 0x1f;
+-#endif
+ int i;
+
+ /* Print a few items for debugging. */
+- if (speedo_debug > 0) {
++ if (sp->msg_level & NETIF_MSG_DRV) {
+ int i;
+- printk(KERN_DEBUG "%s: Tx ring dump, Tx queue %u / %u:\n", dev->name,
++ printk(KERN_DEBUG "%s: Tx ring dump, Tx queue %d / %d:\n", dev->name,
+ sp->cur_tx, sp->dirty_tx);
+ for (i = 0; i < TX_RING_SIZE; i++)
+- printk(KERN_DEBUG "%s: %c%c%2d %8.8x.\n", dev->name,
++ printk(KERN_DEBUG "%s: %c%c%d %8.8x.\n", dev->name,
+ i == sp->dirty_tx % TX_RING_SIZE ? '*' : ' ',
+ i == sp->cur_tx % TX_RING_SIZE ? '=' : ' ',
+ i, sp->tx_ring[i].status);
+ }
+- printk(KERN_DEBUG "%s: Printing Rx ring"
+- " (next to receive into %u, dirty index %u).\n",
+- dev->name, sp->cur_rx, sp->dirty_rx);
++ printk(KERN_DEBUG "%s:Printing Rx ring (next to receive into %d).\n",
++ dev->name, sp->cur_rx);
+
+ for (i = 0; i < RX_RING_SIZE; i++)
+- printk(KERN_DEBUG "%s: %c%c%c%2d %8.8x.\n", dev->name,
+- sp->rx_ringp[i] == sp->last_rxf ? 'l' : ' ',
+- i == sp->dirty_rx % RX_RING_SIZE ? '*' : ' ',
+- i == sp->cur_rx % RX_RING_SIZE ? '=' : ' ',
+- i, (sp->rx_ringp[i] != NULL) ?
+- (unsigned)sp->rx_ringp[i]->status : 0);
++ printk(KERN_DEBUG " Rx ring entry %d %8.8x.\n",
++ i, sp->rx_ringp[i] ? (int)sp->rx_ringp[i]->status : 0);
+
+-#if 0
+ for (i = 0; i < 16; i++) {
+- /* FIXME: what does it mean? --SAW */
+ if (i == 6) i = 21;
+- printk(KERN_DEBUG "%s: PHY index %d register %d is %4.4x.\n",
+- dev->name, phy_num, i, mdio_read(ioaddr, phy_num, i));
++ printk(KERN_DEBUG " PHY index %d register %d is %4.4x.\n",
++ phy_num, i, mdio_read(dev, phy_num, i));
+ }
+-#endif
+
+ }
+
+@@ -1217,10 +1216,18 @@
+ int i;
+
+ sp->cur_rx = 0;
++#if defined(CONFIG_VLAN)
++ /* Note that buffer sizing is not a run-time check! */
++ sp->rx_buf_sz = dev->mtu + 14 + sizeof(struct RxFD) + 4;
++#else
++ sp->rx_buf_sz = dev->mtu + 14 + sizeof(struct RxFD);
++#endif
++ if (sp->rx_buf_sz < PKT_BUF_SZ)
++ sp->rx_buf_sz = PKT_BUF_SZ;
+
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ struct sk_buff *skb;
+- skb = dev_alloc_skb(PKT_BUF_SZ + sizeof(struct RxFD));
++ skb = dev_alloc_skb(sp->rx_buf_sz);
+ sp->rx_skbuff[i] = skb;
+ if (skb == NULL)
+ break; /* OK. Just initially short of Rx bufs. */
+@@ -1233,9 +1240,13 @@
+ last_rxf = rxf;
+ rxf->status = cpu_to_le32(0x00000001); /* '1' is flag value only. */
+ rxf->link = 0; /* None yet. */
+- /* This field unused by i82557. */
++ /* This field unused by i82557, we use it as a consistency check. */
++#ifdef final_version
+ rxf->rx_buf_addr = 0xffffffff;
+- rxf->count = cpu_to_le32(PKT_BUF_SZ << 16);
++#else
++ rxf->rx_buf_addr = virt_to_bus(skb->tail);
++#endif
++ rxf->count = cpu_to_le32((sp->rx_buf_sz - sizeof(struct RxFD)) << 16);
+ }
+ sp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
+ /* Mark the last entry as end-of-list. */
+@@ -1243,121 +1254,86 @@
+ sp->last_rxf = last_rxf;
+ }
+
+-static void speedo_purge_tx(struct net_device *dev)
+-{
+- struct speedo_private *sp = (struct speedo_private *)dev->priv;
+- int entry;
+-
+- while ((int)(sp->cur_tx - sp->dirty_tx) > 0) {
+- entry = sp->dirty_tx % TX_RING_SIZE;
+- if (sp->tx_skbuff[entry]) {
+- sp->stats.tx_errors++;
+- dev_free_skb(sp->tx_skbuff[entry]);
+- sp->tx_skbuff[entry] = 0;
+- }
+- sp->dirty_tx++;
+- }
+- while (sp->mc_setup_head != NULL) {
+- struct speedo_mc_block *t;
+- if (speedo_debug > 1)
+- printk(KERN_DEBUG "%s: freeing mc frame.\n", dev->name);
+- t = sp->mc_setup_head->next;
+- kfree(sp->mc_setup_head);
+- sp->mc_setup_head = t;
+- }
+- sp->mc_setup_tail = NULL;
+- sp->tx_full = 0;
+- netif_wake_queue(dev);
+-}
+-
+-static void reset_mii(struct net_device *dev)
+-{
+- struct speedo_private *sp = (struct speedo_private *)dev->priv;
+- long ioaddr = dev->base_addr;
+- /* Reset the MII transceiver, suggested by Fred Young @ scalable.com. */
+- if ((sp->phy[0] & 0x8000) == 0) {
+- int phy_addr = sp->phy[0] & 0x1f;
+- int advertising = mdio_read(ioaddr, phy_addr, 4);
+- int mii_bmcr = mdio_read(ioaddr, phy_addr, 0);
+- mdio_write(ioaddr, phy_addr, 0, 0x0400);
+- mdio_write(ioaddr, phy_addr, 1, 0x0000);
+- mdio_write(ioaddr, phy_addr, 4, 0x0000);
+- mdio_write(ioaddr, phy_addr, 0, 0x8000);
+-#ifdef honor_default_port
+- mdio_write(ioaddr, phy_addr, 0, mii_ctrl[dev->default_port & 7]);
+-#else
+- mdio_read(ioaddr, phy_addr, 0);
+- mdio_write(ioaddr, phy_addr, 0, mii_bmcr);
+- mdio_write(ioaddr, phy_addr, 4, advertising);
+-#endif
+- }
+-}
+-
+ static void speedo_tx_timeout(struct net_device *dev)
+ {
+ struct speedo_private *sp = (struct speedo_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+ int status = inw(ioaddr + SCBStatus);
+- unsigned long flags;
+
+ printk(KERN_WARNING "%s: Transmit timed out: status %4.4x "
+- " %4.4x at %d/%d command %8.8x.\n",
+- dev->name, status, inw(ioaddr + SCBCmd),
++ " %4.4x at %d/%d commands %8.8x %8.8x %8.8x.\n",
++ dev->name, status, (int)inw(ioaddr + SCBCmd),
+ sp->dirty_tx, sp->cur_tx,
+- sp->tx_ring[sp->dirty_tx % TX_RING_SIZE].status);
++ sp->tx_ring[(sp->dirty_tx+0) % TX_RING_SIZE].status,
++ sp->tx_ring[(sp->dirty_tx+1) % TX_RING_SIZE].status,
++ sp->tx_ring[(sp->dirty_tx+2) % TX_RING_SIZE].status);
+
+ /* Trigger a stats dump to give time before the reset. */
+ speedo_get_stats(dev);
+
+ speedo_show_state(dev);
+-#if 0
+ if ((status & 0x00C0) != 0x0080
+- && (status & 0x003C) == 0x0010) {
++ && (status & 0x003C) == 0x0010 && 0) {
+ /* Only the command unit has stopped. */
+ printk(KERN_WARNING "%s: Trying to restart the transmitter...\n",
+ dev->name);
+ outl(virt_to_bus(&sp->tx_ring[sp->dirty_tx % TX_RING_SIZE]),
+ ioaddr + SCBPointer);
+ outw(CUStart, ioaddr + SCBCmd);
+- reset_mii(dev);
+ } else {
+-#else
+- {
+-#endif
+- start_bh_atomic();
+- /* Ensure that timer routine doesn't run! */
+- del_timer(&sp->timer);
+- end_bh_atomic();
++ printk(KERN_WARNING "%s: Restarting the chip...\n",
++ dev->name);
+ /* Reset the Tx and Rx units. */
+ outl(PortReset, ioaddr + SCBPort);
+- /* We may get spurious interrupts here. But I don't think that they
+- may do much harm. 1999/12/09 SAW */
++ if (sp->msg_level & NETIF_MSG_TX_ERR)
++ speedo_show_state(dev);
+ udelay(10);
+- /* Disable interrupts. */
+- outw(SCBMaskAll, ioaddr + SCBCmd);
+- synchronize_irq();
+- speedo_tx_buffer_gc(dev);
+- /* Free as much as possible.
+- It helps to recover from a hang because of out-of-memory.
+- It also simplifies speedo_resume() in case TX ring is full or
+- close-to-be full. */
+- speedo_purge_tx(dev);
+- speedo_refill_rx_buffers(dev, 1);
+- spin_lock_irqsave(&sp->lock, flags);
+ speedo_resume(dev);
+- sp->rx_mode = -1;
+- dev->trans_start = jiffies;
+- spin_unlock_irqrestore(&sp->lock, flags);
+- set_rx_mode(dev); /* it takes the spinlock itself --SAW */
+- /* Reset MII transceiver. Do it before starting the timer to serialize
+- mdio_xxx operations. Yes, it's a paranoya :-) 2000/05/09 SAW */
+- reset_mii(dev);
+- sp->timer.expires = RUN_AT(2*HZ);
+- add_timer(&sp->timer);
+ }
++ /* Reset the MII transceiver, suggested by Fred Young @ scalable.com. */
++ if ((sp->phy[0] & 0x8000) == 0) {
++ int phy_addr = sp->phy[0] & 0x1f;
++ int advertising = mdio_read(dev, phy_addr, 4);
++ int mii_bmcr = mdio_read(dev, phy_addr, 0);
++ mdio_write(ioaddr, phy_addr, 0, 0x0400);
++ mdio_write(ioaddr, phy_addr, 1, 0x0000);
++ mdio_write(ioaddr, phy_addr, 4, 0x0000);
++ mdio_write(ioaddr, phy_addr, 0, 0x8000);
++#ifdef honor_default_port
++ mdio_write(ioaddr, phy_addr, 0, mii_ctrl[dev->default_port & 7]);
++#else
++ mdio_read(dev, phy_addr, 0);
++ mdio_write(ioaddr, phy_addr, 0, mii_bmcr);
++ mdio_write(ioaddr, phy_addr, 4, advertising);
++#endif
++ }
++ sp->stats.tx_errors++;
++ dev->trans_start = jiffies;
+ return;
+ }
+
++/* Handle the interrupt cases when something unexpected happens. */
++static void speedo_intr_error(struct net_device *dev, int intr_status)
++{
++ long ioaddr = dev->base_addr;
++ struct speedo_private *sp = (struct speedo_private *)dev->priv;
++
++ if (intr_status & IntrRxSuspend) {
++ if ((intr_status & 0x003c) == 0x0028) /* No more Rx buffers. */
++ outb(RxResumeNoResources, ioaddr + SCBCmd);
++ else if ((intr_status & 0x003c) == 0x0008) { /* No resources (why?!) */
++ printk(KERN_DEBUG "%s: Unknown receiver error, status=%#4.4x.\n",
++ dev->name, intr_status);
++ /* No idea of what went wrong. Restart the receiver. */
++ outl(virt_to_bus(sp->rx_ringp[sp->cur_rx % RX_RING_SIZE]),
++ ioaddr + SCBPointer);
++ outb(RxStart, ioaddr + SCBCmd);
++ }
++ sp->stats.rx_errors++;
++ }
++}
++
++
+ static int
+ speedo_start_xmit(struct sk_buff *skb, struct net_device *dev)
+ {
+@@ -1365,154 +1341,82 @@
+ long ioaddr = dev->base_addr;
+ int entry;
+
+-#if ! defined(HAS_NETIF_QUEUE)
+- if (test_bit(0, (void*)&dev->tbusy) != 0) {
++ /* Block a timer-based transmit from overlapping. This could better be
++ done with atomic_swap(1, dev->tbusy), but set_bit() works as well.
++ If this ever occurs the queue layer is doing something evil! */
++ if (netif_pause_tx_queue(dev) != 0) {
+ int tickssofar = jiffies - dev->trans_start;
+ if (tickssofar < TX_TIMEOUT - 2)
+ return 1;
+ if (tickssofar < TX_TIMEOUT) {
+ /* Reap sent packets from the full Tx queue. */
+- unsigned long flags;
+- /* Take a spinlock to make wait_for_cmd_done and sending the
+- command atomic. --SAW */
+- spin_lock_irqsave(&sp->lock, flags);
+- wait_for_cmd_done(ioaddr + SCBCmd);
+ outw(SCBTriggerIntr, ioaddr + SCBCmd);
+- spin_unlock_irqrestore(&sp->lock, flags);
+ return 1;
+ }
+ speedo_tx_timeout(dev);
+ return 1;
+ }
+-#endif
++
++ /* Caution: the write order is important here, set the base address
++ with the "ownership" bits last. */
+
+ { /* Prevent interrupts from changing the Tx ring from underneath us. */
+ unsigned long flags;
+
+ spin_lock_irqsave(&sp->lock, flags);
+-
+- /* Check if there are enough space. */
+- if ((int)(sp->cur_tx - sp->dirty_tx) >= TX_QUEUE_LIMIT) {
+- printk(KERN_ERR "%s: incorrect tbusy state, fixed.\n", dev->name);
+- netif_stop_queue(dev);
+- sp->tx_full = 1;
+- spin_unlock_irqrestore(&sp->lock, flags);
+- return 1;
+- }
+-
+ /* Calculate the Tx descriptor entry. */
+- entry = sp->cur_tx++ % TX_RING_SIZE;
++ entry = sp->cur_tx % TX_RING_SIZE;
+
+ sp->tx_skbuff[entry] = skb;
++ /* Todo: be a little more clever about setting the interrupt bit. */
+ sp->tx_ring[entry].status =
+ cpu_to_le32(CmdSuspend | CmdTx | CmdTxFlex);
+- if (!(entry & ((TX_RING_SIZE>>2)-1)))
+- sp->tx_ring[entry].status |= cpu_to_le32(CmdIntr);
++ sp->cur_tx++;
+ sp->tx_ring[entry].link =
+ virt_to_le32desc(&sp->tx_ring[sp->cur_tx % TX_RING_SIZE]);
++ /* We may nominally release the lock here. */
+ sp->tx_ring[entry].tx_desc_addr =
+ virt_to_le32desc(&sp->tx_ring[entry].tx_buf_addr0);
+ /* The data region is always in one buffer descriptor. */
+ sp->tx_ring[entry].count = cpu_to_le32(sp->tx_threshold);
+ sp->tx_ring[entry].tx_buf_addr0 = virt_to_le32desc(skb->data);
+ sp->tx_ring[entry].tx_buf_size0 = cpu_to_le32(skb->len);
+- /* Trigger the command unit resume. */
+- wait_for_cmd_done(ioaddr + SCBCmd);
+- clear_suspend(sp->last_cmd);
+- /* We want the time window between clearing suspend flag on the previous
+- command and resuming CU to be as small as possible.
+- Interrupts in between are very undesired. --SAW */
+- outb(CUResume, ioaddr + SCBCmd);
+- sp->last_cmd = (struct descriptor *)&sp->tx_ring[entry];
+-
+- /* Leave room for set_rx_mode(). If there is no more space than reserved
+- for multicast filter mark the ring as full. */
+- if ((int)(sp->cur_tx - sp->dirty_tx) >= TX_QUEUE_LIMIT) {
+- netif_stop_queue(dev);
+- sp->tx_full = 1;
++ /* Todo: perhaps leave the interrupt bit set if the Tx queue is more
++ than half full. Argument against: we should be receiving packets
++ and scavenging the queue. Argument for: if so, it shouldn't
++ matter. */
++ {
++ struct descriptor *last_cmd = sp->last_cmd;
++ sp->last_cmd = (struct descriptor *)&sp->tx_ring[entry];
++ clear_suspend(last_cmd);
+ }
+-
++ if (sp->cur_tx - sp->dirty_tx >= TX_QUEUE_LIMIT) {
++ sp->tx_full = 1;
++ netif_stop_tx_queue(dev);
++ } else
++ netif_unpause_tx_queue(dev);
+ spin_unlock_irqrestore(&sp->lock, flags);
+ }
+-
++ wait_for_cmd_done(dev);
++ outb(CUResume, ioaddr + SCBCmd);
+ dev->trans_start = jiffies;
+
+ return 0;
+ }
+
+-static void speedo_tx_buffer_gc(struct net_device *dev)
+-{
+- unsigned int dirty_tx;
+- struct speedo_private *sp = (struct speedo_private *)dev->priv;
+-
+- dirty_tx = sp->dirty_tx;
+- while ((int)(sp->cur_tx - dirty_tx) > 0) {
+- int entry = dirty_tx % TX_RING_SIZE;
+- int status = le32_to_cpu(sp->tx_ring[entry].status);
+-
+- if (speedo_debug > 5)
+- printk(KERN_DEBUG " scavenge candidate %d status %4.4x.\n",
+- entry, status);
+- if ((status & StatusComplete) == 0)
+- break; /* It still hasn't been processed. */
+- if (status & TxUnderrun)
+- if (sp->tx_threshold < 0x01e08000) {
+- if (speedo_debug > 2)
+- printk(KERN_DEBUG "%s: TX underrun, threshold adjusted.\n",
+- dev->name);
+- sp->tx_threshold += 0x00040000;
+- }
+- /* Free the original skb. */
+- if (sp->tx_skbuff[entry]) {
+- sp->stats.tx_packets++; /* Count only user packets. */
+- sp->stats.tx_bytes += sp->tx_skbuff[entry]->len;
+- dev_free_skb(sp->tx_skbuff[entry]);
+- sp->tx_skbuff[entry] = 0;
+- }
+- dirty_tx++;
+- }
+-
+- if (speedo_debug && (int)(sp->cur_tx - dirty_tx) > TX_RING_SIZE) {
+- printk(KERN_ERR "out-of-sync dirty pointer, %d vs. %d,"
+- " full=%d.\n",
+- dirty_tx, sp->cur_tx, sp->tx_full);
+- dirty_tx += TX_RING_SIZE;
+- }
+-
+- while (sp->mc_setup_head != NULL
+- && (int)(dirty_tx - sp->mc_setup_head->tx - 1) > 0) {
+- struct speedo_mc_block *t;
+- if (speedo_debug > 1)
+- printk(KERN_DEBUG "%s: freeing mc frame.\n", dev->name);
+- t = sp->mc_setup_head->next;
+- kfree(sp->mc_setup_head);
+- sp->mc_setup_head = t;
+- }
+- if (sp->mc_setup_head == NULL)
+- sp->mc_setup_tail = NULL;
+-
+- sp->dirty_tx = dirty_tx;
+-}
+-
+ /* The interrupt handler does all of the Rx thread work and cleans up
+ after the Tx thread. */
+ static void speedo_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
+ {
+ struct net_device *dev = (struct net_device *)dev_instance;
+ struct speedo_private *sp;
+- long ioaddr, boguscnt = max_interrupt_work;
+- unsigned short status;
+-
+-#ifndef final_version
+- if (dev == NULL) {
+- printk(KERN_ERR "speedo_interrupt(): irq %d for unknown device.\n", irq);
+- return;
+- }
+-#endif
++ long ioaddr;
++ int work_limit;
++ u16 status;
+
+ ioaddr = dev->base_addr;
+ sp = (struct speedo_private *)dev->priv;
+-
++ work_limit = sp->max_interrupt_work;
+ #ifndef final_version
+ /* A lock to prevent simultaneous entry on SMP machines. */
+ if (test_and_set_bit(0, (void*)&sp->in_interrupt)) {
+@@ -1521,211 +1425,108 @@
+ sp->in_interrupt = 0; /* Avoid halting machine. */
+ return;
+ }
+- dev->interrupt = 1;
+ #endif
+
+ do {
+ status = inw(ioaddr + SCBStatus);
++
++ if ((status & IntrAllNormal) == 0 || status == 0xffff)
++ break;
+ /* Acknowledge all of the current interrupt sources ASAP. */
+- /* Will change from 0xfc00 to 0xff00 when we start handling
+- FCP and ER interrupts --Dragan */
+- outw(status & 0xfc00, ioaddr + SCBStatus);
++ outw(status & IntrAllNormal, ioaddr + SCBStatus);
+
+- if (speedo_debug > 3)
++ if (sp->msg_level & NETIF_MSG_INTR)
+ printk(KERN_DEBUG "%s: interrupt status=%#4.4x.\n",
+ dev->name, status);
+
+- if ((status & 0xfc00) == 0)
+- break;
+-
+- /* Always check if all rx buffers are allocated. --SAW */
+- speedo_refill_rx_buffers(dev, 0);
+-
+- if ((status & 0x5000) || /* Packet received, or Rx error. */
+- (sp->rx_ring_state&(RrNoMem|RrPostponed)) == RrPostponed)
+- /* Need to gather the postponed packet. */
++ if (status & (IntrRxDone|IntrRxSuspend))
+ speedo_rx(dev);
+
+- if (status & 0x1000) {
++ /* The command unit did something, scavenge finished Tx entries. */
++ if (status & (IntrCmdDone | IntrCmdIdle | IntrDrvrIntr)) {
++ unsigned int dirty_tx;
++ /* We should nominally not need this lock. */
+ spin_lock(&sp->lock);
+- if ((status & 0x003c) == 0x0028) { /* No more Rx buffers. */
+- struct RxFD *rxf;
+- printk(KERN_WARNING "%s: card reports no RX buffers.\n",
+- dev->name);
+- rxf = sp->rx_ringp[sp->cur_rx % RX_RING_SIZE];
+- if (rxf == NULL) {
+- if (speedo_debug > 2)
+- printk(KERN_DEBUG
+- "%s: NULL cur_rx in speedo_interrupt().\n",
+- dev->name);
+- sp->rx_ring_state |= RrNoMem|RrNoResources;
+- } else if (rxf == sp->last_rxf) {
+- if (speedo_debug > 2)
+- printk(KERN_DEBUG
+- "%s: cur_rx is last in speedo_interrupt().\n",
+- dev->name);
+- sp->rx_ring_state |= RrNoMem|RrNoResources;
+- } else
+- outb(RxResumeNoResources, ioaddr + SCBCmd);
+- } else if ((status & 0x003c) == 0x0008) { /* No resources. */
+- struct RxFD *rxf;
+- printk(KERN_WARNING "%s: card reports no resources.\n",
+- dev->name);
+- rxf = sp->rx_ringp[sp->cur_rx % RX_RING_SIZE];
+- if (rxf == NULL) {
+- if (speedo_debug > 2)
+- printk(KERN_DEBUG
+- "%s: NULL cur_rx in speedo_interrupt().\n",
+- dev->name);
+- sp->rx_ring_state |= RrNoMem|RrNoResources;
+- } else if (rxf == sp->last_rxf) {
+- if (speedo_debug > 2)
+- printk(KERN_DEBUG
+- "%s: cur_rx is last in speedo_interrupt().\n",
+- dev->name);
+- sp->rx_ring_state |= RrNoMem|RrNoResources;
+- } else {
+- /* Restart the receiver. */
+- outl(virt_to_bus(sp->rx_ringp[sp->cur_rx % RX_RING_SIZE]),
+- ioaddr + SCBPointer);
+- outb(RxStart, ioaddr + SCBCmd);
++
++ dirty_tx = sp->dirty_tx;
++ while (sp->cur_tx - dirty_tx > 0) {
++ int entry = dirty_tx % TX_RING_SIZE;
++ int status = le32_to_cpu(sp->tx_ring[entry].status);
++
++ if (sp->msg_level & NETIF_MSG_INTR)
++ printk(KERN_DEBUG " scavenge candidate %d status %4.4x.\n",
++ entry, status);
++ if ((status & StatusComplete) == 0) {
++ /* Special case error check: look for descriptor that the
++ chip skipped(?). */
++ if (sp->cur_tx - dirty_tx > 2 &&
++ (sp->tx_ring[(dirty_tx+1) % TX_RING_SIZE].status
++ & cpu_to_le32(StatusComplete))) {
++ printk(KERN_ERR "%s: Command unit failed to mark "
++ "command %8.8x as complete at %d.\n",
++ dev->name, status, dirty_tx);
++ } else
++ break; /* It still hasn't been processed. */
+ }
++ if ((status & TxUnderrun) &&
++ (sp->tx_threshold < 0x01e08000)) {
++ sp->tx_threshold += 0x00040000;
++ if (sp->msg_level & NETIF_MSG_TX_ERR)
++ printk(KERN_DEBUG "%s: Tx threshold increased, "
++ "%#8.8x.\n", dev->name, sp->tx_threshold);
++ }
++ /* Free the original skb. */
++ if (sp->tx_skbuff[entry]) {
++ sp->stats.tx_packets++; /* Count only user packets. */
++#if LINUX_VERSION_CODE > 0x20127
++ sp->stats.tx_bytes += sp->tx_skbuff[entry]->len;
++#endif
++ dev_free_skb_irq(sp->tx_skbuff[entry]);
++ sp->tx_skbuff[entry] = 0;
++ } else if ((status & 0x70000) == CmdNOp)
++ sp->mc_setup_busy = 0;
++ dirty_tx++;
+ }
+- sp->stats.rx_errors++;
+- spin_unlock(&sp->lock);
+- }
+
+- if ((sp->rx_ring_state&(RrNoMem|RrNoResources)) == RrNoResources) {
+- printk(KERN_WARNING
+- "%s: restart the receiver after a possible hang.\n",
+- dev->name);
+- spin_lock(&sp->lock);
+- /* Restart the receiver.
+- I'm not sure if it's always right to restart the receiver
+- here but I don't know another way to prevent receiver hangs.
+- 1999/12/25 SAW */
+- outl(virt_to_bus(sp->rx_ringp[sp->cur_rx % RX_RING_SIZE]),
+- ioaddr + SCBPointer);
+- outb(RxStart, ioaddr + SCBCmd);
+- sp->rx_ring_state &= ~RrNoResources;
+- spin_unlock(&sp->lock);
+- }
++#ifndef final_version
++ if (sp->cur_tx - dirty_tx > TX_RING_SIZE) {
++ printk(KERN_ERR "out-of-sync dirty pointer, %d vs. %d,"
++ " full=%d.\n",
++ dirty_tx, sp->cur_tx, sp->tx_full);
++ dirty_tx += TX_RING_SIZE;
++ }
++#endif
+
+- /* User interrupt, Command/Tx unit interrupt or CU not active. */
+- if (status & 0xA400) {
+- spin_lock(&sp->lock);
+- speedo_tx_buffer_gc(dev);
++ sp->dirty_tx = dirty_tx;
+ if (sp->tx_full
+- && (int)(sp->cur_tx - sp->dirty_tx) < TX_QUEUE_UNFULL) {
+- /* The ring is no longer full. */
++ && sp->cur_tx - dirty_tx < TX_QUEUE_UNFULL) {
++ /* The ring is no longer full, clear tbusy. */
+ sp->tx_full = 0;
+- netif_wake_queue(dev); /* Attention: under a spinlock. --SAW */
++ netif_resume_tx_queue(dev);
+ }
+ spin_unlock(&sp->lock);
+ }
+
+- if (--boguscnt < 0) {
++ if (status & IntrRxSuspend)
++ speedo_intr_error(dev, status);
++
++ if (--work_limit < 0) {
+ printk(KERN_ERR "%s: Too much work at interrupt, status=0x%4.4x.\n",
+ dev->name, status);
+ /* Clear all interrupt sources. */
+- /* Will change from 0xfc00 to 0xff00 when we start handling
+- FCP and ER interrupts --Dragan */
+ outl(0xfc00, ioaddr + SCBStatus);
+ break;
+ }
+ } while (1);
+
+- if (speedo_debug > 3)
++ if (sp->msg_level & NETIF_MSG_INTR)
+ printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
+- dev->name, inw(ioaddr + SCBStatus));
++ dev->name, (int)inw(ioaddr + SCBStatus));
+
+- dev->interrupt = 0;
+ clear_bit(0, (void*)&sp->in_interrupt);
+ return;
+ }
+
+-static inline struct RxFD *speedo_rx_alloc(struct net_device *dev, int entry)
+-{
+- struct speedo_private *sp = (struct speedo_private *)dev->priv;
+- struct RxFD *rxf;
+- struct sk_buff *skb;
+- /* Get a fresh skbuff to replace the consumed one. */
+- skb = dev_alloc_skb(PKT_BUF_SZ + sizeof(struct RxFD));
+- sp->rx_skbuff[entry] = skb;
+- if (skb == NULL) {
+- sp->rx_ringp[entry] = NULL;
+- return NULL;
+- }
+- rxf = sp->rx_ringp[entry] = (struct RxFD *)skb->tail;
+- skb->dev = dev;
+- skb_reserve(skb, sizeof(struct RxFD));
+- rxf->rx_buf_addr = virt_to_bus(skb->tail);
+- return rxf;
+-}
+-
+-static inline void speedo_rx_link(struct net_device *dev, int entry,
+- struct RxFD *rxf)
+-{
+- struct speedo_private *sp = (struct speedo_private *)dev->priv;
+- rxf->status = cpu_to_le32(0xC0000001); /* '1' for driver use only. */
+- rxf->link = 0; /* None yet. */
+- rxf->count = cpu_to_le32(PKT_BUF_SZ << 16);
+- sp->last_rxf->link = virt_to_le32desc(rxf);
+- sp->last_rxf->status &= cpu_to_le32(~0xC0000000);
+- sp->last_rxf = rxf;
+-}
+-
+-static int speedo_refill_rx_buf(struct net_device *dev, int force)
+-{
+- struct speedo_private *sp = (struct speedo_private *)dev->priv;
+- int entry;
+- struct RxFD *rxf;
+-
+- entry = sp->dirty_rx % RX_RING_SIZE;
+- if (sp->rx_skbuff[entry] == NULL) {
+- rxf = speedo_rx_alloc(dev, entry);
+- if (rxf == NULL) {
+- unsigned int forw;
+- int forw_entry;
+- if (speedo_debug > 2 || !(sp->rx_ring_state & RrOOMReported)) {
+- printk(KERN_WARNING "%s: can't fill rx buffer (force %d)!\n",
+- dev->name, force);
+- speedo_show_state(dev);
+- sp->rx_ring_state |= RrOOMReported;
+- }
+- if (!force)
+- return -1; /* Better luck next time! */
+- /* Borrow an skb from one of next entries. */
+- for (forw = sp->dirty_rx + 1; forw != sp->cur_rx; forw++)
+- if (sp->rx_skbuff[forw % RX_RING_SIZE] != NULL)
+- break;
+- if (forw == sp->cur_rx)
+- return -1;
+- forw_entry = forw % RX_RING_SIZE;
+- sp->rx_skbuff[entry] = sp->rx_skbuff[forw_entry];
+- sp->rx_skbuff[forw_entry] = NULL;
+- rxf = sp->rx_ringp[forw_entry];
+- sp->rx_ringp[forw_entry] = NULL;
+- sp->rx_ringp[entry] = rxf;
+- }
+- } else {
+- rxf = sp->rx_ringp[entry];
+- }
+- speedo_rx_link(dev, entry, rxf);
+- sp->dirty_rx++;
+- sp->rx_ring_state &= ~(RrNoMem|RrOOMReported); /* Mark the progress. */
+- return 0;
+-}
+-
+-static void speedo_refill_rx_buffers(struct net_device *dev, int force)
+-{
+- struct speedo_private *sp = (struct speedo_private *)dev->priv;
+-
+- /* Refill the RX ring. */
+- while ((int)(sp->cur_rx - sp->dirty_rx) > 0 &&
+- speedo_refill_rx_buf(dev, force) != -1);
+-}
+-
+ static int
+ speedo_rx(struct net_device *dev)
+ {
+@@ -1733,63 +1534,48 @@
+ int entry = sp->cur_rx % RX_RING_SIZE;
+ int status;
+ int rx_work_limit = sp->dirty_rx + RX_RING_SIZE - sp->cur_rx;
+- int alloc_ok = 1;
+
+- if (speedo_debug > 4)
++ if (sp->msg_level & NETIF_MSG_RX_STATUS)
+ printk(KERN_DEBUG " In speedo_rx().\n");
+ /* If we own the next entry, it's a new packet. Send it up. */
+ while (sp->rx_ringp[entry] != NULL &&
+ (status = le32_to_cpu(sp->rx_ringp[entry]->status)) & RxComplete) {
+- int pkt_len = le32_to_cpu(sp->rx_ringp[entry]->count) & 0x3fff;
++ int desc_count = le32_to_cpu(sp->rx_ringp[entry]->count);
++ int pkt_len = desc_count & 0x07ff;
+
+ if (--rx_work_limit < 0)
+ break;
+-
+- /* Check for a rare out-of-memory case: the current buffer is
+- the last buffer allocated in the RX ring. --SAW */
+- if (sp->last_rxf == sp->rx_ringp[entry]) {
+- /* Postpone the packet. It'll be reaped at an interrupt when this
+- packet is no longer the last packet in the ring. */
+- if (speedo_debug > 2)
+- printk(KERN_DEBUG "%s: RX packet postponed!\n",
+- dev->name);
+- sp->rx_ring_state |= RrPostponed;
+- break;
+- }
+-
+- if (speedo_debug > 4)
++ if (sp->msg_level & NETIF_MSG_RX_STATUS)
+ printk(KERN_DEBUG " speedo_rx() status %8.8x len %d.\n", status,
+ pkt_len);
+ if ((status & (RxErrTooBig|RxOK|0x0f90)) != RxOK) {
+ if (status & RxErrTooBig)
+ printk(KERN_ERR "%s: Ethernet frame overran the Rx buffer, "
+ "status %8.8x!\n", dev->name, status);
+- else if (! (status & RxOK)) {
++ else if ( ! (status & RxOK)) {
+ /* There was a fatal error. This *should* be impossible. */
+ sp->stats.rx_errors++;
+ printk(KERN_ERR "%s: Anomalous event in speedo_rx(), "
+- "status %8.8x.\n",
+- dev->name, status);
++ "status %8.8x.\n", dev->name, status);
+ }
+ } else {
+ struct sk_buff *skb;
+
++ if (sp->drv_flags & HasChksum)
++ pkt_len -= 2;
++
+ /* Check if the packet is long enough to just accept without
+ copying to a properly sized skbuff. */
+- if (pkt_len < rx_copybreak
++ if (pkt_len < sp->rx_copybreak
+ && (skb = dev_alloc_skb(pkt_len + 2)) != 0) {
+ skb->dev = dev;
+ skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
+ /* 'skb_put()' points to the start of sk_buff data area. */
+-#if !defined(__alpha__)
+ /* Packet is in one chunk -- we can copy + cksum. */
+ eth_copy_and_sum(skb, sp->rx_skbuff[entry]->tail, pkt_len, 0);
+ skb_put(skb, pkt_len);
+-#else
+- memcpy(skb_put(skb, pkt_len), sp->rx_skbuff[entry]->tail,
+- pkt_len);
+-#endif
+ } else {
++ void *temp;
+ /* Pass up the already-filled skbuff. */
+ skb = sp->rx_skbuff[entry];
+ if (skb == NULL) {
+@@ -1798,27 +1584,64 @@
+ break;
+ }
+ sp->rx_skbuff[entry] = NULL;
+- skb_put(skb, pkt_len);
++ temp = skb_put(skb, pkt_len);
++#if !defined(final_version) && !defined(__powerpc__)
++ if (bus_to_virt(sp->rx_ringp[entry]->rx_buf_addr) != temp)
++ printk(KERN_ERR "%s: Rx consistency error -- the skbuff "
++ "addresses do not match in speedo_rx: %p vs. %p "
++ "/ %p.\n", dev->name,
++ bus_to_virt(sp->rx_ringp[entry]->rx_buf_addr),
++ skb->head, temp);
++#endif
+ sp->rx_ringp[entry] = NULL;
+ }
+ skb->protocol = eth_type_trans(skb, dev);
++ if (sp->drv_flags & HasChksum) {
++#if 0
++ u16 csum = get_unaligned((u16*)(skb->head + pkt_len))
++ if (desc_count & 0x8000)
++ skb->ip_summed = CHECKSUM_UNNECESSARY;
++#endif
++ }
+ netif_rx(skb);
+ sp->stats.rx_packets++;
++#if LINUX_VERSION_CODE > 0x20127
+ sp->stats.rx_bytes += pkt_len;
++#endif
+ }
+ entry = (++sp->cur_rx) % RX_RING_SIZE;
+- sp->rx_ring_state &= ~RrPostponed;
+- /* Refill the recently taken buffers.
+- Do it one-by-one to handle traffic bursts better. */
+- if (alloc_ok && speedo_refill_rx_buf(dev, 0) == -1)
+- alloc_ok = 0;
+ }
+
+- /* Try hard to refill the recently taken buffers. */
+- speedo_refill_rx_buffers(dev, 1);
++ /* Refill the Rx ring buffers. */
++ for (; sp->cur_rx - sp->dirty_rx > 0; sp->dirty_rx++) {
++ struct RxFD *rxf;
++ entry = sp->dirty_rx % RX_RING_SIZE;
++ if (sp->rx_skbuff[entry] == NULL) {
++ struct sk_buff *skb;
++ /* Get a fresh skbuff to replace the consumed one. */
++ skb = dev_alloc_skb(sp->rx_buf_sz);
++ sp->rx_skbuff[entry] = skb;
++ if (skb == NULL) {
++ sp->rx_ringp[entry] = NULL;
++ sp->alloc_failures++;
++ break; /* Better luck next time! */
++ }
++ rxf = sp->rx_ringp[entry] = (struct RxFD *)skb->tail;
++ skb->dev = dev;
++ skb_reserve(skb, sizeof(struct RxFD));
++ rxf->rx_buf_addr = virt_to_le32desc(skb->tail);
++ } else {
++ rxf = sp->rx_ringp[entry];
++ }
++ rxf->status = cpu_to_le32(0xC0000001); /* '1' for driver use only. */
++ rxf->link = 0; /* None yet. */
++ rxf->count = cpu_to_le32((sp->rx_buf_sz - sizeof(struct RxFD)) << 16);
++ sp->last_rxf->link = virt_to_le32desc(rxf);
++ sp->last_rxf->status &= cpu_to_le32(~0xC0000000);
++ sp->last_rxf = rxf;
++ }
+
+ sp->last_rx_time = jiffies;
+-
+ return 0;
+ }
+
+@@ -1829,34 +1652,33 @@
+ struct speedo_private *sp = (struct speedo_private *)dev->priv;
+ int i;
+
+- dev->start = 0;
+- netif_stop_queue(dev);
++ netif_stop_tx_queue(dev);
+
+- if (speedo_debug > 1)
+- printk(KERN_DEBUG "%s: Shutting down ethercard, status was %4.4x.\n",
+- dev->name, inw(ioaddr + SCBStatus));
++ if (sp->msg_level & NETIF_MSG_IFDOWN)
++ printk(KERN_DEBUG "%s: Shutting down ethercard, status was %4.4x.\n"
++ KERN_DEBUG "%s: Cumlative allocation failures: %d.\n",
++ dev->name, (int)inw(ioaddr + SCBStatus),
++ dev->name, sp->alloc_failures);
+
+ /* Shut off the media monitoring timer. */
+- start_bh_atomic();
+ del_timer(&sp->timer);
+- end_bh_atomic();
+
+ /* Shutting down the chip nicely fails to disable flow control. So.. */
+ outl(PortPartialReset, ioaddr + SCBPort);
+
+ free_irq(dev->irq, dev);
+
+- /* Print a few items for debugging. */
+- if (speedo_debug > 3)
+- speedo_show_state(dev);
+-
+- /* Free all the skbuffs in the Rx and Tx queues. */
++ /* Free all the skbuffs in the Rx and Tx queues. */
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ struct sk_buff *skb = sp->rx_skbuff[i];
+ sp->rx_skbuff[i] = 0;
+ /* Clear the Rx descriptors. */
+- if (skb)
++ if (skb) {
++#if LINUX_VERSION_CODE < 0x20100
++ skb->free = 1;
++#endif
+ dev_free_skb(skb);
++ }
+ }
+
+ for (i = 0; i < TX_RING_SIZE; i++) {
+@@ -1866,18 +1688,17 @@
+ if (skb)
+ dev_free_skb(skb);
+ }
++ if (sp->mc_setup_frm) {
++ kfree(sp->mc_setup_frm);
++ sp->mc_setup_frm_len = 0;
++ }
+
+- /* Free multicast setting blocks. */
+- for (i = 0; sp->mc_setup_head != NULL; i++) {
+- struct speedo_mc_block *t;
+- t = sp->mc_setup_head->next;
+- kfree(sp->mc_setup_head);
+- sp->mc_setup_head = t;
+- }
+- sp->mc_setup_tail = NULL;
+- if (speedo_debug > 0)
+- printk(KERN_DEBUG "%s: %d multicast blocks dropped.\n", dev->name, i);
++ /* Print a few items for debugging. */
++ if (sp->msg_level & NETIF_MSG_IFDOWN)
++ speedo_show_state(dev);
+
++ /* Alt: acpi_set_pwr_state(pdev, sp->acpi_pwr); */
++ acpi_set_pwr_state(sp->pci_dev, ACPI_D2);
+ MOD_DEC_USE_COUNT;
+
+ return 0;
+@@ -1895,8 +1716,7 @@
+
+ Oh, and incoming frames are dropped while executing dump-stats!
+ */
+-static struct enet_statistics *
+-speedo_get_stats(struct net_device *dev)
++static struct net_device_stats *speedo_get_stats(struct net_device *dev)
+ {
+ struct speedo_private *sp = (struct speedo_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+@@ -1915,14 +1735,9 @@
+ sp->stats.rx_fifo_errors += le32_to_cpu(sp->lstats.rx_overrun_errs);
+ sp->stats.rx_length_errors += le32_to_cpu(sp->lstats.rx_runt_errs);
+ sp->lstats.done_marker = 0x0000;
+- if (dev->start) {
+- unsigned long flags;
+- /* Take a spinlock to make wait_for_cmd_done and sending the
+- command atomic. --SAW */
+- spin_lock_irqsave(&sp->lock, flags);
+- wait_for_cmd_done(ioaddr + SCBCmd);
++ if (netif_running(dev)) {
++ wait_for_cmd_done(dev);
+ outb(CUDumpStats, ioaddr + SCBCmd);
+- spin_unlock_irqrestore(&sp->lock, flags);
+ }
+ }
+ return &sp->stats;
+@@ -1933,26 +1748,68 @@
+ struct speedo_private *sp = (struct speedo_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+ u16 *data = (u16 *)&rq->ifr_data;
++ u32 *data32 = (void *)&rq->ifr_data;
+ int phy = sp->phy[0] & 0x1f;
++ int saved_acpi;
+
+- switch(cmd) {
+- case SIOCDEVPRIVATE: /* Get the address of the PHY in use. */
++ switch(cmd) {
++ case 0x8947: case 0x89F0:
++ /* SIOCGMIIPHY: Get the address of the PHY in use. */
+ data[0] = phy;
+- case SIOCDEVPRIVATE+1: /* Read the specified MII register. */
+- /* FIXME: these operations need to be serialized with MDIO
+- access from the timeout handler.
+- They are currently serialized only with MDIO access from the
+- timer routine. 2000/05/09 SAW */
+- start_bh_atomic();
+- data[3] = mdio_read(ioaddr, data[0], data[1]);
+- end_bh_atomic();
++ /* Fall Through */
++ case 0x8948: case 0x89F1:
++ /* SIOCGMIIREG: Read the specified MII register. */
++ saved_acpi = acpi_set_pwr_state(sp->pci_dev, ACPI_D0);
++ data[3] = mdio_read(dev, data[0], data[1]);
++ acpi_set_pwr_state(sp->pci_dev, saved_acpi);
+ return 0;
+- case SIOCDEVPRIVATE+2: /* Write the specified MII register */
++ case 0x8949: case 0x89F2:
++ /* SIOCSMIIREG: Write the specified MII register */
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+- start_bh_atomic();
++ if (data[0] == sp->phy[0]) {
++ u16 value = data[2];
++ switch (data[1]) {
++ case 0:
++ /* Check for autonegotiation on or reset. */
++ sp->medialock = (value & 0x9000) ? 0 : 1;
++ if (sp->medialock) {
++ sp->full_duplex = (value & 0x0100) ? 1 : 0;
++ sp->rx_mode = RxInvalidMode;
++ }
++ break;
++ case 4: sp->advertising = value; break;
++ }
++ }
++ saved_acpi = acpi_set_pwr_state(sp->pci_dev, ACPI_D0);
+ mdio_write(ioaddr, data[0], data[1], data[2]);
+- end_bh_atomic();
++ acpi_set_pwr_state(sp->pci_dev, saved_acpi);
++ return 0;
++ case SIOCGPARAMS:
++ data32[0] = sp->msg_level;
++ data32[1] = sp->multicast_filter_limit;
++ data32[2] = sp->max_interrupt_work;
++ data32[3] = sp->rx_copybreak;
++#if 0
++ /* No room in the ioctl() to set these. */
++ data32[4] = txfifo;
++ data32[5] = rxfifo;
++#endif
++ return 0;
++ case SIOCSPARAMS:
++ if (!capable(CAP_NET_ADMIN))
++ return -EPERM;
++ sp->msg_level = data32[0];
++ sp->multicast_filter_limit = data32[1];
++ sp->max_interrupt_work = data32[2];
++ sp->rx_copybreak = data32[3];
++#if 0
++ /* No room in the ioctl() to set these. */
++ if (data32[4] < 16)
++ txfifo = data32[4];
++ if (data32[5] < 16)
++ rxfifo = data32[5];
++#endif
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+@@ -1978,21 +1835,18 @@
+ int entry, i;
+
+ if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
+- new_rx_mode = 3;
++ new_rx_mode = AcceptAllMulticast | AcceptAllPhys;
+ } else if ((dev->flags & IFF_ALLMULTI) ||
+- dev->mc_count > multicast_filter_limit) {
+- new_rx_mode = 1;
++ dev->mc_count > sp->multicast_filter_limit) {
++ new_rx_mode = AcceptAllMulticast;
+ } else
+ new_rx_mode = 0;
+
+- if (speedo_debug > 3)
+- printk(KERN_DEBUG "%s: set_rx_mode %d -> %d\n", dev->name,
+- sp->rx_mode, new_rx_mode);
+-
+- if ((int)(sp->cur_tx - sp->dirty_tx) > TX_RING_SIZE - TX_MULTICAST_SIZE) {
+- /* The Tx ring is full -- don't add anything! Hope the mode will be
+- * set again later. */
+- sp->rx_mode = -1;
++ if (sp->cur_tx - sp->dirty_tx >= TX_RING_SIZE - 1) {
++ /* The Tx ring is full -- don't add anything! Presumably the new mode
++ is in config_cmd_data and will be added anyway, otherwise we wait
++ for a timer tick or the mode to change again. */
++ sp->rx_mode = RxInvalidMode;
+ return;
+ }
+
+@@ -2000,40 +1854,41 @@
+ u8 *config_cmd_data;
+
+ spin_lock_irqsave(&sp->lock, flags);
+- entry = sp->cur_tx++ % TX_RING_SIZE;
++ entry = sp->cur_tx % TX_RING_SIZE;
+ last_cmd = sp->last_cmd;
+ sp->last_cmd = (struct descriptor *)&sp->tx_ring[entry];
+
+ sp->tx_skbuff[entry] = 0; /* Redundant. */
+ sp->tx_ring[entry].status = cpu_to_le32(CmdSuspend | CmdConfigure);
++ sp->cur_tx++;
+ sp->tx_ring[entry].link =
+ virt_to_le32desc(&sp->tx_ring[(entry + 1) % TX_RING_SIZE]);
++ /* We may nominally release the lock here. */
++
+ config_cmd_data = (void *)&sp->tx_ring[entry].tx_desc_addr;
+ /* Construct a full CmdConfig frame. */
+ memcpy(config_cmd_data, i82558_config_cmd, sizeof(i82558_config_cmd));
+ config_cmd_data[1] = (txfifo << 4) | rxfifo;
+ config_cmd_data[4] = rxdmacount;
+ config_cmd_data[5] = txdmacount + 0x80;
+- config_cmd_data[15] |= (new_rx_mode & 2) ? 1 : 0;
+- /* 0x80 doesn't disable FC 0x84 does.
+- Disable Flow control since we are not ACK-ing any FC interrupts
+- for now. --Dragan */
+- config_cmd_data[19] = 0x84;
++ config_cmd_data[6] |= (new_rx_mode & AcceptErr) ? 0x80 : 0;
++ config_cmd_data[7] &= (new_rx_mode & AcceptRunt) ? ~0x01 : ~0;
++ if (sp->drv_flags & HasChksum)
++ config_cmd_data[9] |= 1;
++ config_cmd_data[15] |= (new_rx_mode & AcceptAllPhys) ? 1 : 0;
++ config_cmd_data[19] = sp->flow_ctrl ? 0xBD : 0x80;
+ config_cmd_data[19] |= sp->full_duplex ? 0x40 : 0;
+- config_cmd_data[21] = (new_rx_mode & 1) ? 0x0D : 0x05;
++ config_cmd_data[21] = (new_rx_mode & AcceptAllMulticast) ? 0x0D : 0x05;
+ if (sp->phy[0] & 0x8000) { /* Use the AUI port instead. */
+ config_cmd_data[15] |= 0x80;
+ config_cmd_data[8] = 0;
+ }
+ /* Trigger the command unit resume. */
+- wait_for_cmd_done(ioaddr + SCBCmd);
++ wait_for_cmd_done(dev);
+ clear_suspend(last_cmd);
+ outb(CUResume, ioaddr + SCBCmd);
+- if ((int)(sp->cur_tx - sp->dirty_tx) >= TX_QUEUE_LIMIT) {
+- netif_stop_queue(dev);
+- sp->tx_full = 1;
+- }
+ spin_unlock_irqrestore(&sp->lock, flags);
++ sp->last_cmd_time = jiffies;
+ }
+
+ if (new_rx_mode == 0 && dev->mc_count < 4) {
+@@ -2043,14 +1898,16 @@
+ u16 *setup_params, *eaddrs;
+
+ spin_lock_irqsave(&sp->lock, flags);
+- entry = sp->cur_tx++ % TX_RING_SIZE;
++ entry = sp->cur_tx % TX_RING_SIZE;
+ last_cmd = sp->last_cmd;
+ sp->last_cmd = (struct descriptor *)&sp->tx_ring[entry];
+
+ sp->tx_skbuff[entry] = 0;
+ sp->tx_ring[entry].status = cpu_to_le32(CmdSuspend | CmdMulticastList);
++ sp->cur_tx++;
+ sp->tx_ring[entry].link =
+ virt_to_le32desc(&sp->tx_ring[(entry + 1) % TX_RING_SIZE]);
++ /* We may nominally release the lock here. */
+ sp->tx_ring[entry].tx_desc_addr = 0; /* Really MC list count. */
+ setup_params = (u16 *)&sp->tx_ring[entry].tx_desc_addr;
+ *setup_params++ = cpu_to_le16(dev->mc_count*6);
+@@ -2063,38 +1920,45 @@
+ *setup_params++ = *eaddrs++;
+ }
+
+- wait_for_cmd_done(ioaddr + SCBCmd);
++ wait_for_cmd_done(dev);
+ clear_suspend(last_cmd);
+ /* Immediately trigger the command unit resume. */
+ outb(CUResume, ioaddr + SCBCmd);
+-
+- if ((int)(sp->cur_tx - sp->dirty_tx) >= TX_QUEUE_LIMIT) {
+- netif_stop_queue(dev);
+- sp->tx_full = 1;
+- }
+ spin_unlock_irqrestore(&sp->lock, flags);
++ sp->last_cmd_time = jiffies;
+ } else if (new_rx_mode == 0) {
+ struct dev_mc_list *mclist;
+ u16 *setup_params, *eaddrs;
+- struct speedo_mc_block *mc_blk;
+- struct descriptor *mc_setup_frm;
++ struct descriptor *mc_setup_frm = sp->mc_setup_frm;
+ int i;
+
+- mc_blk = kmalloc(sizeof(*mc_blk) + 2 + multicast_filter_limit*6,
+- GFP_ATOMIC);
+- if (mc_blk == NULL) {
+- printk(KERN_ERR "%s: Failed to allocate a setup frame.\n",
+- dev->name);
+- sp->rx_mode = -1; /* We failed, try again. */
++ if (sp->mc_setup_frm_len < 10 + dev->mc_count*6
++ || sp->mc_setup_frm == NULL) {
++ /* Allocate a full setup frame, 10bytes + <max addrs>. */
++ if (sp->mc_setup_frm)
++ kfree(sp->mc_setup_frm);
++ sp->mc_setup_busy = 0;
++ sp->mc_setup_frm_len = 10 + sp->multicast_filter_limit*6;
++ sp->mc_setup_frm = kmalloc(sp->mc_setup_frm_len, GFP_ATOMIC);
++ if (sp->mc_setup_frm == NULL) {
++ printk(KERN_ERR "%s: Failed to allocate a setup frame.\n",
++ dev->name);
++ sp->rx_mode = RxInvalidMode; /* We failed, try again. */
++ return;
++ }
++ }
++ /* If we are busy, someone might be quickly adding to the MC list.
++ Try again later when the list updates stop. */
++ if (sp->mc_setup_busy) {
++ sp->rx_mode = RxInvalidMode;
+ return;
+ }
+- mc_blk->next = NULL;
+- mc_setup_frm = &mc_blk->frame;
+-
++ mc_setup_frm = sp->mc_setup_frm;
+ /* Fill the setup frame. */
+- if (speedo_debug > 1)
+- printk(KERN_DEBUG "%s: Constructing a setup frame at %p.\n",
+- dev->name, mc_setup_frm);
++ if (sp->msg_level & NETIF_MSG_RXFILTER)
++ printk(KERN_DEBUG "%s: Constructing a setup frame at %p, "
++ "%d bytes.\n",
++ dev->name, sp->mc_setup_frm, sp->mc_setup_frm_len);
+ mc_setup_frm->cmd_status =
+ cpu_to_le32(CmdSuspend | CmdIntr | CmdMulticastList);
+ /* Link set below. */
+@@ -2111,81 +1975,125 @@
+
+ /* Disable interrupts while playing with the Tx Cmd list. */
+ spin_lock_irqsave(&sp->lock, flags);
+-
+- if (sp->mc_setup_tail)
+- sp->mc_setup_tail->next = mc_blk;
+- else
+- sp->mc_setup_head = mc_blk;
+- sp->mc_setup_tail = mc_blk;
+- mc_blk->tx = sp->cur_tx;
+-
+- entry = sp->cur_tx++ % TX_RING_SIZE;
++ entry = sp->cur_tx % TX_RING_SIZE;
+ last_cmd = sp->last_cmd;
+ sp->last_cmd = mc_setup_frm;
++ sp->mc_setup_busy++;
+
+ /* Change the command to a NoOp, pointing to the CmdMulti command. */
+ sp->tx_skbuff[entry] = 0;
+ sp->tx_ring[entry].status = cpu_to_le32(CmdNOp);
++ sp->cur_tx++;
+ sp->tx_ring[entry].link = virt_to_le32desc(mc_setup_frm);
++ /* We may nominally release the lock here. */
+
+ /* Set the link in the setup frame. */
+ mc_setup_frm->link =
+ virt_to_le32desc(&(sp->tx_ring[(entry+1) % TX_RING_SIZE]));
+
+- wait_for_cmd_done(ioaddr + SCBCmd);
++ wait_for_cmd_done(dev);
+ clear_suspend(last_cmd);
+ /* Immediately trigger the command unit resume. */
+ outb(CUResume, ioaddr + SCBCmd);
+-
+- if ((int)(sp->cur_tx - sp->dirty_tx) >= TX_QUEUE_LIMIT) {
+- netif_stop_queue(dev);
+- sp->tx_full = 1;
+- }
+ spin_unlock_irqrestore(&sp->lock, flags);
+-
+- if (speedo_debug > 5)
+- printk(" CmdMCSetup frame length %d in entry %d.\n",
++ sp->last_cmd_time = jiffies;
++ if (sp->msg_level & NETIF_MSG_RXFILTER)
++ printk(KERN_DEBUG " CmdMCSetup frame length %d in entry %d.\n",
+ dev->mc_count, entry);
+ }
+
+ sp->rx_mode = new_rx_mode;
+ }
++
++static int speedo_pwr_event(void *dev_instance, int event)
++{
++ struct net_device *dev = dev_instance;
++ struct speedo_private *np = (struct speedo_private *)dev->priv;
++ long ioaddr = dev->base_addr;
++
++ if (np->msg_level & NETIF_MSG_LINK)
++ printk(KERN_DEBUG "%s: Handling power event %d.\n", dev->name, event);
++ switch(event) {
++ case DRV_ATTACH:
++ MOD_INC_USE_COUNT;
++ break;
++ case DRV_SUSPEND:
++ outl(PortPartialReset, ioaddr + SCBPort);
++ break;
++ case DRV_RESUME:
++ speedo_resume(dev);
++ np->rx_mode = RxInvalidMode;
++ np->flow_ctrl = np->partner = 0;
++ set_rx_mode(dev);
++ break;
++ case DRV_DETACH: {
++ struct net_device **devp, **next;
++ if (dev->flags & IFF_UP) {
++ dev_close(dev);
++ dev->flags &= ~(IFF_UP|IFF_RUNNING);
++ }
++ unregister_netdev(dev);
++ release_region(dev->base_addr, pci_id_tbl[np->chip_id].io_size);
++#ifndef USE_IO_OPS
++ iounmap((char *)dev->base_addr);
++#endif
++ for (devp = &root_speedo_dev; *devp; devp = next) {
++ next = &((struct speedo_private *)(*devp)->priv)->next_module;
++ if (*devp == dev) {
++ *devp = *next;
++ break;
++ }
++ }
++ if (np->priv_addr)
++ kfree(np->priv_addr);
++ kfree(dev);
++ MOD_DEC_USE_COUNT;
++ break;
++ }
++ case DRV_PWR_DOWN:
++ case DRV_PWR_UP:
++ acpi_set_pwr_state(np->pci_dev, event==DRV_PWR_DOWN ? ACPI_D3:ACPI_D0);
++ break;
++ case DRV_PWR_WakeOn:
++ default:
++ return -1;
++ }
++
++ return 0;
++}
+
+-#ifdef MODULE
++
++#if defined(MODULE) || (LINUX_VERSION_CODE >= 0x020400)
+
+ int init_module(void)
+ {
+ int cards_found;
+
+- if (debug >= 0 && speedo_debug != debug)
+- printk(KERN_INFO "eepro100.c: Debug level is %d.\n", debug);
+- if (debug >= 0)
+- speedo_debug = debug;
+- /* Always emit the version message. */
+- if (speedo_debug)
+- printk(KERN_INFO "%s", version);
+-
+- cards_found = eepro100_init();
+- if (cards_found <= 0) {
++ /* Emit version even if no cards detected. */
++ printk(KERN_INFO "%s" KERN_INFO "%s", version1, version2);
++ cards_found = pci_drv_register(&eepro100_drv_id, NULL);
++ if (cards_found < 0)
+ printk(KERN_INFO "eepro100: No cards found, driver not installed.\n");
+- return -ENODEV;
+- }
+- return 0;
++ return cards_found;
+ }
+
+-void
+-cleanup_module(void)
++void cleanup_module(void)
+ {
+ struct net_device *next_dev;
+
++ pci_drv_unregister(&eepro100_drv_id);
++
+ /* No need to check MOD_IN_USE, as sys_delete_module() checks. */
+ while (root_speedo_dev) {
+ struct speedo_private *sp = (void *)root_speedo_dev->priv;
+ unregister_netdev(root_speedo_dev);
+- release_region(root_speedo_dev->base_addr, SPEEDO3_TOTAL_SIZE);
+-#ifndef USE_IO
++#ifdef USE_IO_OPS
++ release_region(root_speedo_dev->base_addr,
++ pci_id_tbl[sp->chip_id].io_size);
++#else
+ iounmap((char *)root_speedo_dev->base_addr);
+ #endif
++ acpi_set_pwr_state(sp->pci_dev, sp->acpi_pwr);
+ next_dev = sp->next_module;
+ if (sp->priv_addr)
+ kfree(sp->priv_addr);
+@@ -2194,25 +2102,30 @@
+ }
+ }
+
++#if (LINUX_VERSION_CODE >= 0x020400) && 0
++module_init(init_module);
++module_exit(cleanup_module);
++#endif
++
+ #else /* not MODULE */
+
+-int eepro100_probe(void)
++int eepro100_probe(struct net_device *dev)
+ {
+- int cards_found = 0;
+-
+- cards_found = eepro100_init();
++ int cards_found = pci_drv_register(&eepro100_drv_id, dev);
+
+- if (speedo_debug > 0 && cards_found)
+- printk(version);
++ /* Only emit the version if the driver is being used. */
++ if (cards_found >= 0)
++ printk(KERN_INFO "%s" KERN_INFO "%s", version1, version2);
+
+- return cards_found ? 0 : -ENODEV;
++ return cards_found;
+ }
+ #endif /* MODULE */
+
+ /*
+ * Local variables:
+- * compile-command: "gcc -DMODULE -D__KERNEL__ -I/usr/src/linux/net/inet -Wall -Wstrict-prototypes -O6 -c eepro100.c `[ -f /usr/include/linux/modversions.h ] && echo -DMODVERSIONS`"
+- * SMP-compile-command: "gcc -D__SMP__ -DMODULE -D__KERNEL__ -I/usr/src/linux/net/inet -Wall -Wstrict-prototypes -O6 -c eepro100.c `[ -f /usr/include/linux/modversions.h ] && echo -DMODVERSIONS`"
++ * compile-command: "make KERNVER=`uname -r` eepro100.o"
++ * compile-cmd: "gcc -DMODULE -Wall -Wstrict-prototypes -O6 -c eepro100.c"
++ * simple-compile-command: "gcc -DMODULE -O6 -c eepro100.c"
+ * c-indent-level: 4
+ * c-basic-offset: 4
+ * tab-width: 4
+Index: linux/src/drivers/net/epic100.c
+===================================================================
+RCS file: /cvsroot/hurd/gnumach/linux/src/drivers/net/Attic/epic100.c,v
+retrieving revision 1.1
+diff -u -r1.1 epic100.c
+--- linux/src/drivers/net/epic100.c 26 Apr 1999 05:52:10 -0000 1.1
++++ linux/src/drivers/net/epic100.c 20 Aug 2004 10:32:53 -0000
+@@ -1,135 +1,177 @@
+ /* epic100.c: A SMC 83c170 EPIC/100 Fast Ethernet driver for Linux. */
+ /*
+- Written 1997-1998 by Donald Becker.
++ Written/copyright 1997-2002 by Donald Becker.
+
+- This software may be used and distributed according to the terms
+- of the GNU Public License, incorporated herein by reference.
+- All other rights reserved.
++ This software may be used and distributed according to the terms of
++ the GNU General Public License (GPL), incorporated herein by reference.
++ Drivers based on or derived from this code fall under the GPL and must
++ retain the authorship, copyright and license notice. This file is not
++ a complete program and may only be used when the entire operating
++ system is licensed under the GPL.
+
+ This driver is for the SMC83c170/175 "EPIC" series, as used on the
+ SMC EtherPower II 9432 PCI adapter, and several CardBus cards.
+
+- The author may be reached as becker@CESDIS.gsfc.nasa.gov, or C/O
+- Center of Excellence in Space Data and Information Sciences
+- Code 930.5, Goddard Space Flight Center, Greenbelt MD 20771
++ The author may be reached as becker@scyld.com, or C/O
++ Scyld Computing Corporation
++ 914 Bay Ridge Road, Suite 220
++ Annapolis MD 21403
+
+- Support and updates available at
+- http://cesdis.gsfc.nasa.gov/linux/drivers/epic100.html
++ Information and updates available at
++ http://www.scyld.com/network/epic100.html
+ */
+
+-static const char *version =
+-"epic100.c:v1.03 8/7/98 Donald Becker http://cesdis.gsfc.nasa.gov/linux/drivers/epic100.html\n";
++/* These identify the driver base version and may not be removed. */
++static const char version[] =
++"epic100.c:v1.18 7/22/2003 Written by Donald Becker <becker@scyld.com>\n";
++static const char version2[] =
++" http://www.scyld.com/network/epic100.html\n";
+
+-/* A few user-configurable values. */
++/* The user-configurable values.
++ These may be modified when a driver module is loaded.*/
+
+-/* Keep the ring sizes a power of two for efficiency.
+- Making the Tx ring too large decreases the effectiveness of channel
+- bonding and packet priority.
+- There are no ill effects from too-large receive rings. */
+-#define TX_RING_SIZE 16
+-#define RX_RING_SIZE 32
++/* Message enable level: 0..31 = no..all messages. See NETIF_MSG docs. */
++static int debug = 2;
++
++/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
++static int max_interrupt_work = 32;
++
++/* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
++ This chip uses a 64 element hash table based on the Ethernet CRC. */
++static int multicast_filter_limit = 32;
++
++/* Used to set a special media speed or duplex.
++ Both 'options[]' and 'full_duplex[]' should exist for driver
++ interoperability.
++ The media type is usually passed in 'options[]'.
++ The default is autonegotation for speed and duplex.
++ This should rarely be overridden.
++ Use option values 0x10/0x20 for 10Mbps, 0x100,0x200 for 100Mbps.
++ Use option values 0x10 and 0x100 for forcing half duplex fixed speed.
++ Use option values 0x20 and 0x200 for forcing full duplex operation.
++*/
++#define MAX_UNITS 8 /* More are supported, limit only on options */
++static int options[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
++static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
+
+ /* Set the copy breakpoint for the copy-only-tiny-frames scheme.
+ Setting to > 1518 effectively disables this feature. */
+-static int rx_copybreak = 200;
++static int rx_copybreak = 0;
+
+-/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
+-static int max_interrupt_work = 10;
++/* Operational parameters that are set at compile time. */
++
++/* Keep the ring sizes a power of two for operational efficiency.
++ The compiler will convert <unsigned>'%'<2^N> into a bit mask.
++ Making the Tx ring too large decreases the effectiveness of channel
++ bonding and packet priority.
++ Too-large receive rings only waste memory. */
++#define TX_RING_SIZE 16
++#define TX_QUEUE_LEN 10 /* Limit ring entries actually used. */
++#define RX_RING_SIZE 32
+
+ /* Operational parameters that usually are not changed. */
+ /* Time in jiffies before concluding the transmitter is hung. */
+-#define TX_TIMEOUT ((2000*HZ)/1000)
++#define TX_TIMEOUT (6*HZ)
+
+-#define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/
++/* Allocation size of Rx buffers with normal sized Ethernet frames.
++ Do not change this value without good reason. This is not a limit,
++ but a way to keep a consistent allocation size among drivers.
++ */
++#define PKT_BUF_SZ 1536
+
+ /* Bytes transferred to chip before transmission starts. */
+-#define TX_FIFO_THRESH 256 /* Rounded down to 4 byte units. */
++/* Initial threshold, increased on underflow, rounded down to 4 byte units. */
++#define TX_FIFO_THRESH 256
+ #define RX_FIFO_THRESH 1 /* 0-3, 0==32, 64,96, or 3==128 bytes */
+
++#ifndef __KERNEL__
++#define __KERNEL__
++#endif
++#if !defined(__OPTIMIZE__)
++#warning You must compile this file with the correct options!
++#warning See the last lines of the source file.
++#error You must compile this driver with "-O".
++#endif
++
+ #include <linux/config.h>
+-#include <linux/version.h> /* Evil, but neccessary */
+-#ifdef MODULE
+-#ifdef MODVERSIONS
++#if defined(CONFIG_SMP) && ! defined(__SMP__)
++#define __SMP__
++#endif
++#if defined(MODULE) && defined(CONFIG_MODVERSIONS) && ! defined(MODVERSIONS)
++#define MODVERSIONS
++#endif
++
++#include <linux/version.h>
++#if defined(MODVERSIONS)
+ #include <linux/modversions.h>
+ #endif
+ #include <linux/module.h>
+-#else
+-#define MOD_INC_USE_COUNT
+-#define MOD_DEC_USE_COUNT
+-#endif
+
+ #include <linux/kernel.h>
+-#include <linux/sched.h>
+ #include <linux/string.h>
+ #include <linux/timer.h>
+-#include <linux/ptrace.h>
+ #include <linux/errno.h>
+ #include <linux/ioport.h>
++#if LINUX_VERSION_CODE >= 0x20400
++#include <linux/slab.h>
++#else
+ #include <linux/malloc.h>
++#endif
+ #include <linux/interrupt.h>
+ #include <linux/pci.h>
+-#if LINUX_VERSION_CODE >= 0x20155
+-#define PCI_SUPPORT_VER2
+-#else
+-#include <linux/bios32.h>
+-#endif
+ #include <linux/delay.h>
+-
+-#include <asm/processor.h> /* Processor type for cache alignment. */
+-#include <asm/bitops.h>
+-#include <asm/io.h>
+-#include <asm/dma.h>
+-
+ #include <linux/netdevice.h>
+ #include <linux/etherdevice.h>
+ #include <linux/skbuff.h>
++#include <asm/bitops.h>
++#include <asm/io.h>
+
+-/* Kernel compatibility defines, common to David Hind's PCMCIA package.
+- This is only in the support-all-kernels source code. */
++#if LINUX_VERSION_CODE >= 0x20300
++#include <linux/spinlock.h>
++#elif LINUX_VERSION_CODE >= 0x20200
++#include <asm/spinlock.h>
++#endif
+
+-#if ! defined (LINUX_VERSION_CODE) || LINUX_VERSION_CODE < 0x20000
+-#warning This driver version is only for kernel versions 2.0.0 and later.
++#ifdef INLINE_PCISCAN
++#include "k_compat.h"
++#else
++#include "pci-scan.h"
++#include "kern_compat.h"
+ #endif
+
+-#define RUN_AT(x) (jiffies + (x))
++#if (LINUX_VERSION_CODE >= 0x20100) && defined(MODULE)
++char kernel_version[] = UTS_RELEASE;
++#endif
+
+-#if defined(MODULE) && (LINUX_VERSION_CODE >= 0x20115)
+-MODULE_AUTHOR("Donald Becker <becker@cesdis.gsfc.nasa.gov>");
++MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
+ MODULE_DESCRIPTION("SMC 83c170 EPIC series Ethernet driver");
++MODULE_LICENSE("GPL");
+ MODULE_PARM(debug, "i");
+-MODULE_PARM(options, "1-" __MODULE_STRING(8) "i");
+-MODULE_PARM(full_duplex, "1-" __MODULE_STRING(8) "i");
+-MODULE_PARM(rx_copybreak, "i");
+ MODULE_PARM(max_interrupt_work, "i");
+-#endif
+-#if LINUX_VERSION_CODE < 0x20123
+-#define test_and_set_bit(val, addr) set_bit(val, addr)
+-#endif
+-#if LINUX_VERSION_CODE <= 0x20139
+-#define net_device_stats enet_statistics
+-#define NETSTATS_VER2
+-#endif
+-#if LINUX_VERSION_CODE < 0x20159
+-#define DEV_FREE_SKB(skb) dev_kfree_skb(skb, FREE_WRITE);
+-#else /* Grrr, unneeded incompatible change. */
+-#define DEV_FREE_SKB(skb) dev_kfree_skb(skb);
+-#endif
+-
+-/* The I/O extent. */
+-#define EPIC_TOTAL_SIZE 0x100
+-
+-static int epic_debug = 1;
++MODULE_PARM(rx_copybreak, "i");
++MODULE_PARM(options, "1-" __MODULE_STRING(MAX_UNITS) "i");
++MODULE_PARM(full_duplex, "1-" __MODULE_STRING(MAX_UNITS) "i");
++MODULE_PARM(multicast_filter_limit, "i");
++MODULE_PARM_DESC(debug, "Driver message level (0-31)");
++MODULE_PARM_DESC(options, "Force transceiver type or fixed speed+duplex.\n"
++"Values are 0x10/0x20/0x100/0x200.");
++MODULE_PARM_DESC(max_interrupt_work,
++ "Driver maximum events handled per interrupt");
++MODULE_PARM_DESC(full_duplex, "Non-zero to set forced full duplex.");
++MODULE_PARM_DESC(rx_copybreak,
++ "Breakpoint in bytes for copy-only-tiny-frames");
++MODULE_PARM_DESC(multicast_filter_limit,
++ "Multicast addresses before switching to Rx-all-multicast");
+
+ /*
+ Theory of Operation
+
+ I. Board Compatibility
+
+-This device driver is designed for the SMC "EPCI/100", the SMC
++This device driver is designed for the SMC "EPIC/100", the SMC
+ single-chip Ethernet controllers for PCI. This chip is used on
+ the SMC EtherPower II boards.
+
+-
+ II. Board-specific settings
+
+ PCI bus devices are configured by the system at boot time, so no jumpers
+@@ -144,35 +186,61 @@
+
+ IVb. References
+
+-http://www.smc.com/components/catalog/smc83c170.html
+-http://cesdis.gsfc.nasa.gov/linux/misc/NWay.html
+-http://www.national.com/pf/DP/DP83840.html
++http://www.smsc.com/main/datasheets/83c171.pdf
++http://www.smsc.com/main/datasheets/83c175.pdf
++http://scyld.com/expert/NWay.html
++http://www.national.com/pf/DP/DP83840A.html
+
+ IVc. Errata
+
+ */
+
+-/* The rest of these values should never change. */
++static void *epic_probe1(struct pci_dev *pdev, void *init_dev,
++ long ioaddr, int irq, int chip_idx, int find_cnt);
++static int epic_pwr_event(void *dev_instance, int event);
+
+-static struct device *epic_probe1(int pci_bus, int pci_devfn,
+- struct device *dev, int card_idx);
++enum chip_capability_flags { MII_PWRDWN=1, TYPE2_INTR=2, NO_MII=4 };
+
+-enum pci_flags_bit {
+- PCI_USES_IO=1, PCI_USES_MEM=2, PCI_USES_MASTER=4,
+- PCI_ADDR0=0x10<<0, PCI_ADDR1=0x10<<1, PCI_ADDR2=0x10<<2, PCI_ADDR3=0x10<<3,
+-};
+-struct chip_info {
+- const char *name;
+- u16 vendor_id, device_id, device_id_mask, pci_flags;
+- int io_size, min_latency;
+- struct device *(*probe1)(int pci_bus, int pci_devfn, struct device *dev,
+- int chip_idx);
+-} chip_tbl[] = {
+- {"SMSC EPIC/100", 0x10B8, 0x0005, 0x7fff,
+- PCI_USES_IO|PCI_USES_MASTER|PCI_ADDR0, EPIC_TOTAL_SIZE, 32, epic_probe1},
++#define EPIC_TOTAL_SIZE 0x100
++#ifdef USE_IO_OPS
++#define EPIC_IOTYPE PCI_USES_MASTER|PCI_USES_IO|PCI_ADDR0
++#else
++#define EPIC_IOTYPE PCI_USES_MASTER|PCI_USES_MEM|PCI_ADDR1
++#endif
++
++static struct pci_id_info pci_id_tbl[] = {
++ {"SMSC EPIC 83c172", {0x000510B8, 0xffffffff, 0,0, 9,0xff},
++ EPIC_IOTYPE, EPIC_TOTAL_SIZE, TYPE2_INTR | MII_PWRDWN, },
++ {"SMSC EPIC 83c171", {0x000510B8, 0xffffffff, 0,0, 6,0xff},
++ EPIC_IOTYPE, EPIC_TOTAL_SIZE, TYPE2_INTR | MII_PWRDWN, },
++ {"SMSC EPIC/100 83c170", {0x000510B8, 0xffffffff, 0x0ab41092, 0xffffffff},
++ EPIC_IOTYPE, EPIC_TOTAL_SIZE, TYPE2_INTR | NO_MII | MII_PWRDWN, },
++ {"SMSC EPIC/100 83c170", {0x000510B8, 0xffffffff},
++ EPIC_IOTYPE, EPIC_TOTAL_SIZE, TYPE2_INTR, },
++ {"SMSC EPIC/C 83c175", {0x000610B8, 0xffffffff},
++ EPIC_IOTYPE, EPIC_TOTAL_SIZE, TYPE2_INTR | MII_PWRDWN, },
+ {0,},
+ };
+
++struct drv_id_info epic_drv_id = {
++ "epic100", PCI_HOTSWAP, PCI_CLASS_NETWORK_ETHERNET<<8, pci_id_tbl,
++ epic_probe1, epic_pwr_event };
++
++#ifndef USE_IO_OPS
++#undef inb
++#undef inw
++#undef inl
++#undef outb
++#undef outw
++#undef outl
++#define inb readb
++#define inw readw
++#define inl readl
++#define outb writeb
++#define outw writew
++#define outl writel
++#endif
++
+ /* Offsets to registers, using the (ugh) SMC names. */
+ enum epic_registers {
+ COMMAND=0, INTSTAT=4, INTMASK=8, GENCTL=0x0C, NVCTL=0x10, EECTL=0x14,
+@@ -187,38 +255,40 @@
+
+ /* Interrupt register bits, using my own meaningful names. */
+ enum IntrStatus {
+- TxIdle=0x40000, RxIdle=0x20000, IntrSummary=0x010000,
+- PCIBusErr170=0x7000, PCIBusErr175=0x1000, PhyEvent175=0x8000,
+- RxStarted=0x0800, RxEarlyWarn=0x0400, CntFull=0x0200, TxUnderrun=0x0100,
+- TxEmpty=0x0080, TxDone=0x0020, RxError=0x0010,
+- RxOverflow=0x0008, RxFull=0x0004, RxHeader=0x0002, RxDone=0x0001,
++ TxIdle=0x40000, RxIdle=0x20000, IntrSummary=0x010000,
++ PCIBusErr170=0x7000, PCIBusErr175=0x1000, PhyEvent175=0x8000,
++ RxStarted=0x0800, RxEarlyWarn=0x0400, CntFull=0x0200, TxUnderrun=0x0100,
++ TxEmpty=0x0080, TxDone=0x0020, RxError=0x0010,
++ RxOverflow=0x0008, RxFull=0x0004, RxHeader=0x0002, RxDone=0x0001,
++};
++enum CommandBits {
++ StopRx=1, StartRx=2, TxQueued=4, RxQueued=8,
++ StopTxDMA=0x20, StopRxDMA=0x40, RestartTx=0x80,
+ };
+
+ /* The EPIC100 Rx and Tx buffer descriptors. */
+
+ struct epic_tx_desc {
+- s16 status;
+- u16 txlength;
++ u32 txstatus;
+ u32 bufaddr;
+- u16 buflength;
+- u16 control;
++ u32 buflength;
+ u32 next;
+ };
+
+ struct epic_rx_desc {
+- s16 status;
+- u16 rxlength;
++ u32 rxstatus;
+ u32 bufaddr;
+ u32 buflength;
+ u32 next;
+ };
+
+-struct epic_private {
+- char devname[8]; /* Used only for kernel debugging. */
+- const char *product_name;
+- struct device *next_module;
++enum desc_status_bits {
++ DescOwn=0x8000,
++};
+
+- /* Rx and Rx rings here so that they remain paragraph aligned. */
++#define PRIV_ALIGN 15 /* Required alignment mask */
++struct epic_private {
++ /* Tx and Rx rings first so that they remain paragraph aligned. */
+ struct epic_rx_desc rx_ring[RX_RING_SIZE];
+ struct epic_tx_desc tx_ring[TX_RING_SIZE];
+ /* The saved address of a sent-in-place packet/buffer, for skfree(). */
+@@ -226,168 +296,80 @@
+ /* The addresses of receive-in-place skbuffs. */
+ struct sk_buff* rx_skbuff[RX_RING_SIZE];
+
+- /* Ring pointers. */
+- unsigned int cur_rx, cur_tx; /* The next free ring entry */
+- unsigned int dirty_rx, dirty_tx; /* The ring entries to be free()ed. */
++ struct net_device *next_module;
++ void *priv_addr; /* Unaligned address for kfree */
+
+- u8 pci_bus, pci_dev_fn; /* PCI bus location. */
+- u16 chip_id;
++ /* Ring pointers. */
++ spinlock_t lock; /* Group with Tx control cache line. */
++ unsigned int cur_tx, dirty_tx;
++ struct descriptor *last_tx_desc;
++
++ unsigned int cur_rx, dirty_rx;
++ unsigned int rx_buf_sz; /* Based on MTU+slack. */
++ struct descriptor *last_rx_desc;
++ long last_rx_time; /* Last Rx, in jiffies. */
++ int rx_copybreak;
++
++ int msg_level;
++ int max_interrupt_work;
++ struct pci_dev *pci_dev; /* PCI bus location. */
++ int chip_id, chip_flags;
+
+ struct net_device_stats stats;
+ struct timer_list timer; /* Media selection timer. */
++ int tx_threshold;
++ int genctl; /* Including Rx threshold. */
++ u32 cur_rx_mode;
+ unsigned char mc_filter[8];
++ int multicast_filter_limit;
++
+ signed char phys[4]; /* MII device addresses. */
++ u16 mii_bmcr; /* MII control register */
++ u16 advertising; /* NWay media advertisement */
++ int mii_phy_cnt;
+ unsigned int tx_full:1; /* The Tx queue is full. */
+ unsigned int full_duplex:1; /* Current duplex setting. */
+- unsigned int force_fd:1; /* Full-duplex operation requested. */
+- unsigned int default_port:4; /* Last dev->if_port value. */
++ unsigned int duplex_lock:1; /* Duplex forced by the user. */
++ unsigned int default_port; /* Last dev->if_port value. */
+ unsigned int media2:4; /* Secondary monitored media port. */
+ unsigned int medialock:1; /* Don't sense media type. */
+ unsigned int mediasense:1; /* Media sensing in progress. */
+- int pad0, pad1; /* Used for 8-byte alignment */
+ };
+
+-/* Used to pass the full-duplex flag, etc. */
+-#define MAX_UNITS 8
+-static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
+-static int options[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
+-
+-static int epic_open(struct device *dev);
++static int epic_open(struct net_device *dev);
+ static int read_eeprom(long ioaddr, int location);
+-static int mdio_read(long ioaddr, int phy_id, int location);
+-static void mdio_write(long ioaddr, int phy_id, int location, int value);
+-static void epic_restart(struct device *dev);
++static int mdio_read(struct net_device *dev, int phy_id, int location);
++static void mdio_write(struct net_device *dev, int phy_id, int loc, int val);
++static void epic_start(struct net_device *dev, int restart);
++static void check_media(struct net_device *dev);
+ static void epic_timer(unsigned long data);
+-static void epic_tx_timeout(struct device *dev);
+-static void epic_init_ring(struct device *dev);
+-static int epic_start_xmit(struct sk_buff *skb, struct device *dev);
+-static int epic_rx(struct device *dev);
++static void epic_tx_timeout(struct net_device *dev);
++static void epic_init_ring(struct net_device *dev);
++static int epic_start_xmit(struct sk_buff *skb, struct net_device *dev);
++static int epic_rx(struct net_device *dev);
+ static void epic_interrupt(int irq, void *dev_instance, struct pt_regs *regs);
+-static int mii_ioctl(struct device *dev, struct ifreq *rq, int cmd);
+-static int epic_close(struct device *dev);
+-static struct net_device_stats *epic_get_stats(struct device *dev);
+-static void set_rx_mode(struct device *dev);
++static int mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
++static int epic_close(struct net_device *dev);
++static struct net_device_stats *epic_get_stats(struct net_device *dev);
++static void set_rx_mode(struct net_device *dev);
+
+
+ /* A list of all installed EPIC devices, for removing the driver module. */
+-static struct device *root_epic_dev = NULL;
++static struct net_device *root_epic_dev = NULL;
+
+-#ifndef CARDBUS
+-int epic100_probe(struct device *dev)
++static void *epic_probe1(struct pci_dev *pdev, void *init_dev,
++ long ioaddr, int irq, int chip_idx, int card_idx)
+ {
+- int cards_found = 0;
+- int chip_idx;
+- u16 pci_command, new_command;
+- unsigned char pci_bus, pci_device_fn;
+-
+-#ifdef PCI_SUPPORT_VER2
+- struct pci_dev *pcidev = NULL;
+- while ((pcidev = pci_find_class(PCI_CLASS_NETWORK_ETHERNET << 8, pcidev))
+- != NULL) {
+- long pci_ioaddr = pcidev->base_address[0] & ~3;
+- int vendor = pcidev->vendor;
+- int device = pcidev->device;
+-
+- for (chip_idx = 0; chip_tbl[chip_idx].vendor_id; chip_idx++)
+- if (vendor == chip_tbl[chip_idx].vendor_id
+- && (device & chip_tbl[chip_idx].device_id_mask) ==
+- chip_tbl[chip_idx].device_id)
+- break;
+- if (chip_tbl[chip_idx].vendor_id == 0 /* Compiled out! */
+- || check_region(pci_ioaddr, chip_tbl[chip_idx].io_size))
+- continue;
+- pci_bus = pcidev->bus->number;
+- pci_device_fn = pcidev->devfn;
+-#else
+- int pci_index;
+-
+- if ( ! pcibios_present())
+- return -ENODEV;
+-
+- for (pci_index = 0; pci_index < 0xff; pci_index++) {
+- u16 vendor, device;
+- u32 pci_ioaddr;
+-
+- if (pcibios_find_class (PCI_CLASS_NETWORK_ETHERNET << 8,
+- pci_index, &pci_bus, &pci_device_fn)
+- != PCIBIOS_SUCCESSFUL)
+- break;
+- pcibios_read_config_word(pci_bus, pci_device_fn,
+- PCI_VENDOR_ID, &vendor);
+- pcibios_read_config_word(pci_bus, pci_device_fn,
+- PCI_DEVICE_ID, &device);
+-
+- for (chip_idx = 0; chip_tbl[chip_idx].vendor_id; chip_idx++)
+- if (vendor == chip_tbl[chip_idx].vendor_id
+- && (device & chip_tbl[chip_idx].device_id_mask) ==
+- chip_tbl[chip_idx].device_id)
+- break;
+- if (chip_tbl[chip_idx].vendor_id == 0) /* Compiled out! */
+- continue;
+-
+- pcibios_read_config_dword(pci_bus, pci_device_fn,
+- PCI_BASE_ADDRESS_0, &pci_ioaddr);
+- /* Remove I/O space marker in bit 0. */
+- pci_ioaddr &= ~3;
+-
+- if (check_region(pci_ioaddr, chip_tbl[chip_idx].io_size))
+- continue;
+-#endif
+-
+- /* EPIC-specific code: Soft-reset the chip ere setting as master. */
+- outl(0x0001, pci_ioaddr + GENCTL);
+-
+- /* Activate the card: fix for brain-damaged Win98 BIOSes. */
+- pcibios_read_config_word(pci_bus, pci_device_fn,
+- PCI_COMMAND, &pci_command);
+- new_command = pci_command | PCI_COMMAND_MASTER|PCI_COMMAND_IO;
+- if (pci_command != new_command) {
+- printk(KERN_INFO " The PCI BIOS has not enabled Ethernet"
+- " device %4.4x-%4.4x."
+- " Updating PCI command %4.4x->%4.4x.\n",
+- vendor, device, pci_command, new_command);
+- pcibios_write_config_word(pci_bus, pci_device_fn,
+- PCI_COMMAND, new_command);
+- }
+-
+- dev = chip_tbl[chip_idx].probe1(pci_bus, pci_device_fn,
+- dev, cards_found);
+-
+- /* Check the latency timer. */
+- if (dev) {
+- u8 pci_latency;
+- pcibios_read_config_byte(pci_bus, pci_device_fn,
+- PCI_LATENCY_TIMER, &pci_latency);
+- if (pci_latency < chip_tbl[chip_idx].min_latency) {
+- printk(KERN_INFO " PCI latency timer (CFLT) value of %d is "
+- "unreasonably low, setting to %d.\n", pci_latency,
+- chip_tbl[chip_idx].min_latency);
+- pcibios_write_config_byte(pci_bus, pci_device_fn,
+- PCI_LATENCY_TIMER,
+- chip_tbl[chip_idx].min_latency);
+- }
+- dev = 0;
+- cards_found++;
+- }
+- }
+-
+- return cards_found ? 0 : -ENODEV;
+-}
+-#endif /* not CARDBUS */
+-
+-static struct device *epic_probe1(int bus, int devfn, struct device *dev,
+- int card_idx)
+-{
+- static int did_version = 0; /* Already printed version info. */
++ struct net_device *dev;
+ struct epic_private *ep;
++ void *priv_mem;
+ int i, option = 0, duplex = 0;
+- u16 chip_id;
+- u32 ioaddr;
+
+- if (epic_debug > 0 && did_version++ == 0)
+- printk(KERN_INFO "%s", version);
++ dev = init_etherdev(init_dev, 0);
++ if (!dev)
++ return NULL;
+
+- if (dev && dev->mem_start) {
++ if (dev->mem_start) {
+ option = dev->mem_start;
+ duplex = (dev->mem_start & 16) ? 1 : 0;
+ } else if (card_idx >= 0 && card_idx < MAX_UNITS) {
+@@ -397,108 +379,125 @@
+ duplex = full_duplex[card_idx];
+ }
+
+- dev = init_etherdev(dev, 0);
+-
+- { /* Grrrr, badly consider interface change. */
+-#if defined(PCI_SUPPORT_VER2)
+- struct pci_dev *pdev = pci_find_slot(bus, devfn);
+- ioaddr = pdev->base_address[0] & ~3;
+- dev->irq = pdev->irq;
+- chip_id = pdev->device;
+-#else
+- u8 irq;
+- u32 ioaddr0;
+- pcibios_read_config_dword(bus, devfn, PCI_BASE_ADDRESS_0, &ioaddr0);
+- pcibios_read_config_byte(bus, devfn, PCI_INTERRUPT_LINE, &irq);
+- pcibios_read_config_word(bus, devfn, PCI_DEVICE_ID, &chip_id);
+- ioaddr = ioaddr0 & ~3;
+- dev->irq = irq;
+-#endif
+- }
+-
+ dev->base_addr = ioaddr;
+- printk(KERN_INFO "%s: SMC EPIC/100 (chip ID %4.4x) at %#3x, IRQ %d, ",
+- dev->name, chip_id, ioaddr, dev->irq);
++ dev->irq = irq;
++ printk(KERN_INFO "%s: %s at %#lx, %2.2x:%2.2x IRQ %d, ",
++ dev->name, pci_id_tbl[chip_idx].name, ioaddr,
++ pci_bus_number(pdev), pci_devfn(pdev)>>3, dev->irq);
+
+ /* Bring the chip out of low-power mode. */
+ outl(0x4200, ioaddr + GENCTL);
+- /* Magic?! If we don't set this bit the MII interface won't work. */
++ /* Magic from SMSC app note 7.15 */
+ outl(0x0008, ioaddr + TEST1);
+
+ /* Turn on the MII transceiver. */
+ outl(0x12, ioaddr + MIICfg);
+- if (chip_id == 6)
++ if (pci_id_tbl[chip_idx].drv_flags & NO_MII)
+ outl((inl(ioaddr + NVCTL) & ~0x003C) | 0x4800, ioaddr + NVCTL);
+ outl(0x0200, ioaddr + GENCTL);
+
+- /* This could also be read from the EEPROM. */
++ if (((1 << debug) - 1) & NETIF_MSG_MISC) {
++ printk(KERN_DEBUG "%s: EEPROM contents\n", dev->name);
++ for (i = 0; i < 64; i++)
++ printk(" %4.4x%s", read_eeprom(ioaddr, i),
++ i % 16 == 15 ? "\n" : "");
++ }
++
++ /* Note: the '175 does not have a serial EEPROM. */
+ for (i = 0; i < 3; i++)
+- ((u16 *)dev->dev_addr)[i] = inw(ioaddr + LAN0 + i*4);
++ ((u16 *)dev->dev_addr)[i] = le16_to_cpu(inw(ioaddr + LAN0 + i*4));
+
+ for (i = 0; i < 5; i++)
+ printk("%2.2x:", dev->dev_addr[i]);
+ printk("%2.2x.\n", dev->dev_addr[i]);
+
+- if (epic_debug > 1) {
+- printk(KERN_DEBUG "%s: EEPROM contents\n", dev->name);
+- for (i = 0; i < 64; i++)
+- printk(" %4.4x%s", read_eeprom(ioaddr, i),
+- i % 16 == 15 ? "\n" : "");
+- }
++ /* Make certain elements e.g. descriptor lists are aligned. */
++ priv_mem = kmalloc(sizeof(*ep) + PRIV_ALIGN, GFP_KERNEL);
++ /* Check for the very unlikely case of no memory. */
++ if (priv_mem == NULL)
++ return NULL;
+
+ /* We do a request_region() to register /proc/ioports info. */
+- request_region(ioaddr, EPIC_TOTAL_SIZE, "SMC EPIC/100");
++ request_region(ioaddr, pci_id_tbl[chip_idx].io_size, dev->name);
+
+- /* The data structures must be quadword aligned. */
+- ep = kmalloc(sizeof(*ep), GFP_KERNEL | GFP_DMA);
++ dev->priv = ep = (void *)(((long)priv_mem + PRIV_ALIGN) & ~PRIV_ALIGN);
+ memset(ep, 0, sizeof(*ep));
+- dev->priv = ep;
++ ep->priv_addr = priv_mem;
+
+ ep->next_module = root_epic_dev;
+ root_epic_dev = dev;
+
+- ep->pci_bus = bus;
+- ep->pci_dev_fn = devfn;
+- ep->chip_id = chip_id;
++ ep->pci_dev = pdev;
++ ep->chip_id = chip_idx;
++ ep->chip_flags = pci_id_tbl[chip_idx].drv_flags;
++ ep->msg_level = (1 << debug) - 1;
++ ep->rx_copybreak = rx_copybreak;
++ ep->max_interrupt_work = max_interrupt_work;
++ ep->multicast_filter_limit = multicast_filter_limit;
++
++ /* The lower four bits are non-TP media types. */
++ if (option > 0) {
++ if (option & 0x220)
++ ep->duplex_lock = ep->full_duplex = 1;
++ ep->default_port = option & 0xFFFF;
++ ep->medialock = 1;
++ }
++ if (duplex) {
++ ep->duplex_lock = ep->full_duplex = 1;
++ printk(KERN_INFO "%s: Forced full duplex operation requested.\n",
++ dev->name);
++ }
++ dev->if_port = ep->default_port;
+
+ /* Find the connected MII xcvrs.
+ Doing this in open() would allow detecting external xcvrs later, but
+- takes too much time. */
++ takes much time and no cards have external MII. */
+ {
+- int phy, phy_idx;
+- for (phy = 1, phy_idx = 0; phy < 32 && phy_idx < sizeof(ep->phys);
+- phy++) {
+- int mii_status = mdio_read(ioaddr, phy, 1);
+- if (mii_status != 0xffff && mii_status != 0x0000) {
++ int phy, phy_idx = 0;
++ for (phy = 1; phy < 32 && phy_idx < sizeof(ep->phys); phy++) {
++ int mii_status = mdio_read(dev, phy, 1);
++ if (mii_status != 0xffff && mii_status != 0x0000) {
+ ep->phys[phy_idx++] = phy;
+- printk(KERN_INFO "%s: MII transceiver #%d control "
+- "%4.4x status %4.4x.\n"
+- KERN_INFO "%s: Autonegotiation advertising %4.4x "
+- "link partner %4.4x.\n",
+- dev->name, phy, mdio_read(ioaddr, phy, 0), mii_status,
+- dev->name, mdio_read(ioaddr, phy, 4),
+- mdio_read(ioaddr, phy, 5));
++ printk(KERN_INFO "%s: Located MII transceiver #%d control "
++ "%4.4x status %4.4x.\n",
++ dev->name, phy, mdio_read(dev, phy, 0), mii_status);
+ }
+ }
+- if (phy_idx == 0) {
+- printk(KERN_WARNING "%s: ***WARNING***: No MII transceiver found!\n",
+- dev->name);
+- /* Use the known PHY address of the EPII. */
+- ep->phys[0] = 3;
++ ep->mii_phy_cnt = phy_idx;
++ }
++ if (ep->mii_phy_cnt == 0 && ! (ep->chip_flags & NO_MII)) {
++ printk(KERN_WARNING "%s: ***WARNING***: No MII transceiver found!\n",
++ dev->name);
++ /* Use the known PHY address of the EPII. */
++ ep->phys[0] = 3;
++ }
++
++ if (ep->mii_phy_cnt) {
++ int phy = ep->phys[0];
++ int xcvr = ep->default_port & 0x330;
++ if (xcvr) {
++ printk(KERN_INFO " Forcing %dMbs %s-duplex operation.\n",
++ (xcvr & 0x300 ? 100 : 10),
++ (xcvr & 0x220 ? "full" : "half"));
++ ep->mii_bmcr = xcvr & 0x300 ? 0x2000 : 0; /* 10/100mbps? */
++ ep->mii_bmcr |= xcvr & 0x220 ? 0x0100 : 0; /* duplex */
++ mdio_write(dev, phy, 0, ep->mii_bmcr);
++ } else {
++ ep->mii_bmcr = 0x3000;
++ ep->advertising = mdio_read(dev, phy, 4);
++ printk(KERN_INFO "%s: Autonegotiation advertising %4.4x link "
++ "partner %4.4x.\n",
++ dev->name, ep->advertising, mdio_read(dev, phy, 5));
+ }
+ }
+
++#if EPIC_POWER_SAVE
+ /* Turn off the MII xcvr (175 only!), leave the chip in low-power mode. */
+- if (ep->chip_id == 6)
++ if (ep->chip_flags & MII_PWRDWN)
+ outl(inl(ioaddr + NVCTL) & ~0x483C, ioaddr + NVCTL);
++#endif
+ outl(0x0008, ioaddr + GENCTL);
+
+- /* The lower four bits are the media type. */
+- ep->force_fd = duplex;
+- ep->default_port = option;
+- if (ep->default_port)
+- ep->medialock = 1;
+-
+ /* The Epic-specific entries in the device structure. */
+ dev->open = &epic_open;
+ dev->hard_start_xmit = &epic_start_xmit;
+@@ -522,14 +521,10 @@
+ #define EE_ENB (0x0001 | EE_CS)
+
+ /* Delay between EEPROM clock transitions.
+- No extra delay is needed with 33Mhz PCI, but 66Mhz is untested.
++ This serves to flush the operation to the PCI bus.
+ */
+
+-#ifdef _LINUX_DELAY_H
+-#define eeprom_delay(nanosec) udelay(1)
+-#else
+-#define eeprom_delay(nanosec) do { ; } while (0)
+-#endif
++#define eeprom_delay() inl(ee_addr)
+
+ /* The EEPROM commands include the alway-set leading bit. */
+ #define EE_WRITE_CMD (5 << 6)
+@@ -543,9 +538,8 @@
+ int retval = 0;
+ long ee_addr = ioaddr + EECTL;
+ int read_cmd = location |
+- (inl(ee_addr) & 0x40) ? EE_READ64_CMD : EE_READ256_CMD;
++ (inl(ee_addr) & 0x40 ? EE_READ64_CMD : EE_READ256_CMD);
+
+- printk("EEctrl is %x.\n", inl(ee_addr));
+ outl(EE_ENB & ~EE_CS, ee_addr);
+ outl(EE_ENB, ee_addr);
+
+@@ -553,18 +547,18 @@
+ for (i = 12; i >= 0; i--) {
+ short dataval = (read_cmd & (1 << i)) ? EE_WRITE_1 : EE_WRITE_0;
+ outl(EE_ENB | dataval, ee_addr);
+- eeprom_delay(100);
++ eeprom_delay();
+ outl(EE_ENB | dataval | EE_SHIFT_CLK, ee_addr);
+- eeprom_delay(150);
++ eeprom_delay();
+ }
+ outl(EE_ENB, ee_addr);
+
+ for (i = 16; i > 0; i--) {
+ outl(EE_ENB | EE_SHIFT_CLK, ee_addr);
+- eeprom_delay(100);
++ eeprom_delay();
+ retval = (retval << 1) | ((inl(ee_addr) & EE_DATA_READ) ? 1 : 0);
+ outl(EE_ENB, ee_addr);
+- eeprom_delay(100);
++ eeprom_delay();
+ }
+
+ /* Terminate the EEPROM access. */
+@@ -574,24 +568,34 @@
+
+ #define MII_READOP 1
+ #define MII_WRITEOP 2
+-static int mdio_read(long ioaddr, int phy_id, int location)
++static int mdio_read(struct net_device *dev, int phy_id, int location)
+ {
++ long ioaddr = dev->base_addr;
++ int read_cmd = (phy_id << 9) | (location << 4) | MII_READOP;
+ int i;
+
+- outl((phy_id << 9) | (location << 4) | MII_READOP, ioaddr + MIICtrl);
+- /* Typical operation takes < 50 ticks. */
+- for (i = 4000; i > 0; i--)
+- if ((inl(ioaddr + MIICtrl) & MII_READOP) == 0)
++ outl(read_cmd, ioaddr + MIICtrl);
++ /* Typical operation takes 25 loops. */
++ for (i = 400; i > 0; i--)
++ if ((inl(ioaddr + MIICtrl) & MII_READOP) == 0) {
++ /* Work around read failure bug. */
++ if (phy_id == 1 && location < 6
++ && inw(ioaddr + MIIData) == 0xffff) {
++ outl(read_cmd, ioaddr + MIICtrl);
++ continue;
++ }
+ return inw(ioaddr + MIIData);
++ }
+ return 0xffff;
+ }
+
+-static void mdio_write(long ioaddr, int phy_id, int location, int value)
++static void mdio_write(struct net_device *dev, int phy_id, int loc, int value)
+ {
++ long ioaddr = dev->base_addr;
+ int i;
+
+ outw(value, ioaddr + MIIData);
+- outl((phy_id << 9) | (location << 4) | MII_WRITEOP, ioaddr + MIICtrl);
++ outl((phy_id << 9) | (loc << 4) | MII_WRITEOP, ioaddr + MIICtrl);
+ for (i = 10000; i > 0; i--) {
+ if ((inl(ioaddr + MIICtrl) & MII_WRITEOP) == 0)
+ break;
+@@ -600,89 +604,25 @@
+ }
+
+
+-static int
+-epic_open(struct device *dev)
++static int epic_open(struct net_device *dev)
+ {
+ struct epic_private *ep = (struct epic_private *)dev->priv;
+- long ioaddr = dev->base_addr;
+- int i;
+- int mii_reg5;
+- ep->full_duplex = ep->force_fd;
+-
+- /* Soft reset the chip. */
+- outl(0x4001, ioaddr + GENCTL);
+-
+- if (request_irq(dev->irq, &epic_interrupt, SA_SHIRQ, "SMC EPIC/100", dev))
+- return -EAGAIN;
+
+ MOD_INC_USE_COUNT;
+
+- epic_init_ring(dev);
+-
+- outl(0x4000, ioaddr + GENCTL);
+- /* This next magic! line by Ken Yamaguchi.. ?? */
+- outl(0x0008, ioaddr + TEST1);
+-
+- /* Pull the chip out of low-power mode, enable interrupts, and set for
+- PCI read multiple. The MIIcfg setting and strange write order are
+- required by the details of which bits are reset and the transceiver
+- wiring on the Ositech CardBus card.
+- */
+- outl(0x12, ioaddr + MIICfg);
+- if (ep->chip_id == 6)
+- outl((inl(ioaddr + NVCTL) & ~0x003C) | 0x4800, ioaddr + NVCTL);
+-
+-#if defined(__powerpc__) || defined(__sparc__) /* Big endian */
+- outl(0x0432 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL);
+-#else
+- outl(0x0412 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL);
+-#endif
+-
+- for (i = 0; i < 3; i++)
+- outl(((u16*)dev->dev_addr)[i], ioaddr + LAN0 + i*4);
+-
+- outl(TX_FIFO_THRESH, ioaddr + TxThresh);
+-
+- mii_reg5 = mdio_read(ioaddr, ep->phys[0], 5);
+- if (mii_reg5 != 0xffff) {
+- if ((mii_reg5 & 0x0100) || (mii_reg5 & 0x01C0) == 0x0040)
+- ep->full_duplex = 1;
+- else if (! (mii_reg5 & 0x4000))
+- mdio_write(ioaddr, ep->phys[0], 0, 0x1200);
+- if (epic_debug > 1)
+- printk(KERN_INFO "%s: Setting %s-duplex based on MII xcvr %d"
+- " register read of %4.4x.\n", dev->name,
+- ep->full_duplex ? "full" : "half", ep->phys[0], mii_reg5);
++ if (request_irq(dev->irq, &epic_interrupt, SA_SHIRQ, dev->name, dev)) {
++ MOD_DEC_USE_COUNT;
++ return -EAGAIN;
+ }
+
+- outl(ep->full_duplex ? 0x7F : 0x79, ioaddr + TxCtrl);
+- outl(virt_to_bus(ep->rx_ring), ioaddr + PRxCDAR);
+- outl(virt_to_bus(ep->tx_ring), ioaddr + PTxCDAR);
+-
+- /* Start the chip's Rx process. */
+- set_rx_mode(dev);
+- outl(0x000A, ioaddr + COMMAND);
+-
+- dev->tbusy = 0;
+- dev->interrupt = 0;
+- dev->start = 1;
+-
+- /* Enable interrupts by setting the interrupt mask. */
+- outl((ep->chip_id == 6 ? PCIBusErr175 : PCIBusErr170)
+- | CntFull | TxUnderrun | TxDone
+- | RxError | RxOverflow | RxFull | RxHeader | RxDone,
+- ioaddr + INTMASK);
+-
+- if (epic_debug > 1)
+- printk(KERN_DEBUG "%s: epic_open() ioaddr %lx IRQ %d status %4.4x "
+- "%s-duplex.\n",
+- dev->name, ioaddr, dev->irq, inl(ioaddr + GENCTL),
+- ep->full_duplex ? "full" : "half");
++ epic_init_ring(dev);
++ check_media(dev);
++ epic_start(dev, 0);
+
+ /* Set the timer to switch to check for link beat and perhaps switch
+ to an alternate media type. */
+ init_timer(&ep->timer);
+- ep->timer.expires = RUN_AT((24*HZ)/10); /* 2.4 sec. */
++ ep->timer.expires = jiffies + 3*HZ;
+ ep->timer.data = (unsigned long)dev;
+ ep->timer.function = &epic_timer; /* timer handler */
+ add_timer(&ep->timer);
+@@ -692,7 +632,7 @@
+
+ /* Reset the chip to recover from a PCI transaction error.
+ This may occur at interrupt time. */
+-static void epic_pause(struct device *dev)
++static void epic_pause(struct net_device *dev)
+ {
+ long ioaddr = dev->base_addr;
+ struct epic_private *ep = (struct epic_private *)dev->priv;
+@@ -700,7 +640,7 @@
+ /* Disable interrupts by clearing the interrupt mask. */
+ outl(0x00000000, ioaddr + INTMASK);
+ /* Stop the chip's Tx and Rx DMA processes. */
+- outw(0x0061, ioaddr + COMMAND);
++ outw(StopRx | StopTxDMA | StopRxDMA, ioaddr + COMMAND);
+
+ /* Update the error counts. */
+ if (inw(ioaddr + COMMAND) != 0xffff) {
+@@ -713,214 +653,268 @@
+ epic_rx(dev);
+ }
+
+-static void epic_restart(struct device *dev)
++static void epic_start(struct net_device *dev, int restart)
+ {
+ long ioaddr = dev->base_addr;
+ struct epic_private *ep = (struct epic_private *)dev->priv;
+ int i;
+
+- printk(KERN_DEBUG "%s: Restarting the EPIC chip, Rx %d/%d Tx %d/%d.\n",
+- dev->name, ep->cur_rx, ep->dirty_rx, ep->dirty_tx, ep->cur_tx);
+- /* Soft reset the chip. */
+- outl(0x0001, ioaddr + GENCTL);
+-
+- udelay(1);
+- /* Duplicate code from epic_open(). */
+- outl(0x0008, ioaddr + TEST1);
+-
+-#if defined(__powerpc__) /* Big endian */
+- outl(0x0432 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL);
++ if (restart) {
++ /* Soft reset the chip. */
++ outl(0x4001, ioaddr + GENCTL);
++ printk(KERN_DEBUG "%s: Restarting the EPIC chip, Rx %d/%d Tx %d/%d.\n",
++ dev->name, ep->cur_rx, ep->dirty_rx, ep->dirty_tx, ep->cur_tx);
++ udelay(1);
++
++ /* This magic is documented in SMSC app note 7.15 */
++ for (i = 16; i > 0; i--)
++ outl(0x0008, ioaddr + TEST1);
++ }
++
++#if defined(__powerpc__) || defined(__sparc__) || defined(__BIG_ENDIAN)
++ ep->genctl = 0x0432 | (RX_FIFO_THRESH<<8);
++#elif defined(__LITTLE_ENDIAN) || defined(__i386__)
++ ep->genctl = 0x0412 | (RX_FIFO_THRESH<<8);
+ #else
+- outl(0x0412 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL);
++#error The byte order of this architecture is not defined.
+ #endif
+- outl(0x12, ioaddr + MIICfg);
+- if (ep->chip_id == 6)
++
++ /* Power and reset the PHY. */
++ if (ep->chip_flags & MII_PWRDWN)
+ outl((inl(ioaddr + NVCTL) & ~0x003C) | 0x4800, ioaddr + NVCTL);
++ if (restart) {
++ outl(ep->genctl | 0x4000, ioaddr + GENCTL);
++ inl(ioaddr + GENCTL);
++ }
++ outl(ep->genctl, ioaddr + GENCTL);
++
++ if (dev->if_port == 2 || dev->if_port == 5) { /* 10base2 or AUI */
++ outl(0x13, ioaddr + MIICfg);
++ printk(KERN_INFO "%s: Disabling MII PHY to use 10base2/AUI.\n",
++ dev->name);
++ mdio_write(dev, ep->phys[0], 0, 0x0C00);
++ } else {
++ outl(0x12, ioaddr + MIICfg);
++ mdio_write(dev, ep->phys[0], 0, ep->advertising);
++ mdio_write(dev, ep->phys[0], 0, ep->mii_bmcr);
++ check_media(dev);
++ }
+
+ for (i = 0; i < 3; i++)
+- outl(((u16*)dev->dev_addr)[i], ioaddr + LAN0 + i*4);
++ outl(cpu_to_le16(((u16*)dev->dev_addr)[i]), ioaddr + LAN0 + i*4);
+
+- outl(TX_FIFO_THRESH, ioaddr + TxThresh);
++ ep->tx_threshold = TX_FIFO_THRESH;
++ outl(ep->tx_threshold, ioaddr + TxThresh);
+ outl(ep->full_duplex ? 0x7F : 0x79, ioaddr + TxCtrl);
+- outl(virt_to_bus(&ep->rx_ring[ep->cur_rx%RX_RING_SIZE]), ioaddr + PRxCDAR);
+- outl(virt_to_bus(&ep->tx_ring[ep->dirty_tx%TX_RING_SIZE]),
++ outl(virt_to_bus(&ep->rx_ring[ep->cur_rx % RX_RING_SIZE]),
++ ioaddr + PRxCDAR);
++ outl(virt_to_bus(&ep->tx_ring[ep->dirty_tx % TX_RING_SIZE]),
+ ioaddr + PTxCDAR);
+
+ /* Start the chip's Rx process. */
+ set_rx_mode(dev);
+- outl(0x000A, ioaddr + COMMAND);
++ outl(StartRx | RxQueued, ioaddr + COMMAND);
++
++ if ( ! restart)
++ netif_start_tx_queue(dev);
+
+ /* Enable interrupts by setting the interrupt mask. */
+- outl((ep->chip_id == 6 ? PCIBusErr175 : PCIBusErr170)
+- | CntFull | TxUnderrun | TxDone
++ outl((ep->chip_flags & TYPE2_INTR ? PCIBusErr175 : PCIBusErr170)
++ | CntFull | TxUnderrun | TxDone | TxEmpty
+ | RxError | RxOverflow | RxFull | RxHeader | RxDone,
+ ioaddr + INTMASK);
+- printk(KERN_DEBUG "%s: epic_restart() done, cmd status %4.4x, ctl %4.4x"
+- " interrupt %4.4x.\n",
+- dev->name, inl(ioaddr + COMMAND), inl(ioaddr + GENCTL),
+- inl(ioaddr + INTSTAT));
++ if (ep->msg_level & NETIF_MSG_IFUP)
++ printk(KERN_DEBUG "%s: epic_start() done, cmd status %4.4x, "
++ "ctl %4.4x interrupt %4.4x.\n",
++ dev->name, (int)inl(ioaddr + COMMAND),
++ (int)inl(ioaddr + GENCTL), (int)inl(ioaddr + INTSTAT));
+ return;
+ }
+
++static void check_media(struct net_device *dev)
++{
++ struct epic_private *ep = (struct epic_private *)dev->priv;
++ long ioaddr = dev->base_addr;
++ int mii_reg5 = ep->mii_phy_cnt ? mdio_read(dev, ep->phys[0], 5) : 0;
++ int negotiated = mii_reg5 & ep->advertising;
++ int duplex = (negotiated & 0x0100) || (negotiated & 0x01C0) == 0x0040;
++
++ if (ep->duplex_lock)
++ return;
++ if (mii_reg5 == 0xffff) /* Bogus read */
++ return;
++ if (ep->full_duplex != duplex) {
++ ep->full_duplex = duplex;
++ printk(KERN_INFO "%s: Setting %s-duplex based on MII #%d link"
++ " partner capability of %4.4x.\n", dev->name,
++ ep->full_duplex ? "full" : "half", ep->phys[0], mii_reg5);
++ outl(ep->full_duplex ? 0x7F : 0x79, ioaddr + TxCtrl);
++ }
++}
++
+ static void epic_timer(unsigned long data)
+ {
+- struct device *dev = (struct device *)data;
++ struct net_device *dev = (struct net_device *)data;
+ struct epic_private *ep = (struct epic_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+- int next_tick = 0;
+- int mii_reg5 = mdio_read(ioaddr, ep->phys[0], 5);
++ int next_tick = 5*HZ;
+
+- if (epic_debug > 3) {
+- printk(KERN_DEBUG "%s: Media selection tick, Tx status %8.8x.\n",
+- dev->name, inl(ioaddr + TxSTAT));
++ if (ep->msg_level & NETIF_MSG_TIMER) {
++ printk(KERN_DEBUG "%s: Media monitor tick, Tx status %8.8x.\n",
++ dev->name, (int)inl(ioaddr + TxSTAT));
+ printk(KERN_DEBUG "%s: Other registers are IntMask %4.4x "
+ "IntStatus %4.4x RxStatus %4.4x.\n",
+- dev->name, inl(ioaddr + INTMASK), inl(ioaddr + INTSTAT),
+- inl(ioaddr + RxSTAT));
+- }
+- if (! ep->force_fd && mii_reg5 != 0xffff) {
+- int duplex = (mii_reg5&0x0100) || (mii_reg5 & 0x01C0) == 0x0040;
+- if (ep->full_duplex != duplex) {
+- ep->full_duplex = duplex;
+- printk(KERN_INFO "%s: Setting %s-duplex based on MII #%d link"
+- " partner capability of %4.4x.\n", dev->name,
+- ep->full_duplex ? "full" : "half", ep->phys[0], mii_reg5);
+- outl(ep->full_duplex ? 0x7F : 0x79, ioaddr + TxCtrl);
+- }
+- next_tick = 60*HZ;
++ dev->name, (int)inl(ioaddr + INTMASK),
++ (int)inl(ioaddr + INTSTAT), (int)inl(ioaddr + RxSTAT));
+ }
+
+- if (next_tick) {
+- ep->timer.expires = RUN_AT(next_tick);
+- add_timer(&ep->timer);
++ if (ep->cur_tx - ep->dirty_tx > 1 &&
++ jiffies - dev->trans_start > TX_TIMEOUT) {
++ printk(KERN_WARNING "%s: Tx hung, %d vs. %d.\n",
++ dev->name, ep->cur_tx, ep->dirty_tx);
++ epic_tx_timeout(dev);
+ }
++
++ check_media(dev);
++
++ ep->timer.expires = jiffies + next_tick;
++ add_timer(&ep->timer);
+ }
+
+-static void epic_tx_timeout(struct device *dev)
++static void epic_tx_timeout(struct net_device *dev)
+ {
+ struct epic_private *ep = (struct epic_private *)dev->priv;
+ long ioaddr = dev->base_addr;
++ int tx_status = inw(ioaddr + TxSTAT);
+
+- if (epic_debug > 0) {
+- printk(KERN_WARNING "%s: Transmit timeout using MII device, "
+- "Tx status %4.4x.\n",
+- dev->name, inw(ioaddr + TxSTAT));
+- if (epic_debug > 1) {
+- printk(KERN_DEBUG "%s: Tx indices: dirty_tx %d, cur_tx %d.\n",
+- dev->name, ep->dirty_tx, ep->cur_tx);
+- }
+- }
+- if (inw(ioaddr + TxSTAT) & 0x10) { /* Tx FIFO underflow. */
++ printk(KERN_WARNING "%s: EPIC transmit timeout, Tx status %4.4x.\n",
++ dev->name, tx_status);
++ if (ep->msg_level & NETIF_MSG_TX_ERR)
++ printk(KERN_DEBUG "%s: Tx indices: dirty_tx %d, cur_tx %d.\n",
++ dev->name, ep->dirty_tx, ep->cur_tx);
++ if (tx_status & 0x10) { /* Tx FIFO underflow. */
+ ep->stats.tx_fifo_errors++;
+- /* Restart the transmit process. */
+- outl(0x0080, ioaddr + COMMAND);
++ outl(RestartTx, ioaddr + COMMAND);
++ } else {
++ epic_start(dev, 1);
++ outl(TxQueued, dev->base_addr + COMMAND);
+ }
+
+- /* Perhaps stop and restart the chip's Tx processes . */
+- /* Trigger a transmit demand. */
+- outl(0x0004, dev->base_addr + COMMAND);
+-
+ dev->trans_start = jiffies;
+ ep->stats.tx_errors++;
+ return;
+ }
+
+ /* Initialize the Rx and Tx rings, along with various 'dev' bits. */
+-static void
+-epic_init_ring(struct device *dev)
++static void epic_init_ring(struct net_device *dev)
+ {
+ struct epic_private *ep = (struct epic_private *)dev->priv;
+ int i;
+
+ ep->tx_full = 0;
+- ep->cur_rx = ep->cur_tx = 0;
+- ep->dirty_rx = ep->dirty_tx = 0;
++ ep->lock = (spinlock_t) SPIN_LOCK_UNLOCKED;
++ ep->dirty_tx = ep->cur_tx = 0;
++ ep->cur_rx = ep->dirty_rx = 0;
++ ep->last_rx_time = jiffies;
++ ep->rx_buf_sz = (dev->mtu <= 1522 ? PKT_BUF_SZ : dev->mtu + 14);
+
++ /* Initialize all Rx descriptors. */
+ for (i = 0; i < RX_RING_SIZE; i++) {
+- ep->rx_ring[i].status = 0x8000; /* Owned by Epic chip */
+- ep->rx_ring[i].buflength = PKT_BUF_SZ;
+- {
+- /* Note the receive buffer must be longword aligned.
+- dev_alloc_skb() provides 16 byte alignment. But do *not*
+- use skb_reserve() to align the IP header! */
+- struct sk_buff *skb;
+- skb = dev_alloc_skb(PKT_BUF_SZ);
+- ep->rx_skbuff[i] = skb;
+- if (skb == NULL)
+- break; /* Bad news! */
+- skb->dev = dev; /* Mark as being used by this device. */
+- skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
+- ep->rx_ring[i].bufaddr = virt_to_bus(skb->tail);
+- }
++ ep->rx_ring[i].rxstatus = 0;
++ ep->rx_ring[i].buflength = ep->rx_buf_sz;
+ ep->rx_ring[i].next = virt_to_bus(&ep->rx_ring[i+1]);
++ ep->rx_skbuff[i] = 0;
+ }
+ /* Mark the last entry as wrapping the ring. */
+ ep->rx_ring[i-1].next = virt_to_bus(&ep->rx_ring[0]);
+
++ /* Fill in the Rx buffers. Handle allocation failure gracefully. */
++ for (i = 0; i < RX_RING_SIZE; i++) {
++ struct sk_buff *skb = dev_alloc_skb(ep->rx_buf_sz);
++ ep->rx_skbuff[i] = skb;
++ if (skb == NULL)
++ break;
++ skb->dev = dev; /* Mark as being used by this device. */
++ skb_reserve(skb, 2); /* 16 byte align the IP header. */
++ ep->rx_ring[i].bufaddr = virt_to_bus(skb->tail);
++ ep->rx_ring[i].rxstatus = DescOwn;
++ }
++ ep->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
++
+ /* The Tx buffer descriptor is filled in as needed, but we
+ do need to clear the ownership bit. */
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ ep->tx_skbuff[i] = 0;
+- ep->tx_ring[i].status = 0x0000;
++ ep->tx_ring[i].txstatus = 0x0000;
+ ep->tx_ring[i].next = virt_to_bus(&ep->tx_ring[i+1]);
+ }
+ ep->tx_ring[i-1].next = virt_to_bus(&ep->tx_ring[0]);
++ return;
+ }
+
+-static int
+-epic_start_xmit(struct sk_buff *skb, struct device *dev)
++static int epic_start_xmit(struct sk_buff *skb, struct net_device *dev)
+ {
+ struct epic_private *ep = (struct epic_private *)dev->priv;
+- int entry;
+- u32 flag;
+-
+- /* Block a timer-based transmit from overlapping. This could better be
+- done with atomic_swap(1, dev->tbusy), but set_bit() works as well. */
+- if (test_and_set_bit(0, (void*)&dev->tbusy) != 0) {
+- if (jiffies - dev->trans_start < TX_TIMEOUT)
+- return 1;
+- epic_tx_timeout(dev);
++ int entry, free_count;
++ u32 ctrl_word;
++ unsigned long flags;
++
++ /* Block a timer-based transmit from overlapping. */
++ if (netif_pause_tx_queue(dev) != 0) {
++ /* This watchdog code is redundant with the media monitor timer. */
++ if (jiffies - dev->trans_start > TX_TIMEOUT)
++ epic_tx_timeout(dev);
+ return 1;
+ }
+
+- /* Caution: the write order is important here, set the base address
+- with the "ownership" bits last. */
++ /* Caution: the write order is important here, set the field with the
++ "ownership" bit last. */
+
+ /* Calculate the next Tx descriptor entry. */
++ spin_lock_irqsave(&ep->lock, flags);
++ free_count = ep->cur_tx - ep->dirty_tx;
+ entry = ep->cur_tx % TX_RING_SIZE;
+
+ ep->tx_skbuff[entry] = skb;
+- ep->tx_ring[entry].txlength = (skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN);
+ ep->tx_ring[entry].bufaddr = virt_to_bus(skb->data);
+- ep->tx_ring[entry].buflength = skb->len;
+
+- if (ep->cur_tx - ep->dirty_tx < TX_RING_SIZE/2) {/* Typical path */
+- flag = 0x10; /* No interrupt */
+- clear_bit(0, (void*)&dev->tbusy);
+- } else if (ep->cur_tx - ep->dirty_tx == TX_RING_SIZE/2) {
+- flag = 0x14; /* Tx-done intr. */
+- clear_bit(0, (void*)&dev->tbusy);
+- } else if (ep->cur_tx - ep->dirty_tx < TX_RING_SIZE - 2) {
+- flag = 0x10; /* No Tx-done intr. */
+- clear_bit(0, (void*)&dev->tbusy);
++ if (free_count < TX_QUEUE_LEN/2) {/* Typical path */
++ ctrl_word = 0x100000; /* No interrupt */
++ } else if (free_count == TX_QUEUE_LEN/2) {
++ ctrl_word = 0x140000; /* Tx-done intr. */
++ } else if (free_count < TX_QUEUE_LEN - 1) {
++ ctrl_word = 0x100000; /* No Tx-done intr. */
+ } else {
+- /* Leave room for two additional entries. */
+- flag = 0x14; /* Tx-done intr. */
+- ep->tx_full = 1;
+- }
++ /* Leave room for an additional entry. */
++ ctrl_word = 0x140000; /* Tx-done intr. */
++ ep->tx_full = 1;
++ }
++ ep->tx_ring[entry].buflength = ctrl_word | skb->len;
++ ep->tx_ring[entry].txstatus =
++ ((skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN) << 16)
++ | DescOwn;
+
+- ep->tx_ring[entry].control = flag;
+- ep->tx_ring[entry].status = 0x8000; /* Pass ownership to the chip. */
+ ep->cur_tx++;
++ if (ep->tx_full) {
++ /* Check for a just-cleared queue. */
++ if (ep->cur_tx - (volatile int)ep->dirty_tx < TX_QUEUE_LEN - 2) {
++ netif_unpause_tx_queue(dev);
++ ep->tx_full = 0;
++ } else
++ netif_stop_tx_queue(dev);
++ } else
++ netif_unpause_tx_queue(dev);
++
++ spin_unlock_irqrestore(&ep->lock, flags);
+ /* Trigger an immediate transmit demand. */
+- outl(0x0004, dev->base_addr + COMMAND);
++ outl(TxQueued, dev->base_addr + COMMAND);
+
+ dev->trans_start = jiffies;
+- if (epic_debug > 4)
++ if (ep->msg_level & NETIF_MSG_TX_QUEUED)
+ printk(KERN_DEBUG "%s: Queued Tx packet size %d to slot %d, "
+ "flag %2.2x Tx status %8.8x.\n",
+- dev->name, (int)skb->len, entry, flag,
+- inl(dev->base_addr + TxSTAT));
++ dev->name, (int)skb->len, entry, ctrl_word,
++ (int)inl(dev->base_addr + TxSTAT));
+
+ return 0;
+ }
+@@ -929,59 +923,48 @@
+ after the Tx thread. */
+ static void epic_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
+ {
+- struct device *dev = (struct device *)dev_instance;
+- struct epic_private *ep;
+- int status, ioaddr, boguscnt = max_interrupt_work;
+-
+- ioaddr = dev->base_addr;
+- ep = (struct epic_private *)dev->priv;
+-
+-#if defined(__i386__)
+- /* A lock to prevent simultaneous entry bug on Intel SMP machines. */
+- if (test_and_set_bit(0, (void*)&dev->interrupt)) {
+- printk(KERN_ERR"%s: SMP simultaneous entry of an interrupt handler.\n",
+- dev->name);
+- dev->interrupt = 0; /* Avoid halting machine. */
+- return;
+- }
+-#else
+- if (dev->interrupt) {
+- printk(KERN_ERR "%s: Re-entering the interrupt handler.\n", dev->name);
+- return;
+- }
+- dev->interrupt = 1;
+-#endif
++ struct net_device *dev = (struct net_device *)dev_instance;
++ struct epic_private *ep = (struct epic_private *)dev->priv;
++ long ioaddr = dev->base_addr;
++ int status, boguscnt = max_interrupt_work;
+
+ do {
+ status = inl(ioaddr + INTSTAT);
+ /* Acknowledge all of the current interrupt sources ASAP. */
+ outl(status & 0x00007fff, ioaddr + INTSTAT);
+
+- if (epic_debug > 4)
+- printk("%s: interrupt interrupt=%#8.8x new intstat=%#8.8x.\n",
+- dev->name, status, inl(ioaddr + INTSTAT));
++ if (ep->msg_level & NETIF_MSG_INTR)
++ printk(KERN_DEBUG "%s: Interrupt, status=%#8.8x new "
++ "intstat=%#8.8x.\n",
++ dev->name, status, (int)inl(ioaddr + INTSTAT));
+
+ if ((status & IntrSummary) == 0)
+ break;
+
+- if (status & (RxDone | RxStarted | RxEarlyWarn))
++ if (status & (RxDone | RxStarted | RxEarlyWarn | RxOverflow))
+ epic_rx(dev);
+
+ if (status & (TxEmpty | TxDone)) {
+- int dirty_tx;
++ unsigned int dirty_tx, cur_tx;
+
+- for (dirty_tx = ep->dirty_tx; dirty_tx < ep->cur_tx; dirty_tx++) {
++ /* Note: if this lock becomes a problem we can narrow the locked
++ region at the cost of occasionally grabbing the lock more
++ times. */
++ spin_lock(&ep->lock);
++ cur_tx = ep->cur_tx;
++ dirty_tx = ep->dirty_tx;
++ for (; cur_tx - dirty_tx > 0; dirty_tx++) {
+ int entry = dirty_tx % TX_RING_SIZE;
+- int txstatus = ep->tx_ring[entry].status;
++ int txstatus = ep->tx_ring[entry].txstatus;
+
+- if (txstatus < 0)
++ if (txstatus & DescOwn)
+ break; /* It still hasn't been Txed */
+
+ if ( ! (txstatus & 0x0001)) {
+ /* There was an major error, log it. */
+ #ifndef final_version
+- if (epic_debug > 1)
+- printk("%s: Transmit error, Tx status %8.8x.\n",
++ if (ep->msg_level & NETIF_MSG_TX_ERR)
++ printk(KERN_DEBUG "%s: Transmit error, Tx status %8.8x.\n",
+ dev->name, txstatus);
+ #endif
+ ep->stats.tx_errors++;
+@@ -993,39 +976,44 @@
+ if (txstatus & 0x1000) ep->stats.collisions16++;
+ #endif
+ } else {
++ if (ep->msg_level & NETIF_MSG_TX_DONE)
++ printk(KERN_DEBUG "%s: Transmit done, Tx status "
++ "%8.8x.\n", dev->name, txstatus);
+ #ifdef ETHER_STATS
+ if ((txstatus & 0x0002) != 0) ep->stats.tx_deferred++;
+ #endif
+ ep->stats.collisions += (txstatus >> 8) & 15;
+ ep->stats.tx_packets++;
++#if LINUX_VERSION_CODE > 0x20127
++ ep->stats.tx_bytes += ep->tx_skbuff[entry]->len;
++#endif
+ }
+
+ /* Free the original skb. */
+- DEV_FREE_SKB(ep->tx_skbuff[entry]);
++ dev_free_skb_irq(ep->tx_skbuff[entry]);
+ ep->tx_skbuff[entry] = 0;
+ }
+
+ #ifndef final_version
+- if (ep->cur_tx - dirty_tx > TX_RING_SIZE) {
+- printk("%s: Out-of-sync dirty pointer, %d vs. %d, full=%d.\n",
+- dev->name, dirty_tx, ep->cur_tx, ep->tx_full);
++ if (cur_tx - dirty_tx > TX_RING_SIZE) {
++ printk(KERN_WARNING "%s: Out-of-sync dirty pointer, %d vs. %d, full=%d.\n",
++ dev->name, dirty_tx, cur_tx, ep->tx_full);
+ dirty_tx += TX_RING_SIZE;
+ }
+ #endif
+-
+- if (ep->tx_full && dev->tbusy
+- && dirty_tx > ep->cur_tx - TX_RING_SIZE + 2) {
+- /* The ring is no longer full, clear tbusy. */
+- ep->tx_full = 0;
+- clear_bit(0, (void*)&dev->tbusy);
+- mark_bh(NET_BH);
+- }
+-
+ ep->dirty_tx = dirty_tx;
++ if (ep->tx_full
++ && cur_tx - dirty_tx < TX_QUEUE_LEN - 4) {
++ /* The ring is no longer full, allow new TX entries. */
++ ep->tx_full = 0;
++ spin_unlock(&ep->lock);
++ netif_resume_tx_queue(dev);
++ } else
++ spin_unlock(&ep->lock);
+ }
+
+ /* Check uncommon events all at once. */
+- if (status & (CntFull | TxUnderrun | RxOverflow |
++ if (status & (CntFull | TxUnderrun | RxOverflow | RxFull |
+ PCIBusErr170 | PCIBusErr175)) {
+ if (status == 0xffffffff) /* Chip failed or removed (CardBus). */
+ break;
+@@ -1036,60 +1024,65 @@
+
+ if (status & TxUnderrun) { /* Tx FIFO underflow. */
+ ep->stats.tx_fifo_errors++;
+- outl(1536, ioaddr + TxThresh);
++ outl(ep->tx_threshold += 128, ioaddr + TxThresh);
+ /* Restart the transmit process. */
+- outl(0x0080, ioaddr + COMMAND);
++ outl(RestartTx, ioaddr + COMMAND);
+ }
+ if (status & RxOverflow) { /* Missed a Rx frame. */
+ ep->stats.rx_errors++;
+ }
++ if (status & (RxOverflow | RxFull))
++ outw(RxQueued, ioaddr + COMMAND);
+ if (status & PCIBusErr170) {
+ printk(KERN_ERR "%s: PCI Bus Error! EPIC status %4.4x.\n",
+ dev->name, status);
+ epic_pause(dev);
+- epic_restart(dev);
++ epic_start(dev, 1);
+ }
+ /* Clear all error sources. */
+ outl(status & 0x7f18, ioaddr + INTSTAT);
+ }
+ if (--boguscnt < 0) {
+- printk(KERN_ERR "%s: Too much work at interrupt, "
++ printk(KERN_WARNING "%s: Too much work at interrupt, "
+ "IntrStatus=0x%8.8x.\n",
+ dev->name, status);
+ /* Clear all interrupt sources. */
+ outl(0x0001ffff, ioaddr + INTSTAT);
++ /* Ill-advised: Slowly stop emitting this message. */
++ max_interrupt_work++;
+ break;
+ }
+ } while (1);
+
+- if (epic_debug > 3)
+- printk(KERN_DEBUG "%s: exiting interrupt, intr_status=%#4.4x.\n",
+- dev->name, inl(ioaddr + INTSTAT));
++ if (ep->msg_level & NETIF_MSG_INTR)
++ printk(KERN_DEBUG "%s: Exiting interrupt, intr_status=%#4.4x.\n",
++ dev->name, status);
+
+-#if defined(__i386__)
+- clear_bit(0, (void*)&dev->interrupt);
+-#else
+- dev->interrupt = 0;
+-#endif
+ return;
+ }
+
+-static int epic_rx(struct device *dev)
++static int epic_rx(struct net_device *dev)
+ {
+ struct epic_private *ep = (struct epic_private *)dev->priv;
+ int entry = ep->cur_rx % RX_RING_SIZE;
++ int rx_work_limit = ep->dirty_rx + RX_RING_SIZE - ep->cur_rx;
+ int work_done = 0;
+
+- if (epic_debug > 4)
++ if (ep->msg_level & NETIF_MSG_RX_STATUS)
+ printk(KERN_DEBUG " In epic_rx(), entry %d %8.8x.\n", entry,
+- ep->rx_ring[entry].status);
++ ep->rx_ring[entry].rxstatus);
+ /* If we own the next entry, it's a new packet. Send it up. */
+- while (ep->rx_ring[entry].status >= 0 && ep->rx_skbuff[entry]) {
+- int status = ep->rx_ring[entry].status;
++ while ((ep->rx_ring[entry].rxstatus & DescOwn) == 0) {
++ int status = ep->rx_ring[entry].rxstatus;
+
+- if (epic_debug > 4)
++ if (ep->msg_level & NETIF_MSG_RX_STATUS)
+ printk(KERN_DEBUG " epic_rx() status was %8.8x.\n", status);
++ if (--rx_work_limit < 0)
++ break;
+ if (status & 0x2006) {
++ if (ep->msg_level & NETIF_MSG_RX_ERR)
++ printk(KERN_DEBUG "%s: epic_rx() error status was %8.8x.\n",
++ dev->name, status);
+ if (status & 0x2000) {
+ printk(KERN_WARNING "%s: Oversized Ethernet frame spanned "
+ "multiple buffers, status %4.4x!\n", dev->name, status);
+@@ -1100,22 +1093,30 @@
+ } else {
+ /* Malloc up new buffer, compatible with net-2e. */
+ /* Omit the four octet CRC from the length. */
+- short pkt_len = ep->rx_ring[entry].rxlength - 4;
++ short pkt_len = (status >> 16) - 4;
+ struct sk_buff *skb;
+
++ if (pkt_len > PKT_BUF_SZ - 4) {
++ printk(KERN_ERR "%s: Oversized Ethernet frame, status %x "
++ "%d bytes.\n",
++ dev->name, pkt_len, status);
++ pkt_len = 1514;
++ }
++ if (ep->msg_level & NETIF_MSG_RX_STATUS)
++ printk(KERN_DEBUG " netdev_rx() normal Rx pkt length %d"
++ ", bogus_cnt %d.\n", pkt_len, rx_work_limit);
+ /* Check if the packet is long enough to accept without copying
+ to a minimally-sized skbuff. */
+ if (pkt_len < rx_copybreak
+ && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
+ skb->dev = dev;
+ skb_reserve(skb, 2); /* 16 byte align the IP header */
+-#if 1 /* USE_IP_COPYSUM */
+- eth_copy_and_sum(skb, bus_to_virt(ep->rx_ring[entry].bufaddr),
+- pkt_len, 0);
++#if 1 /* HAS_IP_COPYSUM */
++ eth_copy_and_sum(skb, ep->rx_skbuff[entry]->tail, pkt_len, 0);
+ skb_put(skb, pkt_len);
+ #else
+- memcpy(skb_put(skb, pkt_len),
+- bus_to_virt(ep->rx_ring[entry].bufaddr), pkt_len);
++ memcpy(skb_put(skb, pkt_len), ep->rx_skbuff[entry]->tail,
++ pkt_len);
+ #endif
+ } else {
+ skb_put(skb = ep->rx_skbuff[entry], pkt_len);
+@@ -1124,6 +1125,9 @@
+ skb->protocol = eth_type_trans(skb, dev);
+ netif_rx(skb);
+ ep->stats.rx_packets++;
++#if LINUX_VERSION_CODE > 0x20127
++ ep->stats.rx_bytes += pkt_len;
++#endif
+ }
+ work_done++;
+ entry = (++ep->cur_rx) % RX_RING_SIZE;
+@@ -1134,7 +1138,7 @@
+ entry = ep->dirty_rx % RX_RING_SIZE;
+ if (ep->rx_skbuff[entry] == NULL) {
+ struct sk_buff *skb;
+- skb = ep->rx_skbuff[entry] = dev_alloc_skb(PKT_BUF_SZ);
++ skb = ep->rx_skbuff[entry] = dev_alloc_skb(ep->rx_buf_sz);
+ if (skb == NULL)
+ break;
+ skb->dev = dev; /* Mark as being used by this device. */
+@@ -1142,73 +1146,60 @@
+ ep->rx_ring[entry].bufaddr = virt_to_bus(skb->tail);
+ work_done++;
+ }
+- ep->rx_ring[entry].status = 0x8000;
++ ep->rx_ring[entry].rxstatus = DescOwn;
+ }
+ return work_done;
+ }
+
+-static int epic_close(struct device *dev)
++static int epic_close(struct net_device *dev)
+ {
+ long ioaddr = dev->base_addr;
+ struct epic_private *ep = (struct epic_private *)dev->priv;
+ int i;
+
+- dev->start = 0;
+- dev->tbusy = 1;
+-
+- if (epic_debug > 1)
+- printk(KERN_DEBUG "%s: Shutting down ethercard, status was %2.2x.\n",
+- dev->name, inl(ioaddr + INTSTAT));
++ netif_stop_tx_queue(dev);
+
+- /* Disable interrupts by clearing the interrupt mask. */
+- outl(0x00000000, ioaddr + INTMASK);
+- /* Stop the chip's Tx and Rx DMA processes. */
+- outw(0x0061, ioaddr + COMMAND);
+-
+- /* Update the error counts. */
+- ep->stats.rx_missed_errors += inb(ioaddr + MPCNT);
+- ep->stats.rx_frame_errors += inb(ioaddr + ALICNT);
+- ep->stats.rx_crc_errors += inb(ioaddr + CRCCNT);
++ if (ep->msg_level & NETIF_MSG_IFDOWN)
++ printk(KERN_DEBUG "%s: Shutting down ethercard, status was %8.8x.\n",
++ dev->name, (int)inl(ioaddr + INTSTAT));
+
++ epic_pause(dev);
+ del_timer(&ep->timer);
+-
+ free_irq(dev->irq, dev);
+
+ /* Free all the skbuffs in the Rx queue. */
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ struct sk_buff *skb = ep->rx_skbuff[i];
+ ep->rx_skbuff[i] = 0;
+- ep->rx_ring[i].status = 0; /* Not owned by Epic chip. */
++ ep->rx_ring[i].rxstatus = 0; /* Not owned by Epic chip. */
+ ep->rx_ring[i].buflength = 0;
+ ep->rx_ring[i].bufaddr = 0xBADF00D0; /* An invalid address. */
+ if (skb) {
+ #if LINUX_VERSION_CODE < 0x20100
+ skb->free = 1;
+ #endif
+- DEV_FREE_SKB(skb);
++ dev_free_skb(skb);
+ }
+ }
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ if (ep->tx_skbuff[i])
+- DEV_FREE_SKB(ep->tx_skbuff[i]);
++ dev_free_skb(ep->tx_skbuff[i]);
+ ep->tx_skbuff[i] = 0;
+ }
+
+-
+ /* Green! Leave the chip in low-power mode. */
+- outl(0x0008, ioaddr + GENCTL);
++ outl(0x440008, ioaddr + GENCTL);
+
+ MOD_DEC_USE_COUNT;
+-
+ return 0;
+ }
+
+-static struct net_device_stats *epic_get_stats(struct device *dev)
++static struct net_device_stats *epic_get_stats(struct net_device *dev)
+ {
+ struct epic_private *ep = (struct epic_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+
+- if (dev->start) {
++ if (netif_running(dev)) {
+ /* Update the error counts. */
+ ep->stats.rx_missed_errors += inb(ioaddr + MPCNT);
+ ep->stats.rx_frame_errors += inb(ioaddr + ALICNT);
+@@ -1244,16 +1235,16 @@
+ return crc;
+ }
+
+-
+-static void set_rx_mode(struct device *dev)
++static void set_rx_mode(struct net_device *dev)
+ {
+ long ioaddr = dev->base_addr;
+ struct epic_private *ep = (struct epic_private *)dev->priv;
+ unsigned char mc_filter[8]; /* Multicast hash filter */
++ u32 new_rx_mode;
+ int i;
+
+ if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
+- outl(0x002C, ioaddr + RxCtrl);
++ new_rx_mode = 0x002C;
+ /* Unconditionally log net taps. */
+ printk(KERN_INFO "%s: Promiscuous mode enabled.\n", dev->name);
+ memset(mc_filter, 0xff, sizeof(mc_filter));
+@@ -1262,10 +1253,10 @@
+ is never enabled. */
+ /* Too many to filter perfectly -- accept all multicasts. */
+ memset(mc_filter, 0xff, sizeof(mc_filter));
+- outl(0x000C, ioaddr + RxCtrl);
++ new_rx_mode = 0x000C;
+ } else if (dev->mc_count == 0) {
+- outl(0x0004, ioaddr + RxCtrl);
+- return;
++ memset(mc_filter, 0, sizeof(mc_filter));
++ new_rx_mode = 0x0004;
+ } else { /* Never executed, for now. */
+ struct dev_mc_list *mclist;
+
+@@ -1274,6 +1265,11 @@
+ i++, mclist = mclist->next)
+ set_bit(ether_crc_le(ETH_ALEN, mclist->dmi_addr) & 0x3f,
+ mc_filter);
++ new_rx_mode = 0x000C;
++ }
++ if (ep->cur_rx_mode != new_rx_mode) {
++ ep->cur_rx_mode = new_rx_mode;
++ outl(new_rx_mode, ioaddr + RxCtrl);
+ }
+ /* ToDo: perhaps we need to stop the Tx and Rx process here? */
+ if (memcmp(mc_filter, ep->mc_filter, sizeof(mc_filter))) {
+@@ -1284,48 +1280,125 @@
+ return;
+ }
+
+-static int mii_ioctl(struct device *dev, struct ifreq *rq, int cmd)
++static int mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+ {
++ struct epic_private *ep = (void *)dev->priv;
+ long ioaddr = dev->base_addr;
+ u16 *data = (u16 *)&rq->ifr_data;
++ u32 *data32 = (void *)&rq->ifr_data;
+
+ switch(cmd) {
+- case SIOCDEVPRIVATE: /* Get the address of the PHY in use. */
+- data[0] = ((struct epic_private *)dev->priv)->phys[0] & 0x1f;
++ case 0x8947: case 0x89F0:
++ /* SIOCGMIIPHY: Get the address of the PHY in use. */
++ data[0] = ep->phys[0] & 0x1f;
+ /* Fall Through */
+- case SIOCDEVPRIVATE+1: /* Read the specified MII register. */
+- if (! dev->start) {
++ case 0x8948: case 0x89F1:
++ /* SIOCGMIIREG: Read the specified MII register. */
++ if (! netif_running(dev)) {
+ outl(0x0200, ioaddr + GENCTL);
+ outl((inl(ioaddr + NVCTL) & ~0x003C) | 0x4800, ioaddr + NVCTL);
+ }
+- data[3] = mdio_read(ioaddr, data[0] & 0x1f, data[1] & 0x1f);
+- if (! dev->start) {
+-#ifdef notdef
++ data[3] = mdio_read(dev, data[0] & 0x1f, data[1] & 0x1f);
++#if defined(PWRDWN_AFTER_IOCTL)
++ if (! netif_running(dev)) {
+ outl(0x0008, ioaddr + GENCTL);
+ outl((inl(ioaddr + NVCTL) & ~0x483C) | 0x0000, ioaddr + NVCTL);
+-#endif
+ }
++#endif
+ return 0;
+- case SIOCDEVPRIVATE+2: /* Write the specified MII register */
+- if (!suser())
++ case 0x8949: case 0x89F2:
++ /* SIOCSMIIREG: Write the specified MII register */
++ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+- if (! dev->start) {
++ if (! netif_running(dev)) {
+ outl(0x0200, ioaddr + GENCTL);
+ outl((inl(ioaddr + NVCTL) & ~0x003C) | 0x4800, ioaddr + NVCTL);
+ }
+- mdio_write(ioaddr, data[0] & 0x1f, data[1] & 0x1f, data[2]);
+- if (! dev->start) {
+-#ifdef notdef
++ if (data[0] == ep->phys[0]) {
++ u16 value = data[2];
++ switch (data[1]) {
++ case 0:
++ /* Check for autonegotiation on or reset. */
++ ep->duplex_lock = (value & 0x9000) ? 0 : 1;
++ if (ep->duplex_lock)
++ ep->full_duplex = (value & 0x0100) ? 1 : 0;
++ break;
++ case 4: ep->advertising = value; break;
++ }
++ /* Perhaps check_duplex(dev), depending on chip semantics. */
++ }
++ mdio_write(dev, data[0] & 0x1f, data[1] & 0x1f, data[2]);
++#if defined(PWRDWN_AFTER_IOCTL)
++ if (! netif_running(dev)) {
+ outl(0x0008, ioaddr + GENCTL);
+ outl((inl(ioaddr + NVCTL) & ~0x483C) | 0x0000, ioaddr + NVCTL);
+-#endif
+ }
++#endif
++ return 0;
++ case SIOCGPARAMS:
++ data32[0] = ep->msg_level;
++ data32[1] = ep->multicast_filter_limit;
++ data32[2] = ep->max_interrupt_work;
++ data32[3] = ep->rx_copybreak;
++ return 0;
++ case SIOCSPARAMS:
++ if (!capable(CAP_NET_ADMIN))
++ return -EPERM;
++ ep->msg_level = data32[0];
++ ep->multicast_filter_limit = data32[1];
++ ep->max_interrupt_work = data32[2];
++ ep->rx_copybreak = data32[3];
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+ }
+
++static int epic_pwr_event(void *dev_instance, int event)
++{
++ struct net_device *dev = dev_instance;
++ struct epic_private *ep = (struct epic_private *)dev->priv;
++ long ioaddr = dev->base_addr;
++ if (ep->msg_level & NETIF_MSG_LINK)
++ printk(KERN_DEBUG "%s: Handling power event %d.\n", dev->name, event);
++ switch(event) {
++ case DRV_SUSPEND:
++ epic_pause(dev);
++ /* Put the chip into low-power mode. */
++ outl(0x0008, ioaddr + GENCTL);
++ break;
++ case DRV_RESUME:
++ epic_start(dev, 1);
++ break;
++ case DRV_DETACH: {
++ struct net_device **devp, **next;
++ if (dev->flags & IFF_UP) {
++ dev_close(dev);
++ dev->flags &= ~(IFF_UP|IFF_RUNNING);
++ }
++ unregister_netdev(dev);
++ release_region(dev->base_addr, pci_id_tbl[ep->chip_id].io_size);
++#ifndef USE_IO_OPS
++ iounmap((char *)dev->base_addr);
++#endif
++ for (devp = &root_epic_dev; *devp; devp = next) {
++ next = &((struct epic_private *)(*devp)->priv)->next_module;
++ if (*devp == dev) {
++ *devp = *next;
++ break;
++ }
++ }
++ if (ep->priv_addr)
++ kfree(ep->priv_addr);
++ kfree(dev);
++ /*MOD_DEC_USE_COUNT;*/
++ break;
++ }
++ }
++
++ return 0;
++}
++
+
+ #ifdef CARDBUS
+
+@@ -1333,19 +1406,33 @@
+
+ static dev_node_t *epic_attach(dev_locator_t *loc)
+ {
+- struct device *dev;
++ struct net_device *dev;
+ u16 dev_id;
+- u32 io;
++ u32 pciaddr;
+ u8 bus, devfn, irq;
++ long ioaddr;
+
+ if (loc->bus != LOC_PCI) return NULL;
+ bus = loc->b.pci.bus; devfn = loc->b.pci.devfn;
+- printk(KERN_INFO "epic_attach(bus %d, function %d)\n", bus, devfn);
+- pcibios_read_config_dword(bus, devfn, PCI_BASE_ADDRESS_0, &io);
++ printk(KERN_DEBUG "epic_attach(bus %d, function %d)\n", bus, devfn);
++#ifdef USE_IO_OPS
++ pcibios_read_config_dword(bus, devfn, PCI_BASE_ADDRESS_0, &pciaddr);
++ ioaddr = pciaddr & PCI_BASE_ADDRESS_IO_MASK;
++#else
++ pcibios_read_config_dword(bus, devfn, PCI_BASE_ADDRESS_1, &pciaddr);
++ ioaddr = (long)ioremap(pciaddr & PCI_BASE_ADDRESS_MEM_MASK,
++ pci_id_tbl[1].io_size);
++#endif
+ pcibios_read_config_byte(bus, devfn, PCI_INTERRUPT_LINE, &irq);
+ pcibios_read_config_word(bus, devfn, PCI_DEVICE_ID, &dev_id);
+- io &= ~3;
+- dev = epic_probe1(bus, devfn, NULL, -1);
++ if (ioaddr == 0 || irq == 0) {
++ printk(KERN_ERR "The EPIC/C CardBus Ethernet interface at %d/%d was "
++ "not assigned an %s.\n"
++ KERN_ERR " It will not be activated.\n",
++ bus, devfn, ioaddr == 0 ? "address" : "IRQ");
++ return NULL;
++ }
++ dev = epic_probe1(pci_find_slot(bus, devfn), NULL, ioaddr, irq, 1, 0);
+ if (dev) {
+ dev_node_t *node = kmalloc(sizeof(dev_node_t), GFP_KERNEL);
+ strcpy(node->dev_name, dev->name);
+@@ -1359,7 +1446,7 @@
+
+ static void epic_suspend(dev_node_t *node)
+ {
+- struct device **devp, **next;
++ struct net_device **devp, **next;
+ printk(KERN_INFO "epic_suspend(%s)\n", node->dev_name);
+ for (devp = &root_epic_dev; *devp; devp = next) {
+ next = &((struct epic_private *)(*devp)->priv)->next_module;
+@@ -1374,19 +1461,19 @@
+ }
+ static void epic_resume(dev_node_t *node)
+ {
+- struct device **devp, **next;
++ struct net_device **devp, **next;
+ printk(KERN_INFO "epic_resume(%s)\n", node->dev_name);
+ for (devp = &root_epic_dev; *devp; devp = next) {
+ next = &((struct epic_private *)(*devp)->priv)->next_module;
+ if (strcmp((*devp)->name, node->dev_name) == 0) break;
+ }
+ if (*devp) {
+- epic_restart(*devp);
++ epic_start(*devp, 1);
+ }
+ }
+ static void epic_detach(dev_node_t *node)
+ {
+- struct device **devp, **next;
++ struct net_device **devp, **next;
+ printk(KERN_INFO "epic_detach(%s)\n", node->dev_name);
+ for (devp = &root_epic_dev; *devp; devp = next) {
+ next = &((struct epic_private *)(*devp)->priv)->next_module;
+@@ -1394,6 +1481,10 @@
+ }
+ if (*devp) {
+ unregister_netdev(*devp);
++ release_region((*devp)->base_addr, EPIC_TOTAL_SIZE);
++#ifndef USE_IO_OPS
++ iounmap((char *)(*devp)->base_addr);
++#endif
+ kfree(*devp);
+ *devp = *next;
+ kfree(node);
+@@ -1410,48 +1501,58 @@
+
+ #ifdef MODULE
+
+-/* An additional parameter that may be passed in... */
+-static int debug = -1;
+-
+-int
+-init_module(void)
++int init_module(void)
+ {
+- if (debug >= 0)
+- epic_debug = debug;
++ /* Emit version even if no cards detected. */
++ printk(KERN_INFO "%s", version);
+
+ #ifdef CARDBUS
+ register_driver(&epic_ops);
+ return 0;
+ #else
+- return epic100_probe(0);
++ return pci_drv_register(&epic_drv_id, NULL);
+ #endif
+ }
+
+-void
+-cleanup_module(void)
++void cleanup_module(void)
+ {
+- struct device *next_dev;
++ struct net_device *next_dev;
+
+ #ifdef CARDBUS
+ unregister_driver(&epic_ops);
++#else
++ pci_drv_unregister(&epic_drv_id);
+ #endif
+
+ /* No need to check MOD_IN_USE, as sys_delete_module() checks. */
+ while (root_epic_dev) {
+- next_dev = ((struct epic_private *)root_epic_dev->priv)->next_module;
++ struct epic_private *ep = (struct epic_private *)root_epic_dev->priv;
+ unregister_netdev(root_epic_dev);
+- release_region(root_epic_dev->base_addr, EPIC_TOTAL_SIZE);
++ release_region(root_epic_dev->base_addr, pci_id_tbl[ep->chip_id].io_size);
++#ifndef USE_IO_OPS
++ iounmap((char *)root_epic_dev->base_addr);
++#endif
++ next_dev = ep->next_module;
++ if (ep->priv_addr)
++ kfree(ep->priv_addr);
+ kfree(root_epic_dev);
+ root_epic_dev = next_dev;
+ }
+ }
+-
++#else
++int epic100_probe(struct net_device *dev)
++{
++ int retval = pci_drv_register(&epic_drv_id, dev);
++ if (retval >= 0)
++ printk(KERN_INFO "%s", version);
++ return retval;
++}
+ #endif /* MODULE */
+
+ /*
+ * Local variables:
+- * compile-command: "gcc -DMODULE -D__KERNEL__ -Wall -Wstrict-prototypes -O6 -c epic100.c `[ -f /usr/include/linux/modversions.h ] && echo -DMODVERSIONS`"
+- * cardbus-compile-command: "gcc -DCARDBUS -DMODULE -D__KERNEL__ -Wall -Wstrict-prototypes -O6 -c epic100.c -o epic_cb.o -I/usr/src/pcmcia-cs-3.0.5/include/"
++ * compile-command: "gcc -DMODULE -Wall -Wstrict-prototypes -O6 -c epic100.c"
++ * cardbus-compile-command: "gcc -DCARDBUS -DMODULE -Wall -Wstrict-prototypes -O6 -c epic100.c -o epic_cb.o -I/usr/src/pcmcia/include/"
+ * c-indent-level: 4
+ * c-basic-offset: 4
+ * tab-width: 4
+Index: linux/src/drivers/net/hamachi.c
+===================================================================
+RCS file: linux/src/drivers/net/hamachi.c
+diff -N linux/src/drivers/net/hamachi.c
+--- /dev/null 1 Jan 1970 00:00:00 -0000
++++ linux/src/drivers/net/hamachi.c 20 Aug 2004 10:32:53 -0000
+@@ -0,0 +1,1315 @@
++/* hamachi.c: A Packet Engines GNIC-II Gigabit Ethernet driver for Linux. */
++/*
++ Written 1998-2002 by Donald Becker.
++
++ This software may be used and distributed according to the terms of
++ the GNU General Public License (GPL), incorporated herein by reference.
++ Drivers based on or derived from this code fall under the GPL and must
++ retain the authorship, copyright and license notice. This file is not
++ a complete program and may only be used when the entire operating
++ system is licensed under the GPL.
++
++ The author may be reached as becker@scyld.com, or C/O
++ Scyld Computing Corporation
++ 410 Severn Ave., Suite 210
++ Annapolis MD 21403
++
++ This driver is for the Packet Engines GNIC-II PCI Gigabit Ethernet
++ adapter.
++
++ Support and updates available at
++ http://www.scyld.com/network/hamachi.html
++*/
++
++/* These identify the driver base version and may not be removed. */
++static const char version1[] =
++"hamachi.c:v1.04 11/17/2002 Written by Donald Becker <becker@scyld.com>\n";
++static const char version2[] =
++" http://www.scyld.com/network/hamachi.html\n";
++
++/* Automatically extracted configuration info:
++probe-func: hamachi_probe
++config-in: tristate 'Packet Engines "Hamachi" PCI Gigabit Ethernet support' CONFIG_HAMACHI
++c-help-name: Packet Engines "Hamachi" PCI Gigabit Ethernet support
++c-help-symbol: CONFIG_HAMACHI
++c-help: This driver is for the Packet Engines "Hamachi" GNIC-2 Gigabit Ethernet
++c-help: adapter.
++c-help: Usage information and updates are available from
++c-help: http://www.scyld.com/network/hamachi.html
++*/
++
++/* The user-configurable values.
++ These may be modified when a driver module is loaded.*/
++
++/* Message enable level: 0..31 = no..all messages. See NETIF_MSG docs. */
++static int debug = 2;
++
++/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
++static int max_interrupt_work = 40;
++
++/* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
++ The Hamachi has a 64 element perfect filter. */
++static int multicast_filter_limit = 32;
++
++/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
++ Setting to > 1518 effectively disables this feature. */
++static int rx_copybreak = 0;
++
++/* A override for the hardware detection of bus width.
++ Set to 1 to force 32 bit PCI bus detection. Set to 4 to force 64 bit.
++ Add 2 to disable parity detection.
++*/
++static int force32 = 0;
++
++/* Used to pass the media type, etc.
++ These exist for driver interoperability.
++ Only 1 Gigabit is supported by the chip.
++*/
++#define MAX_UNITS 8 /* More are supported, limit only on options */
++static int options[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
++static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
++
++/* Operational parameters that are set at compile time. */
++
++/* Keep the ring sizes a power of two for compile efficiency.
++ The compiler will convert <unsigned>'%'<2^N> into a bit mask.
++ Making the Tx ring too large decreases the effectiveness of channel
++ bonding and packet priority.
++ There are no ill effects from too-large receive rings. */
++#define TX_RING_SIZE 64
++#define TX_QUEUE_LEN 60 /* Limit ring entries actually used. */
++#define RX_RING_SIZE 128
++
++/* Operational parameters that usually are not changed. */
++/* Time in jiffies before concluding the transmitter is hung. */
++#define TX_TIMEOUT (6*HZ)
++
++/* Allocation size of Rx buffers with normal sized Ethernet frames.
++ Do not change this value without good reason. This is not a limit,
++ but a way to keep a consistent allocation size among drivers.
++ */
++#define PKT_BUF_SZ 1536
++
++#ifndef __KERNEL__
++#define __KERNEL__
++#endif
++#if !defined(__OPTIMIZE__)
++#warning You must compile this file with the correct options!
++#warning See the last lines of the source file.
++#error You must compile this driver with "-O".
++#endif
++
++#include <linux/config.h>
++#if defined(CONFIG_SMP) && ! defined(__SMP__)
++#define __SMP__
++#endif
++#if defined(MODULE) && defined(CONFIG_MODVERSIONS) && ! defined(MODVERSIONS)
++#define MODVERSIONS
++#endif
++
++#include <linux/version.h>
++#if defined(MODVERSIONS)
++#include <linux/modversions.h>
++#endif
++#include <linux/module.h>
++
++#include <linux/kernel.h>
++#include <linux/string.h>
++#include <linux/timer.h>
++#include <linux/errno.h>
++#include <linux/ioport.h>
++#if LINUX_VERSION_CODE >= 0x20400
++#include <linux/slab.h>
++#else
++#include <linux/malloc.h>
++#endif
++#include <linux/interrupt.h>
++#include <linux/pci.h>
++#include <linux/netdevice.h>
++#include <linux/etherdevice.h>
++#include <linux/skbuff.h>
++#include <asm/processor.h> /* Processor type for cache alignment. */
++#include <asm/bitops.h>
++#include <asm/io.h>
++#include <asm/unaligned.h>
++
++#ifdef INLINE_PCISCAN
++#include "k_compat.h"
++#else
++#include "pci-scan.h"
++#include "kern_compat.h"
++#endif
++
++/* Condensed operations for readability. */
++#if ADDRLEN == 64
++#define virt_to_desc(addr) cpu_to_le64(virt_to_bus(addr))
++#else
++#define virt_to_desc(addr) cpu_to_le32(virt_to_bus(addr))
++#define le32desc_to_virt(addr) bus_to_virt(le32_to_cpu(addr))
++#endif
++
++#if (LINUX_VERSION_CODE >= 0x20100) && defined(MODULE)
++char kernel_version[] = UTS_RELEASE;
++#endif
++
++MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
++MODULE_DESCRIPTION("Packet Engines 'Hamachi' GNIC-II Gigabit Ethernet driver");
++MODULE_LICENSE("GPL");
++MODULE_PARM(debug, "i");
++MODULE_PARM(options, "1-" __MODULE_STRING(MAX_UNITS) "i");
++MODULE_PARM(rx_copybreak, "i");
++MODULE_PARM(full_duplex, "1-" __MODULE_STRING(MAX_UNITS) "i");
++MODULE_PARM(multicast_filter_limit, "i");
++MODULE_PARM(max_interrupt_work, "i");
++MODULE_PARM(force32, "i");
++MODULE_PARM_DESC(debug, "Driver message level (0-31)");
++MODULE_PARM_DESC(options, "Force transceiver type or fixed speed+duplex");
++MODULE_PARM_DESC(max_interrupt_work,
++ "Driver maximum events handled per interrupt");
++MODULE_PARM_DESC(full_duplex,
++ "Non-zero to force full duplex, non-negotiated link "
++ "(unused, deprecated).");
++MODULE_PARM_DESC(rx_copybreak,
++ "Breakpoint in bytes for copy-only-tiny-frames");
++MODULE_PARM_DESC(multicast_filter_limit,
++ "Multicast addresses before switching to Rx-all-multicast");
++MODULE_PARM_DESC(force32, "Set to 1 to force 32 bit PCI bus use.");
++
++/*
++ Theory of Operation
++
++I. Board Compatibility
++
++This device driver is designed for the Packet Engines "Hamachi"
++Gigabit Ethernet chip. The only PCA currently supported is the GNIC-II 64-bit
++66Mhz PCI card.
++
++II. Board-specific settings
++
++No jumpers exist on the board. The chip supports software correction of
++various motherboard wiring errors, however this driver does not support
++that feature.
++
++III. Driver operation
++
++IIIa. Ring buffers
++
++The Hamachi uses a typical descriptor based bus-master architecture.
++The descriptor list is similar to that used by the Digital Tulip.
++This driver uses two statically allocated fixed-size descriptor lists
++formed into rings by a branch from the final descriptor to the beginning of
++the list. The ring sizes are set at compile time by RX/TX_RING_SIZE.
++
++This driver uses a zero-copy receive and transmit scheme similar my other
++network drivers.
++The driver allocates full frame size skbuffs for the Rx ring buffers at
++open() time and passes the skb->data field to the Hamachi as receive data
++buffers. When an incoming frame is less than RX_COPYBREAK bytes long,
++a fresh skbuff is allocated and the frame is copied to the new skbuff.
++When the incoming frame is larger, the skbuff is passed directly up the
++protocol stack and replaced by a newly allocated skbuff.
++
++The RX_COPYBREAK value is chosen to trade-off the memory wasted by
++using a full-sized skbuff for small frames vs. the copying costs of larger
++frames. Gigabit cards are typically used on generously configured machines
++and the underfilled buffers have negligible impact compared to the benefit of
++a single allocation size, so the default value of zero results in never
++copying packets.
++
++IIIb/c. Transmit/Receive Structure
++
++The Rx and Tx descriptor structure are straight-forward, with no historical
++baggage that must be explained. Unlike the awkward DBDMA structure, there
++are no unused fields or option bits that had only one allowable setting.
++
++Two details should be noted about the descriptors: The chip supports both 32
++bit and 64 bit address structures, and the length field is overwritten on
++the receive descriptors. The descriptor length is set in the control word
++for each channel. The development driver uses 32 bit addresses only, however
++64 bit addresses may be enabled for 64 bit architectures e.g. the Alpha.
++
++IIId. Synchronization
++
++This driver is very similar to my other network drivers.
++The driver runs as two independent, single-threaded flows of control. One
++is the send-packet routine, which enforces single-threaded use by the
++dev->tbusy flag. The other thread is the interrupt handler, which is single
++threaded by the hardware and other software.
++
++The send packet thread has partial control over the Tx ring and 'dev->tbusy'
++flag. It sets the tbusy flag whenever it's queuing a Tx packet. If the next
++queue slot is empty, it clears the tbusy flag when finished otherwise it sets
++the 'hmp->tx_full' flag.
++
++The interrupt handler has exclusive control over the Rx ring and records stats
++from the Tx ring. After reaping the stats, it marks the Tx queue entry as
++empty by incrementing the dirty_tx mark. Iff the 'hmp->tx_full' flag is set, it
++clears both the tx_full and tbusy flags.
++
++IV. Notes
++
++Thanks to Kim Stearns of Packet Engines for providing a pair of GNIC-II boards.
++
++IVb. References
++
++Hamachi Engineering Design Specification, 5/15/97
++(Note: This version was marked "Confidential".)
++
++IVc. Errata
++
++None noted.
++*/
++
++
++/* The table for PCI detection and activation. */
++
++static void *hamachi_probe1(struct pci_dev *pdev, void *init_dev,
++ long ioaddr, int irq, int chip_idx, int find_cnt);
++enum chip_capability_flags { CanHaveMII=1, };
++
++static struct pci_id_info pci_id_tbl[] = {
++ {"Packet Engines GNIC-II \"Hamachi\"", { 0x09111318, 0xffffffff,},
++ PCI_USES_MEM | PCI_USES_MASTER | PCI_ADDR0 | PCI_ADDR_64BITS, 0x400, 0, },
++ { 0,},
++};
++
++struct drv_id_info hamachi_drv_id = {
++ "hamachi", 0, PCI_CLASS_NETWORK_ETHERNET<<8, pci_id_tbl,
++ hamachi_probe1, 0,
++};
++
++/* Offsets to the Hamachi registers. Various sizes. */
++enum hamachi_offsets {
++ TxDMACtrl=0x00, TxCmd=0x04, TxStatus=0x06, TxPtr=0x08, TxCurPtr=0x10,
++ RxDMACtrl=0x20, RxCmd=0x24, RxStatus=0x26, RxPtr=0x28, RxCurPtr=0x30,
++ PCIClkMeas=0x060, MiscStatus=0x066, ChipRev=0x68, ChipReset=0x06B,
++ LEDCtrl=0x06C, VirtualJumpers=0x06D,
++ TxChecksum=0x074, RxChecksum=0x076,
++ TxIntrCtrl=0x078, RxIntrCtrl=0x07C,
++ InterruptEnable=0x080, InterruptClear=0x084, IntrStatus=0x088,
++ EventStatus=0x08C,
++ MACCnfg=0x0A0, FrameGap0=0x0A2, FrameGap1=0x0A4,
++ /* See enum MII_offsets below. */
++ MACCnfg2=0x0B0, RxDepth=0x0B8, FlowCtrl=0x0BC, MaxFrameSize=0x0CE,
++ AddrMode=0x0D0, StationAddr=0x0D2,
++ /* Gigabit AutoNegotiation. */
++ ANCtrl=0x0E0, ANStatus=0x0E2, ANXchngCtrl=0x0E4, ANAdvertise=0x0E8,
++ ANLinkPartnerAbility=0x0EA,
++ EECmdStatus=0x0F0, EEData=0x0F1, EEAddr=0x0F2,
++ FIFOcfg=0x0F8,
++};
++
++/* Offsets to the MII-mode registers. */
++enum MII_offsets {
++ MII_Cmd=0xA6, MII_Addr=0xA8, MII_Wr_Data=0xAA, MII_Rd_Data=0xAC,
++ MII_Status=0xAE,
++};
++
++/* Bits in the interrupt status/mask registers. */
++enum intr_status_bits {
++ IntrRxDone=0x01, IntrRxPCIFault=0x02, IntrRxPCIErr=0x04,
++ IntrTxDone=0x100, IntrTxPCIFault=0x200, IntrTxPCIErr=0x400,
++ LinkChange=0x10000, NegotiationChange=0x20000, StatsMax=0x40000, };
++
++/* The Hamachi Rx and Tx buffer descriptors. */
++struct hamachi_desc {
++ u32 status_n_length;
++#if ADDRLEN == 64
++ u32 pad;
++ u64 addr;
++#else
++ u32 addr;
++#endif
++};
++
++/* Bits in hamachi_desc.status */
++enum desc_status_bits {
++ DescOwn=0x80000000, DescEndPacket=0x40000000, DescEndRing=0x20000000,
++ DescIntr=0x10000000,
++};
++
++#define PRIV_ALIGN 15 /* Required alignment mask */
++struct hamachi_private {
++ /* Descriptor rings first for alignment. Tx requires a second descriptor
++ for status. */
++ struct hamachi_desc rx_ring[RX_RING_SIZE];
++ struct hamachi_desc tx_ring[TX_RING_SIZE];
++ /* The addresses of receive-in-place skbuffs. */
++ struct sk_buff* rx_skbuff[RX_RING_SIZE];
++ /* The saved address of a sent-in-place packet/buffer, for skfree(). */
++ struct sk_buff* tx_skbuff[TX_RING_SIZE];
++ struct net_device *next_module;
++ void *priv_addr; /* Unaligned address for kfree */
++ struct net_device_stats stats;
++ struct timer_list timer; /* Media monitoring timer. */
++ int chip_id, drv_flags;
++ struct pci_dev *pci_dev;
++
++ /* Frequently used and paired value: keep adjacent for cache effect. */
++ int msg_level;
++ int max_interrupt_work;
++ long in_interrupt;
++
++ struct hamachi_desc *rx_head_desc;
++ unsigned int cur_rx, dirty_rx; /* Producer/consumer ring indices */
++ unsigned int rx_buf_sz; /* Based on MTU+slack. */
++ int rx_copybreak;
++ int multicast_filter_limit;
++ int rx_mode;
++
++ unsigned int cur_tx, dirty_tx;
++ unsigned int tx_full:1; /* The Tx queue is full. */
++ unsigned int full_duplex:1; /* Full-duplex operation requested. */
++ unsigned int duplex_lock:1;
++ unsigned int medialock:1; /* Do not sense media. */
++ unsigned int default_port; /* Last dev->if_port value. */
++ /* MII transceiver section. */
++ int mii_cnt; /* MII device addresses. */
++ u16 advertising; /* NWay media advertisement */
++ unsigned char phys[2]; /* MII device addresses. */
++};
++
++static int read_eeprom(struct net_device *dev, int location);
++static int mdio_read(long ioaddr, int phy_id, int location);
++static void mdio_write(long ioaddr, int phy_id, int location, int value);
++static int hamachi_open(struct net_device *dev);
++static int mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
++#ifdef HAVE_CHANGE_MTU
++static int change_mtu(struct net_device *dev, int new_mtu);
++#endif
++static void hamachi_timer(unsigned long data);
++static void hamachi_tx_timeout(struct net_device *dev);
++static void hamachi_init_ring(struct net_device *dev);
++static int hamachi_start_xmit(struct sk_buff *skb, struct net_device *dev);
++static void hamachi_interrupt(int irq, void *dev_instance, struct pt_regs *regs);
++static int hamachi_rx(struct net_device *dev);
++static void hamachi_error(struct net_device *dev, int intr_status);
++static int hamachi_close(struct net_device *dev);
++static struct net_device_stats *hamachi_get_stats(struct net_device *dev);
++static void set_rx_mode(struct net_device *dev);
++
++
++
++/* A list of our installed devices, for removing the driver module. */
++static struct net_device *root_hamachi_dev = NULL;
++
++#ifndef MODULE
++int hamachi_probe(struct net_device *dev)
++{
++ if (pci_drv_register(&hamachi_drv_id, dev) < 0)
++ return -ENODEV;
++ printk(KERN_INFO "%s" KERN_INFO "%s", version1, version2);
++ return 0;
++}
++#endif
++
++static void *hamachi_probe1(struct pci_dev *pdev, void *init_dev,
++ long ioaddr, int irq, int chip_idx, int card_idx)
++{
++ struct net_device *dev;
++ struct hamachi_private *np;
++ void *priv_mem;
++ int i, option = card_idx < MAX_UNITS ? options[card_idx] : 0;
++
++ dev = init_etherdev(init_dev, 0);
++ if (!dev)
++ return NULL;
++
++ printk(KERN_INFO "%s: %s type %x at 0x%lx, ",
++ dev->name, pci_id_tbl[chip_idx].name, (int)readl(ioaddr + ChipRev),
++ ioaddr);
++
++ for (i = 0; i < 6; i++)
++ dev->dev_addr[i] = read_eeprom(dev, 4 + i);
++ /* Alternate: readb(ioaddr + StationAddr + i); */
++ for (i = 0; i < 5; i++)
++ printk("%2.2x:", dev->dev_addr[i]);
++ printk("%2.2x, IRQ %d.\n", dev->dev_addr[i], irq);
++
++ i = readb(ioaddr + PCIClkMeas);
++ printk(KERN_INFO "%s: %d-bit %d Mhz PCI bus (%d), Virtual Jumpers "
++ "%2.2x, LPA %4.4x.\n",
++ dev->name, readw(ioaddr + MiscStatus) & 1 ? 64 : 32,
++ i ? 2000/(i&0x7f) : 0, i&0x7f, (int)readb(ioaddr + VirtualJumpers),
++ (int)readw(ioaddr + ANLinkPartnerAbility));
++
++ /* Hmmm, do we really need to reset the chip???. */
++ writeb(1, ioaddr + ChipReset);
++
++ /* If the bus size is misidentified, do the following. */
++ if (force32)
++ writeb(force32, ioaddr + VirtualJumpers);
++
++ /* Make certain elements e.g. descriptor lists are aligned. */
++ priv_mem = kmalloc(sizeof(*np) + PRIV_ALIGN, GFP_KERNEL);
++ /* Check for the very unlikely case of no memory. */
++ if (priv_mem == NULL)
++ return NULL;
++
++ dev->base_addr = ioaddr;
++ dev->irq = irq;
++
++ dev->priv = np = (void *)(((long)priv_mem + PRIV_ALIGN) & ~PRIV_ALIGN);
++ memset(np, 0, sizeof(*np));
++ np->priv_addr = priv_mem;
++
++ np->next_module = root_hamachi_dev;
++ root_hamachi_dev = dev;
++
++ np->pci_dev = pdev;
++ np->chip_id = chip_idx;
++ np->drv_flags = pci_id_tbl[chip_idx].drv_flags;
++ np->msg_level = (1 << debug) - 1;
++ np->rx_copybreak = rx_copybreak;
++ np->max_interrupt_work = max_interrupt_work;
++ np->multicast_filter_limit =
++ multicast_filter_limit < 64 ? multicast_filter_limit : 64;
++
++ if (dev->mem_start)
++ option = dev->mem_start;
++
++ /* The lower four bits are the media type. */
++ if (option > 0) {
++ if (option & 0x2220)
++ np->full_duplex = 1;
++ np->default_port = option & 15;
++ if (np->default_port & 0x3330)
++ np->medialock = 1;
++ }
++ if (card_idx < MAX_UNITS && full_duplex[card_idx] > 0)
++ np->full_duplex = 1;
++
++ if (np->full_duplex) {
++ if (np->msg_level & NETIF_MSG_PROBE)
++ printk(KERN_INFO "%s: Set to forced full duplex, autonegotiation"
++ " disabled.\n", dev->name);
++ np->duplex_lock = 1;
++ }
++
++ /* The Hamachi-specific entries in the device structure. */
++ dev->open = &hamachi_open;
++ dev->hard_start_xmit = &hamachi_start_xmit;
++ dev->stop = &hamachi_close;
++ dev->get_stats = &hamachi_get_stats;
++ dev->set_multicast_list = &set_rx_mode;
++ dev->do_ioctl = &mii_ioctl;
++#ifdef HAVE_CHANGE_MTU
++ dev->change_mtu = change_mtu;
++#endif
++
++ if (np->drv_flags & CanHaveMII) {
++ int phy, phy_idx = 0;
++ for (phy = 0; phy < 32 && phy_idx < 4; phy++) {
++ int mii_status = mdio_read(ioaddr, phy, 1);
++ if (mii_status != 0xffff && mii_status != 0x0000) {
++ np->phys[phy_idx++] = phy;
++ np->advertising = mdio_read(ioaddr, phy, 4);
++ printk(KERN_INFO "%s: MII PHY found at address %d, status "
++ "0x%4.4x advertising %4.4x.\n",
++ dev->name, phy, mii_status, np->advertising);
++ }
++ }
++ np->mii_cnt = phy_idx;
++ }
++#ifdef notyet
++ /* Disable PCI Parity Error (0x02) or PCI 64 Bit (0x01) for miswired
++ motherboards. */
++ if (readb(ioaddr + VirtualJumpers) != 0x30)
++ writeb(0x33, ioaddr + VirtualJumpers)
++#endif
++ /* Configure gigabit autonegotiation. */
++ writew(0x0400, ioaddr + ANXchngCtrl); /* Enable legacy links. */
++ writew(0x08e0, ioaddr + ANAdvertise); /* Set our advertise word. */
++ writew(0x1000, ioaddr + ANCtrl); /* Enable negotiation */
++
++ return dev;
++}
++
++static int read_eeprom(struct net_device *dev, int location)
++{
++ struct hamachi_private *np = (void *)dev->priv;
++ long ioaddr = dev->base_addr;
++ int bogus_cnt = 1000;
++
++ writew(location, ioaddr + EEAddr);
++ writeb(0x02, ioaddr + EECmdStatus);
++ while ((readb(ioaddr + EECmdStatus) & 0x40) && --bogus_cnt > 0)
++ ;
++ if (np->msg_level & NETIF_MSG_MISC)
++ printk(KERN_DEBUG " EEPROM status is %2.2x after %d ticks.\n",
++ (int)readb(ioaddr + EECmdStatus), 1000- bogus_cnt);
++ return readb(ioaddr + EEData);
++}
++
++/* MII Managemen Data I/O accesses.
++ These routines assume the MDIO controller is idle, and do not exit until
++ the command is finished. */
++
++static int mdio_read(long ioaddr, int phy_id, int location)
++{
++ int i;
++
++ writew((phy_id<<8) + location, ioaddr + MII_Addr);
++ writew(1, ioaddr + MII_Cmd);
++ for (i = 10000; i >= 0; i--)
++ if ((readw(ioaddr + MII_Status) & 1) == 0)
++ break;
++ return readw(ioaddr + MII_Rd_Data);
++}
++
++static void mdio_write(long ioaddr, int phy_id, int location, int value)
++{
++ int i;
++
++ writew((phy_id<<8) + location, ioaddr + MII_Addr);
++ writew(value, ioaddr + MII_Wr_Data);
++
++ /* Wait for the command to finish. */
++ for (i = 10000; i >= 0; i--)
++ if ((readw(ioaddr + MII_Status) & 1) == 0)
++ break;
++ return;
++}
++
++
++static int hamachi_open(struct net_device *dev)
++{
++ struct hamachi_private *hmp = (struct hamachi_private *)dev->priv;
++ long ioaddr = dev->base_addr;
++ int i;
++
++ /* Do we need to reset the chip??? */
++
++ MOD_INC_USE_COUNT;
++
++ if (request_irq(dev->irq, &hamachi_interrupt, SA_SHIRQ, dev->name, dev)) {
++ MOD_DEC_USE_COUNT;
++ return -EAGAIN;
++ }
++
++ if (hmp->msg_level & NETIF_MSG_IFUP)
++ printk(KERN_DEBUG "%s: hamachi_open() irq %d.\n",
++ dev->name, dev->irq);
++
++ hamachi_init_ring(dev);
++
++#if ADDRLEN == 64
++ writel(virt_to_bus(hmp->rx_ring), ioaddr + RxPtr);
++ writel(virt_to_bus(hmp->rx_ring) >> 32, ioaddr + RxPtr + 4);
++ writel(virt_to_bus(hmp->tx_ring), ioaddr + TxPtr);
++ writel(virt_to_bus(hmp->tx_ring) >> 32, ioaddr + TxPtr + 4);
++#else
++ writel(virt_to_bus(hmp->rx_ring), ioaddr + RxPtr);
++ writel(virt_to_bus(hmp->tx_ring), ioaddr + TxPtr);
++#endif
++
++ for (i = 0; i < 6; i++)
++ writeb(dev->dev_addr[i], ioaddr + StationAddr + i);
++
++ /* Initialize other registers: with so many, this will eventually be
++ converted to an offset/value list. */
++ /* Configure the FIFO for 512K external, 16K used for Tx. */
++ writew(0x0028, ioaddr + FIFOcfg);
++
++ if (dev->if_port == 0)
++ dev->if_port = hmp->default_port;
++ hmp->in_interrupt = 0;
++
++ /* Setting the Rx mode will start the Rx process. */
++ /* We are always in full-duplex mode with gigabit! */
++ hmp->full_duplex = 1;
++ writew(0x0001, ioaddr + RxChecksum); /* Enable Rx IP partial checksum. */
++ writew(0x8000, ioaddr + MACCnfg); /* Soft reset the MAC */
++ writew(0x215F, ioaddr + MACCnfg);
++ writew(0x000C, ioaddr + FrameGap0); /* 0060/4060 for non-MII 10baseT */
++ writew(0x1018, ioaddr + FrameGap1);
++ writew(0x2780, ioaddr + MACCnfg2); /* Upper 16 bits control LEDs. */
++ /* Enable automatic generation of flow control frames, period 0xffff. */
++ writel(0x0030FFFF, ioaddr + FlowCtrl);
++ writew(dev->mtu+19, ioaddr + MaxFrameSize); /* hmp->rx_buf_sz ??? */
++
++ /* Enable legacy links. */
++ writew(0x0400, ioaddr + ANXchngCtrl); /* Enable legacy links. */
++ /* Initial Link LED to blinking red. */
++ writeb(0x03, ioaddr + LEDCtrl);
++
++ /* Configure interrupt mitigation. This has a great effect on
++ performance, so system tuning should start here! */
++ writel(0x00080000, ioaddr + TxIntrCtrl);
++ writel(0x00000020, ioaddr + RxIntrCtrl);
++
++ hmp->rx_mode = 0; /* Force Rx mode write. */
++ set_rx_mode(dev);
++ netif_start_tx_queue(dev);
++
++ /* Enable interrupts by setting the interrupt mask. */
++ writel(0x80878787, ioaddr + InterruptEnable);
++ writew(0x0000, ioaddr + EventStatus); /* Clear non-interrupting events */
++
++ /* Configure and start the DMA channels. */
++ /* Burst sizes are in the low three bits: size = 4<<(val&7) */
++#if ADDRLEN == 64
++ writew(0x0055, ioaddr + RxDMACtrl); /* 128 dword bursts */
++ writew(0x0055, ioaddr + TxDMACtrl);
++#else
++ writew(0x0015, ioaddr + RxDMACtrl);
++ writew(0x0015, ioaddr + TxDMACtrl);
++#endif
++ writew(1, dev->base_addr + RxCmd);
++
++ if (hmp->msg_level & NETIF_MSG_IFUP)
++ printk(KERN_DEBUG "%s: Done hamachi_open(), status: Rx %x Tx %x.\n",
++ dev->name, (int)readw(ioaddr + RxStatus),
++ (int)readw(ioaddr + TxStatus));
++
++ /* Set the timer to check for link beat. */
++ init_timer(&hmp->timer);
++ hmp->timer.expires = jiffies + 3*HZ;
++ hmp->timer.data = (unsigned long)dev;
++ hmp->timer.function = &hamachi_timer; /* timer handler */
++ add_timer(&hmp->timer);
++
++ return 0;
++}
++
++static void hamachi_timer(unsigned long data)
++{
++ struct net_device *dev = (struct net_device *)data;
++ struct hamachi_private *hmp = (struct hamachi_private *)dev->priv;
++ long ioaddr = dev->base_addr;
++ int next_tick = 10*HZ;
++
++ if (hmp->msg_level & NETIF_MSG_TIMER) {
++ printk(KERN_INFO "%s: Hamachi Autonegotiation status %4.4x, LPA "
++ "%4.4x.\n", dev->name, (int)readw(ioaddr + ANStatus),
++ (int)readw(ioaddr + ANLinkPartnerAbility));
++ printk(KERN_INFO "%s: Autonegotiation regs %4.4x %4.4x %4.4x "
++ "%4.4x %4.4x %4.4x.\n", dev->name,
++ (int)readw(ioaddr + 0x0e0),
++ (int)readw(ioaddr + 0x0e2),
++ (int)readw(ioaddr + 0x0e4),
++ (int)readw(ioaddr + 0x0e6),
++ (int)readw(ioaddr + 0x0e8),
++ (int)readw(ioaddr + 0x0eA));
++ }
++ /* This has a small false-trigger window. */
++ if (netif_queue_paused(dev) &&
++ (jiffies - dev->trans_start) > TX_TIMEOUT
++ && hmp->cur_tx - hmp->dirty_tx > 1) {
++ hamachi_tx_timeout(dev);
++ }
++ /* We could do something here... nah. */
++ hmp->timer.expires = jiffies + next_tick;
++ add_timer(&hmp->timer);
++}
++
++static void hamachi_tx_timeout(struct net_device *dev)
++{
++ struct hamachi_private *hmp = (struct hamachi_private *)dev->priv;
++ long ioaddr = dev->base_addr;
++
++ printk(KERN_WARNING "%s: Hamachi transmit timed out, status %8.8x,"
++ " resetting...\n", dev->name, (int)readw(ioaddr + TxStatus));
++
++ if (hmp->msg_level & NETIF_MSG_TX_ERR) {
++ int i;
++ printk(KERN_DEBUG " Rx ring %p: ", hmp->rx_ring);
++ for (i = 0; i < RX_RING_SIZE; i++)
++ printk(" %8.8x", (unsigned int)hmp->rx_ring[i].status_n_length);
++ printk("\n"KERN_DEBUG" Tx ring %p: ", hmp->tx_ring);
++ for (i = 0; i < TX_RING_SIZE; i++)
++ printk(" %4.4x", hmp->tx_ring[i].status_n_length);
++ printk("\n");
++ }
++
++ /* Perhaps we should reinitialize the hardware here. */
++ dev->if_port = 0;
++ /* Stop and restart the chip's Tx processes. */
++
++ /* Trigger an immediate transmit demand. */
++ writew(2, dev->base_addr + TxCmd);
++ writew(1, dev->base_addr + TxCmd);
++ writew(1, dev->base_addr + RxCmd);
++
++ dev->trans_start = jiffies;
++ hmp->stats.tx_errors++;
++ return;
++}
++
++
++/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
++static void hamachi_init_ring(struct net_device *dev)
++{
++ struct hamachi_private *hmp = (struct hamachi_private *)dev->priv;
++ int i;
++
++ hmp->tx_full = 0;
++ hmp->cur_rx = hmp->cur_tx = 0;
++ hmp->dirty_rx = hmp->dirty_tx = 0;
++
++ /* Size of each temporary Rx buffer. Add 8 if you do Rx checksumming! */
++ hmp->rx_buf_sz = dev->mtu + 18 + 8;
++ /* Match other driver's allocation size when possible. */
++ if (hmp->rx_buf_sz < PKT_BUF_SZ)
++ hmp->rx_buf_sz = PKT_BUF_SZ;
++ hmp->rx_head_desc = &hmp->rx_ring[0];
++
++ /* Initialize all Rx descriptors. */
++ for (i = 0; i < RX_RING_SIZE; i++) {
++ hmp->rx_ring[i].status_n_length = 0;
++ hmp->rx_skbuff[i] = 0;
++ }
++ /* Fill in the Rx buffers. Handle allocation failure gracefully. */
++ for (i = 0; i < RX_RING_SIZE; i++) {
++ struct sk_buff *skb = dev_alloc_skb(hmp->rx_buf_sz);
++ hmp->rx_skbuff[i] = skb;
++ if (skb == NULL)
++ break;
++ skb->dev = dev; /* Mark as being used by this device. */
++ skb_reserve(skb, 2); /* 16 byte align the IP header. */
++ hmp->rx_ring[i].addr = virt_to_desc(skb->tail);
++ hmp->rx_ring[i].status_n_length =
++ cpu_to_le32(DescOwn | DescEndPacket | DescIntr | hmp->rx_buf_sz);
++ }
++ hmp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
++ /* Mark the last entry as wrapping the ring. */
++ hmp->rx_ring[i-1].status_n_length |= cpu_to_le32(DescEndRing);
++
++ for (i = 0; i < TX_RING_SIZE; i++) {
++ hmp->tx_skbuff[i] = 0;
++ hmp->tx_ring[i].status_n_length = 0;
++ }
++ return;
++}
++
++static int hamachi_start_xmit(struct sk_buff *skb, struct net_device *dev)
++{
++ struct hamachi_private *hmp = (struct hamachi_private *)dev->priv;
++ unsigned entry;
++
++ /* Block a timer-based transmit from overlapping. This could better be
++ done with atomic_swap(1, dev->tbusy), but set_bit() works as well. */
++ if (netif_pause_tx_queue(dev) != 0) {
++ /* This watchdog code is redundant with the media monitor timer. */
++ if (jiffies - dev->trans_start > TX_TIMEOUT)
++ hamachi_tx_timeout(dev);
++ return 1;
++ }
++
++ /* Note: Ordering is important here, set the field with the
++ "ownership" bit last, and only then increment cur_tx. */
++
++ /* Calculate the next Tx descriptor entry. */
++ entry = hmp->cur_tx % TX_RING_SIZE;
++
++ hmp->tx_skbuff[entry] = skb;
++
++ hmp->tx_ring[entry].addr = virt_to_desc(skb->data);
++ if (entry >= TX_RING_SIZE-1) /* Wrap ring */
++ hmp->tx_ring[entry].status_n_length =
++ cpu_to_le32(DescOwn|DescEndPacket|DescEndRing|DescIntr | skb->len);
++ else
++ hmp->tx_ring[entry].status_n_length =
++ cpu_to_le32(DescOwn|DescEndPacket | skb->len);
++ hmp->cur_tx++;
++
++ /* Architecture-specific: explicitly flush cache lines here. */
++
++ /* Wake the potentially-idle transmit channel. */
++ writew(1, dev->base_addr + TxCmd);
++
++ if (hmp->cur_tx - hmp->dirty_tx >= TX_QUEUE_LEN - 1) {
++ hmp->tx_full = 1;
++ if (hmp->cur_tx - hmp->dirty_tx < TX_QUEUE_LEN - 1) {
++ netif_unpause_tx_queue(dev);
++ hmp->tx_full = 0;
++ } else
++ netif_stop_tx_queue(dev);
++ } else
++ netif_unpause_tx_queue(dev); /* Typical path */
++ dev->trans_start = jiffies;
++
++ if (hmp->msg_level & NETIF_MSG_TX_QUEUED) {
++ printk(KERN_DEBUG "%s: Hamachi transmit frame #%d length %d queued "
++ "in slot %d.\n", dev->name, hmp->cur_tx, (int)skb->len, entry);
++ }
++ return 0;
++}
++
++/* The interrupt handler does all of the Rx thread work and cleans up
++ after the Tx thread. */
++static void hamachi_interrupt(int irq, void *dev_instance, struct pt_regs *rgs)
++{
++ struct net_device *dev = (struct net_device *)dev_instance;
++ struct hamachi_private *hmp;
++ long ioaddr;
++ int boguscnt = max_interrupt_work;
++
++#ifndef final_version /* Can never occur. */
++ if (dev == NULL) {
++ printk (KERN_ERR "hamachi_interrupt(): irq %d for unknown device.\n", irq);
++ return;
++ }
++#endif
++
++ ioaddr = dev->base_addr;
++ hmp = (struct hamachi_private *)dev->priv;
++ if (test_and_set_bit(0, (void*)&hmp->in_interrupt)) {
++ printk(KERN_ERR "%s: Re-entering the interrupt handler.\n", dev->name);
++ hmp->in_interrupt = 0; /* Avoid future hang on bug */
++ return;
++ }
++
++ do {
++ u32 intr_status = readl(ioaddr + InterruptClear);
++
++ if (hmp->msg_level & NETIF_MSG_INTR)
++ printk(KERN_DEBUG "%s: Hamachi interrupt, status %4.4x.\n",
++ dev->name, intr_status);
++
++ if (intr_status == 0)
++ break;
++
++ if (intr_status & IntrRxDone)
++ hamachi_rx(dev);
++
++ for (; hmp->cur_tx - hmp->dirty_tx > 0; hmp->dirty_tx++) {
++ int entry = hmp->dirty_tx % TX_RING_SIZE;
++ if (!(hmp->tx_ring[entry].status_n_length & cpu_to_le32(DescOwn)))
++ break;
++ if (hmp->msg_level & NETIF_MSG_TX_DONE)
++ printk(KERN_DEBUG "%s: Transmit done, Tx status %8.8x.\n",
++ dev->name, hmp->tx_ring[entry].status_n_length);
++ /* Free the original skb. */
++ dev_free_skb_irq(hmp->tx_skbuff[entry]);
++ hmp->tx_skbuff[entry] = 0;
++ hmp->stats.tx_packets++;
++ }
++ if (hmp->tx_full
++ && hmp->cur_tx - hmp->dirty_tx < TX_QUEUE_LEN - 4) {
++ /* The ring is no longer full, clear tbusy. */
++ hmp->tx_full = 0;
++ netif_resume_tx_queue(dev);
++ }
++
++ /* Abnormal error summary/uncommon events handlers. */
++ if (intr_status &
++ (IntrTxPCIFault | IntrTxPCIErr | IntrRxPCIFault | IntrRxPCIErr |
++ LinkChange | NegotiationChange | StatsMax))
++ hamachi_error(dev, intr_status);
++
++ if (--boguscnt < 0) {
++ printk(KERN_WARNING "%s: Too much work at interrupt, "
++ "status=0x%4.4x.\n",
++ dev->name, intr_status);
++ break;
++ }
++ } while (1);
++
++ if (hmp->msg_level & NETIF_MSG_INTR)
++ printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
++ dev->name, (int)readl(ioaddr + IntrStatus));
++ clear_bit(0, (void*)&hmp->in_interrupt);
++ return;
++}
++
++/* This routine is logically part of the interrupt handler, but separated
++ for clarity and better register allocation. */
++static int hamachi_rx(struct net_device *dev)
++{
++ struct hamachi_private *hmp = (struct hamachi_private *)dev->priv;
++ int entry = hmp->cur_rx % RX_RING_SIZE;
++ int boguscnt = hmp->dirty_rx + RX_RING_SIZE - hmp->cur_rx;
++
++ if (hmp->msg_level & NETIF_MSG_RX_STATUS) {
++ printk(KERN_DEBUG " In hamachi_rx(), entry %d status %4.4x.\n",
++ entry, hmp->rx_ring[entry].status_n_length);
++ }
++
++ /* If EOP is set on the next entry, it's a new packet. Send it up. */
++ while ( ! (hmp->rx_head_desc->status_n_length & cpu_to_le32(DescOwn))) {
++ struct hamachi_desc *desc = hmp->rx_head_desc;
++ u32 desc_status = le32_to_cpu(desc->status_n_length);
++ u16 data_size = desc_status; /* Implicit truncate */
++ u8 *buf_addr = hmp->rx_skbuff[entry]->tail;
++ s32 frame_status =
++ le32_to_cpu(get_unaligned((s32*)&(buf_addr[data_size - 12])));
++
++ if (hmp->msg_level & NETIF_MSG_RX_STATUS)
++ printk(KERN_DEBUG " hamachi_rx() status was %8.8x.\n",
++ frame_status);
++ if (--boguscnt < 0)
++ break;
++ if ( ! (desc_status & DescEndPacket)) {
++ printk(KERN_WARNING "%s: Oversized Ethernet frame spanned "
++ "multiple buffers, entry %#x length %d status %4.4x!\n",
++ dev->name, hmp->cur_rx, data_size, desc_status);
++ printk(KERN_WARNING "%s: Oversized Ethernet frame %p vs %p.\n",
++ dev->name, desc, &hmp->rx_ring[hmp->cur_rx % RX_RING_SIZE]);
++ printk(KERN_WARNING "%s: Oversized Ethernet frame -- next status"
++ " %x last status %x.\n", dev->name,
++ hmp->rx_ring[(hmp->cur_rx+1) % RX_RING_SIZE].status_n_length,
++ hmp->rx_ring[(hmp->cur_rx-1) % RX_RING_SIZE].status_n_length);
++ hmp->stats.rx_length_errors++;
++ } /* else Omit for prototype errata??? */
++ if (frame_status & 0x00380000) {
++ /* There was an error. */
++ if (hmp->msg_level & NETIF_MSG_RX_ERR)
++ printk(KERN_DEBUG " hamachi_rx() Rx error was %8.8x.\n",
++ frame_status);
++ hmp->stats.rx_errors++;
++ if (frame_status & 0x00600000) hmp->stats.rx_length_errors++;
++ if (frame_status & 0x00080000) hmp->stats.rx_frame_errors++;
++ if (frame_status & 0x00100000) hmp->stats.rx_crc_errors++;
++ if (frame_status < 0) hmp->stats.rx_dropped++;
++ } else {
++ struct sk_buff *skb;
++ u16 pkt_len = (frame_status & 0x07ff) - 4; /* Omit CRC */
++
++#if ! defined(final_version) && 0
++ if (hmp->msg_level & NETIF_MSG_RX_STATUS)
++ printk(KERN_DEBUG " hamachi_rx() normal Rx pkt length %d"
++ " of %d, bogus_cnt %d.\n",
++ pkt_len, data_size, boguscnt);
++ if (hmp->msg_level & NETIF_MSG_PKTDATA)
++ printk(KERN_DEBUG"%s: rx status %8.8x %8.8x %8.8x %8.8x %8.8x.\n",
++ dev->name,
++ *(s32*)&(buf_addr[data_size - 20]),
++ *(s32*)&(buf_addr[data_size - 16]),
++ *(s32*)&(buf_addr[data_size - 12]),
++ *(s32*)&(buf_addr[data_size - 8]),
++ *(s32*)&(buf_addr[data_size - 4]));
++#endif
++ /* Check if the packet is long enough to accept without copying
++ to a minimally-sized skbuff. */
++ if (pkt_len < rx_copybreak
++ && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
++ skb->dev = dev;
++ skb_reserve(skb, 2); /* 16 byte align the IP header */
++ eth_copy_and_sum(skb, hmp->rx_skbuff[entry]->tail, pkt_len, 0);
++ skb_put(skb, pkt_len);
++ } else {
++ char *temp = skb_put(skb = hmp->rx_skbuff[entry], pkt_len);
++ hmp->rx_skbuff[entry] = NULL;
++#if ! defined(final_version)
++ if (bus_to_virt(desc->addr) != temp)
++ printk(KERN_ERR "%s: Internal fault: The skbuff addresses "
++ "do not match in hamachi_rx: %p vs. %p / %p.\n",
++ dev->name, bus_to_virt(desc->addr),
++ skb->head, temp);
++#endif
++ }
++ skb->protocol = eth_type_trans(skb, dev);
++ /* Note: checksum -> skb->ip_summed = CHECKSUM_UNNECESSARY; */
++ netif_rx(skb);
++ dev->last_rx = jiffies;
++ hmp->stats.rx_packets++;
++ }
++ entry = (++hmp->cur_rx) % RX_RING_SIZE;
++ hmp->rx_head_desc = &hmp->rx_ring[entry];
++ }
++
++ /* Refill the Rx ring buffers. */
++ for (; hmp->cur_rx - hmp->dirty_rx > 0; hmp->dirty_rx++) {
++ struct sk_buff *skb;
++ entry = hmp->dirty_rx % RX_RING_SIZE;
++ if (hmp->rx_skbuff[entry] == NULL) {
++ skb = dev_alloc_skb(hmp->rx_buf_sz);
++ hmp->rx_skbuff[entry] = skb;
++ if (skb == NULL)
++ break; /* Better luck next round. */
++ skb->dev = dev; /* Mark as being used by this device. */
++ skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
++ hmp->rx_ring[entry].addr = virt_to_desc(skb->tail);
++ }
++ if (entry >= RX_RING_SIZE-1) /* Wrap ring */
++ hmp->rx_ring[entry].status_n_length =
++ cpu_to_le32(DescOwn|DescEndPacket|DescEndRing|DescIntr | hmp->rx_buf_sz);
++ else
++ hmp->rx_ring[entry].status_n_length =
++ cpu_to_le32(DescOwn|DescEndPacket|DescIntr | hmp->rx_buf_sz);
++ }
++
++ /* Restart Rx engine if stopped. */
++ writew(1, dev->base_addr + RxCmd);
++ return 0;
++}
++
++/* This is more properly named "uncommon interrupt events", as it covers more
++ than just errors. */
++static void hamachi_error(struct net_device *dev, int intr_status)
++{
++ long ioaddr = dev->base_addr;
++ struct hamachi_private *hmp = (struct hamachi_private *)dev->priv;
++
++ if (intr_status & (LinkChange|NegotiationChange)) {
++ if (hmp->msg_level & NETIF_MSG_LINK)
++ printk(KERN_INFO "%s: Link changed: AutoNegotiation Ctrl"
++ " %4.4x, Status %4.4x %4.4x Intr status %4.4x.\n",
++ dev->name, (int)readw(ioaddr + 0x0E0),
++ (int)readw(ioaddr + 0x0E2),
++ (int)readw(ioaddr + ANLinkPartnerAbility),
++ (int)readl(ioaddr + IntrStatus));
++ if (readw(ioaddr + ANStatus) & 0x20) {
++ writeb(0x01, ioaddr + LEDCtrl);
++ netif_link_up(dev);
++ } else {
++ writeb(0x03, ioaddr + LEDCtrl);
++ netif_link_down(dev);
++ }
++ }
++ if (intr_status & StatsMax) {
++ hamachi_get_stats(dev);
++ /* Read the overflow bits to clear. */
++ readl(ioaddr + 0x36C);
++ readl(ioaddr + 0x3F0);
++ }
++ if ((intr_status & ~(LinkChange|StatsMax|NegotiationChange))
++ && (hmp->msg_level & NETIF_MSG_DRV))
++ printk(KERN_ERR "%s: Something Wicked happened! %4.4x.\n",
++ dev->name, intr_status);
++ /* Hmmmmm, it's not clear how to recover from PCI faults. */
++ if (intr_status & (IntrTxPCIErr | IntrTxPCIFault))
++ hmp->stats.tx_fifo_errors++;
++ if (intr_status & (IntrRxPCIErr | IntrRxPCIFault))
++ hmp->stats.rx_fifo_errors++;
++}
++
++static int hamachi_close(struct net_device *dev)
++{
++ long ioaddr = dev->base_addr;
++ struct hamachi_private *hmp = (struct hamachi_private *)dev->priv;
++ int i;
++
++ netif_stop_tx_queue(dev);
++
++ if (hmp->msg_level & NETIF_MSG_IFDOWN) {
++ printk(KERN_DEBUG "%s: Shutting down ethercard, status was Tx %4.4x "
++ "Rx %4.4x Int %2.2x.\n",
++ dev->name, (int)readw(ioaddr + TxStatus),
++ (int)readw(ioaddr + RxStatus), (int)readl(ioaddr + IntrStatus));
++ printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d, Rx %d / %d.\n",
++ dev->name, hmp->cur_tx, hmp->dirty_tx, hmp->cur_rx,
++ hmp->dirty_rx);
++ }
++
++ /* Disable interrupts by clearing the interrupt mask. */
++ writel(0x0000, ioaddr + InterruptEnable);
++
++ /* Stop the chip's Tx and Rx processes. */
++ writel(2, ioaddr + RxCmd);
++ writew(2, ioaddr + TxCmd);
++
++ del_timer(&hmp->timer);
++
++#ifdef __i386__
++ if (hmp->msg_level & NETIF_MSG_IFDOWN) {
++ printk("\n"KERN_DEBUG" Tx ring at %8.8x:\n",
++ (int)virt_to_bus(hmp->tx_ring));
++ for (i = 0; i < TX_RING_SIZE; i++)
++ printk(" %c #%d desc. %8.8x %8.8x.\n",
++ readl(ioaddr + TxCurPtr) == (long)&hmp->tx_ring[i] ? '>' : ' ',
++ i, hmp->tx_ring[i].status_n_length, hmp->tx_ring[i].addr);
++ printk("\n"KERN_DEBUG " Rx ring %8.8x:\n",
++ (int)virt_to_bus(hmp->rx_ring));
++ for (i = 0; i < RX_RING_SIZE; i++) {
++ printk(KERN_DEBUG " %c #%d desc. %8.8x %8.8x\n",
++ readl(ioaddr + RxCurPtr) == (long)&hmp->rx_ring[i] ? '>' : ' ',
++ i, hmp->rx_ring[i].status_n_length, hmp->rx_ring[i].addr);
++ if (*(u8*)hmp->rx_ring[i].addr != 0x69) {
++ int j;
++ for (j = 0; j < 0x50; j++)
++ printk(" %4.4x", ((u16*)hmp->rx_ring[i].addr)[j]);
++ printk("\n");
++ }
++ }
++ }
++#endif /* __i386__ debugging only */
++
++ free_irq(dev->irq, dev);
++
++ /* Free all the skbuffs in the Rx queue. */
++ for (i = 0; i < RX_RING_SIZE; i++) {
++ hmp->rx_ring[i].status_n_length = 0;
++ hmp->rx_ring[i].addr = 0xBADF00D0; /* An invalid address. */
++ if (hmp->rx_skbuff[i]) {
++#if LINUX_VERSION_CODE < 0x20100
++ hmp->rx_skbuff[i]->free = 1;
++#endif
++ dev_free_skb(hmp->rx_skbuff[i]);
++ }
++ hmp->rx_skbuff[i] = 0;
++ }
++ for (i = 0; i < TX_RING_SIZE; i++) {
++ if (hmp->tx_skbuff[i])
++ dev_free_skb(hmp->tx_skbuff[i]);
++ hmp->tx_skbuff[i] = 0;
++ }
++
++ writeb(0x00, ioaddr + LEDCtrl);
++
++ MOD_DEC_USE_COUNT;
++
++ return 0;
++}
++
++static struct net_device_stats *hamachi_get_stats(struct net_device *dev)
++{
++ long ioaddr = dev->base_addr;
++ struct hamachi_private *hmp = (struct hamachi_private *)dev->priv;
++
++ /* We should lock this segment of code for SMP eventually, although
++ the vulnerability window is very small and statistics are
++ non-critical. */
++#if LINUX_VERSION_CODE >= 0x20119
++ hmp->stats.rx_bytes += readl(ioaddr + 0x330); /* Total Uni+Brd+Multi */
++ hmp->stats.tx_bytes += readl(ioaddr + 0x3B0); /* Total Uni+Brd+Multi */
++#endif
++ hmp->stats.multicast += readl(ioaddr + 0x320); /* Multicast Rx */
++
++ hmp->stats.rx_length_errors += readl(ioaddr + 0x368); /* Over+Undersized */
++ hmp->stats.rx_over_errors += readl(ioaddr + 0x35C); /* Jabber */
++ hmp->stats.rx_crc_errors += readl(ioaddr + 0x360);
++ hmp->stats.rx_frame_errors += readl(ioaddr + 0x364); /* Symbol Errs */
++ hmp->stats.rx_missed_errors += readl(ioaddr + 0x36C); /* Dropped */
++
++ return &hmp->stats;
++}
++
++static void set_rx_mode(struct net_device *dev)
++{
++ struct hamachi_private *np = (void *)dev->priv;
++ long ioaddr = dev->base_addr;
++ int new_rx_mode;
++
++ if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
++ /* Unconditionally log net taps. */
++ printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n", dev->name);
++ new_rx_mode = 0x000F;
++ } else if (dev->mc_count > np->multicast_filter_limit ||
++ (dev->flags & IFF_ALLMULTI)) {
++ /* Too many to match, or accept all multicasts. */
++ new_rx_mode = 0x000B;
++ } else if (dev->mc_count > 0) { /* Must use the CAM filter. */
++ struct dev_mc_list *mclist;
++ int i;
++ for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
++ i++, mclist = mclist->next) {
++ writel(*(u32*)(mclist->dmi_addr), ioaddr + 0x100 + i*8);
++ writel(0x20000 | (*(u16*)&mclist->dmi_addr[4]),
++ ioaddr + 0x104 + i*8);
++ }
++ /* Clear remaining entries. */
++ for (; i < 64; i++)
++ writel(0, ioaddr + 0x104 + i*8);
++ new_rx_mode = 0x0003;
++ } else { /* Normal, unicast/broadcast-only mode. */
++ new_rx_mode = 0x0001;
++ }
++ if (np->rx_mode != new_rx_mode) {
++ np->rx_mode = new_rx_mode;
++ writew(new_rx_mode, ioaddr + AddrMode);
++ }
++}
++
++static int mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
++{
++ struct hamachi_private *np = (void *)dev->priv;
++ long ioaddr = dev->base_addr;
++ u16 *data = (u16 *)&rq->ifr_data;
++ u32 *data32 = (void *)&rq->ifr_data;
++
++ switch(cmd) {
++ case 0x8947: case 0x89F0:
++ /* SIOCGMIIPHY: Get the address of the PHY in use. */
++ data[0] = np->phys[0] & 0x1f;
++ /* Fall Through */
++ case 0x8948: case 0x89F1:
++ /* SIOCGMIIREG: Read the specified MII register. */
++ data[3] = mdio_read(ioaddr, data[0] & 0x1f, data[1] & 0x1f);
++ return 0;
++ case 0x8949: case 0x89F2:
++ /* SIOCSMIIREG: Write the specified MII register */
++ if (!capable(CAP_NET_ADMIN))
++ return -EPERM;
++ /* We are always full duplex. Skip recording the advertised value. */
++ mdio_write(ioaddr, data[0] & 0x1f, data[1] & 0x1f, data[2]);
++ return 0;
++ case SIOCGPARAMS:
++ data32[0] = np->msg_level;
++ data32[1] = np->multicast_filter_limit;
++ data32[2] = np->max_interrupt_work;
++ data32[3] = np->rx_copybreak;
++ return 0;
++ case SIOCSPARAMS: {
++ /* Set rx,tx intr params, from Eric Kasten. */
++ if (!capable(CAP_NET_ADMIN))
++ return -EPERM;
++ np->msg_level = data32[0];
++ np->max_interrupt_work = data32[2];
++ writel(data32[1], dev->base_addr + TxIntrCtrl);
++ writel(data32[3], dev->base_addr + RxIntrCtrl);
++ printk(KERN_INFO "%s: Set interrupt mitigate paramters tx %08x, "
++ "rx %08x.\n", dev->name,
++ (int) readl(dev->base_addr + TxIntrCtrl),
++ (int) readl(dev->base_addr + RxIntrCtrl));
++ return 0;
++ }
++ default:
++ return -EOPNOTSUPP;
++ }
++}
++
++#ifdef HAVE_CHANGE_MTU
++static int change_mtu(struct net_device *dev, int new_mtu)
++{
++ if ((new_mtu < 68) || (new_mtu > 1536))
++ return -EINVAL;
++ if (netif_running(dev))
++ return -EBUSY;
++ printk(KERN_NOTICE "%s: Changing MTU to %d.\n", dev->name, new_mtu);
++ dev->mtu = new_mtu;
++ return 0;
++}
++#endif
++
++
++#ifdef MODULE
++int init_module(void)
++{
++ if (debug >= NETIF_MSG_DRV) /* Emit version even if no cards detected. */
++ printk(KERN_INFO "%s" KERN_INFO "%s", version1, version2);
++ return pci_drv_register(&hamachi_drv_id, NULL);
++}
++
++void cleanup_module(void)
++{
++ struct net_device *next_dev;
++
++ pci_drv_unregister(&hamachi_drv_id);
++
++ /* No need to check MOD_IN_USE, as sys_delete_module() checks. */
++ while (root_hamachi_dev) {
++ struct hamachi_private *hmp = (void *)(root_hamachi_dev->priv);
++ unregister_netdev(root_hamachi_dev);
++ iounmap((char *)root_hamachi_dev->base_addr);
++ next_dev = hmp->next_module;
++ if (hmp->priv_addr)
++ kfree(hmp->priv_addr);
++ kfree(root_hamachi_dev);
++ root_hamachi_dev = next_dev;
++ }
++}
++
++#endif /* MODULE */
++
++/*
++ * Local variables:
++ * compile-command: "make KERNVER=`uname -r` hamachi.o"
++ * compile-cmd: "gcc -DMODULE -Wall -Wstrict-prototypes -O6 -c hamachi.c"
++ * simple-compile-command: "gcc -DMODULE -O6 -c hamachi.c"
++ * c-indent-level: 4
++ * c-basic-offset: 4
++ * tab-width: 4
++ * End:
++ */
+Index: linux/src/drivers/net/intel-gige.c
+===================================================================
+RCS file: linux/src/drivers/net/intel-gige.c
+diff -N linux/src/drivers/net/intel-gige.c
+--- /dev/null 1 Jan 1970 00:00:00 -0000
++++ linux/src/drivers/net/intel-gige.c 20 Aug 2004 10:32:53 -0000
+@@ -0,0 +1,1451 @@
++/* intel-gige.c: A Linux device driver for Intel Gigabit Ethernet adapters. */
++/*
++ Written 2000-2002 by Donald Becker.
++ Copyright Scyld Computing Corporation.
++
++ This software may be used and distributed according to the terms of
++ the GNU General Public License (GPL), incorporated herein by reference.
++ You should have received a copy of the GPL with this file.
++ Drivers based on or derived from this code fall under the GPL and must
++ retain the authorship, copyright and license notice. This file is not
++ a complete program and may only be used when the entire operating
++ system is licensed under the GPL.
++
++ The author may be reached as becker@scyld.com, or C/O
++ Scyld Computing Corporation
++ 410 Severn Ave., Suite 210
++ Annapolis MD 21403
++
++ Support information and updates available at
++ http://www.scyld.com/network/ethernet.html
++*/
++
++/* These identify the driver base version and may not be removed. */
++static const char version1[] =
++"intel-gige.c:v0.14 11/17/2002 Written by Donald Becker <becker@scyld.com>\n";
++static const char version2[] =
++" http://www.scyld.com/network/ethernet.html\n";
++
++/* Automatically extracted configuration info:
++probe-func: igige_probe
++config-in: tristate 'Intel PCI Gigabit Ethernet support' CONFIG_IGIGE
++
++c-help-name: Intel PCI Gigabit Ethernet support
++c-help-symbol: CONFIG_IGIGE
++c-help: This driver is for the Intel PCI Gigabit Ethernet
++c-help: adapter series.
++c-help: More specific information and updates are available from
++c-help: http://www.scyld.com/network/drivers.html
++*/
++
++/* The user-configurable values.
++ These may be modified when a driver module is loaded.*/
++
++/* Message enable level: 0..31 = no..all messages. See NETIF_MSG docs. */
++static int debug = 2;
++
++/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
++static int max_interrupt_work = 20;
++
++/* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
++ This chip has a 16 element perfect filter, and an unusual 4096 bit
++ hash filter based directly on address bits, not the Ethernet CRC.
++ It is costly to recalculate a large, frequently changing table.
++ However even a large table may useful in some nearly-static environments.
++*/
++static int multicast_filter_limit = 15;
++
++/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
++ Setting to > 1518 effectively disables this feature. */
++static int rx_copybreak = 0;
++
++/* Used to pass the media type, etc.
++ The media type is passed in 'options[]'. The full_duplex[] table only
++ allows the duplex to be forced on, implicitly disabling autonegotiation.
++ Setting the entry to zero still allows a link to autonegotiate to full
++ duplex.
++*/
++#define MAX_UNITS 8 /* More are supported, limit only on options */
++static int options[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
++static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
++
++/* The delay before announcing a Rx or Tx has completed. */
++static int rx_intr_holdoff = 0;
++static int tx_intr_holdoff = 128;
++
++/* Operational parameters that are set at compile time. */
++
++/* Keep the ring sizes a power of two to avoid divides.
++ The compiler will convert <unsigned>'%'<2^N> into a bit mask.
++ Making the Tx ring too large decreases the effectiveness of channel
++ bonding and packet priority.
++ There are no ill effects from too-large receive rings. */
++#if ! defined(final_version) /* Stress the driver. */
++#define TX_RING_SIZE 8
++#define TX_QUEUE_LEN 5
++#define RX_RING_SIZE 4
++#else
++#define TX_RING_SIZE 16
++#define TX_QUEUE_LEN 10 /* Limit ring entries actually used. */
++#define RX_RING_SIZE 32
++#endif
++
++/* Operational parameters that usually are not changed. */
++/* Time in jiffies before concluding the transmitter is hung. */
++#define TX_TIMEOUT (6*HZ)
++
++/* Allocation size of Rx buffers with normal sized Ethernet frames.
++ Do not change this value without good reason. This is not a limit,
++ but a way to keep a consistent allocation size among drivers.
++ */
++#define PKT_BUF_SZ 1536
++
++#ifndef __KERNEL__
++#define __KERNEL__
++#endif
++#if !defined(__OPTIMIZE__)
++#warning You must compile this file with the correct options!
++#warning See the last lines of the source file.
++#error You must compile this driver with "-O".
++#endif
++
++/* Include files, designed to support most kernel versions 2.0.0 and later. */
++#include <linux/config.h>
++#if defined(CONFIG_SMP) && ! defined(__SMP__)
++#define __SMP__
++#endif
++#if defined(MODULE) && defined(CONFIG_MODVERSIONS) && ! defined(MODVERSIONS)
++#define MODVERSIONS
++#endif
++
++#include <linux/version.h>
++#if defined(MODVERSIONS)
++#include <linux/modversions.h>
++#endif
++#include <linux/module.h>
++
++#include <linux/kernel.h>
++#include <linux/string.h>
++#include <linux/timer.h>
++#include <linux/errno.h>
++#include <linux/ioport.h>
++#if LINUX_VERSION_CODE >= 0x20400
++#include <linux/slab.h>
++#else
++#include <linux/malloc.h>
++#endif
++#include <linux/interrupt.h>
++#include <linux/pci.h>
++#include <linux/netdevice.h>
++#include <linux/etherdevice.h>
++#include <linux/skbuff.h>
++#include <asm/processor.h> /* Processor type for cache alignment. */
++#include <asm/bitops.h>
++#include <asm/io.h>
++
++#ifdef INLINE_PCISCAN
++#include "k_compat.h"
++#else
++#include "pci-scan.h"
++#include "kern_compat.h"
++#endif
++
++/* Condensed operations for readability. */
++#define virt_to_le32desc(addr) cpu_to_le32(virt_to_bus(addr))
++#define le32desc_to_virt(addr) bus_to_virt(le32_to_cpu(addr))
++
++#if (LINUX_VERSION_CODE >= 0x20100) && defined(MODULE)
++char kernel_version[] = UTS_RELEASE;
++#endif
++
++MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
++MODULE_DESCRIPTION("Intel Gigabit Ethernet driver");
++MODULE_LICENSE("GPL");
++MODULE_PARM(debug, "i");
++MODULE_PARM(options, "1-" __MODULE_STRING(MAX_UNITS) "i");
++MODULE_PARM(rx_copybreak, "i");
++MODULE_PARM(full_duplex, "1-" __MODULE_STRING(MAX_UNITS) "i");
++MODULE_PARM(multicast_filter_limit, "i");
++MODULE_PARM(max_interrupt_work, "i");
++MODULE_PARM_DESC(debug, "Driver message level (0-31)");
++MODULE_PARM_DESC(options, "Force transceiver type or fixed speed+duplex");
++MODULE_PARM_DESC(max_interrupt_work,
++ "Driver maximum events handled per interrupt");
++MODULE_PARM_DESC(full_duplex,
++ "Non-zero to set forced full duplex (deprecated).");
++MODULE_PARM_DESC(rx_copybreak,
++ "Breakpoint in bytes for copy-only-tiny-frames");
++MODULE_PARM_DESC(multicast_filter_limit,
++ "Multicast addresses before switching to Rx-all-multicast");
++
++/*
++ Theory of Operation
++
++I. Board Compatibility
++
++This driver is for the Intel Gigabit Ethernet adapter.
++
++II. Board-specific settings
++
++III. Driver operation
++
++IIIa. Descriptor Rings
++
++This driver uses two statically allocated fixed-size descriptor arrays
++treated as rings by the hardware. The ring sizes are set at compile time
++by RX/TX_RING_SIZE.
++
++IIIb/c. Transmit/Receive Structure
++
++This driver uses a zero-copy receive and transmit scheme.
++The driver allocates full frame size skbuffs for the Rx ring buffers at
++open() time and passes the skb->data field to the chip as receive data
++buffers. When an incoming frame is less than RX_COPYBREAK bytes long,
++a fresh skbuff is allocated and the frame is copied to the new skbuff.
++When the incoming frame is larger, the skbuff is passed directly up the
++protocol stack. Buffers consumed this way are replaced by newly allocated
++skbuffs in a later phase of receives.
++
++The RX_COPYBREAK value is chosen to trade-off the memory wasted by
++using a full-sized skbuff for small frames vs. the copying costs of larger
++frames. New boards are typically used in generously configured machines
++and the underfilled buffers have negligible impact compared to the benefit of
++a single allocation size, so the default value of zero results in never
++copying packets. When copying is done, the cost is usually mitigated by using
++a combined copy/checksum routine. Copying also preloads the cache, which is
++most useful with small frames.
++
++A subtle aspect of the operation is that the IP header at offset 14 in an
++ethernet frame isn't longword aligned for further processing.
++When unaligned buffers are permitted by the hardware (and always on copies)
++frames are put into the skbuff at an offset of "+2", 16-byte aligning
++the IP header.
++
++IIId. Synchronization
++
++The driver runs as two independent, single-threaded flows of control.
++One is the send-packet routine which is single-threaded by the queue
++layer. The other thread is the interrupt handler, which is single
++threaded by the hardware and interrupt handling software.
++
++The send packet thread has partial control over the Tx ring. At the
++start of a transmit attempt netif_pause_tx_queue(dev) is called. If the
++transmit attempt fills the Tx queue controlled by the chip, the driver
++informs the software queue layer by not calling
++netif_unpause_tx_queue(dev) on exit.
++
++The interrupt handler has exclusive control over the Rx ring and records stats
++from the Tx ring. After reaping the stats, it marks the Tx queue entry as
++empty by incrementing the dirty_tx mark. Iff the 'lp->tx_full' flag is set, it
++clears both the tx_full and tbusy flags.
++
++IIId. SMP semantics
++
++The following are serialized with respect to each other via the "xmit_lock".
++ dev->hard_start_xmit() Transmit a packet
++ dev->tx_timeout() Transmit watchdog for stuck Tx
++ dev->set_multicast_list() Set the receive filter.
++Note: The Tx timeout watchdog code is implemented by the timer routine in
++kernels up to 2.2.*. In 2.4.* and later the timeout code is part of the
++driver interface.
++
++The following fall under the global kernel lock. The module will not be
++unloaded during the call, unless a call with a potential reschedule e.g.
++kmalloc() is called. No other synchronization assertion is made.
++ dev->open()
++ dev->do_ioctl()
++ dev->get_stats()
++Caution: The lock for dev->open() is commonly broken with request_irq() or
++kmalloc(). It is best to avoid any lock-breaking call in do_ioctl() and
++get_stats(), or additional module locking code must be implemented.
++
++The following is self-serialized (no simultaneous entry)
++ A handler registered with request_irq().
++
++IV. Notes
++
++IVb. References
++
++Intel has also released a Linux driver for this product, "e1000".
++
++IVc. Errata
++
++*/
++
++
++
++static void *igige_probe1(struct pci_dev *pdev, void *init_dev,
++ long ioaddr, int irq, int chip_idx, int find_cnt);
++static int netdev_pwr_event(void *dev_instance, int event);
++enum chip_capability_flags { CanHaveMII=1, };
++#define PCI_IOTYPE ()
++
++static struct pci_id_info pci_id_tbl[] = {
++ {"Intel Gigabit Ethernet adapter", {0x10008086, 0xffffffff, },
++ PCI_USES_MASTER | PCI_USES_MEM | PCI_ADDR0, 0x1ffff, 0},
++ {0,}, /* 0 terminated list. */
++};
++
++struct drv_id_info igige_drv_id = {
++ "intel-gige", PCI_HOTSWAP, PCI_CLASS_NETWORK_ETHERNET<<8, pci_id_tbl,
++ igige_probe1, netdev_pwr_event };
++
++/* This hardware only has a PCI memory space BAR, not I/O space. */
++#ifdef USE_IO_OPS
++#error This driver only works with PCI memory space access.
++#endif
++
++/* Offsets to the device registers.
++*/
++enum register_offsets {
++ ChipCtrl=0x00, ChipStatus=0x08, EECtrl=0x10,
++ FlowCtrlAddrLo=0x028, FlowCtrlAddrHi=0x02c, FlowCtrlType=0x030,
++ VLANetherType=0x38,
++
++ RxAddrCAM=0x040,
++ IntrStatus=0x0C0, /* Interrupt, Clear on Read, AKA ICR */
++ IntrEnable=0x0D0, /* Set enable mask when '1' AKA IMS */
++ IntrDisable=0x0D8, /* Clear enable mask when '1' */
++
++ RxControl=0x100,
++ RxQ0IntrDelay=0x108, /* Rx list #0 interrupt delay timer. */
++ RxRingPtr=0x110, /* Rx Desc. list #0 base address, 64bits */
++ RxRingLen=0x118, /* Num bytes of Rx descriptors in ring. */
++ RxDescHead=0x120,
++ RxDescTail=0x128,
++
++ RxQ1IntrDelay=0x130, /* Rx list #1 interrupt delay timer. */
++ RxRing1Ptr=0x138, /* Rx Desc. list #1 base address, 64bits */
++ RxRing1Len=0x140, /* Num bytes of Rx descriptors in ring. */
++ RxDesc1Head=0x148,
++ RxDesc1Tail=0x150,
++
++ FlowCtrlTimer=0x170, FlowCtrlThrshHi=0x160, FlowCtrlThrshLo=0x168,
++ TxConfigReg=0x178,
++ RxConfigReg=0x180,
++ MulticastArray=0x200,
++
++ TxControl=0x400,
++ TxQState=0x408, /* 64 bit queue state */
++ TxIPG=0x410, /* Inter-Packet Gap */
++ TxRingPtr=0x420, TxRingLen=0x428,
++ TxDescHead=0x430, TxDescTail=0x438, TxIntrDelay=0x440,
++
++ RxCRCErrs=0x4000, RxMissed=0x4010,
++
++ TxStatus=0x408,
++ RxStatus=0x180,
++};
++
++/* Bits in the interrupt status/mask registers. */
++enum intr_status_bits {
++ IntrTxDone=0x0001, /* Tx packet queued */
++ IntrLinkChange=0x0004, /* Link Status Change */
++ IntrRxSErr=0x0008, /* Rx Symbol/Sequence error */
++ IntrRxEmpty=0x0010, /* Rx queue 0 Empty */
++ IntrRxQ1Empty=0x0020, /* Rx queue 1 Empty */
++ IntrRxDone=0x0080, /* Rx Done, Queue 0*/
++ IntrRxDoneQ1=0x0100, /* Rx Done, Queue 1*/
++ IntrPCIErr=0x0200, /* PCI Bus Error */
++
++ IntrTxEmpty=0x0002, /* Guess */
++ StatsMax=0x1000, /* Unknown */
++};
++
++/* Bits in the RxFilterMode register. */
++enum rx_mode_bits {
++ RxCtrlReset=0x01, RxCtrlEnable=0x02, RxCtrlAllUnicast=0x08,
++ RxCtrlAllMulticast=0x10,
++ RxCtrlLoopback=0xC0, /* We never configure loopback */
++ RxCtrlAcceptBroadcast=0x8000,
++ /* Aliased names.*/
++ AcceptAllPhys=0x08, AcceptAllMulticast=0x10, AcceptBroadcast=0x8000,
++ AcceptMyPhys=0,
++ AcceptMulticast=0,
++};
++
++/* The Rx and Tx buffer descriptors. */
++struct rx_desc {
++ u32 buf_addr;
++ u32 buf_addr_hi;
++ u32 csum_length; /* Checksum and length */
++ u32 status; /* Errors and status. */
++};
++
++struct tx_desc {
++ u32 buf_addr;
++ u32 buf_addr_hi;
++ u32 cmd_length;
++ u32 status; /* And errors */
++};
++
++/* Bits in tx_desc.cmd_length */
++enum tx_cmd_bits {
++ TxDescEndPacket=0x02000000, TxCmdIntrDelay=0x80000000,
++ TxCmdAddCRC=0x02000000, TxCmdDoTx=0x13000000,
++};
++enum tx_status_bits {
++ TxDescDone=0x0001, TxDescEndPkt=0x0002,
++};
++
++/* Bits in tx_desc.status */
++enum rx_status_bits {
++ RxDescDone=0x0001, RxDescEndPkt=0x0002,
++};
++
++
++#define PRIV_ALIGN 15 /* Required alignment mask */
++/* Use __attribute__((aligned (L1_CACHE_BYTES))) to maintain alignment
++ within the structure. */
++struct netdev_private {
++ struct net_device *next_module; /* Link for devices of this type. */
++ void *priv_addr; /* Unaligned address for kfree */
++ const char *product_name;
++ /* The addresses of receive-in-place skbuffs. */
++ struct sk_buff* rx_skbuff[RX_RING_SIZE];
++ /* The saved address of a sent-in-place packet/buffer, for later free(). */
++ struct sk_buff* tx_skbuff[TX_RING_SIZE];
++ struct net_device_stats stats;
++ struct timer_list timer; /* Media monitoring timer. */
++ /* Keep frequently used values adjacent for cache effect. */
++ int msg_level;
++ int chip_id, drv_flags;
++ struct pci_dev *pci_dev;
++ int max_interrupt_work;
++ int intr_enable;
++ long in_interrupt; /* Word-long for SMP locks. */
++
++ struct rx_desc *rx_ring;
++ struct rx_desc *rx_head_desc;
++ unsigned int cur_rx, dirty_rx; /* Producer/consumer ring indices */
++ unsigned int rx_buf_sz; /* Based on MTU+slack. */
++ int rx_copybreak;
++
++ struct tx_desc *tx_ring;
++ unsigned int cur_tx, dirty_tx;
++ unsigned int tx_full:1; /* The Tx queue is full. */
++
++ unsigned int rx_mode;
++ unsigned int tx_config;
++ int multicast_filter_limit;
++ /* These values track the transceiver/media in use. */
++ unsigned int full_duplex:1; /* Full-duplex operation requested. */
++ unsigned int duplex_lock:1;
++ unsigned int medialock:1; /* Do not sense media. */
++ unsigned int default_port; /* Last dev->if_port value. */
++};
++
++static int eeprom_read(long ioaddr, int location);
++static int netdev_open(struct net_device *dev);
++static int change_mtu(struct net_device *dev, int new_mtu);
++static void check_duplex(struct net_device *dev);
++static void netdev_timer(unsigned long data);
++static void tx_timeout(struct net_device *dev);
++static void init_ring(struct net_device *dev);
++static int start_tx(struct sk_buff *skb, struct net_device *dev);
++static void intr_handler(int irq, void *dev_instance, struct pt_regs *regs);
++static void netdev_error(struct net_device *dev, int intr_status);
++static int netdev_rx(struct net_device *dev);
++static void netdev_error(struct net_device *dev, int intr_status);
++static void set_rx_mode(struct net_device *dev);
++static struct net_device_stats *get_stats(struct net_device *dev);
++static int mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
++static int netdev_close(struct net_device *dev);
++
++
++
++/* A list of our installed devices, for removing the driver module. */
++static struct net_device *root_net_dev = NULL;
++
++#ifndef MODULE
++/* You *must* rename this! */
++int skel_netdev_probe(struct net_device *dev)
++{
++ if (pci_drv_register(&igige_drv_id, dev) < 0)
++ return -ENODEV;
++ printk(KERN_INFO "%s" KERN_INFO "%s", version1, version2);
++ return 0;
++}
++#endif
++
++static void *igige_probe1(struct pci_dev *pdev, void *init_dev,
++ long ioaddr, int irq, int chip_idx, int card_idx)
++{
++ struct net_device *dev;
++ struct netdev_private *np;
++ void *priv_mem;
++ int i, option = card_idx < MAX_UNITS ? options[card_idx] : 0;
++
++ dev = init_etherdev(init_dev, 0);
++ if (!dev)
++ return NULL;
++
++ printk(KERN_INFO "%s: %s at 0x%lx, ",
++ dev->name, pci_id_tbl[chip_idx].name, ioaddr);
++
++ for (i = 0; i < 3; i++)
++ ((u16*)dev->dev_addr)[i] = le16_to_cpu(eeprom_read(ioaddr, i));
++ for (i = 0; i < 5; i++)
++ printk("%2.2x:", dev->dev_addr[i]);
++ printk("%2.2x, IRQ %d.\n", dev->dev_addr[i], irq);
++
++ /* Make certain elements e.g. descriptor lists are aligned. */
++ priv_mem = kmalloc(sizeof(*np) + PRIV_ALIGN, GFP_KERNEL);
++ /* Check for the very unlikely case of no memory. */
++ if (priv_mem == NULL)
++ return NULL;
++
++ /* Do bogusness checks before this point.
++ We do a request_region() only to register /proc/ioports info. */
++ request_region(ioaddr, pci_id_tbl[chip_idx].io_size, dev->name);
++
++ /* Reset the chip to erase previous misconfiguration. */
++ writel(0x04000000, ioaddr + ChipCtrl);
++
++ dev->base_addr = ioaddr;
++ dev->irq = irq;
++
++ dev->priv = np = (void *)(((long)priv_mem + PRIV_ALIGN) & ~PRIV_ALIGN);
++ memset(np, 0, sizeof(*np));
++ np->priv_addr = priv_mem;
++
++ np->next_module = root_net_dev;
++ root_net_dev = dev;
++
++ np->pci_dev = pdev;
++ np->chip_id = chip_idx;
++ np->drv_flags = pci_id_tbl[chip_idx].drv_flags;
++ np->msg_level = (1 << debug) - 1;
++ np->rx_copybreak = rx_copybreak;
++ np->max_interrupt_work = max_interrupt_work;
++ np->multicast_filter_limit = multicast_filter_limit;
++
++ if (dev->mem_start)
++ option = dev->mem_start;
++
++ /* The lower four bits are the media type. */
++ if (option > 0) {
++ if (option & 0x2220)
++ np->full_duplex = 1;
++ np->default_port = option & 0x3330;
++ if (np->default_port)
++ np->medialock = 1;
++ }
++ if (card_idx < MAX_UNITS && full_duplex[card_idx] > 0)
++ np->full_duplex = 1;
++
++ if (np->full_duplex)
++ np->duplex_lock = 1;
++
++#if ! defined(final_version) /* Dump the EEPROM contents during development. */
++ if (np->msg_level & NETIF_MSG_MISC) {
++ int sum = 0;
++ for (i = 0; i < 0x40; i++) {
++ int eeval = eeprom_read(ioaddr, i);
++ printk("%4.4x%s", eeval, i % 16 != 15 ? " " : "\n");
++ sum += eeval;
++ }
++ printk(KERN_DEBUG "%s: EEPROM checksum %4.4X (expected value 0xBABA).\n",
++ dev->name, sum & 0xffff);
++ }
++#endif
++
++ /* The chip-specific entries in the device structure. */
++ dev->open = &netdev_open;
++ dev->hard_start_xmit = &start_tx;
++ dev->stop = &netdev_close;
++ dev->get_stats = &get_stats;
++ dev->set_multicast_list = &set_rx_mode;
++ dev->do_ioctl = &mii_ioctl;
++ dev->change_mtu = &change_mtu;
++
++ /* Turn off VLAN and clear the VLAN filter. */
++ writel(0x04000000, ioaddr + VLANetherType);
++ for (i = 0x600; i < 0x800; i+=4)
++ writel(0, ioaddr + i);
++ np->tx_config = 0x80000020;
++ writel(np->tx_config, ioaddr + TxConfigReg);
++ {
++ int eeword10 = eeprom_read(ioaddr, 10);
++ writel(((eeword10 & 0x01e0) << 17) | ((eeword10 & 0x0010) << 3),
++ ioaddr + ChipCtrl);
++ }
++
++ return dev;
++}
++
++
++/* Read the EEPROM interface with serial bit streams generated by the
++ host processor.
++ The example below is for the common 93c46 EEPROM, 64 16 bit words. */
++
++/* Delay between EEPROM clock transitions.
++ This effectively flushes the write cache to prevent quick double-writes.
++*/
++#define eeprom_delay(ee_addr) readl(ee_addr)
++
++enum EEPROM_Ctrl_Bits {
++ EE_ShiftClk=0x01, EE_ChipSelect=0x02, EE_DataIn=0x08, EE_DataOut=0x04,
++};
++#define EE_Write0 (EE_ChipSelect)
++#define EE_Write1 (EE_ChipSelect | EE_DataOut)
++
++/* The EEPROM commands include the always-set leading bit. */
++enum EEPROM_Cmds { EE_WriteCmd=5, EE_ReadCmd=6, EE_EraseCmd=7, };
++
++static int eeprom_read(long addr, int location)
++{
++ int i;
++ int retval = 0;
++ long ee_addr = addr + EECtrl;
++ int read_cmd = ((EE_ReadCmd<<6) | location) << 16 ;
++ int cmd_len = 2+6+16;
++ u32 baseval = readl(ee_addr) & ~0x0f;
++
++ writel(EE_Write0 | baseval, ee_addr);
++
++ /* Shift the read command bits out. */
++ for (i = cmd_len; i >= 0; i--) {
++ int dataval = baseval |
++ ((read_cmd & (1 << i)) ? EE_Write1 : EE_Write0);
++ writel(dataval, ee_addr);
++ eeprom_delay(ee_addr);
++ writel(dataval | EE_ShiftClk, ee_addr);
++ eeprom_delay(ee_addr);
++ retval = (retval << 1) | ((readl(ee_addr) & EE_DataIn) ? 1 : 0);
++ }
++
++ /* Terminate the EEPROM access. */
++ writel(baseval | EE_Write0, ee_addr);
++ writel(baseval & ~EE_ChipSelect, ee_addr);
++ return retval;
++}
++
++
++
++static int netdev_open(struct net_device *dev)
++{
++ struct netdev_private *np = (struct netdev_private *)dev->priv;
++ long ioaddr = dev->base_addr;
++
++ /* Some chips may need to be reset. */
++
++ MOD_INC_USE_COUNT;
++
++	if (np->tx_ring == 0)
++		np->tx_ring = (void *)get_free_page(GFP_KERNEL);
++	if (np->tx_ring == 0)
++		return -ENOMEM;
++	if (np->rx_ring == 0)
++		np->rx_ring = (void *)get_free_page(GFP_KERNEL);
++	if (np->rx_ring == 0) {	/* Was tx_ring: never true here, so a failed rx_ring alloc went unnoticed. */
++		free_page((long)np->tx_ring);
++		return -ENOMEM;
++	}
++
++ /* Note that both request_irq() and init_ring() call kmalloc(), which
++ break the global kernel lock protecting this routine. */
++ if (request_irq(dev->irq, &intr_handler, SA_SHIRQ, dev->name, dev)) {
++ MOD_DEC_USE_COUNT;
++ return -EAGAIN;
++ }
++
++ if (np->msg_level & NETIF_MSG_IFUP)
++ printk(KERN_DEBUG "%s: netdev_open() irq %d.\n",
++ dev->name, dev->irq);
++
++ init_ring(dev);
++
++ writel(0, ioaddr + RxControl);
++ writel(virt_to_bus(np->rx_ring), ioaddr + RxRingPtr);
++#if ADDRLEN == 64
++ writel(virt_to_bus(np->rx_ring) >> 32, ioaddr + RxRingPtr + 4);
++#else
++ writel(0, ioaddr + RxRingPtr + 4);
++#endif
++
++ writel(RX_RING_SIZE * sizeof(struct rx_desc), ioaddr + RxRingLen);
++ writel(0x80000000 | rx_intr_holdoff, ioaddr + RxQ0IntrDelay);
++ writel(0, ioaddr + RxDescHead);
++ writel(np->dirty_rx + RX_RING_SIZE, ioaddr + RxDescTail);
++
++ /* Zero the unused Rx ring #1. */
++ writel(0, ioaddr + RxQ1IntrDelay);
++ writel(0, ioaddr + RxRing1Ptr);
++ writel(0, ioaddr + RxRing1Ptr + 4);
++ writel(0, ioaddr + RxRing1Len);
++ writel(0, ioaddr + RxDesc1Head);
++ writel(0, ioaddr + RxDesc1Tail);
++
++ /* Use 0x002000FA for half duplex. */
++ writel(0x000400FA, ioaddr + TxControl);
++
++ writel(virt_to_bus(np->tx_ring), ioaddr + TxRingPtr);
++#if ADDRLEN == 64
++ writel(virt_to_bus(np->tx_ring) >> 32, ioaddr + TxRingPtr + 4);
++#else
++ writel(0, ioaddr + TxRingPtr + 4);
++#endif
++
++ writel(TX_RING_SIZE * sizeof(struct tx_desc), ioaddr + TxRingLen);
++ writel(0, ioaddr + TxDescHead);
++ writel(0, ioaddr + TxDescTail);
++ writel(0, ioaddr + TxQState);
++ writel(0, ioaddr + TxQState + 4);
++
++ /* Set IPG register with Ethernet standard values. */
++ writel(0x00A0080A, ioaddr + TxIPG);
++ /* The delay before announcing a Tx has completed. */
++ writel(tx_intr_holdoff, ioaddr + TxIntrDelay);
++
++ writel(((u32*)dev->dev_addr)[0], ioaddr + RxAddrCAM);
++ writel(0x80000000 | ((((u32*)dev->dev_addr)[1]) & 0xffff),
++ ioaddr + RxAddrCAM + 4);
++
++ /* Initialize other registers. */
++ /* Configure the PCI bus bursts and FIFO thresholds. */
++
++ if (dev->if_port == 0)
++ dev->if_port = np->default_port;
++
++ np->in_interrupt = 0;
++
++ np->rx_mode = RxCtrlEnable;
++ set_rx_mode(dev);
++
++ /* Tx mode */
++ np->tx_config = 0x80000020;
++ writel(np->tx_config, ioaddr + TxConfigReg);
++
++ /* Flow control */
++ writel(0x00C28001, ioaddr + FlowCtrlAddrLo);
++ writel(0x00000100, ioaddr + FlowCtrlAddrHi);
++ writel(0x8808, ioaddr + FlowCtrlType);
++ writel(0x0100, ioaddr + FlowCtrlTimer);
++ writel(0x8000, ioaddr + FlowCtrlThrshHi);
++ writel(0x4000, ioaddr + FlowCtrlThrshLo);
++
++ netif_start_tx_queue(dev);
++
++ /* Enable interrupts by setting the interrupt mask. */
++ writel(IntrTxDone | IntrLinkChange | IntrRxDone | IntrPCIErr
++ | IntrRxEmpty | IntrRxSErr, ioaddr + IntrEnable);
++
++ /* writel(1, dev->base_addr + RxCmd);*/
++
++ if (np->msg_level & NETIF_MSG_IFUP)
++ printk(KERN_DEBUG "%s: Done netdev_open(), status: %x Rx %x Tx %x.\n",
++ dev->name, (int)readl(ioaddr + ChipStatus),
++ (int)readl(ioaddr + RxStatus), (int)readl(ioaddr + TxStatus));
++
++ /* Set the timer to check for link beat. */
++ init_timer(&np->timer);
++ np->timer.expires = jiffies + 3*HZ;
++ np->timer.data = (unsigned long)dev;
++ np->timer.function = &netdev_timer; /* timer handler */
++ add_timer(&np->timer);
++
++ return 0;
++}
++
++/* Update for jumbo frames...
++ Changing the MTU while active is not allowed.
++ */
++static int change_mtu(struct net_device *dev, int new_mtu)
++{
++ if ((new_mtu < 68) || (new_mtu > 1500))
++ return -EINVAL;
++ if (netif_running(dev))
++ return -EBUSY;
++ dev->mtu = new_mtu;
++ return 0;
++}
++
++static void check_duplex(struct net_device *dev)
++{
++ struct netdev_private *np = (struct netdev_private *)dev->priv;
++ long ioaddr = dev->base_addr;
++ int chip_ctrl = readl(ioaddr + ChipCtrl);
++ int rx_cfg = readl(ioaddr + RxConfigReg);
++ int tx_cfg = readl(ioaddr + TxConfigReg);
++#if 0
++ int chip_status = readl(ioaddr + ChipStatus);
++#endif
++
++ if (np->msg_level & NETIF_MSG_LINK)
++ printk(KERN_DEBUG "%s: Link changed status. Ctrl %x rxcfg %8.8x "
++ "txcfg %8.8x.\n",
++ dev->name, chip_ctrl, rx_cfg, tx_cfg);
++ if (np->medialock) {
++ if (np->full_duplex)
++ ;
++ }
++ /* writew(new_tx_mode, ioaddr + TxMode); */
++}
++
++static void netdev_timer(unsigned long data)
++{
++ struct net_device *dev = (struct net_device *)data;
++ struct netdev_private *np = (struct netdev_private *)dev->priv;
++ long ioaddr = dev->base_addr;
++ int next_tick = 10*HZ;
++
++ if (np->msg_level & NETIF_MSG_TIMER) {
++ printk(KERN_DEBUG "%s: Media selection timer tick, status %8.8x, "
++ "Tx %x Rx %x.\n",
++ dev->name, (int)readl(ioaddr + ChipStatus),
++ (int)readl(ioaddr + TxStatus), (int)readl(ioaddr + RxStatus));
++ }
++ /* This will either have a small false-trigger window or will not catch
++ tbusy incorrectly set when the queue is empty. */
++ if ((jiffies - dev->trans_start) > TX_TIMEOUT &&
++ (np->cur_tx - np->dirty_tx > 0 ||
++ netif_queue_paused(dev)) ) {
++ tx_timeout(dev);
++ }
++ check_duplex(dev);
++ np->timer.expires = jiffies + next_tick;
++ add_timer(&np->timer);
++}
++
++static void tx_timeout(struct net_device *dev)
++{
++ struct netdev_private *np = (struct netdev_private *)dev->priv;
++ long ioaddr = dev->base_addr;
++
++ printk(KERN_WARNING "%s: Transmit timed out, status %8.8x,"
++ " resetting...\n", dev->name, (int)readl(ioaddr + ChipStatus));
++
++#ifndef __alpha__
++ if (np->msg_level & NETIF_MSG_TX_ERR) {
++ int i;
++ printk(KERN_DEBUG " Tx registers: ");
++ for (i = 0x400; i < 0x444; i += 8)
++ printk(" %8.8x", (int)readl(ioaddr + i));
++ printk("\n"KERN_DEBUG " Rx ring %p: ", np->rx_ring);
++ for (i = 0; i < RX_RING_SIZE; i++)
++ printk(" %8.8x", (unsigned int)np->rx_ring[i].status);
++ printk("\n"KERN_DEBUG" Tx ring %p: ", np->tx_ring);
++ for (i = 0; i < TX_RING_SIZE; i++)
++ printk(" %4.4x", np->tx_ring[i].status);
++ printk("\n");
++ }
++#endif
++
++ /* Perhaps we should reinitialize the hardware here. */
++ dev->if_port = 0;
++ /* Stop and restart the chip's Tx processes . */
++
++ /* Trigger an immediate transmit demand. */
++
++ dev->trans_start = jiffies;
++ np->stats.tx_errors++;
++ return;
++}
++
++
++/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
++static void init_ring(struct net_device *dev)
++{
++ struct netdev_private *np = (struct netdev_private *)dev->priv;
++ int i;
++
++ np->tx_full = 0;
++ np->cur_rx = np->cur_tx = 0;
++ np->dirty_rx = np->dirty_tx = 0;
++
++ np->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
++ np->rx_head_desc = &np->rx_ring[0];
++
++ /* Initialize all Rx descriptors. */
++ for (i = 0; i < RX_RING_SIZE; i++) {
++ np->rx_skbuff[i] = 0;
++ }
++
++ /* The number of ring descriptors is set by the ring length register,
++ thus the chip does not use 'next_desc' chains. */
++
++ /* Fill in the Rx buffers. Allocation failures are acceptable. */
++ for (i = 0; i < RX_RING_SIZE; i++) {
++ struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz);
++ np->rx_skbuff[i] = skb;
++ if (skb == NULL)
++ break;
++ skb->dev = dev; /* Mark as being used by this device. */
++ skb_reserve(skb, 2); /* 16 byte align the IP header. */
++ np->rx_ring[i].buf_addr = virt_to_le32desc(skb->tail);
++ np->rx_ring[i].buf_addr_hi = 0;
++ np->rx_ring[i].status = 0;
++ }
++ np->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
++
++ for (i = 0; i < TX_RING_SIZE; i++) {
++ np->tx_skbuff[i] = 0;
++ np->tx_ring[i].status = 0;
++ }
++ return;
++}
++
++static int start_tx(struct sk_buff *skb, struct net_device *dev)
++{
++ struct netdev_private *np = (struct netdev_private *)dev->priv;
++ unsigned entry;
++
++ /* Block a timer-based transmit from overlapping. This happens when
++ packets are presumed lost, and we use this check the Tx status. */
++ if (netif_pause_tx_queue(dev) != 0) {
++ /* This watchdog code is redundant with the media monitor timer. */
++ if (jiffies - dev->trans_start > TX_TIMEOUT)
++ tx_timeout(dev);
++ return 1;
++ }
++
++ /* Calculate the next Tx descriptor entry. */
++ entry = np->cur_tx % TX_RING_SIZE;
++
++ np->tx_skbuff[entry] = skb;
++
++ /* Note: Descriptors may be uncached. Write each field only once. */
++ np->tx_ring[entry].buf_addr = virt_to_le32desc(skb->data);
++ np->tx_ring[entry].buf_addr_hi = 0;
++ np->tx_ring[entry].cmd_length = cpu_to_le32(TxCmdDoTx | skb->len);
++ np->tx_ring[entry].status = 0;
++
++ /* Non-CC architectures: explicitly flush descriptor and packet.
++ cache_flush(np->tx_ring[entry], sizeof np->tx_ring[entry]);
++ cache_flush(skb->data, skb->len);
++ */
++
++ np->cur_tx++;
++ if (np->cur_tx - np->dirty_tx >= TX_QUEUE_LEN - 1) {
++ np->tx_full = 1;
++ /* Check for a just-cleared queue. */
++ if (np->cur_tx - (volatile int)np->dirty_tx < TX_QUEUE_LEN - 2) {
++ netif_unpause_tx_queue(dev);
++ np->tx_full = 0;
++ } else
++ netif_stop_tx_queue(dev);
++ } else
++ netif_unpause_tx_queue(dev); /* Typical path */
++
++ /* Inform the chip we have another Tx. */
++ if (np->msg_level & NETIF_MSG_TX_QUEUED)
++ printk(KERN_DEBUG "%s: Tx queued to slot %d, desc tail now %d "
++ "writing %d.\n",
++ dev->name, entry, (int)readl(dev->base_addr + TxDescTail),
++ np->cur_tx % TX_RING_SIZE);
++ writel(np->cur_tx % TX_RING_SIZE, dev->base_addr + TxDescTail);
++
++ dev->trans_start = jiffies;
++
++ if (np->msg_level & NETIF_MSG_TX_QUEUED) {
++ printk(KERN_DEBUG "%s: Transmit frame #%d (%x) queued in slot %d.\n",
++ dev->name, np->cur_tx, (int)virt_to_bus(&np->tx_ring[entry]),
++ entry);
++ }
++ return 0;
++}
++
++/* The interrupt handler does all of the Rx thread work and cleans up
++ after the Tx thread. */
++static void intr_handler(int irq, void *dev_instance, struct pt_regs *rgs)
++{
++ struct net_device *dev = (struct net_device *)dev_instance;
++ struct netdev_private *np;
++ long ioaddr;
++ int work_limit;
++
++ ioaddr = dev->base_addr;
++ np = (struct netdev_private *)dev->priv;
++ work_limit = np->max_interrupt_work;
++
++#if defined(__i386__) && LINUX_VERSION_CODE < 0x020300
++ /* A lock to prevent simultaneous entry bug on Intel SMP machines. */
++ if (test_and_set_bit(0, (void*)&dev->interrupt)) {
++ printk(KERN_ERR"%s: SMP simultaneous entry of an interrupt handler.\n",
++ dev->name);
++ dev->interrupt = 0; /* Avoid halting machine. */
++ return;
++ }
++#endif
++
++ do {
++ u32 intr_status = readl(ioaddr + IntrStatus);
++
++ if (np->msg_level & NETIF_MSG_INTR)
++ printk(KERN_DEBUG "%s: Interrupt, status %4.4x.\n",
++ dev->name, intr_status);
++
++ if (intr_status == 0 || intr_status == 0xffffffff)
++ break;
++
++ if (intr_status & IntrRxDone)
++ netdev_rx(dev);
++
++ for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
++ int entry = np->dirty_tx % TX_RING_SIZE;
++ if (np->tx_ring[entry].status == 0)
++ break;
++ if (np->msg_level & NETIF_MSG_TX_DONE)
++ printk(KERN_DEBUG "%s: Transmit done, Tx status %8.8x.\n",
++ dev->name, np->tx_ring[entry].status);
++ np->stats.tx_packets++;
++#if LINUX_VERSION_CODE > 0x20127
++ np->stats.tx_bytes += np->tx_skbuff[entry]->len;
++#endif
++ /* Free the original skb. */
++ dev_free_skb_irq(np->tx_skbuff[entry]);
++ np->tx_skbuff[entry] = 0;
++ }
++ /* Note the 4 slot hysteresis to mark the queue non-full. */
++ if (np->tx_full && np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
++ /* The ring is no longer full, allow new TX entries. */
++ np->tx_full = 0;
++ netif_resume_tx_queue(dev);
++ }
++
++ /* Abnormal error summary/uncommon events handlers. */
++ if (intr_status & (IntrPCIErr | IntrLinkChange | StatsMax))
++ netdev_error(dev, intr_status);
++
++ if (--work_limit < 0) {
++ printk(KERN_WARNING "%s: Too much work at interrupt, "
++ "status=0x%4.4x.\n",
++ dev->name, intr_status);
++ break;
++ }
++ } while (1);
++
++ if (np->msg_level & NETIF_MSG_INTR)
++ printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
++ dev->name, (int)readl(ioaddr + IntrStatus));
++
++#if defined(__i386__) && LINUX_VERSION_CODE < 0x020300
++ clear_bit(0, (void*)&dev->interrupt);
++#endif
++ return;
++}
++
++/* This routine is logically part of the interrupt handler, but separated
++ for clarity and better register allocation. */
++static int netdev_rx(struct net_device *dev)
++{
++ struct netdev_private *np = (struct netdev_private *)dev->priv;
++ int entry = np->cur_rx % RX_RING_SIZE;
++ int boguscnt = np->dirty_rx + RX_RING_SIZE - np->cur_rx;
++
++ if (np->msg_level & NETIF_MSG_RX_STATUS) {
++ printk(KERN_DEBUG " In netdev_rx(), entry %d status %4.4x.\n",
++ entry, np->rx_ring[entry].status);
++ }
++
++ /* If EOP is set on the next entry, it's a new packet. Send it up. */
++ while (np->rx_head_desc->status & cpu_to_le32(RxDescDone)) {
++ struct rx_desc *desc = np->rx_head_desc;
++ u32 desc_status = le32_to_cpu(desc->status);
++ int data_size = le32_to_cpu(desc->csum_length);
++
++ if (np->msg_level & NETIF_MSG_RX_STATUS)
++ printk(KERN_DEBUG " netdev_rx() status was %8.8x.\n",
++ desc_status);
++ if (--boguscnt < 0)
++ break;
++ if ( ! (desc_status & RxDescEndPkt)) {
++ printk(KERN_WARNING "%s: Oversized Ethernet frame spanned "
++ "multiple buffers, entry %#x length %d status %4.4x!\n",
++ dev->name, np->cur_rx, data_size, desc_status);
++ np->stats.rx_length_errors++;
++ } else {
++ struct sk_buff *skb;
++ /* Reported length should omit the CRC. */
++ int pkt_len = (data_size & 0xffff) - 4;
++
++#ifndef final_version
++ if (np->msg_level & NETIF_MSG_RX_STATUS)
++ printk(KERN_DEBUG " netdev_rx() normal Rx pkt length %d"
++ " of %d, bogus_cnt %d.\n",
++ pkt_len, data_size, boguscnt);
++#endif
++ /* Check if the packet is long enough to accept without copying
++ to a minimally-sized skbuff. */
++ if (pkt_len < np->rx_copybreak
++ && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
++ skb->dev = dev;
++ skb_reserve(skb, 2); /* 16 byte align the IP header */
++#if HAS_IP_COPYSUM /* Call copy + cksum if available. */
++ eth_copy_and_sum(skb, np->rx_skbuff[entry]->tail, pkt_len, 0);
++ skb_put(skb, pkt_len);
++#else
++ memcpy(skb_put(skb, pkt_len), np->rx_skbuff[entry]->tail,
++ pkt_len);
++#endif
++ } else {
++ char *temp = skb_put(skb = np->rx_skbuff[entry], pkt_len);
++ np->rx_skbuff[entry] = NULL;
++#ifndef final_version /* Remove after testing. */
++ if (le32desc_to_virt(np->rx_ring[entry].buf_addr) != temp)
++ printk(KERN_ERR "%s: Internal fault: The skbuff addresses "
++ "do not match in netdev_rx: %p vs. %p / %p.\n",
++ dev->name,
++ le32desc_to_virt(np->rx_ring[entry].buf_addr),
++ skb->head, temp);
++#endif
++ }
++#ifndef final_version /* Remove after testing. */
++ /* You will want this info for the initial debug. */
++ if (np->msg_level & NETIF_MSG_PKTDATA)
++ printk(KERN_DEBUG " Rx data %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:"
++ "%2.2x %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x %2.2x%2.2x "
++ "%d.%d.%d.%d.\n",
++ skb->data[0], skb->data[1], skb->data[2], skb->data[3],
++ skb->data[4], skb->data[5], skb->data[6], skb->data[7],
++ skb->data[8], skb->data[9], skb->data[10],
++ skb->data[11], skb->data[12], skb->data[13],
++ skb->data[14], skb->data[15], skb->data[16],
++ skb->data[17]);
++#endif
++ skb->protocol = eth_type_trans(skb, dev);
++ /* Note: checksum -> skb->ip_summed = CHECKSUM_UNNECESSARY; */
++ netif_rx(skb);
++ dev->last_rx = jiffies;
++ np->stats.rx_packets++;
++#if LINUX_VERSION_CODE > 0x20127
++ np->stats.rx_bytes += pkt_len;
++#endif
++ }
++ entry = (++np->cur_rx) % RX_RING_SIZE;
++ np->rx_head_desc = &np->rx_ring[entry];
++ }
++
++ /* Refill the Rx ring buffers. */
++ for (; np->cur_rx - np->dirty_rx > 0; np->dirty_rx++) {
++ struct sk_buff *skb;
++ entry = np->dirty_rx % RX_RING_SIZE;
++ if (np->rx_skbuff[entry] == NULL) {
++ skb = dev_alloc_skb(np->rx_buf_sz);
++ np->rx_skbuff[entry] = skb;
++ if (skb == NULL)
++ break; /* Better luck next round. */
++ skb->dev = dev; /* Mark as being used by this device. */
++ skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
++ np->rx_ring[entry].buf_addr = virt_to_le32desc(skb->tail);
++ }
++ np->rx_ring[entry].status = 0;
++ }
++
++ /* Restart Rx engine if stopped. */
++ /* writel(1, dev->base_addr + RxCmd); */
++ return 0;
++}
++
++static void netdev_error(struct net_device *dev, int intr_status)
++{
++ long ioaddr = dev->base_addr;
++ struct netdev_private *np = (struct netdev_private *)dev->priv;
++
++ if (intr_status & IntrLinkChange) {
++ int chip_ctrl = readl(ioaddr + ChipCtrl);
++ if (np->msg_level & NETIF_MSG_LINK)
++ printk(KERN_ERR "%s: Link changed: Autonegotiation on-going.\n",
++ dev->name);
++ if (chip_ctrl & 1)
++ netif_link_up(dev);
++ else
++ netif_link_down(dev);
++ check_duplex(dev);
++ }
++ if (intr_status & StatsMax) {
++ get_stats(dev);
++ }
++ if ((intr_status & ~(IntrLinkChange|StatsMax))
++ && (np->msg_level & NETIF_MSG_DRV))
++ printk(KERN_ERR "%s: Something Wicked happened! %4.4x.\n",
++ dev->name, intr_status);
++ /* Hmmmmm, it's not clear how to recover from PCI faults. */
++ if (intr_status & IntrPCIErr)
++ np->stats.tx_fifo_errors++;
++}
++
++static struct net_device_stats *get_stats(struct net_device *dev)
++{
++	long ioaddr = dev->base_addr;
++	struct netdev_private *np = (struct netdev_private *)dev->priv;
++	int crc_errs = readl(ioaddr + RxCRCErrs);
++
++	if (crc_errs != 0xffffffff) {
++		/* We need not lock this segment of code for SMP.
++		   The non-atomic-add vulnerability is very small
++		   and statistics are non-critical. */
++		np->stats.rx_crc_errors += crc_errs;	/* Reuse the sample tested above; a second readl() could return a different or cleared count. */
++		np->stats.rx_missed_errors += readl(ioaddr + RxMissed);
++	}
++
++	return &np->stats;
++}
++
++/* The little-endian AUTODIN II ethernet CRC calculations.
++ A big-endian version is also available.
++ This is slow but compact code. Do not use this routine for bulk data,
++ use a table-based routine instead.
++ This is common code and should be moved to net/core/crc.c.
++ Chips may use the upper or lower CRC bits, and may reverse and/or invert
++ them. Select the endian-ness that results in minimal calculations.
++*/
++static unsigned const ethernet_polynomial_le = 0xedb88320U;
++static inline unsigned ether_crc_le(int length, unsigned char *data)
++{
++ unsigned int crc = 0xffffffff; /* Initial value. */
++ while(--length >= 0) {
++ unsigned char current_octet = *data++;
++ int bit;
++ for (bit = 8; --bit >= 0; current_octet >>= 1) {
++ if ((crc ^ current_octet) & 1) {
++ crc >>= 1;
++ crc ^= ethernet_polynomial_le;
++ } else
++ crc >>= 1;
++ }
++ }
++ return crc;
++}
++
++static void set_rx_mode(struct net_device *dev)
++{
++	long ioaddr = dev->base_addr;
++	struct netdev_private *np = (struct netdev_private *)dev->priv;
++	u32 new_mc_filter[128];		/* Multicast filter table */
++	u32 new_rx_mode = np->rx_mode;
++
++	if (dev->flags & IFF_PROMISC) {			/* Set promiscuous. */
++		/* Unconditionally log net taps. */
++		printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n", dev->name);
++		new_rx_mode |=
++			RxCtrlAcceptBroadcast | RxCtrlAllMulticast | RxCtrlAllUnicast;
++	} else if ((dev->mc_count > np->multicast_filter_limit)
++			   || (dev->flags & IFF_ALLMULTI)) {
++		/* Too many to match, or accept all multicasts. */
++		new_rx_mode &= ~RxCtrlAllUnicast;
++		new_rx_mode |= RxCtrlAcceptBroadcast | RxCtrlAllMulticast;
++	} else {
++		struct dev_mc_list *mclist;
++		int i;
++		memset(new_mc_filter, 0, sizeof(new_mc_filter));
++		for (i = 0, mclist = dev->mc_list; mclist && i < 15;
++			 i++, mclist = mclist->next) {
++			writel(((u32*)mclist->dmi_addr)[0], ioaddr + RxAddrCAM + 8 + i*8);
++			writel((((u32*)mclist->dmi_addr)[1] & 0xffff) | 0x80000000,
++				   ioaddr + RxAddrCAM + 12 + i*8);
++		}
++		for (; mclist && i < dev->mc_count; i++, mclist = mclist->next) {
++			set_bit(((u32*)mclist->dmi_addr)[1] & 0xfff,
++					new_mc_filter);
++		}
++		new_rx_mode &= ~(RxCtrlAllUnicast | RxCtrlAllMulticast);	/* Precedence fix: clear both bits; '~A | B' only cleared AllUnicast. */
++		new_rx_mode |= RxCtrlAcceptBroadcast;
++		if (dev->mc_count > 15)
++			for (i = 0; i < 128; i++)
++				writel(new_mc_filter[i], ioaddr + MulticastArray + (i<<2));
++	}
++	if (np->rx_mode != new_rx_mode)
++		writel(np->rx_mode = new_rx_mode, ioaddr + RxControl);
++}
++
++static int mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
++{
++ struct netdev_private *np = (struct netdev_private *)dev->priv;
++ u32 *data32 = (void *)&rq->ifr_data;
++
++ switch(cmd) {
++ case SIOCGPARAMS:
++ data32[0] = np->msg_level;
++ data32[1] = np->multicast_filter_limit;
++ data32[2] = np->max_interrupt_work;
++ data32[3] = np->rx_copybreak;
++ return 0;
++ case SIOCSPARAMS:
++ if (!capable(CAP_NET_ADMIN))
++ return -EPERM;
++ np->msg_level = data32[0];
++ np->multicast_filter_limit = data32[1];
++ np->max_interrupt_work = data32[2];
++ np->rx_copybreak = data32[3];
++ return 0;
++ default:
++ return -EOPNOTSUPP;
++ }
++}
++
++static int netdev_close(struct net_device *dev)
++{
++ long ioaddr = dev->base_addr;
++ struct netdev_private *np = (struct netdev_private *)dev->priv;
++ int i;
++
++ netif_stop_tx_queue(dev);
++
++ if (np->msg_level & NETIF_MSG_IFDOWN) {
++ printk(KERN_DEBUG "%s: Shutting down ethercard, status was Tx %4.4x "
++ "Rx %4.4x Int %2.2x.\n",
++ dev->name, (int)readl(ioaddr + TxStatus),
++ (int)readl(ioaddr + RxStatus), (int)readl(ioaddr + IntrStatus));
++ printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d, Rx %d / %d.\n",
++ dev->name, np->cur_tx, np->dirty_tx, np->cur_rx, np->dirty_rx);
++ }
++
++ /* Disable interrupts by clearing the interrupt mask. */
++ writel(~0, ioaddr + IntrDisable);
++ readl(ioaddr + IntrStatus);
++
++ /* Reset everything. */
++ writel(0x04000000, ioaddr + ChipCtrl);
++
++ del_timer(&np->timer);
++
++#ifdef __i386__
++ if (np->msg_level & NETIF_MSG_IFDOWN) {
++ printk("\n"KERN_DEBUG" Tx ring at %8.8x:\n",
++ (int)virt_to_bus(np->tx_ring));
++ for (i = 0; i < TX_RING_SIZE; i++)
++ printk(" #%d desc. buf %8.8x, length %8.8x, status %8.8x.\n",
++ i, np->tx_ring[i].buf_addr, np->tx_ring[i].cmd_length,
++ np->tx_ring[i].status);
++ printk("\n"KERN_DEBUG " Rx ring %8.8x:\n",
++ (int)virt_to_bus(np->rx_ring));
++ for (i = 0; i < RX_RING_SIZE; i++) {
++ printk(KERN_DEBUG " #%d desc. %4.4x %4.4x %8.8x\n",
++ i, np->rx_ring[i].csum_length,
++ np->rx_ring[i].status, np->rx_ring[i].buf_addr);
++ if (np->rx_ring[i].buf_addr) {
++ if (*(u8*)np->rx_skbuff[i]->tail != 0x69) {
++ u16 *pkt_buf = (void *)np->rx_skbuff[i]->tail;
++ int j;
++ for (j = 0; j < 0x50; j++)
++ printk(" %4.4x", pkt_buf[j]);
++ printk("\n");
++ }
++ }
++ }
++ }
++#endif /* __i386__ debugging only */
++
++ free_irq(dev->irq, dev);
++
++ /* Free all the skbuffs in the Rx queue. */
++ for (i = 0; i < RX_RING_SIZE; i++) {
++ np->rx_ring[i].status = 0;
++ np->rx_ring[i].buf_addr = 0xBADF00D0; /* An invalid address. */
++ if (np->rx_skbuff[i]) {
++#if LINUX_VERSION_CODE < 0x20100
++ np->rx_skbuff[i]->free = 1;
++#endif
++ dev_free_skb(np->rx_skbuff[i]);
++ }
++ np->rx_skbuff[i] = 0;
++ }
++ for (i = 0; i < TX_RING_SIZE; i++) {
++ if (np->tx_skbuff[i])
++ dev_free_skb(np->tx_skbuff[i]);
++ np->tx_skbuff[i] = 0;
++ }
++
++ MOD_DEC_USE_COUNT;
++
++ return 0;
++}
++
++static int netdev_pwr_event(void *dev_instance, int event)
++{
++ struct net_device *dev = dev_instance;
++ struct netdev_private *np = (struct netdev_private *)dev->priv;
++ long ioaddr = dev->base_addr;
++
++ if (np->msg_level & NETIF_MSG_LINK)
++ printk(KERN_DEBUG "%s: Handling power event %d.\n", dev->name, event);
++ switch(event) {
++ case DRV_ATTACH:
++ MOD_INC_USE_COUNT;
++ break;
++ case DRV_SUSPEND:
++ /* Disable interrupts, stop Tx and Rx. */
++ writel(~0, ioaddr + IntrDisable);
++ /* writel(2, ioaddr + RxCmd); */
++ /* writew(2, ioaddr + TxCmd); */
++ break;
++ case DRV_RESUME:
++ /* This is incomplete: the actions are very chip specific. */
++ set_rx_mode(dev);
++ break;
++ case DRV_DETACH: {
++ struct net_device **devp, **next;
++ if (dev->flags & IFF_UP) {
++ /* Some, but not all, kernel versions close automatically. */
++ dev_close(dev);
++ dev->flags &= ~(IFF_UP|IFF_RUNNING);
++ }
++ unregister_netdev(dev);
++ release_region(dev->base_addr, pci_id_tbl[np->chip_id].io_size);
++ iounmap((char *)dev->base_addr);
++ for (devp = &root_net_dev; *devp; devp = next) {
++ next = &((struct netdev_private *)(*devp)->priv)->next_module;
++ if (*devp == dev) {
++ *devp = *next;
++ break;
++ }
++ }
++ if (np->priv_addr)
++ kfree(np->priv_addr);
++ kfree(dev);
++ MOD_DEC_USE_COUNT;
++ break;
++ }
++ }
++
++ return 0;
++}
++
++
++#ifdef MODULE
++int init_module(void)
++{
++ /* Emit version even if no cards detected. */
++ printk(KERN_INFO "%s" KERN_INFO "%s", version1, version2);
++ return pci_drv_register(&igige_drv_id, NULL);
++}
++
++void cleanup_module(void)
++{
++	struct net_device *next_dev;
++
++	pci_drv_unregister(&igige_drv_id);
++
++	/* No need to check MOD_IN_USE, as sys_delete_module() checks. */
++	while (root_net_dev) {
++		struct netdev_private *np = (void *)(root_net_dev->priv);
++		unregister_netdev(root_net_dev);
++		release_region(root_net_dev->base_addr,
++					   pci_id_tbl[np->chip_id].io_size);
++		iounmap((char *)(root_net_dev->base_addr));
++		next_dev = np->next_module;
++		if (np->tx_ring != 0)	/* Fix inverted test ('== 0') that leaked every allocated ring. */
++			free_page((long)np->tx_ring);
++		if (np->rx_ring != 0)
++			free_page((long)np->rx_ring);
++		if (np->priv_addr)
++			kfree(np->priv_addr);
++		kfree(root_net_dev);
++		root_net_dev = next_dev;
++	}
++}
++
++#endif /* MODULE */
++
++/*
++ * Local variables:
++ * compile-command: "make KERNVER=`uname -r` intel-gige.o"
++ * compile-cmd: "gcc -DMODULE -Wall -Wstrict-prototypes -O6 -c intel-gige.c"
++ * simple-compile-command: "gcc -DMODULE -O6 -c intel-gige.c"
++ * c-indent-level: 4
++ * c-basic-offset: 4
++ * tab-width: 4
++ * End:
++ */
+Index: linux/src/drivers/net/kern_compat.h
+===================================================================
+RCS file: linux/src/drivers/net/kern_compat.h
+diff -N linux/src/drivers/net/kern_compat.h
+--- /dev/null 1 Jan 1970 00:00:00 -0000
++++ linux/src/drivers/net/kern_compat.h 20 Aug 2004 10:32:53 -0000
+@@ -0,0 +1,285 @@
++#ifndef _KERN_COMPAT_H
++#define _KERN_COMPAT_H
++/* kern_compat.h: Linux PCI network adapter backward compatibility code. */
++/*
++ $Revision: 1.18 $ $Date: 2003/07/22 17:40:21 $
++
++ Kernel compatibility defines.
++ This file provides macros to mask the difference between kernel versions.
++ It is designed primarily to allow device drivers to be written so that
++ they work with a range of kernel versions.
++
++ Written 1999-2003 Donald Becker, Scyld Computing Corporation
++ This software may be used and distributed according to the terms
++ of the GNU General Public License (GPL), incorporated herein by
++ reference. Drivers interacting with these functions are derivative
++ works and thus are covered the GPL. They must include an explicit
++ GPL notice.
++
++ This code also provides inline scan and activate functions for PCI network
++ interfaces. It has an interface identical to pci-scan.c, but is
++ intended as an include file to simplify using updated drivers with older
++ kernel versions.
++ This code version matches pci-scan.c:v0.05 9/16/99
++
++ The author may be reached as becker@scyld.com, or
++ Donald Becker
++ Penguin Computing Corporation
++ 914 Bay Ridge Road, Suite 220
++ Annapolis MD 21403
++
++ Other contributors:
++ <none>
++*/
++
++/* We try to use defined values to decide when an interface has changed or
++ added features, but we must have the kernel version number for a few. */
++#if ! defined(LINUX_VERSION_CODE) || (LINUX_VERSION_CODE < 0x10000)
++#include <linux/version.h>
++#endif
++/* Older kernel versions didn't include modversions automatically. */
++#if LINUX_VERSION_CODE < 0x20300 && defined(MODVERSIONS)
++#include <linux/modversions.h>
++#endif
++
++/* There was no support for PCI address space mapping in 2.0, but the
++ Alpha needed it. See the 2.2 documentation. */
++#if LINUX_VERSION_CODE < 0x20100 && ! defined(__alpha__)
++#define ioremap(a,b)\
++ (((unsigned long)(a) >= 0x100000) ? vremap(a,b) : (void*)(a))
++#define iounmap(v)\
++ do { if ((unsigned long)(v) >= 0x100000) vfree(v);} while (0)
++#endif
++
++/* Support for adding info about the purpose of and parameters for kernel
++ modules was added in 2.1. */
++#if LINUX_VERSION_CODE < 0x20115
++#define MODULE_AUTHOR(name) extern int nonesuch
++#define MODULE_DESCRIPTION(string) extern int nonesuch
++#define MODULE_PARM(varname, typestring) extern int nonesuch
++#define MODULE_PARM_DESC(var,desc) extern int nonesuch
++#endif
++#if !defined(MODULE_LICENSE)
++#define MODULE_LICENSE(license) \
++static const char __module_license[] __attribute__((section(".modinfo"))) = \
++"license=" license
++#endif
++#if !defined(MODULE_PARM_DESC)
++#define MODULE_PARM_DESC(var,desc) \
++const char __module_parm_desc_##var[] \
++__attribute__((section(".modinfo"))) = \
++"parm_desc_" __MODULE_STRING(var) "=" desc
++#endif
++
++/* SMP and better multiarchitecture support were added.
++ Using an older kernel means we assume a little-endian uniprocessor.
++*/
++#if LINUX_VERSION_CODE < 0x20123
++#define hard_smp_processor_id() smp_processor_id()
++#define test_and_set_bit(val, addr) set_bit(val, addr)
++#define cpu_to_le16(val) (val)
++#define cpu_to_le32(val) (val)
++#define le16_to_cpu(val) (val)
++#define le16_to_cpus(val) /* In-place conversion. */
++#define le32_to_cpu(val) (val)
++#define cpu_to_be16(val) ((((val) & 0xff) << 8) + (((val) >> 8) & 0xff))
++#define cpu_to_be32(val) ((cpu_to_be16(val) << 16) + cpu_to_be16((val) >> 16))
++typedef long spinlock_t;
++#define SPIN_LOCK_UNLOCKED 0
++#define spin_lock(lock)
++#define spin_unlock(lock)
++#define spin_lock_irqsave(lock, flags) do {save_flags(flags); cli();} while(0)
++#define spin_unlock_irqrestore(lock, flags) restore_flags(flags)
++#endif
++
++#if LINUX_VERSION_CODE <= 0x20139
++#define net_device_stats enet_statistics
++#else
++#define NETSTATS_VER2
++#endif
++
++/* These are used by the netdrivers to report values from the
++ MII (Media Independent Interface) management registers.
++*/
++#ifndef SIOCGMIIPHY
++#define SIOCGMIIPHY (SIOCDEVPRIVATE) /* Get the PHY in use. */
++#define SIOCGMIIREG (SIOCDEVPRIVATE+1) /* Read a PHY register. */
++#define SIOCSMIIREG (SIOCDEVPRIVATE+2) /* Write a PHY register. */
++#endif
++#ifndef SIOCGPARAMS
++#define SIOCGPARAMS (SIOCDEVPRIVATE+3) /* Read operational parameters. */
++#define SIOCSPARAMS (SIOCDEVPRIVATE+4) /* Set operational parameters. */
++#endif
++
++#if !defined(HAVE_NETIF_MSG)
++enum {
++ NETIF_MSG_DRV = 0x0001,
++ NETIF_MSG_PROBE = 0x0002,
++ NETIF_MSG_LINK = 0x0004,
++ NETIF_MSG_TIMER = 0x0008,
++ NETIF_MSG_IFDOWN = 0x0010,
++ NETIF_MSG_IFUP = 0x0020,
++ NETIF_MSG_RX_ERR = 0x0040,
++ NETIF_MSG_TX_ERR = 0x0080,
++ NETIF_MSG_TX_QUEUED = 0x0100,
++ NETIF_MSG_INTR = 0x0200,
++ NETIF_MSG_TX_DONE = 0x0400,
++ NETIF_MSG_RX_STATUS = 0x0800,
++ NETIF_MSG_PKTDATA = 0x1000,
++ /* 2000 is reserved. */
++ NETIF_MSG_WOL = 0x4000,
++ NETIF_MSG_MISC = 0x8000,
++ NETIF_MSG_RXFILTER = 0x10000,
++};
++#define NETIF_MSG_MAX 0x10000
++#endif
++
++#if !defined(NETIF_MSG_MAX) || NETIF_MSG_MAX < 0x8000
++#define NETIF_MSG_MISC 0x8000
++#endif
++#if !defined(NETIF_MSG_MAX) || NETIF_MSG_MAX < 0x10000
++#define NETIF_MSG_RXFILTER 0x10000
++#endif
++
++#if LINUX_VERSION_CODE < 0x20155
++#include <linux/bios32.h>
++#define PCI_SUPPORT_VER1
++/* A minimal version of the 2.2.* PCI support that handles configuration
++ space access.
++ Drivers that actually use pci_dev fields must do explicit compatibility.
++ Note that the struct pci_dev * "pointer" is actually a byte mapped integer!
++*/
++#if LINUX_VERSION_CODE < 0x20014
++struct pci_dev { int not_used; };
++#endif
++
++#define pci_find_slot(bus, devfn) (struct pci_dev*)((bus<<8) | devfn | 0xf0000)
++#define bus_number(pci_dev) ((((int)(pci_dev))>>8) & 0xff)
++#define devfn_number(pci_dev) (((int)(pci_dev)) & 0xff)
++#define pci_bus_number(pci_dev) ((((int)(pci_dev))>>8) & 0xff)
++#define pci_devfn(pci_dev) (((int)(pci_dev)) & 0xff)
++
++#ifndef CONFIG_PCI
++extern inline int pci_present(void) { return 0; }
++#else
++#define pci_present pcibios_present
++#endif
++
++#define pci_read_config_byte(pdev, where, valp)\
++ pcibios_read_config_byte(bus_number(pdev), devfn_number(pdev), where, valp)
++#define pci_read_config_word(pdev, where, valp)\
++ pcibios_read_config_word(bus_number(pdev), devfn_number(pdev), where, valp)
++#define pci_read_config_dword(pdev, where, valp)\
++ pcibios_read_config_dword(bus_number(pdev), devfn_number(pdev), where, valp)
++#define pci_write_config_byte(pdev, where, val)\
++ pcibios_write_config_byte(bus_number(pdev), devfn_number(pdev), where, val)
++#define pci_write_config_word(pdev, where, val)\
++ pcibios_write_config_word(bus_number(pdev), devfn_number(pdev), where, val)
++#define pci_write_config_dword(pdev, where, val)\
++ pcibios_write_config_dword(bus_number(pdev), devfn_number(pdev), where, val)
++#else
++#define PCI_SUPPORT_VER2
++#define pci_bus_number(pci_dev) ((pci_dev)->bus->number)
++#define pci_devfn(pci_dev) ((pci_dev)->devfn)
++#endif
++
++/* The arg count changed, but function name did not.
++ We cover that bad choice by defining a new name.
++*/
++#if LINUX_VERSION_CODE < 0x20159
++#define dev_free_skb(skb) dev_kfree_skb(skb, FREE_WRITE)
++#define dev_free_skb_irq(skb) dev_kfree_skb(skb, FREE_WRITE)
++#elif LINUX_VERSION_CODE < 0x20400
++#define dev_free_skb(skb) dev_kfree_skb(skb)
++#define dev_free_skb_irq(skb) dev_kfree_skb(skb)
++#else
++#define dev_free_skb(skb) dev_kfree_skb(skb)
++#define dev_free_skb_irq(skb) dev_kfree_skb_irq(skb)
++#endif
++
++/* Added at the suggestion of Jes Sorensen. */
++#if LINUX_VERSION_CODE > 0x20153
++#include <linux/init.h>
++#else
++#define __init
++#define __initdata
++#define __initfunc(__arginit) __arginit
++#endif
++
++/* The old 'struct device' used a too-generic name. */
++#if LINUX_VERSION_CODE < 0x2030d
++#define net_device device
++#endif
++
++/* More changes for the 2.4 kernel, some in the zillion 2.3.99 releases. */
++#if LINUX_VERSION_CODE < 0x20363
++#define DECLARE_MUTEX(name) struct semaphore (name) = MUTEX;
++#define down_write(semaphore_p) down(semaphore_p)
++#define down_read(semaphore_p) down(semaphore_p)
++#define up_write(semaphore_p) up(semaphore_p)
++#define up_read(semaphore_p) up(semaphore_p)
++/* Note that the kernel version has a broken time_before()! */
++#define time_after(a,b) ((long)(b) - (long)(a) < 0)
++#define time_before(a,b) ((long)(a) - (long)(b) < 0)
++#else
++#define get_free_page get_zeroed_page
++#endif
++
++/* The 2.2 kernels added the start of capability-based security for operations
++ that formerly could only be done by root.
++*/
++#if ! defined(CAP_NET_ADMIN)
++#define capable(CAP_XXX) (suser())
++#endif
++
++#if ! defined(HAVE_NETIF_QUEUE)
++#define netif_wake_queue(dev) do { clear_bit( 0, (void*)&(dev)->tbusy); mark_bh(NET_BH); } while (0)
++#define netif_start_tx_queue(dev) do { (dev)->tbusy = 0; dev->start = 1; } while (0)
++#define netif_stop_tx_queue(dev) do { (dev)->tbusy = 1; dev->start = 0; } while (0)
++#define netif_queue_paused(dev) ((dev)->tbusy != 0)
++/* Splitting these lines exposes a bug in some preprocessors. */
++#define netif_pause_tx_queue(dev) (test_and_set_bit( 0, (void*)&(dev)->tbusy))
++#define netif_unpause_tx_queue(dev) do { clear_bit( 0, (void*)&(dev)->tbusy); } while (0)
++#define netif_resume_tx_queue(dev) do { clear_bit( 0, (void*)&(dev)->tbusy); mark_bh(NET_BH); } while (0)
++
++#define netif_running(dev) ((dev)->start != 0)
++#define netif_device_attach(dev) do {; } while (0)
++#define netif_device_detach(dev) do {; } while (0)
++#define netif_device_present(dev) (1)
++#define netif_set_tx_timeout(dev, func, deltajiffs) do {; } while (0)
++#define netif_link_down(dev) (dev)->flags &= ~IFF_RUNNING
++#define netif_link_up(dev) (dev)->flags |= IFF_RUNNING
++
++#else
++
++#define netif_start_tx_queue(dev) netif_start_queue(dev)
++#define netif_stop_tx_queue(dev) netif_stop_queue(dev)
++#define netif_queue_paused(dev) netif_queue_stopped(dev)
++#define netif_resume_tx_queue(dev) netif_wake_queue(dev)
++/* Only used in transmit path. No function in 2.4. */
++#define netif_pause_tx_queue(dev) 0
++#define netif_unpause_tx_queue(dev) do {; } while (0)
++
++#ifdef __LINK_STATE_NOCARRIER
++#define netif_link_down(dev) netif_carrier_off(dev)
++#define netif_link_up(dev) netif_carrier_on(dev)
++#else
++#define netif_link_down(dev) (dev)->flags &= ~IFF_RUNNING
++#define netif_link_up(dev) (dev)->flags |= IFF_RUNNING
++#endif
++
++#endif
++#ifndef PCI_DMA_BUS_IS_PHYS
++#define pci_dma_sync_single(pci_dev, base_addr, extent, tofrom) do {; } while (0)
++#define pci_map_single(pci_dev, base_addr, extent, dir) virt_to_bus(base_addr)
++#define pci_unmap_single(pci_dev, base_addr, extent, dir) do {; } while (0)
++#endif
++
++#endif
++/*
++ * Local variables:
++ * c-indent-level: 4
++ * c-basic-offset: 4
++ * tab-width: 4
++ * End:
++ */
+Index: linux/src/drivers/net/myson803.c
+===================================================================
+RCS file: linux/src/drivers/net/myson803.c
+diff -N linux/src/drivers/net/myson803.c
+--- /dev/null 1 Jan 1970 00:00:00 -0000
++++ linux/src/drivers/net/myson803.c 20 Aug 2004 10:32:53 -0000
+@@ -0,0 +1,1650 @@
++/* myson803.c: A Linux device driver for the Myson mtd803 Ethernet chip. */
++/*
++ Written 1998-2003 by Donald Becker.
++
++ This software may be used and distributed according to the terms of
++ the GNU General Public License (GPL), incorporated herein by reference.
++ Drivers based on or derived from this code fall under the GPL and must
++ retain the authorship, copyright and license notice. This file is not
++ a complete program and may only be used when the entire operating
++ system is licensed under the GPL.
++
++ The author may be reached as becker@scyld.com, or C/O
++ Scyld Computing Corporation
++ 410 Severn Ave., Suite 210
++ Annapolis MD 21403
++
++ Support information and updates available at
++ http://www.scyld.com/network/myson803.html
++*/
++
++/* These identify the driver base version and may not be removed. */
++static const char version1[] =
++"myson803.c:v1.05 3/10/2003 Written by Donald Becker <becker@scyld.com>\n";
++static const char version2[] =
++" http://www.scyld.com/network/drivers.html\n";
++
++/* Automatically extracted configuration info:
++probe-func: myson803_probe
++config-in: tristate 'Myson MTD803 series Ethernet support' CONFIG_MYSON_ETHER
++
++c-help-name: Myson MTD803 PCI Ethernet support
++c-help-symbol: CONFIG_MYSON_ETHER
++c-help: This driver is for the Myson MTD803 Ethernet adapter series.
++c-help: More specific information and updates are available from
++c-help: http://www.scyld.com/network/drivers.html
++*/
++
++/* The user-configurable values.
++ These may be modified when a driver module is loaded.*/
++
++/* Message enable level: 0..31 = no..all messages. See NETIF_MSG docs. */
++static int debug = 2;
++
++/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
++static int max_interrupt_work = 40;
++
++/* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
++ This chip uses a 64 element hash table based on the Ethernet CRC. */
++static int multicast_filter_limit = 32;
++
++/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
++ Setting to > 1518 effectively disables this feature. */
++static int rx_copybreak = 0;
++
++/* Used to pass the media type, etc.
++ Both 'options[]' and 'full_duplex[]' should exist for driver
++ interoperability.
++ The media type is usually passed in 'options[]'.
++ The default is autonegotiation for speed and duplex.
++ This should rarely be overridden.
++ Use option values 0x10/0x20 for 10Mbps, 0x100,0x200 for 100Mbps.
++ Use option values 0x10 and 0x100 for forcing half duplex fixed speed.
++ Use option values 0x20 and 0x200 for forcing full duplex operation.
++*/
++#define MAX_UNITS 8 /* More are supported, limit only on options */
++static int options[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
++static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
++
++/* Operational parameters that are set at compile time. */
++
++/* Keep the ring sizes a power of two for compile efficiency.
++ The compiler will convert <unsigned>'%'<2^N> into a bit mask.
++ Making the Tx ring too large decreases the effectiveness of channel
++ bonding and packet priority.
++ There are no ill effects from too-large receive rings. */
++#define TX_RING_SIZE 16
++#define TX_QUEUE_LEN 10 /* Limit Tx ring entries actually used. */
++#define RX_RING_SIZE 32
++
++/* Operational parameters that usually are not changed. */
++/* Time in jiffies before concluding the transmitter is hung. */
++#define TX_TIMEOUT (6*HZ)
++
++/* Allocation size of Rx buffers with normal sized Ethernet frames.
++ Do not change this value without good reason. This is not a limit,
++ but a way to keep a consistent allocation size among drivers.
++ */
++#define PKT_BUF_SZ 1536
++
++#ifndef __KERNEL__
++#define __KERNEL__
++#endif
++#if !defined(__OPTIMIZE__)
++#warning You must compile this file with the correct options!
++#warning See the last lines of the source file.
++#error You must compile this driver with "-O".
++#endif
++
++/* Include files, designed to support most kernel versions 2.0.0 and later. */
++#include <linux/config.h>
++#if defined(CONFIG_SMP) && ! defined(__SMP__)
++#define __SMP__
++#endif
++#if defined(MODULE) && defined(CONFIG_MODVERSIONS) && ! defined(MODVERSIONS)
++#define MODVERSIONS
++#endif
++
++#include <linux/version.h>
++#if defined(MODVERSIONS)
++#include <linux/modversions.h>
++#endif
++#include <linux/module.h>
++
++#include <linux/kernel.h>
++#include <linux/string.h>
++#include <linux/timer.h>
++#include <linux/errno.h>
++#include <linux/ioport.h>
++#if LINUX_VERSION_CODE >= 0x20400
++#include <linux/slab.h>
++#else
++#include <linux/malloc.h>
++#endif
++#include <linux/interrupt.h>
++#include <linux/pci.h>
++#include <linux/netdevice.h>
++#include <linux/etherdevice.h>
++#include <linux/skbuff.h>
++#include <linux/delay.h>
++#include <asm/processor.h> /* Processor type for cache alignment. */
++#include <asm/bitops.h>
++#include <asm/io.h>
++#include <asm/unaligned.h>
++
++#ifdef INLINE_PCISCAN
++#include "k_compat.h"
++#else
++#include "pci-scan.h"
++#include "kern_compat.h"
++#endif
++
++/* Condensed operations for readability. */
++#define virt_to_le32desc(addr) cpu_to_le32(virt_to_bus(addr))
++#define le32desc_to_virt(addr) bus_to_virt(le32_to_cpu(addr))
++
++#if (LINUX_VERSION_CODE >= 0x20100) && defined(MODULE)
++char kernel_version[] = UTS_RELEASE;
++#endif
++
++/* Kernels before 2.1.0 cannot map the high addrs assigned by some BIOSes. */
++#if (LINUX_VERSION_CODE < 0x20100) || ! defined(MODULE)
++#define USE_IO_OPS
++#endif
++
++MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
++MODULE_DESCRIPTION("Myson mtd803 Ethernet driver");
++MODULE_LICENSE("GPL");
++/* List in order of common use. */
++MODULE_PARM(debug, "i");
++MODULE_PARM(options, "1-" __MODULE_STRING(MAX_UNITS) "i");
++MODULE_PARM(full_duplex, "1-" __MODULE_STRING(MAX_UNITS) "i");
++MODULE_PARM(max_interrupt_work, "i");
++MODULE_PARM(rx_copybreak, "i");
++MODULE_PARM(multicast_filter_limit, "i");
++MODULE_PARM_DESC(debug, "Driver message level (0-31)");
++MODULE_PARM_DESC(options, "Force transceiver type or fixed speed+duplex");
++MODULE_PARM_DESC(full_duplex, "Non-zero to force full duplex, "
++ "non-negotiated link (deprecated).");
++MODULE_PARM_DESC(max_interrupt_work,
++ "Maximum events handled per interrupt");
++MODULE_PARM_DESC(rx_copybreak,
++ "Breakpoint in bytes for copy-only-tiny-frames");
++MODULE_PARM_DESC(multicast_filter_limit,
++ "Multicast addresses before switching to Rx-all-multicast");
++
++/*
++ Theory of Operation
++
++I. Board Compatibility
++
++This driver is for the Myson mtd803 chip.
++It should work with other Myson 800 series chips.
++
++II. Board-specific settings
++
++None.
++
++III. Driver operation
++
++IIIa. Ring buffers
++
++This driver uses two statically allocated fixed-size descriptor lists
++formed into rings by a branch from the final descriptor to the beginning of
++the list. The ring sizes are set at compile time by RX/TX_RING_SIZE.
++Some chips explicitly use only 2^N sized rings, while others use a
++'next descriptor' pointer that the driver forms into rings.
++
++IIIb/c. Transmit/Receive Structure
++
++This driver uses a zero-copy receive and transmit scheme.
++The driver allocates full frame size skbuffs for the Rx ring buffers at
++open() time and passes the skb->data field to the chip as receive data
++buffers. When an incoming frame is less than RX_COPYBREAK bytes long,
++a fresh skbuff is allocated and the frame is copied to the new skbuff.
++When the incoming frame is larger, the skbuff is passed directly up the
++protocol stack. Buffers consumed this way are replaced by newly allocated
++skbuffs in a later phase of receives.
++
++The RX_COPYBREAK value is chosen to trade-off the memory wasted by
++using a full-sized skbuff for small frames vs. the copying costs of larger
++frames. New boards are typically used in generously configured machines
++and the underfilled buffers have negligible impact compared to the benefit of
++a single allocation size, so the default value of zero results in never
++copying packets. When copying is done, the cost is usually mitigated by using
++a combined copy/checksum routine. Copying also preloads the cache, which is
++most useful with small frames.
++
++A subtle aspect of the operation is that the IP header at offset 14 in an
++ethernet frame isn't longword aligned for further processing.
++When unaligned buffers are permitted by the hardware (and always on copies)
++frames are put into the skbuff at an offset of "+2", 16-byte aligning
++the IP header.
++
++IIId. Synchronization
++
++The driver runs as two independent, single-threaded flows of control. One
++is the send-packet routine, which enforces single-threaded use by the
++dev->tbusy flag. The other thread is the interrupt handler, which is single
++threaded by the hardware and interrupt handling software.
++
++The send packet thread has partial control over the Tx ring and 'dev->tbusy'
++flag. It sets the tbusy flag whenever it's queuing a Tx packet. If the next
++queue slot is empty, it clears the tbusy flag when finished otherwise it sets
++the 'lp->tx_full' flag.
++
++The interrupt handler has exclusive control over the Rx ring and records stats
++from the Tx ring. After reaping the stats, it marks the Tx queue entry as
++empty by incrementing the dirty_tx mark. Iff the 'lp->tx_full' flag is set, it
++clears both the tx_full and tbusy flags.
++
++IIId. SMP semantics
++
++The following are serialized with respect to each other via the "xmit_lock".
++ dev->hard_start_xmit() Transmit a packet
++ dev->tx_timeout() Transmit watchdog for stuck Tx
++ dev->set_multicast_list() Set the receive filter.
++Note: The Tx timeout watchdog code is implemented by the timer routine in
++kernels up to 2.2.*. In 2.4.* and later the timeout code is part of the
++driver interface.
++
++The following fall under the global kernel lock. The module will not be
++unloaded during the call, unless a call with a potential reschedule e.g.
++kmalloc() is called. No other synchronization assertion is made.
++ dev->open()
++ dev->do_ioctl()
++ dev->get_stats()
++Caution: The lock for dev->open() is commonly broken with request_irq() or
++kmalloc(). It is best to avoid any lock-breaking call in do_ioctl() and
++get_stats(), or additional module locking code must be implemented.
++
++The following is self-serialized (no simultaneous entry)
++ An handler registered with request_irq().
++
++IV. Notes
++
++IVb. References
++
++http://www.scyld.com/expert/100mbps.html
++http://scyld.com/expert/NWay.html
++http://www.myson.com.hk/mtd/datasheet/mtd803.pdf
++ Myson does not require a NDA to read the datasheet.
++
++IVc. Errata
++
++No undocumented errata.
++*/
++
++
++
++/* PCI probe routines. */
++
++static void *myson_probe1(struct pci_dev *pdev, void *init_dev,
++ long ioaddr, int irq, int chip_idx, int find_cnt);
++static int netdev_pwr_event(void *dev_instance, int event);
++
++/* Chips prior to the 803 have an external MII transceiver. */
++enum chip_capability_flags { HasMIIXcvr=1, HasChipXcvr=2 };
++
++#ifdef USE_IO_OPS
++#define PCI_IOTYPE (PCI_USES_MASTER | PCI_USES_IO | PCI_ADDR0)
++#define PCI_IOSIZE 256
++#else
++#define PCI_IOTYPE (PCI_USES_MASTER | PCI_USES_MEM | PCI_ADDR1)
++#define PCI_IOSIZE 1024
++#endif
++
++static struct pci_id_info pci_id_tbl[] = {
++ {"Myson mtd803 Fast Ethernet", {0x08031516, 0xffffffff, },
++ PCI_IOTYPE, PCI_IOSIZE, HasChipXcvr},
++ {"Myson mtd891 Gigabit Ethernet", {0x08911516, 0xffffffff, },
++ PCI_IOTYPE, PCI_IOSIZE, HasChipXcvr},
++ {0,}, /* 0 terminated list. */
++};
++
++struct drv_id_info myson803_drv_id = {
++ "myson803", 0, PCI_CLASS_NETWORK_ETHERNET<<8, pci_id_tbl, myson_probe1,
++ netdev_pwr_event };
++
++/* This driver was written to use PCI memory space, however x86-oriented
++ hardware sometimes works only with I/O space accesses. */
++#ifdef USE_IO_OPS
++#undef readb
++#undef readw
++#undef readl
++#undef writeb
++#undef writew
++#undef writel
++#define readb inb
++#define readw inw
++#define readl inl
++#define writeb outb
++#define writew outw
++#define writel outl
++#endif
++
++/* Offsets to the various registers.
++ Most accesses must be longword aligned. */
++enum register_offsets {
++ StationAddr=0x00, MulticastFilter0=0x08, MulticastFilter1=0x0C,
++ FlowCtrlAddr=0x10, RxConfig=0x18, TxConfig=0x1a, PCIBusCfg=0x1c,
++ TxStartDemand=0x20, RxStartDemand=0x24,
++ RxCurrentPtr=0x28, TxRingPtr=0x2c, RxRingPtr=0x30,
++ IntrStatus=0x34, IntrEnable=0x38,
++ FlowCtrlThreshold=0x3c,
++ MIICtrl=0x40, EECtrl=0x40, RxErrCnts=0x44, TxErrCnts=0x48,
++ PHYMgmt=0x4c,
++};
++
++/* Bits in the interrupt status/mask registers. */
++enum intr_status_bits {
++ IntrRxErr=0x0002, IntrRxDone=0x0004, IntrTxDone=0x0008,
++ IntrTxEmpty=0x0010, IntrRxEmpty=0x0020, StatsMax=0x0040, RxEarly=0x0080,
++ TxEarly=0x0100, RxOverflow=0x0200, TxUnderrun=0x0400,
++ IntrPCIErr=0x2000, NWayDone=0x4000, LinkChange=0x8000,
++};
++
++/* Bits in the RxMode (np->txrx_config) register. */
++enum rx_mode_bits {
++ RxEnable=0x01, RxFilter=0xfe,
++ AcceptErr=0x02, AcceptRunt=0x08, AcceptBroadcast=0x40,
++ AcceptMulticast=0x20, AcceptAllPhys=0x80, AcceptMyPhys=0x00,
++ RxFlowCtrl=0x2000,
++ TxEnable=0x40000, TxModeFDX=0x00100000, TxThreshold=0x00e00000,
++};
++
++/* Misc. bits. */
++enum misc_bits {
++ BCR_Reset=1, /* PCIBusCfg */
++ TxThresholdInc=0x200000,
++};
++
++/* The Rx and Tx buffer descriptors. */
++/* Note that using only 32 bit fields simplifies conversion to big-endian
++ architectures. */
++struct netdev_desc {
++ u32 status;
++ u32 ctrl_length;
++ u32 buf_addr;
++ u32 next_desc;
++};
++
++/* Bits in network_desc.status */
++enum desc_status_bits {
++ DescOwn=0x80000000,
++ RxDescStartPacket=0x0800, RxDescEndPacket=0x0400, RxDescWholePkt=0x0c00,
++ RxDescErrSum=0x80, RxErrRunt=0x40, RxErrLong=0x20, RxErrFrame=0x10,
++ RxErrCRC=0x08, RxErrCode=0x04,
++ TxErrAbort=0x2000, TxErrCarrier=0x1000, TxErrLate=0x0800,
++ TxErr16Colls=0x0400, TxErrDefer=0x0200, TxErrHeartbeat=0x0100,
++ TxColls=0x00ff,
++};
++/* Bits in network_desc.ctrl_length */
++enum ctrl_length_bits {
++ TxIntrOnDone=0x80000000, TxIntrOnFIFO=0x40000000,
++ TxDescEndPacket=0x20000000, TxDescStartPacket=0x10000000,
++ TxAppendCRC=0x08000000, TxPadTo64=0x04000000, TxNormalPkt=0x3C000000,
++};
++
++#define PRIV_ALIGN 15 /* Required alignment mask */
++/* Use __attribute__((aligned (L1_CACHE_BYTES))) to maintain alignment
++ within the structure. */
++struct netdev_private {
++ /* Descriptor rings first for alignment. */
++ struct netdev_desc rx_ring[RX_RING_SIZE];
++ struct netdev_desc tx_ring[TX_RING_SIZE];
++ struct net_device *next_module; /* Link for devices of this type. */
++ void *priv_addr; /* Unaligned address for kfree */
++ /* The addresses of receive-in-place skbuffs. */
++ struct sk_buff* rx_skbuff[RX_RING_SIZE];
++ /* The saved address of a sent-in-place packet/buffer, for later free(). */
++ struct sk_buff* tx_skbuff[TX_RING_SIZE];
++ struct net_device_stats stats;
++ struct timer_list timer; /* Media monitoring timer. */
++ /* Frequently used values: keep some adjacent for cache effect. */
++ int msg_level;
++ int max_interrupt_work;
++ int intr_enable;
++ int chip_id, drv_flags;
++ struct pci_dev *pci_dev;
++
++ struct netdev_desc *rx_head_desc;
++ unsigned int cur_rx, dirty_rx; /* Producer/consumer ring indices */
++ unsigned int rx_buf_sz; /* Based on MTU+slack. */
++ int rx_copybreak;
++
++ unsigned int cur_tx, dirty_tx;
++ unsigned int tx_full:1; /* The Tx queue is full. */
++ unsigned int rx_died:1;
++ unsigned int txrx_config;
++
++ /* These values keep track of the transceiver/media in use. */
++ unsigned int full_duplex:1; /* Full-duplex operation requested. */
++ unsigned int duplex_lock:1;
++ unsigned int medialock:1; /* Do not sense media. */
++ unsigned int default_port; /* Last dev->if_port value. */
++
++ unsigned int mcast_filter[2];
++ int multicast_filter_limit;
++
++ /* MII transceiver section. */
++ int mii_cnt; /* MII device addresses. */
++ u16 advertising; /* NWay media advertisement */
++ unsigned char phys[2]; /* MII device addresses. */
++};
++
++static int eeprom_read(long ioaddr, int location);
++static int mdio_read(struct net_device *dev, int phy_id,
++ unsigned int location);
++static void mdio_write(struct net_device *dev, int phy_id,
++ unsigned int location, int value);
++static int netdev_open(struct net_device *dev);
++static void check_duplex(struct net_device *dev);
++static void netdev_timer(unsigned long data);
++static void tx_timeout(struct net_device *dev);
++static void init_ring(struct net_device *dev);
++static int start_tx(struct sk_buff *skb, struct net_device *dev);
++static void intr_handler(int irq, void *dev_instance, struct pt_regs *regs);
++static void netdev_error(struct net_device *dev, int intr_status);
++static int netdev_rx(struct net_device *dev);
++static void netdev_error(struct net_device *dev, int intr_status);
++static void set_rx_mode(struct net_device *dev);
++static struct net_device_stats *get_stats(struct net_device *dev);
++static int mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
++static int netdev_close(struct net_device *dev);
++
++
++
++/* A list of our installed devices, for removing the driver module. */
++static struct net_device *root_net_dev = NULL;
++
++#ifndef MODULE
++int myson803_probe(struct net_device *dev)
++{
++ if (pci_drv_register(&myson803_drv_id, dev) < 0)
++ return -ENODEV;
++ if (debug >= NETIF_MSG_DRV) /* Emit version even if no cards detected. */
++ printk(KERN_INFO "%s" KERN_INFO "%s", version1, version2);
++ return 0;
++}
++#endif
++
++static void *myson_probe1(struct pci_dev *pdev, void *init_dev,
++ long ioaddr, int irq, int chip_idx, int card_idx)
++{
++ struct net_device *dev;
++ struct netdev_private *np;
++ void *priv_mem;
++ int i, option = card_idx < MAX_UNITS ? options[card_idx] : 0;
++
++ dev = init_etherdev(init_dev, 0);
++ if (!dev)
++ return NULL;
++
++ printk(KERN_INFO "%s: %s at 0x%lx, ",
++ dev->name, pci_id_tbl[chip_idx].name, ioaddr);
++
++ for (i = 0; i < 3; i++)
++ ((u16 *)dev->dev_addr)[i] = le16_to_cpu(eeprom_read(ioaddr, i + 8));
++ if (memcmp(dev->dev_addr, "\0\0\0\0\0", 6) == 0) {
++ /* Fill a temp addr with the "locally administered" bit set. */
++ memcpy(dev->dev_addr, ">Linux", 6);
++ }
++ for (i = 0; i < 5; i++)
++ printk("%2.2x:", dev->dev_addr[i]);
++ printk("%2.2x, IRQ %d.\n", dev->dev_addr[i], irq);
++
++#if ! defined(final_version) /* Dump the EEPROM contents during development. */
++ if (debug > 4)
++ for (i = 0; i < 0x40; i++)
++ printk("%4.4x%s",
++ eeprom_read(ioaddr, i), i % 16 != 15 ? " " : "\n");
++#endif
++
++ /* Make certain elements e.g. descriptor lists are aligned. */
++ priv_mem = kmalloc(sizeof(*np) + PRIV_ALIGN, GFP_KERNEL);
++ /* Check for the very unlikely case of no memory. */
++ if (priv_mem == NULL)
++ return NULL;
++
++ /* Do bogusness checks before this point.
++ We do a request_region() only to register /proc/ioports info. */
++#ifdef USE_IO_OPS
++ request_region(ioaddr, pci_id_tbl[chip_idx].io_size, dev->name);
++#endif
++
++ /* Reset the chip to erase previous misconfiguration. */
++ writel(BCR_Reset, ioaddr + PCIBusCfg);
++
++ dev->base_addr = ioaddr;
++ dev->irq = irq;
++
++ dev->priv = np = (void *)(((long)priv_mem + PRIV_ALIGN) & ~PRIV_ALIGN);
++ memset(np, 0, sizeof(*np));
++ np->priv_addr = priv_mem;
++
++ np->next_module = root_net_dev;
++ root_net_dev = dev;
++
++ np->pci_dev = pdev;
++ np->chip_id = chip_idx;
++ np->drv_flags = pci_id_tbl[chip_idx].drv_flags;
++ np->msg_level = (1 << debug) - 1;
++ np->rx_copybreak = rx_copybreak;
++ np->max_interrupt_work = max_interrupt_work;
++ np->multicast_filter_limit = multicast_filter_limit;
++
++ if (dev->mem_start)
++ option = dev->mem_start;
++
++ /* The lower four bits are the media type. */
++ if (option > 0) {
++ if (option & 0x220)
++ np->full_duplex = 1;
++ np->default_port = option & 0x3ff;
++ if (np->default_port)
++ np->medialock = 1;
++ }
++ if (card_idx < MAX_UNITS && full_duplex[card_idx] > 0)
++ np->full_duplex = 1;
++
++ if (np->full_duplex) {
++ if (np->msg_level & NETIF_MSG_PROBE)
++ printk(KERN_INFO "%s: Set to forced full duplex, autonegotiation"
++ " disabled.\n", dev->name);
++ np->duplex_lock = 1;
++ }
++
++ /* The chip-specific entries in the device structure. */
++ dev->open = &netdev_open;
++ dev->hard_start_xmit = &start_tx;
++ dev->stop = &netdev_close;
++ dev->get_stats = &get_stats;
++ dev->set_multicast_list = &set_rx_mode;
++ dev->do_ioctl = &mii_ioctl;
++
++ if (np->drv_flags & HasMIIXcvr) {
++ int phy, phy_idx = 0;
++ for (phy = 0; phy < 32 && phy_idx < 4; phy++) {
++ int mii_status = mdio_read(dev, phy, 1);
++ if (mii_status != 0xffff && mii_status != 0x0000) {
++ np->phys[phy_idx++] = phy;
++ np->advertising = mdio_read(dev, phy, 4);
++ if (np->msg_level & NETIF_MSG_PROBE)
++ printk(KERN_INFO "%s: MII PHY found at address %d, status "
++ "0x%4.4x advertising %4.4x.\n",
++ dev->name, phy, mii_status, np->advertising);
++ }
++ }
++ np->mii_cnt = phy_idx;
++ }
++ if (np->drv_flags & HasChipXcvr) {
++ np->phys[np->mii_cnt++] = 32;
++ printk(KERN_INFO "%s: Internal PHY status 0x%4.4x"
++ " advertising %4.4x.\n",
++ dev->name, mdio_read(dev, 32, 1), mdio_read(dev, 32, 4));
++ }
++ /* Allow forcing the media type. */
++ if (np->default_port & 0x330) {
++ np->medialock = 1;
++ if (option & 0x220)
++ np->full_duplex = 1;
++ printk(KERN_INFO " Forcing %dMbs %s-duplex operation.\n",
++ (option & 0x300 ? 100 : 10),
++ (np->full_duplex ? "full" : "half"));
++ if (np->mii_cnt)
++ mdio_write(dev, np->phys[0], 0,
++ ((option & 0x300) ? 0x2000 : 0) | /* 100mbps? */
++ (np->full_duplex ? 0x0100 : 0)); /* Full duplex? */
++ }
++
++ return dev;
++}
++
++
++/* Read the EEPROM and MII Management Data I/O (MDIO) interfaces. These are
++ often serial bit streams generated by the host processor.
++ The example below is for the common 93c46 EEPROM, 64 16 bit words. */
++
++/* This "delay" forces out buffered PCI writes.
++ The udelay() is unreliable for timing, but some Myson NICs shipped with
++ absurdly slow EEPROMs.
++ */
++#define eeprom_delay(ee_addr) readl(ee_addr); udelay(2); readl(ee_addr)
++
++enum EEPROM_Ctrl_Bits {
++ EE_ShiftClk=0x04<<16, EE_ChipSelect=0x88<<16,
++ EE_DataOut=0x02<<16, EE_DataIn=0x01<<16,
++ EE_Write0=0x88<<16, EE_Write1=0x8a<<16,
++};
++
++/* The EEPROM commands always start with 01.. preamble bits.
++ Commands are prepended to the variable-length address. */
++enum EEPROM_Cmds { EE_WriteCmd=5, EE_ReadCmd=6, EE_EraseCmd=7, };
++
++static int eeprom_read(long addr, int location)
++{
++ int i;
++ int retval = 0;
++ long ee_addr = addr + EECtrl;
++ int read_cmd = location | (EE_ReadCmd<<6);
++
++ writel(EE_ChipSelect, ee_addr);
++
++ /* Shift the read command bits out. */
++ for (i = 10; i >= 0; i--) {
++ int dataval = (read_cmd & (1 << i)) ? EE_Write1 : EE_Write0;
++ writel(dataval, ee_addr);
++ eeprom_delay(ee_addr);
++ writel(dataval | EE_ShiftClk, ee_addr);
++ eeprom_delay(ee_addr);
++ }
++ writel(EE_ChipSelect, ee_addr);
++ eeprom_delay(ee_addr);
++
++ for (i = 16; i > 0; i--) {
++ writel(EE_ChipSelect | EE_ShiftClk, ee_addr);
++ eeprom_delay(ee_addr);
++ retval = (retval << 1) | ((readl(ee_addr) & EE_DataIn) ? 1 : 0);
++ writel(EE_ChipSelect, ee_addr);
++ eeprom_delay(ee_addr);
++ }
++
++ /* Terminate the EEPROM access. */
++ writel(EE_ChipSelect, ee_addr);
++ writel(0, ee_addr);
++ return retval;
++}
++
++/* MII transceiver control section.
++ Read and write the MII registers using software-generated serial
++ MDIO protocol. See the MII specifications or DP83840A data sheet
++ for details.
++
++ The maximum data clock rate is 2.5 Mhz.
++ The timing is decoupled from the processor clock by flushing the write
++ from the CPU write buffer with a following read, and using PCI
++ transaction timing. */
++#define mdio_in(mdio_addr) readl(mdio_addr)
++#define mdio_out(value, mdio_addr) writel(value, mdio_addr)
++#define mdio_delay(mdio_addr) readl(mdio_addr)
++
++/* Set iff a MII transceiver on any interface requires mdio preamble.
++   This is only set with older transceivers, so the extra
++ code size of a per-interface flag is not worthwhile. */
++static char mii_preamble_required = 0;
++
++enum mii_reg_bits {
++ MDIO_ShiftClk=0x0001, MDIO_Data=0x0002, MDIO_EnbOutput=0x0004,
++};
++#define MDIO_EnbIn (0)
++#define MDIO_WRITE0 (MDIO_EnbOutput)
++#define MDIO_WRITE1 (MDIO_Data | MDIO_EnbOutput)
++
++/* Generate the preamble required for initial synchronization and
++ a few older transceivers. */
++static void mdio_sync(long mdio_addr)
++{
++ int bits = 32;
++
++ /* Establish sync by sending at least 32 logic ones. */
++ while (--bits >= 0) {
++ mdio_out(MDIO_WRITE1, mdio_addr);
++ mdio_delay(mdio_addr);
++ mdio_out(MDIO_WRITE1 | MDIO_ShiftClk, mdio_addr);
++ mdio_delay(mdio_addr);
++ }
++}
++
++static int mdio_read(struct net_device *dev, int phy_id, unsigned int location)
++{
++ long ioaddr = dev->base_addr;
++ long mdio_addr = ioaddr + MIICtrl;
++ int mii_cmd = (0xf6 << 10) | (phy_id << 5) | location;
++ int i, retval = 0;
++
++ if (location >= 32)
++ return 0xffff;
++ if (phy_id >= 32) {
++ if (location < 6)
++ return readw(ioaddr + PHYMgmt + location*2);
++ else if (location == 16)
++ return readw(ioaddr + PHYMgmt + 6*2);
++ else if (location == 17)
++ return readw(ioaddr + PHYMgmt + 7*2);
++ else if (location == 18)
++ return readw(ioaddr + PHYMgmt + 10*2);
++ else
++ return 0;
++ }
++
++ if (mii_preamble_required)
++ mdio_sync(mdio_addr);
++
++ /* Shift the read command bits out. */
++ for (i = 15; i >= 0; i--) {
++ int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;
++
++ mdio_out(dataval, mdio_addr);
++ mdio_delay(mdio_addr);
++ mdio_out(dataval | MDIO_ShiftClk, mdio_addr);
++ mdio_delay(mdio_addr);
++ }
++ /* Read the two transition, 16 data, and wire-idle bits. */
++ for (i = 19; i > 0; i--) {
++ mdio_out(MDIO_EnbIn, mdio_addr);
++ mdio_delay(mdio_addr);
++ retval = (retval << 1) | ((mdio_in(mdio_addr) & MDIO_Data) ? 1 : 0);
++ mdio_out(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
++ mdio_delay(mdio_addr);
++ }
++ return (retval>>1) & 0xffff;
++}
++
++static void mdio_write(struct net_device *dev, int phy_id,
++ unsigned int location, int value)
++{
++ struct netdev_private *np = (struct netdev_private *)dev->priv;
++ long ioaddr = dev->base_addr;
++ long mdio_addr = ioaddr + MIICtrl;
++ int mii_cmd = (0x5002 << 16) | (phy_id << 23) | (location<<18) | value;
++ int i;
++
++ if (location == 4 && phy_id == np->phys[0])
++ np->advertising = value;
++ else if (location >= 32)
++ return;
++
++ if (phy_id == 32) {
++ if (location < 6)
++ writew(value, ioaddr + PHYMgmt + location*2);
++ else if (location == 16)
++ writew(value, ioaddr + PHYMgmt + 6*2);
++ else if (location == 17)
++ writew(value, ioaddr + PHYMgmt + 7*2);
++ return;
++ }
++
++ if (mii_preamble_required)
++ mdio_sync(mdio_addr);
++
++ /* Shift the command bits out. */
++ for (i = 31; i >= 0; i--) {
++ int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;
++
++ mdio_out(dataval, mdio_addr);
++ mdio_delay(mdio_addr);
++ mdio_out(dataval | MDIO_ShiftClk, mdio_addr);
++ mdio_delay(mdio_addr);
++ }
++ /* Clear out extra bits. */
++ for (i = 2; i > 0; i--) {
++ mdio_out(MDIO_EnbIn, mdio_addr);
++ mdio_delay(mdio_addr);
++ mdio_out(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
++ mdio_delay(mdio_addr);
++ }
++ return;
++}
++
++
++static int netdev_open(struct net_device *dev)
++{
++ struct netdev_private *np = (struct netdev_private *)dev->priv;
++ long ioaddr = dev->base_addr;
++
++ /* Some chips may need to be reset. */
++
++ MOD_INC_USE_COUNT;
++
++ writel(~0, ioaddr + IntrStatus);
++
++ /* Note that both request_irq() and init_ring() call kmalloc(), which
++ break the global kernel lock protecting this routine. */
++ if (request_irq(dev->irq, &intr_handler, SA_SHIRQ, dev->name, dev)) {
++ MOD_DEC_USE_COUNT;
++ return -EAGAIN;
++ }
++
++ if (np->msg_level & NETIF_MSG_IFUP)
++ printk(KERN_DEBUG "%s: netdev_open() irq %d.\n",
++ dev->name, dev->irq);
++
++ init_ring(dev);
++
++ writel(virt_to_bus(np->rx_ring), ioaddr + RxRingPtr);
++ writel(virt_to_bus(np->tx_ring), ioaddr + TxRingPtr);
++
++ /* Address register must be written as words. */
++ writel(cpu_to_le32(cpu_to_le32(get_unaligned((u32 *)dev->dev_addr))),
++ ioaddr + StationAddr);
++ writel(cpu_to_le16(cpu_to_le16(get_unaligned((u16 *)(dev->dev_addr+4)))),
++ ioaddr + StationAddr + 4);
++ /* Set the flow control address, 01:80:c2:00:00:01. */
++ writel(0x00c28001, ioaddr + FlowCtrlAddr);
++ writel(0x00000100, ioaddr + FlowCtrlAddr + 4);
++
++ /* Initialize other registers. */
++ /* Configure the PCI bus bursts and FIFO thresholds. */
++ writel(0x01f8, ioaddr + PCIBusCfg);
++
++ if (dev->if_port == 0)
++ dev->if_port = np->default_port;
++
++ np->txrx_config = TxEnable | RxEnable | RxFlowCtrl | 0x00600000;
++ np->mcast_filter[0] = np->mcast_filter[1] = 0;
++ np->rx_died = 0;
++ set_rx_mode(dev);
++ netif_start_tx_queue(dev);
++
++ /* Enable interrupts by setting the interrupt mask. */
++ np->intr_enable = IntrRxDone | IntrRxErr | IntrRxEmpty | IntrTxDone
++ | IntrTxEmpty | StatsMax | RxOverflow | TxUnderrun | IntrPCIErr
++ | NWayDone | LinkChange;
++ writel(np->intr_enable, ioaddr + IntrEnable);
++
++ if (np->msg_level & NETIF_MSG_IFUP)
++ printk(KERN_DEBUG "%s: Done netdev_open(), PHY status: %x %x.\n",
++ dev->name, (int)readw(ioaddr + PHYMgmt),
++ (int)readw(ioaddr + PHYMgmt + 2));
++
++ /* Set the timer to check for link beat. */
++ init_timer(&np->timer);
++ np->timer.expires = jiffies + 3*HZ;
++ np->timer.data = (unsigned long)dev;
++ np->timer.function = &netdev_timer; /* timer handler */
++ add_timer(&np->timer);
++
++ return 0;
++}
++
++static void check_duplex(struct net_device *dev)
++{
++ struct netdev_private *np = (struct netdev_private *)dev->priv;
++ long ioaddr = dev->base_addr;
++ int new_tx_mode = np->txrx_config;
++
++ if (np->medialock) {
++ } else {
++ int mii_reg5 = mdio_read(dev, np->phys[0], 5);
++ int negotiated = mii_reg5 & np->advertising;
++ int duplex = (negotiated & 0x0100) || (negotiated & 0x01C0) == 0x0040;
++ if (np->duplex_lock || mii_reg5 == 0xffff)
++ return;
++ if (duplex)
++ new_tx_mode |= TxModeFDX;
++ if (np->full_duplex != duplex) {
++ np->full_duplex = duplex;
++ if (np->msg_level & NETIF_MSG_LINK)
++ printk(KERN_INFO "%s: Setting %s-duplex based on MII #%d"
++ " negotiated capability %4.4x.\n", dev->name,
++ duplex ? "full" : "half", np->phys[0], negotiated);
++ }
++ }
++ if (np->txrx_config != new_tx_mode)
++ writel(new_tx_mode, ioaddr + RxConfig);
++}
++
++static void netdev_timer(unsigned long data)
++{
++ struct net_device *dev = (struct net_device *)data;
++ struct netdev_private *np = (struct netdev_private *)dev->priv;
++ long ioaddr = dev->base_addr;
++ int next_tick = 10*HZ;
++
++ if (np->msg_level & NETIF_MSG_TIMER) {
++ printk(KERN_DEBUG "%s: Media selection timer tick, status %8.8x.\n",
++ dev->name, (int)readw(ioaddr + PHYMgmt + 10));
++ }
++ /* This will either have a small false-trigger window or will not catch
++ tbusy incorrectly set when the queue is empty. */
++ if (netif_queue_paused(dev) &&
++ np->cur_tx - np->dirty_tx > 1 &&
++ (jiffies - dev->trans_start) > TX_TIMEOUT) {
++ tx_timeout(dev);
++ }
++ /* It's dead Jim, no race condition. */
++ if (np->rx_died)
++ netdev_rx(dev);
++ check_duplex(dev);
++ np->timer.expires = jiffies + next_tick;
++ add_timer(&np->timer);
++}
++
++static void tx_timeout(struct net_device *dev)
++{
++ struct netdev_private *np = (struct netdev_private *)dev->priv;
++ long ioaddr = dev->base_addr;
++
++ printk(KERN_WARNING "%s: Transmit timed out, status %8.8x,"
++ " resetting...\n", dev->name, (int)readl(ioaddr + IntrStatus));
++
++ if (np->msg_level & NETIF_MSG_TX_ERR) {
++ int i;
++ printk(KERN_DEBUG " Rx ring %p: ", np->rx_ring);
++ for (i = 0; i < RX_RING_SIZE; i++)
++ printk(" %8.8x", (unsigned int)np->rx_ring[i].status);
++ printk("\n"KERN_DEBUG" Tx ring %p: ", np->tx_ring);
++ for (i = 0; i < TX_RING_SIZE; i++)
++ printk(" %8.8x", np->tx_ring[i].status);
++ printk("\n");
++ }
++
++ /* Stop and restart the chip's Tx processes . */
++ writel(np->txrx_config & ~TxEnable, ioaddr + RxConfig);
++ writel(virt_to_bus(np->tx_ring + (np->dirty_tx%TX_RING_SIZE)),
++ ioaddr + TxRingPtr);
++ writel(np->txrx_config, ioaddr + RxConfig);
++ /* Trigger an immediate transmit demand. */
++ writel(0, dev->base_addr + TxStartDemand);
++
++ dev->trans_start = jiffies;
++ np->stats.tx_errors++;
++ return;
++}
++
++
++/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
++static void init_ring(struct net_device *dev)
++{
++ struct netdev_private *np = (struct netdev_private *)dev->priv;
++ int i;
++
++ np->tx_full = 0;
++ np->cur_rx = np->cur_tx = 0;
++ np->dirty_rx = np->dirty_tx = 0;
++
++ np->rx_buf_sz = (dev->mtu <= 1532 ? PKT_BUF_SZ : dev->mtu + 4);
++ np->rx_head_desc = &np->rx_ring[0];
++
++ /* Initialize all Rx descriptors. */
++ for (i = 0; i < RX_RING_SIZE; i++) {
++ np->rx_ring[i].ctrl_length = cpu_to_le32(np->rx_buf_sz);
++ np->rx_ring[i].status = 0;
++ np->rx_ring[i].next_desc = virt_to_le32desc(&np->rx_ring[i+1]);
++ np->rx_skbuff[i] = 0;
++ }
++ /* Mark the last entry as wrapping the ring. */
++ np->rx_ring[i-1].next_desc = virt_to_le32desc(&np->rx_ring[0]);
++
++ /* Fill in the Rx buffers. Handle allocation failure gracefully. */
++ for (i = 0; i < RX_RING_SIZE; i++) {
++ struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz);
++ np->rx_skbuff[i] = skb;
++ if (skb == NULL)
++ break;
++ skb->dev = dev; /* Mark as being used by this device. */
++ np->rx_ring[i].buf_addr = virt_to_le32desc(skb->tail);
++ np->rx_ring[i].status = cpu_to_le32(DescOwn);
++ }
++ np->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
++
++ for (i = 0; i < TX_RING_SIZE; i++) {
++ np->tx_skbuff[i] = 0;
++ np->tx_ring[i].status = 0;
++ np->tx_ring[i].next_desc = virt_to_le32desc(&np->tx_ring[i+1]);
++ }
++ np->tx_ring[i-1].next_desc = virt_to_le32desc(&np->tx_ring[0]);
++ return;
++}
++
++static int start_tx(struct sk_buff *skb, struct net_device *dev)
++{
++ struct netdev_private *np = (struct netdev_private *)dev->priv;
++ unsigned entry;
++
++ /* Block a timer-based transmit from overlapping. This happens when
++ packets are presumed lost, and we use this check the Tx status. */
++ if (netif_pause_tx_queue(dev) != 0) {
++ /* This watchdog code is redundant with the media monitor timer. */
++ if (jiffies - dev->trans_start > TX_TIMEOUT)
++ tx_timeout(dev);
++ return 1;
++ }
++
++ /* Note: Ordering is important here, set the field with the
++ "ownership" bit last, and only then increment cur_tx. */
++
++ /* Calculate the next Tx descriptor entry. */
++ entry = np->cur_tx % TX_RING_SIZE;
++
++ np->tx_skbuff[entry] = skb;
++
++ np->tx_ring[entry].buf_addr = virt_to_le32desc(skb->data);
++ np->tx_ring[entry].ctrl_length =
++ cpu_to_le32(TxIntrOnDone | TxNormalPkt | (skb->len << 11) | skb->len);
++ np->tx_ring[entry].status = cpu_to_le32(DescOwn);
++ np->cur_tx++;
++
++ /* On some architectures: explicitly flushing cache lines here speeds
++ operation. */
++
++ if (np->cur_tx - np->dirty_tx >= TX_QUEUE_LEN - 1) {
++ np->tx_full = 1;
++ /* Check for a just-cleared queue. */
++ if (np->cur_tx - (volatile unsigned int)np->dirty_tx
++ < TX_QUEUE_LEN - 2) {
++ np->tx_full = 0;
++ netif_unpause_tx_queue(dev);
++ } else
++ netif_stop_tx_queue(dev);
++ } else
++ netif_unpause_tx_queue(dev); /* Typical path */
++ /* Wake the potentially-idle transmit channel. */
++ writel(0, dev->base_addr + TxStartDemand);
++
++ dev->trans_start = jiffies;
++
++ if (np->msg_level & NETIF_MSG_TX_QUEUED) {
++ printk(KERN_DEBUG "%s: Transmit frame #%d queued in slot %d.\n",
++ dev->name, np->cur_tx, entry);
++ }
++ return 0;
++}
++
++/* The interrupt handler does all of the Rx thread work and cleans up
++ after the Tx thread. */
++static void intr_handler(int irq, void *dev_instance, struct pt_regs *rgs)
++{
++ struct net_device *dev = (struct net_device *)dev_instance;
++ struct netdev_private *np;
++ long ioaddr;
++ int boguscnt;
++
++#ifndef final_version /* Can never occur. */
++ if (dev == NULL) {
++ printk (KERN_ERR "Netdev interrupt handler(): IRQ %d for unknown "
++ "device.\n", irq);
++ return;
++ }
++#endif
++
++ ioaddr = dev->base_addr;
++ np = (struct netdev_private *)dev->priv;
++ boguscnt = np->max_interrupt_work;
++
++#if defined(__i386__) && LINUX_VERSION_CODE < 0x020300
++ /* A lock to prevent simultaneous entry bug on Intel SMP machines. */
++ if (test_and_set_bit(0, (void*)&dev->interrupt)) {
++ printk(KERN_ERR"%s: SMP simultaneous entry of an interrupt handler.\n",
++ dev->name);
++ dev->interrupt = 0; /* Avoid halting machine. */
++ return;
++ }
++#endif
++
++ do {
++ u32 intr_status = readl(ioaddr + IntrStatus);
++
++ /* Acknowledge all of the current interrupt sources ASAP. */
++ writel(intr_status, ioaddr + IntrStatus);
++
++ if (np->msg_level & NETIF_MSG_INTR)
++ printk(KERN_DEBUG "%s: Interrupt, status %4.4x.\n",
++ dev->name, intr_status);
++
++ if (intr_status == 0)
++ break;
++
++ if (intr_status & IntrRxDone)
++ netdev_rx(dev);
++
++ for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
++ int entry = np->dirty_tx % TX_RING_SIZE;
++ int tx_status = le32_to_cpu(np->tx_ring[entry].status);
++ if (tx_status & DescOwn)
++ break;
++ if (np->msg_level & NETIF_MSG_TX_DONE)
++ printk(KERN_DEBUG "%s: Transmit done, Tx status %8.8x.\n",
++ dev->name, tx_status);
++ if (tx_status & (TxErrAbort | TxErrCarrier | TxErrLate
++ | TxErr16Colls | TxErrHeartbeat)) {
++ if (np->msg_level & NETIF_MSG_TX_ERR)
++ printk(KERN_DEBUG "%s: Transmit error, Tx status %8.8x.\n",
++ dev->name, tx_status);
++ np->stats.tx_errors++;
++ if (tx_status & TxErrCarrier) np->stats.tx_carrier_errors++;
++ if (tx_status & TxErrLate) np->stats.tx_window_errors++;
++ if (tx_status & TxErrHeartbeat) np->stats.tx_heartbeat_errors++;
++#ifdef ETHER_STATS
++ if (tx_status & TxErr16Colls) np->stats.collisions16++;
++ if (tx_status & TxErrAbort) np->stats.tx_aborted_errors++;
++#else
++ if (tx_status & (TxErr16Colls|TxErrAbort))
++ np->stats.tx_aborted_errors++;
++#endif
++ } else {
++ np->stats.tx_packets++;
++ np->stats.collisions += tx_status & TxColls;
++#if LINUX_VERSION_CODE > 0x20127
++ np->stats.tx_bytes += np->tx_skbuff[entry]->len;
++#endif
++#ifdef ETHER_STATS
++ if (tx_status & TxErrDefer) np->stats.tx_deferred++;
++#endif
++ }
++ /* Free the original skb. */
++ dev_free_skb_irq(np->tx_skbuff[entry]);
++ np->tx_skbuff[entry] = 0;
++ }
++ /* Note the 4 slot hysteresis to mark the queue non-full. */
++ if (np->tx_full && np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
++ /* The ring is no longer full, allow new TX entries. */
++ np->tx_full = 0;
++ netif_resume_tx_queue(dev);
++ }
++
++ /* Abnormal error summary/uncommon events handlers. */
++ if (intr_status & (IntrRxErr | IntrRxEmpty | StatsMax | RxOverflow
++ | TxUnderrun | IntrPCIErr | NWayDone | LinkChange))
++ netdev_error(dev, intr_status);
++
++ if (--boguscnt < 0) {
++ printk(KERN_WARNING "%s: Too much work at interrupt, "
++ "status=0x%4.4x.\n",
++ dev->name, intr_status);
++ break;
++ }
++ } while (1);
++
++ if (np->msg_level & NETIF_MSG_INTR)
++ printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
++ dev->name, (int)readl(ioaddr + IntrStatus));
++
++#if defined(__i386__) && LINUX_VERSION_CODE < 0x020300
++ clear_bit(0, (void*)&dev->interrupt);
++#endif
++ return;
++}
++
++/* This routine is logically part of the interrupt handler, but separated
++ for clarity and better register allocation. */
++static int netdev_rx(struct net_device *dev)
++{
++ struct netdev_private *np = (struct netdev_private *)dev->priv;
++ int entry = np->cur_rx % RX_RING_SIZE;
++ int boguscnt = np->dirty_rx + RX_RING_SIZE - np->cur_rx;
++ int refilled = 0;
++
++ if (np->msg_level & NETIF_MSG_RX_STATUS) {
++ printk(KERN_DEBUG " In netdev_rx(), entry %d status %4.4x.\n",
++ entry, np->rx_ring[entry].status);
++ }
++
++ /* If EOP is set on the next entry, it's a new packet. Send it up. */
++ while ( ! (np->rx_head_desc->status & cpu_to_le32(DescOwn))) {
++ struct netdev_desc *desc = np->rx_head_desc;
++ u32 desc_status = le32_to_cpu(desc->status);
++
++ if (np->msg_level & NETIF_MSG_RX_STATUS)
++ printk(KERN_DEBUG " netdev_rx() status was %8.8x.\n",
++ desc_status);
++ if (--boguscnt < 0)
++ break;
++ if ((desc_status & RxDescWholePkt) != RxDescWholePkt) {
++ printk(KERN_WARNING "%s: Oversized Ethernet frame spanned "
++ "multiple buffers, entry %#x length %d status %4.4x!\n",
++ dev->name, np->cur_rx, desc_status >> 16, desc_status);
++ np->stats.rx_length_errors++;
++ } else if (desc_status & RxDescErrSum) {
++			/* There was an error. */
++ if (np->msg_level & NETIF_MSG_RX_ERR)
++ printk(KERN_DEBUG " netdev_rx() Rx error was %8.8x.\n",
++ desc_status);
++ np->stats.rx_errors++;
++ if (desc_status & (RxErrLong|RxErrRunt))
++ np->stats.rx_length_errors++;
++ if (desc_status & (RxErrFrame|RxErrCode))
++ np->stats.rx_frame_errors++;
++ if (desc_status & RxErrCRC)
++ np->stats.rx_crc_errors++;
++ } else {
++ struct sk_buff *skb;
++ /* Reported length should omit the CRC. */
++ u16 pkt_len = ((desc_status >> 16) & 0xfff) - 4;
++
++#ifndef final_version
++ if (np->msg_level & NETIF_MSG_RX_STATUS)
++ printk(KERN_DEBUG " netdev_rx() normal Rx pkt length %d"
++ " of %d, bogus_cnt %d.\n",
++ pkt_len, pkt_len, boguscnt);
++#endif
++ /* Check if the packet is long enough to accept without copying
++ to a minimally-sized skbuff. */
++ if (pkt_len < np->rx_copybreak
++ && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
++ skb->dev = dev;
++ skb_reserve(skb, 2); /* 16 byte align the IP header */
++ eth_copy_and_sum(skb, np->rx_skbuff[entry]->tail, pkt_len, 0);
++ skb_put(skb, pkt_len);
++ } else {
++ skb_put(skb = np->rx_skbuff[entry], pkt_len);
++ np->rx_skbuff[entry] = NULL;
++ }
++#ifndef final_version /* Remove after testing. */
++ /* You will want this info for the initial debug. */
++ if (np->msg_level & NETIF_MSG_PKTDATA)
++ printk(KERN_DEBUG " Rx data %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:"
++ "%2.2x %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x %2.2x%2.2x "
++ "%d.%d.%d.%d.\n",
++ skb->data[0], skb->data[1], skb->data[2], skb->data[3],
++ skb->data[4], skb->data[5], skb->data[6], skb->data[7],
++ skb->data[8], skb->data[9], skb->data[10],
++ skb->data[11], skb->data[12], skb->data[13],
++ skb->data[14], skb->data[15], skb->data[16],
++ skb->data[17]);
++#endif
++ skb->mac.raw = skb->data;
++ /* Protocol lookup disabled until verified with all kernels. */
++ if (0 && ntohs(skb->mac.ethernet->h_proto) >= 0x0800) {
++ struct ethhdr *eth = skb->mac.ethernet;
++ skb->protocol = eth->h_proto;
++ if (desc_status & 0x1000) {
++ if ((dev->flags & IFF_PROMISC) &&
++ memcmp(eth->h_dest, dev->dev_addr, ETH_ALEN))
++ skb->pkt_type = PACKET_OTHERHOST;
++ } else if (desc_status & 0x2000)
++ skb->pkt_type = PACKET_BROADCAST;
++ else if (desc_status & 0x4000)
++ skb->pkt_type = PACKET_MULTICAST;
++ } else
++ skb->protocol = eth_type_trans(skb, dev);
++ netif_rx(skb);
++ dev->last_rx = jiffies;
++ np->stats.rx_packets++;
++#if LINUX_VERSION_CODE > 0x20127
++ np->stats.rx_bytes += pkt_len;
++#endif
++ }
++ entry = (++np->cur_rx) % RX_RING_SIZE;
++ np->rx_head_desc = &np->rx_ring[entry];
++ }
++
++ /* Refill the Rx ring buffers. */
++ for (; np->cur_rx - np->dirty_rx > 0; np->dirty_rx++) {
++ struct sk_buff *skb;
++ entry = np->dirty_rx % RX_RING_SIZE;
++ if (np->rx_skbuff[entry] == NULL) {
++ skb = dev_alloc_skb(np->rx_buf_sz);
++ np->rx_skbuff[entry] = skb;
++ if (skb == NULL)
++ break; /* Better luck next round. */
++ skb->dev = dev; /* Mark as being used by this device. */
++ np->rx_ring[entry].buf_addr = virt_to_le32desc(skb->tail);
++ }
++ np->rx_ring[entry].ctrl_length = cpu_to_le32(np->rx_buf_sz);
++ np->rx_ring[entry].status = cpu_to_le32(DescOwn);
++ refilled++;
++ }
++
++ /* Restart Rx engine if stopped. */
++ if (refilled) { /* Perhaps "&& np->rx_died" */
++ writel(0, dev->base_addr + RxStartDemand);
++ np->rx_died = 0;
++ }
++ return refilled;
++}
++
++static void netdev_error(struct net_device *dev, int intr_status)
++{
++ struct netdev_private *np = (struct netdev_private *)dev->priv;
++ long ioaddr = dev->base_addr;
++
++ if (intr_status & (LinkChange | NWayDone)) {
++ if (np->msg_level & NETIF_MSG_LINK)
++ printk(KERN_NOTICE "%s: Link changed: Autonegotiation advertising"
++ " %4.4x partner %4.4x.\n", dev->name,
++ mdio_read(dev, np->phys[0], 4),
++ mdio_read(dev, np->phys[0], 5));
++ /* Clear sticky bit first. */
++ readw(ioaddr + PHYMgmt + 2);
++ if (readw(ioaddr + PHYMgmt + 2) & 0x0004)
++ netif_link_up(dev);
++ else
++ netif_link_down(dev);
++ check_duplex(dev);
++ }
++ if ((intr_status & TxUnderrun)
++ && (np->txrx_config & TxThreshold) != TxThreshold) {
++ np->txrx_config += TxThresholdInc;
++ writel(np->txrx_config, ioaddr + RxConfig);
++ np->stats.tx_fifo_errors++;
++ }
++ if (intr_status & IntrRxEmpty) {
++ printk(KERN_WARNING "%s: Out of receive buffers: no free memory.\n",
++ dev->name);
++ /* Refill Rx descriptors */
++ np->rx_died = 1;
++ netdev_rx(dev);
++ }
++ if (intr_status & RxOverflow) {
++ printk(KERN_WARNING "%s: Receiver overflow.\n", dev->name);
++ np->stats.rx_over_errors++;
++ netdev_rx(dev); /* Refill Rx descriptors */
++ get_stats(dev); /* Empty dropped counter. */
++ }
++ if (intr_status & StatsMax) {
++ get_stats(dev);
++ }
++ if ((intr_status & ~(LinkChange|NWayDone|StatsMax|TxUnderrun|RxOverflow
++ |TxEarly|RxEarly|0x001e))
++ && (np->msg_level & NETIF_MSG_DRV))
++ printk(KERN_ERR "%s: Something Wicked happened! %4.4x.\n",
++ dev->name, intr_status);
++ /* Hmmmmm, it's not clear how to recover from PCI faults. */
++ if (intr_status & IntrPCIErr) {
++ const char *const pcierr[4] =
++ { "Parity Error", "Master Abort", "Target Abort", "Unknown Error" };
++ if (np->msg_level & NETIF_MSG_DRV)
++ printk(KERN_WARNING "%s: PCI Bus %s, %x.\n",
++ dev->name, pcierr[(intr_status>>11) & 3], intr_status);
++ }
++}
++
++/* We do not bother to spinlock statistics.
++ A window only exists if we have non-atomic adds, the error counts are
++ typically zero, and statistics are non-critical. */
++static struct net_device_stats *get_stats(struct net_device *dev)
++{
++ long ioaddr = dev->base_addr;
++ struct netdev_private *np = (struct netdev_private *)dev->priv;
++ unsigned int rxerrs = readl(ioaddr + RxErrCnts);
++ unsigned int txerrs = readl(ioaddr + TxErrCnts);
++
++ /* The chip only need report frames silently dropped. */
++ np->stats.rx_crc_errors += rxerrs >> 16;
++ np->stats.rx_missed_errors += rxerrs & 0xffff;
++
++ /* These stats are required when the descriptor is closed before Tx. */
++ np->stats.tx_aborted_errors += txerrs >> 24;
++ np->stats.tx_window_errors += (txerrs >> 16) & 0xff;
++ np->stats.collisions += txerrs & 0xffff;
++
++ return &np->stats;
++}
++
++/* Big-endian AUTODIN II ethernet CRC calculations.
++ This is slow but compact code. Do not use this routine for bulk data,
++ use a table-based routine instead.
++ This is common code and may be in the kernel with Linux 2.5+.
++*/
++static unsigned const ethernet_polynomial = 0x04c11db7U;
++static inline u32 ether_crc(int length, unsigned char *data)
++{
++ u32 crc = ~0;
++
++ while(--length >= 0) {
++ unsigned char current_octet = *data++;
++ int bit;
++ for (bit = 0; bit < 8; bit++, current_octet >>= 1)
++ crc = (crc << 1) ^
++ ((crc >> 31) ^ (current_octet & 1) ? ethernet_polynomial : 0); /* test old MSB; '(crc < 0)' is always false since crc is unsigned */
++ }
++ return crc;
++}
++
++static void set_rx_mode(struct net_device *dev)
++{
++ struct netdev_private *np = (struct netdev_private *)dev->priv;
++ long ioaddr = dev->base_addr;
++ u32 mc_filter[2]; /* Multicast hash filter */
++ u32 rx_mode;
++
++ if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
++ /* Unconditionally log net taps. */
++ printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n", dev->name);
++ mc_filter[1] = mc_filter[0] = ~0;
++ rx_mode = AcceptBroadcast | AcceptMulticast | AcceptAllPhys
++ | AcceptMyPhys;
++ } else if ((dev->mc_count > np->multicast_filter_limit)
++ || (dev->flags & IFF_ALLMULTI)) {
++ /* Too many to match, or accept all multicasts. */
++ mc_filter[1] = mc_filter[0] = ~0;
++ rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
++ } else {
++ struct dev_mc_list *mclist;
++ int i;
++ mc_filter[1] = mc_filter[0] = 0;
++ for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
++ i++, mclist = mclist->next) {
++ set_bit((ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26) & 0x3f,
++ mc_filter);
++ }
++ rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
++ }
++ if (mc_filter[0] != np->mcast_filter[0] ||
++ mc_filter[1] != np->mcast_filter[1]) {
++ writel(mc_filter[0], ioaddr + MulticastFilter0);
++ writel(mc_filter[1], ioaddr + MulticastFilter1);
++ np->mcast_filter[0] = mc_filter[0];
++ np->mcast_filter[1] = mc_filter[1];
++ }
++ if ((np->txrx_config & RxFilter) != rx_mode) {
++ np->txrx_config &= ~RxFilter;
++ np->txrx_config |= rx_mode;
++ writel(np->txrx_config, ioaddr + RxConfig);
++ }
++}
++
++/*
++ Handle user-level ioctl() calls.
++ We must use two numeric constants as the key because some clueless person
++ changed the value for the symbolic name.
++*/
++static int mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
++{
++ struct netdev_private *np = (struct netdev_private *)dev->priv;
++ u16 *data = (u16 *)&rq->ifr_data;
++ u32 *data32 = (void *)&rq->ifr_data;
++
++ switch(cmd) {
++ case 0x8947: case 0x89F0:
++ /* SIOCGMIIPHY: Get the address of the PHY in use. */
++ data[0] = np->phys[0];
++ /* Fall Through */
++ case 0x8948: case 0x89F1:
++ /* SIOCGMIIREG: Read the specified MII register. */
++ data[3] = mdio_read(dev, data[0], data[1]);
++ return 0;
++ case 0x8949: case 0x89F2:
++ /* SIOCSMIIREG: Write the specified MII register */
++ if (!capable(CAP_NET_ADMIN))
++ return -EPERM;
++ if (data[0] == np->phys[0]) {
++ u16 value = data[2];
++ switch (data[1]) {
++ case 0:
++ /* Check for autonegotiation on or reset. */
++ np->medialock = (value & 0x9000) ? 0 : 1;
++ if (np->medialock)
++ np->full_duplex = (value & 0x0100) ? 1 : 0;
++ break;
++ case 4: np->advertising = value; break;
++ }
++ /* Perhaps check_duplex(dev), depending on chip semantics. */
++ }
++ mdio_write(dev, data[0], data[1], data[2]);
++ return 0;
++ case SIOCGPARAMS:
++ data32[0] = np->msg_level;
++ data32[1] = np->multicast_filter_limit;
++ data32[2] = np->max_interrupt_work;
++ data32[3] = np->rx_copybreak;
++ return 0;
++ case SIOCSPARAMS:
++ if (!capable(CAP_NET_ADMIN))
++ return -EPERM;
++ np->msg_level = data32[0];
++ np->multicast_filter_limit = data32[1];
++ np->max_interrupt_work = data32[2];
++ np->rx_copybreak = data32[3];
++ return 0;
++ default:
++ return -EOPNOTSUPP;
++ }
++}
++
++static int netdev_close(struct net_device *dev)
++{
++ long ioaddr = dev->base_addr;
++ struct netdev_private *np = (struct netdev_private *)dev->priv;
++ int i;
++
++ netif_stop_tx_queue(dev);
++
++ if (np->msg_level & NETIF_MSG_IFDOWN) {
++ printk(KERN_DEBUG "%s: Shutting down ethercard, status was %8.8x.\n",
++ dev->name, (int)readl(ioaddr + RxConfig));
++ printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d, Rx %d / %d.\n",
++ dev->name, np->cur_tx, np->dirty_tx, np->cur_rx, np->dirty_rx);
++ }
++
++ /* Disable interrupts by clearing the interrupt mask. */
++ writel(0x0000, ioaddr + IntrEnable);
++
++ /* Stop the chip's Tx and Rx processes. */
++ np->txrx_config = 0;
++ writel(0, ioaddr + RxConfig);
++
++ del_timer(&np->timer);
++
++#ifdef __i386__
++ if (np->msg_level & NETIF_MSG_IFDOWN) {
++ printk("\n"KERN_DEBUG" Tx ring at %8.8x:\n",
++ (int)virt_to_bus(np->tx_ring));
++ for (i = 0; i < TX_RING_SIZE; i++)
++ printk(" #%d desc. %x %x %8.8x.\n",
++ i, np->tx_ring[i].status, np->tx_ring[i].ctrl_length,
++ np->tx_ring[i].buf_addr);
++ printk("\n"KERN_DEBUG " Rx ring %8.8x:\n",
++ (int)virt_to_bus(np->rx_ring));
++ for (i = 0; i < RX_RING_SIZE; i++) {
++ printk(KERN_DEBUG " #%d desc. %4.4x %4.4x %8.8x\n",
++ i, np->rx_ring[i].status, np->rx_ring[i].ctrl_length,
++ np->rx_ring[i].buf_addr);
++ }
++ }
++#endif /* __i386__ debugging only */
++
++ free_irq(dev->irq, dev);
++
++ /* Free all the skbuffs in the Rx queue. */
++ for (i = 0; i < RX_RING_SIZE; i++) {
++ np->rx_ring[i].status = 0;
++ np->rx_ring[i].buf_addr = 0xBADF00D0; /* An invalid address. */
++ if (np->rx_skbuff[i]) {
++#if LINUX_VERSION_CODE < 0x20100
++ np->rx_skbuff[i]->free = 1;
++#endif
++ dev_free_skb(np->rx_skbuff[i]);
++ }
++ np->rx_skbuff[i] = 0;
++ }
++ for (i = 0; i < TX_RING_SIZE; i++) {
++ if (np->tx_skbuff[i])
++ dev_free_skb(np->tx_skbuff[i]);
++ np->tx_skbuff[i] = 0;
++ }
++
++ MOD_DEC_USE_COUNT;
++
++ return 0;
++}
++
++static int netdev_pwr_event(void *dev_instance, int event)
++{
++ struct net_device *dev = dev_instance;
++ struct netdev_private *np = (struct netdev_private *)dev->priv;
++ long ioaddr = dev->base_addr;
++
++ if (np->msg_level & NETIF_MSG_LINK)
++ printk(KERN_DEBUG "%s: Handling power event %d.\n", dev->name, event);
++ switch(event) {
++ case DRV_ATTACH:
++ MOD_INC_USE_COUNT;
++ break;
++ case DRV_SUSPEND:
++ /* Disable interrupts, stop Tx and Rx. */
++ writel(0, ioaddr + IntrEnable);
++ writel(0, ioaddr + RxConfig);
++ break;
++ case DRV_RESUME:
++ /* This is incomplete: the actions are very chip specific. */
++ set_rx_mode(dev);
++ writel(np->intr_enable, ioaddr + IntrEnable);
++ break;
++ case DRV_DETACH: {
++ struct net_device **devp, **next;
++ if (dev->flags & IFF_UP) {
++ /* Some, but not all, kernel versions close automatically. */
++ dev_close(dev);
++ dev->flags &= ~(IFF_UP|IFF_RUNNING);
++ }
++ unregister_netdev(dev);
++ release_region(dev->base_addr, pci_id_tbl[np->chip_id].io_size);
++#ifndef USE_IO_OPS
++ iounmap((char *)dev->base_addr);
++#endif
++ for (devp = &root_net_dev; *devp; devp = next) {
++ next = &((struct netdev_private *)(*devp)->priv)->next_module;
++ if (*devp == dev) {
++ *devp = *next;
++ break;
++ }
++ }
++ if (np->priv_addr)
++ kfree(np->priv_addr);
++ kfree(dev);
++ MOD_DEC_USE_COUNT;
++ break;
++ }
++ }
++
++ return 0;
++}
++
++
++#ifdef MODULE
++int init_module(void)
++{
++ if (debug >= NETIF_MSG_DRV) /* Emit version even if no cards detected. */
++ printk(KERN_INFO "%s" KERN_INFO "%s", version1, version2);
++ return pci_drv_register(&myson803_drv_id, NULL);
++}
++
++void cleanup_module(void)
++{
++ struct net_device *next_dev;
++
++ pci_drv_unregister(&myson803_drv_id);
++
++ /* No need to check MOD_IN_USE, as sys_delete_module() checks. */
++ while (root_net_dev) {
++ struct netdev_private *np = (void *)(root_net_dev->priv);
++ unregister_netdev(root_net_dev);
++#ifdef USE_IO_OPS
++ release_region(root_net_dev->base_addr,
++ pci_id_tbl[np->chip_id].io_size);
++#else
++ iounmap((char *)(root_net_dev->base_addr));
++#endif
++ next_dev = np->next_module;
++ if (np->priv_addr)
++ kfree(np->priv_addr);
++ kfree(root_net_dev);
++ root_net_dev = next_dev;
++ }
++}
++
++#endif /* MODULE */
++
++/*
++ * Local variables:
++ * compile-command: "make KERNVER=`uname -r` myson803.o"
++ * compile-cmd: "gcc -DMODULE -Wall -Wstrict-prototypes -O6 -c myson803.c"
++ * simple-compile-command: "gcc -DMODULE -O6 -c myson803.c"
++ * c-indent-level: 4
++ * c-basic-offset: 4
++ * tab-width: 4
++ * End:
++ */
+Index: linux/src/drivers/net/natsemi.c
+===================================================================
+RCS file: linux/src/drivers/net/natsemi.c
+diff -N linux/src/drivers/net/natsemi.c
+--- /dev/null 1 Jan 1970 00:00:00 -0000
++++ linux/src/drivers/net/natsemi.c 20 Aug 2004 10:32:53 -0000
+@@ -0,0 +1,1448 @@
++/* natsemi.c: A Linux PCI Ethernet driver for the NatSemi DP83810 series. */
++/*
++ Written/copyright 1999-2003 by Donald Becker.
++
++ This software may be used and distributed according to the terms of
++ the GNU General Public License (GPL), incorporated herein by reference.
++ Drivers based on or derived from this code fall under the GPL and must
++ retain the authorship, copyright and license notice. This file is not
++ a complete program and may only be used when the entire operating
++ system is licensed under the GPL. License for under other terms may be
++ available. Contact the original author for details.
++
++ The original author may be reached as becker@scyld.com, or at
++ Scyld Computing Corporation
++ 914 Bay Ridge Road, Suite 220
++ Annapolis MD 21403
++
++ Support information and updates available at
++ http://www.scyld.com/network/natsemi.html
++ The information and support mailing lists are based at
++ http://www.scyld.com/mailman/listinfo/
++*/
++
++/* These identify the driver base version and may not be removed. */
++static const char version1[] =
++"natsemi.c:v1.17a 8/09/2003 Written by Donald Becker <becker@scyld.com>\n";
++static const char version2[] =
++" http://www.scyld.com/network/natsemi.html\n";
++/* Updated to recommendations in pci-skeleton v2.11. */
++
++/* Automatically extracted configuration info:
++probe-func: natsemi_probe
++config-in: tristate 'National Semiconductor DP8381x series PCI Ethernet support' CONFIG_NATSEMI
++
++c-help-name: National Semiconductor DP8381x series PCI Ethernet support
++c-help-symbol: CONFIG_NATSEMI
++c-help: This driver is for the National Semiconductor DP83810 series,
++c-help: including the 83815 chip.
++c-help: Usage information and updates are available from
++c-help: http://www.scyld.com/network/natsemi.html
++*/
++
++/* The user-configurable values.
++ These may be modified when a driver module is loaded.*/
++
++/* Message enable level: 0..31 = no..all messages. See NETIF_MSG docs. */
++static int debug = 2;
++
++/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
++static int max_interrupt_work = 20;
++
++/* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
++ This chip uses a 512 element hash table based on the Ethernet CRC.
++ Some chip versions are reported to have unreliable multicast filter
++ circuitry. To work around an observed problem set this value to '0',
++ which will immediately switch to Rx-all-multicast.
++*/
++static int multicast_filter_limit = 100;
++
++/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
++ Setting to > 1518 effectively disables this feature.
++ This chip can only receive into aligned buffers, so architectures such
++ as the Alpha AXP might benefit from a copy-align.
++*/
++static int rx_copybreak = 0;
++
++/* Used to pass the media type, etc.
++ Both 'options[]' and 'full_duplex[]' should exist for driver
++ interoperability, however setting full_duplex[] is deprecated.
++ The media type is usually passed in 'options[]'.
++ The default is autonegotation for speed and duplex.
++ This should rarely be overridden.
++ Use option values 0x10/0x20 for 10Mbps, 0x100,0x200 for 100Mbps.
++ Use option values 0x10 and 0x100 for forcing half duplex fixed speed.
++ Use option values 0x20 and 0x200 for forcing full duplex operation.
++*/
++#define MAX_UNITS 8 /* More are supported, limit only on options */
++static int options[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
++static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
++
++/* Operational parameters that are set at compile time. */
++
++/* Keep the ring sizes a power of two for compile efficiency.
++ Understand the implications before changing these settings!
++ The compiler will convert <unsigned>'%'<2^N> into a bit mask.
++ Making the Tx ring too large decreases the effectiveness of channel
++ bonding and packet priority.
++ Too-large receive rings waste memory and confound network buffer limits. */
++#define TX_RING_SIZE 16
++#define TX_QUEUE_LEN 10 /* Limit ring entries actually used, min 4. */
++#define RX_RING_SIZE 32
++
++/* Operational parameters that usually are not changed. */
++/* Time in jiffies before concluding the transmitter is hung.
++ Re-autonegotiation may take up to 3 seconds.
++ */
++#define TX_TIMEOUT (6*HZ)
++
++/* Allocation size of Rx buffers with normal sized Ethernet frames.
++ Do not change this value without good reason. This is not a limit,
++ but a way to keep a consistent allocation size among drivers.
++ */
++#define PKT_BUF_SZ 1536
++
++#ifndef __KERNEL__
++#define __KERNEL__
++#endif
++#if !defined(__OPTIMIZE__)
++#warning You must compile this file with the correct options!
++#warning See the last lines of the source file.
++#error You must compile this driver with "-O".
++#endif
++
++/* Include files, designed to support most kernel versions 2.0.0 and later. */
++#include <linux/config.h>
++#if defined(CONFIG_SMP) && ! defined(__SMP__)
++#define __SMP__
++#endif
++#if defined(MODULE) && defined(CONFIG_MODVERSIONS) && ! defined(MODVERSIONS)
++#define MODVERSIONS
++#endif
++
++#include <linux/version.h>
++#if defined(MODVERSIONS)
++#include <linux/modversions.h>
++#endif
++#include <linux/module.h>
++
++#include <linux/kernel.h>
++#include <linux/string.h>
++#include <linux/timer.h>
++#include <linux/errno.h>
++#include <linux/ioport.h>
++#if LINUX_VERSION_CODE >= 0x20400
++#include <linux/slab.h>
++#else
++#include <linux/malloc.h>
++#endif
++#include <linux/interrupt.h>
++#include <linux/pci.h>
++#include <linux/netdevice.h>
++#include <linux/etherdevice.h>
++#include <linux/skbuff.h>
++#include <asm/processor.h> /* Processor type for cache alignment. */
++#include <asm/bitops.h>
++#include <asm/io.h>
++
++#ifdef INLINE_PCISCAN
++#include "k_compat.h"
++#else
++#include "pci-scan.h"
++#include "kern_compat.h"
++#endif
++
++/* Condensed operations for readability. */
++#define virt_to_le32desc(addr) cpu_to_le32(virt_to_bus(addr))
++#define le32desc_to_virt(addr) bus_to_virt(le32_to_cpu(addr))
++
++#if (LINUX_VERSION_CODE >= 0x20100) && defined(MODULE)
++char kernel_version[] = UTS_RELEASE;
++#endif
++
++MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
++MODULE_DESCRIPTION("National Semiconductor DP83810 series PCI Ethernet driver");
++MODULE_LICENSE("GPL");
++MODULE_PARM(debug, "i");
++MODULE_PARM(options, "1-" __MODULE_STRING(MAX_UNITS) "i");
++MODULE_PARM(rx_copybreak, "i");
++MODULE_PARM(full_duplex, "1-" __MODULE_STRING(MAX_UNITS) "i");
++MODULE_PARM(multicast_filter_limit, "i");
++MODULE_PARM(max_interrupt_work, "i");
++MODULE_PARM_DESC(debug, "Driver message level (0-31)");
++MODULE_PARM_DESC(options, "Force transceiver type or fixed speed+duplex");
++MODULE_PARM_DESC(max_interrupt_work,
++ "Driver maximum events handled per interrupt");
++MODULE_PARM_DESC(full_duplex,
++ "Non-zero to force full duplex, non-negotiated link "
++ "(deprecated).");
++MODULE_PARM_DESC(rx_copybreak,
++ "Breakpoint in bytes for copy-only-tiny-frames");
++MODULE_PARM_DESC(multicast_filter_limit,
++ "Multicast addresses before switching to Rx-all-multicast");
++
++/*
++ Theory of Operation
++
++I. Board Compatibility
++
++This driver is designed for National Semiconductor DP83815 PCI Ethernet NIC.
++It also works with other chips in the DP83810 series.
++The most common board is the Netgear FA311 using the 83815.
++
++II. Board-specific settings
++
++This driver requires the PCI interrupt line to be valid.
++It honors the EEPROM-set values.
++
++III. Driver operation
++
++IIIa. Ring buffers
++
++This driver uses two statically allocated fixed-size descriptor lists
++formed into rings by a branch from the final descriptor to the beginning of
++the list. The ring sizes are set at compile time by RX/TX_RING_SIZE.
++The NatSemi design uses a 'next descriptor' pointer that the driver forms
++into a list, thus rings can be arbitrarily sized. Before changing the
++ring sizes you should understand the flow and cache effects of the
++full/available/empty hysteresis.
++
++IIIb/c. Transmit/Receive Structure
++
++This driver uses a zero-copy receive and transmit scheme.
++The driver allocates full frame size skbuffs for the Rx ring buffers at
++open() time and passes the skb->data field to the chip as receive data
++buffers. When an incoming frame is less than RX_COPYBREAK bytes long,
++a fresh skbuff is allocated and the frame is copied to the new skbuff.
++When the incoming frame is larger, the skbuff is passed directly up the
++protocol stack. Buffers consumed this way are replaced by newly allocated
++skbuffs in a later phase of receives.
++
++The RX_COPYBREAK value is chosen to trade-off the memory wasted by
++using a full-sized skbuff for small frames vs. the copying costs of larger
++frames. New boards are typically used in generously configured machines
++and the underfilled buffers have negligible impact compared to the benefit of
++a single allocation size, so the default value of zero results in never
++copying packets. When copying is done, the cost is usually mitigated by using
++a combined copy/checksum routine. Copying also preloads the cache, which is
++most useful with small frames.
++
++A subtle aspect of the operation is that unaligned buffers are not permitted
++by the hardware. Thus the IP header at offset 14 in an ethernet frame isn't
++longword aligned for further processing. On copies frames are put into the
++skbuff at an offset of "+2", 16-byte aligning the IP header.
++
++IIId. Synchronization
++
++The driver runs as two independent, single-threaded flows of control. One
++is the send-packet routine, which enforces single-threaded use by the
++dev->tbusy flag. The other thread is the interrupt handler, which is single
++threaded by the hardware and interrupt handling software.
++
++The send packet thread has partial control over the Tx ring and 'dev->tbusy'
++flag. It sets the tbusy flag whenever it's queuing a Tx packet. If the next
++queue slot is empty, it clears the tbusy flag when finished otherwise it sets
++the 'lp->tx_full' flag.
++
++The interrupt handler has exclusive control over the Rx ring and records stats
++from the Tx ring. After reaping the stats, it marks the Tx queue entry as
++empty by incrementing the dirty_tx mark. Iff the 'lp->tx_full' flag is set, it
++clears both the tx_full and tbusy flags.
++
++IV. Notes
++
++The older dp83810 chips are so uncommon that support is not relevant.
++No NatSemi datasheet was publicly available at the initial release date,
++but the dp83815 has now been published.
++
++IVb. References
++
++http://www.scyld.com/expert/100mbps.html
++http://www.scyld.com/expert/NWay.html
++
++
++IVc. Errata
++
++Questionable multicast filter implementation.
++The EEPROM format is obviously the result of a chip bug.
++*/
++
++
++
++static void *natsemi_probe1(struct pci_dev *pdev, void *init_dev,
++ long ioaddr, int irq, int chip_idx, int find_cnt);
++static int power_event(void *dev_instance, int event);
++#ifdef USE_IO_OPS
++#define PCI_IOTYPE (PCI_USES_MASTER | PCI_USES_IO | PCI_ADDR0)
++#else
++#define PCI_IOTYPE (PCI_USES_MASTER | PCI_USES_MEM | PCI_ADDR1)
++#endif
++
++static struct pci_id_info pci_id_tbl[] = {
++ {"Netgear FA311 (NatSemi DP83815)",
++ { 0x0020100B, 0xffffffff, 0xf3111385, 0xffffffff, },
++ PCI_IOTYPE, 256, 0},
++ {"NatSemi DP83815", { 0x0020100B, 0xffffffff },
++ PCI_IOTYPE, 256, 0},
++ {0,}, /* 0 terminated list. */
++};
++
++struct drv_id_info natsemi_drv_id = {
++ "natsemi", PCI_HOTSWAP, PCI_CLASS_NETWORK_ETHERNET<<8, pci_id_tbl,
++ natsemi_probe1, power_event };
++
++/* Offsets to the device registers.
++ Unlike software-only systems, device drivers interact with complex hardware.
++ It's not useful to define symbolic names for every register bit in the
++ device. Please do not change these names without good reason.
++*/
++enum register_offsets {
++ ChipCmd=0x00, ChipConfig=0x04, EECtrl=0x08, PCIBusCfg=0x0C,
++ IntrStatus=0x10, IntrMask=0x14, IntrEnable=0x18,
++ TxRingPtr=0x20, TxConfig=0x24,
++ RxRingPtr=0x30, RxConfig=0x34, ClkRunCtrl=0x3C,
++ WOLCmd=0x40, PauseCmd=0x44, RxFilterAddr=0x48, RxFilterData=0x4C,
++ BootRomAddr=0x50, BootRomData=0x54, ChipRevReg=0x58,
++ StatsCtrl=0x5C, StatsData=0x60,
++ RxPktErrs=0x60, RxMissed=0x68, RxCRCErrs=0x64,
++ NS_Xcvr_Mgmt = 0x80, NS_MII_BMCR=0x80, NS_MII_BMSR=0x84,
++ NS_MII_Advert=0x90, NS_MIILinkPartner=0x94,
++};
++
++/* Bits in ChipCmd. */
++enum ChipCmdBits {
++ ChipReset=0x100, SoftIntr=0x80, RxReset=0x20, TxReset=0x10,
++ RxOff=0x08, RxOn=0x04, TxOff=0x02, TxOn=0x01,
++};
++
++/* Bits in ChipConfig. */
++enum ChipConfigBits {
++ CfgLinkGood=0x80000000, CfgFDX=0x20000000,
++};
++
++/* Bits in the interrupt status/mask registers. */
++enum intr_status_bits {
++ IntrRxDone=0x0001, IntrRxIntr=0x0002, IntrRxErr=0x0004, IntrRxEarly=0x0008,
++ IntrRxIdle=0x0010, IntrRxOverrun=0x0020,
++ IntrTxDone=0x0040, IntrTxIntr=0x0080, IntrTxErr=0x0100,
++ IntrTxIdle=0x0200, IntrTxUnderrun=0x0400,
++ StatsMax=0x0800, IntrDrv=0x1000, WOLPkt=0x2000, LinkChange=0x4000,
++ RxStatusOverrun=0x10000,
++ RxResetDone=0x1000000, TxResetDone=0x2000000,
++ IntrPCIErr=0x00f00000,
++ IntrNormalSummary=0x0251, IntrAbnormalSummary=0xED20,
++};
++
++/* Bits in the RxMode register. */
++enum rx_mode_bits {
++ AcceptErr=0x20, AcceptRunt=0x10,
++ AcceptBroadcast=0xC0000000,
++ AcceptMulticast=0x00200000, AcceptAllMulticast=0x20000000,
++ AcceptAllPhys=0x10000000, AcceptMyPhys=0x08000000,
++};
++
++/* The Rx and Tx buffer descriptors. */
++/* Note that using only 32 bit fields simplifies conversion to big-endian
++ architectures. */
++struct netdev_desc {
++ u32 next_desc;
++ s32 cmd_status;
++ u32 buf_addr;
++ u32 software_use;
++};
++
++/* Bits in network_desc.status */
++enum desc_status_bits {
++ DescOwn=0x80000000, DescMore=0x40000000, DescIntr=0x20000000,
++ DescNoCRC=0x10000000,
++ DescPktOK=0x08000000, RxTooLong=0x00400000,
++};
++
++#define PRIV_ALIGN 15 /* Required alignment mask */
++struct netdev_private {
++ /* Descriptor rings first for alignment. */
++ struct netdev_desc rx_ring[RX_RING_SIZE];
++ struct netdev_desc tx_ring[TX_RING_SIZE];
++ struct net_device *next_module; /* Link for devices of this type. */
++ void *priv_addr; /* Unaligned address for kfree */
++ const char *product_name;
++ /* The addresses of receive-in-place skbuffs. */
++ struct sk_buff* rx_skbuff[RX_RING_SIZE];
++ /* The saved address of a sent-in-place packet/buffer, for later free(). */
++ struct sk_buff* tx_skbuff[TX_RING_SIZE];
++ struct net_device_stats stats;
++ struct timer_list timer; /* Media monitoring timer. */
++ /* Frequently used values: keep some adjacent for cache effect. */
++ int msg_level;
++ int chip_id, drv_flags;
++ struct pci_dev *pci_dev;
++ long in_interrupt; /* Word-long for SMP locks. */
++ int max_interrupt_work;
++ int intr_enable;
++ unsigned int restore_intr_enable:1; /* Set if temporarily masked. */
++ unsigned int rx_q_empty:1; /* Set out-of-skbuffs. */
++
++ struct netdev_desc *rx_head_desc;
++ unsigned int cur_rx, dirty_rx; /* Producer/consumer ring indices */
++ unsigned int rx_buf_sz; /* Based on MTU+slack. */
++ int rx_copybreak;
++
++ unsigned int cur_tx, dirty_tx;
++ unsigned int tx_full:1; /* The Tx queue is full. */
++ /* These values keep track of the transceiver/media in use. */
++ unsigned int full_duplex:1; /* Full-duplex operation requested. */
++ unsigned int duplex_lock:1;
++ unsigned int medialock:1; /* Do not sense media. */
++ unsigned int default_port; /* Last dev->if_port value. */
++ /* Rx filter. */
++ u32 cur_rx_mode;
++ u16 rx_filter[32];
++ int multicast_filter_limit;
++ /* FIFO and PCI burst thresholds. */
++ int tx_config, rx_config;
++ /* MII transceiver section. */
++ u16 advertising; /* NWay media advertisement */
++};
++
++static int eeprom_read(long ioaddr, int location);
++static int mdio_read(struct net_device *dev, int phy_id, int location);
++static void mdio_write(struct net_device *dev, int phy_id, int location,
++ int value);
++static int netdev_open(struct net_device *dev);
++static void check_duplex(struct net_device *dev);
++static void netdev_timer(unsigned long data);
++static void tx_timeout(struct net_device *dev);
++static int rx_ring_fill(struct net_device *dev);
++static void init_ring(struct net_device *dev);
++static int start_tx(struct sk_buff *skb, struct net_device *dev);
++static void intr_handler(int irq, void *dev_instance, struct pt_regs *regs);
++static void netdev_error(struct net_device *dev, int intr_status);
++static int netdev_rx(struct net_device *dev);
++static void netdev_error(struct net_device *dev, int intr_status);
++static void set_rx_mode(struct net_device *dev);
++static struct net_device_stats *get_stats(struct net_device *dev);
++static int mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
++static int netdev_close(struct net_device *dev);
++
++
++
++/* A list of our installed devices, for removing the driver module. */
++static struct net_device *root_net_dev = NULL;
++
++#ifndef MODULE
++int natsemi_probe(struct net_device *dev)
++{
++ if (pci_drv_register(&natsemi_drv_id, dev) < 0)
++ return -ENODEV;
++ printk(KERN_INFO "%s" KERN_INFO "%s", version1, version2);
++ return 0;
++}
++#endif
++
++static void *natsemi_probe1(struct pci_dev *pdev, void *init_dev,
++ long ioaddr, int irq, int chip_idx, int card_idx)
++{
++ struct net_device *dev;
++ struct netdev_private *np;
++ void *priv_mem;
++ int i, option = card_idx < MAX_UNITS ? options[card_idx] : 0;
++ int prev_eedata;
++
++ dev = init_etherdev(init_dev, 0);
++ if (!dev)
++ return NULL;
++
++ /* Perhaps NETIF_MSG_PROBE */
++ printk(KERN_INFO "%s: %s at 0x%lx, ",
++ dev->name, pci_id_tbl[chip_idx].name, ioaddr);
++
++ /* Work around the dropped serial bit. */
++ prev_eedata = eeprom_read(ioaddr, 6);
++ for (i = 0; i < 3; i++) {
++ int eedata = eeprom_read(ioaddr, i + 7);
++ dev->dev_addr[i*2] = (eedata << 1) + (prev_eedata >> 15);
++ dev->dev_addr[i*2+1] = eedata >> 7;
++ prev_eedata = eedata;
++ }
++ for (i = 0; i < 5; i++)
++ printk("%2.2x:", dev->dev_addr[i]);
++ printk("%2.2x, IRQ %d.\n", dev->dev_addr[i], irq);
++
++ /* Reset the chip to erase previous misconfiguration. */
++ writel(ChipReset, ioaddr + ChipCmd);
++
++ /* Make certain elements e.g. descriptor lists are aligned. */
++ priv_mem = kmalloc(sizeof(*np) + PRIV_ALIGN, GFP_KERNEL);
++ /* Check for the very unlikely case of no memory. */
++ if (priv_mem == NULL)
++ return NULL;
++
++ dev->base_addr = ioaddr;
++ dev->irq = irq;
++
++ dev->priv = np = (void *)(((long)priv_mem + PRIV_ALIGN) & ~PRIV_ALIGN);
++ memset(np, 0, sizeof(*np));
++ np->priv_addr = priv_mem;
++
++ np->next_module = root_net_dev;
++ root_net_dev = dev;
++
++ np->pci_dev = pdev;
++ np->chip_id = chip_idx;
++ np->drv_flags = pci_id_tbl[chip_idx].drv_flags;
++ np->msg_level = (1 << debug) - 1;
++ np->rx_copybreak = rx_copybreak;
++ np->max_interrupt_work = max_interrupt_work;
++ np->multicast_filter_limit = multicast_filter_limit;
++
++ if (dev->mem_start)
++ option = dev->mem_start;
++
++ /* 0x10/0x20/0x100/0x200 set forced speed&duplex modes. */
++ if (option > 0) {
++ if (option & 0x220)
++ np->full_duplex = 1;
++ np->default_port = option & 0x3ff;
++ if (np->default_port & 0x330) {
++ np->medialock = 1;
++ if (np->msg_level & NETIF_MSG_PROBE)
++ printk(KERN_INFO " Forcing %dMbs %s-duplex operation.\n",
++ (option & 0x300 ? 100 : 10),
++ (np->full_duplex ? "full" : "half"));
++ writew(((option & 0x300) ? 0x2000 : 0) | /* 100mbps? */
++ (np->full_duplex ? 0x0100 : 0), /* Full duplex? */
++ ioaddr + NS_MII_BMCR);
++ }
++ }
++ if (card_idx < MAX_UNITS && full_duplex[card_idx] > 0)
++ np->full_duplex = 1;
++
++ if (np->full_duplex) {
++ if (np->msg_level & NETIF_MSG_PROBE)
++ printk(KERN_INFO "%s: Set to forced full duplex, autonegotiation"
++ " disabled.\n", dev->name);
++ np->duplex_lock = 1;
++ }
++
++ /* The chip-specific entries in the device structure. */
++ dev->open = &netdev_open;
++ dev->hard_start_xmit = &start_tx;
++ dev->stop = &netdev_close;
++ dev->get_stats = &get_stats;
++ dev->set_multicast_list = &set_rx_mode;
++ dev->do_ioctl = &mii_ioctl;
++
++ /* Override the PME enable from the EEPROM. */
++ writel(0x8000, ioaddr + ClkRunCtrl);
++
++ if ((readl(ioaddr + ChipConfig) & 0xe000) != 0xe000) {
++ u32 chip_config = readl(ioaddr + ChipConfig);
++ if (np->msg_level & NETIF_MSG_PROBE)
++ printk(KERN_INFO "%s: Transceiver default autonegotiation %s "
++ "10%s %s duplex.\n",
++ dev->name, chip_config & 0x2000 ? "enabled, advertise"
++ : "disabled, force", chip_config & 0x4000 ? "0" : "",
++ chip_config & 0x8000 ? "full" : "half");
++ }
++ if (np->msg_level & NETIF_MSG_PROBE)
++ printk(KERN_INFO "%s: Transceiver status 0x%4.4x partner %4.4x.\n",
++ dev->name, (int)readl(ioaddr + NS_MII_BMSR),
++ (int)readl(ioaddr + NS_MIILinkPartner));
++
++ return dev;
++}
++
++
++/* Read the EEPROM and MII Management Data I/O (MDIO) interfaces.
++ The EEPROM code is for the common 93c06/46 EEPROMs with 6 bit addresses.
++ Update to the code in other drivers for 8/10 bit addresses.
++*/
++
++/* Delay between EEPROM clock transitions.
++ This "delay" forces out buffered PCI writes, which is sufficient to meet
++ the timing requirements of most EEPROMs.
++*/
++#define eeprom_delay(ee_addr) readl(ee_addr)
++
++enum EEPROM_Ctrl_Bits {
++ EE_ShiftClk=0x04, EE_DataIn=0x01, EE_ChipSelect=0x08, EE_DataOut=0x02,
++};
++#define EE_Write0 (EE_ChipSelect)
++#define EE_Write1 (EE_ChipSelect | EE_DataIn)
++
++/* The EEPROM commands include the preamble. */
++enum EEPROM_Cmds {
++ EE_WriteCmd=(5 << 6), EE_ReadCmd=(6 << 6), EE_EraseCmd=(7 << 6),
++};
++
++static int eeprom_read(long addr, int location)
++{
++ int i;
++ int retval = 0;
++ long ee_addr = addr + EECtrl;
++ int read_cmd = location | EE_ReadCmd;
++ writel(EE_Write0, ee_addr);
++
++ /* Shift the read command bits out. */
++ for (i = 10; i >= 0; i--) {
++ short dataval = (read_cmd & (1 << i)) ? EE_Write1 : EE_Write0;
++ writel(dataval, ee_addr);
++ eeprom_delay(ee_addr);
++ writel(dataval | EE_ShiftClk, ee_addr);
++ eeprom_delay(ee_addr);
++ }
++ writel(EE_ChipSelect, ee_addr);
++ eeprom_delay(ee_addr);
++
++ for (i = 0; i < 16; i++) {
++ writel(EE_ChipSelect | EE_ShiftClk, ee_addr);
++ eeprom_delay(ee_addr);
++ retval |= (readl(ee_addr) & EE_DataOut) ? 1 << i : 0;
++ writel(EE_ChipSelect, ee_addr);
++ eeprom_delay(ee_addr);
++ }
++
++ /* Terminate the EEPROM access. */
++ writel(EE_Write0, ee_addr);
++ writel(0, ee_addr);
++ return retval;
++}
++
++/* MII transceiver control section.
++ The 83815 series has an internal, directly accessable transceiver.
++ We present the management registers as if they were MII connected. */
++
++static int mdio_read(struct net_device *dev, int phy_id, int location)
++{
++ if (phy_id == 1 && location < 32)
++ return readw(dev->base_addr + NS_Xcvr_Mgmt + (location<<2));
++ else
++ return 0xffff;
++}
++
++static void mdio_write(struct net_device *dev, int phy_id, int location,
++ int value)
++{
++ if (phy_id == 1 && location < 32)
++ writew(value, dev->base_addr + NS_Xcvr_Mgmt + (location<<2));
++}
++
++
++static int netdev_open(struct net_device *dev)
++{
++ struct netdev_private *np = (struct netdev_private *)dev->priv;
++ long ioaddr = dev->base_addr;
++ int i;
++
++ /* We do not need to reset the '815 chip. */
++
++ MOD_INC_USE_COUNT;
++
++ if (request_irq(dev->irq, &intr_handler, SA_SHIRQ, dev->name, dev)) {
++ MOD_DEC_USE_COUNT;
++ return -EAGAIN;
++ }
++
++ if (np->msg_level & NETIF_MSG_IFUP)
++ printk(KERN_DEBUG "%s: netdev_open() irq %d.\n",
++ dev->name, dev->irq);
++
++ init_ring(dev);
++
++ writel(virt_to_bus(np->rx_ring), ioaddr + RxRingPtr);
++ writel(virt_to_bus(np->tx_ring), ioaddr + TxRingPtr);
++
++ for (i = 0; i < 6; i += 2) {
++ writel(i, ioaddr + RxFilterAddr);
++ writel(dev->dev_addr[i] + (dev->dev_addr[i+1] << 8),
++ ioaddr + RxFilterData);
++ }
++
++ /* Initialize other registers. */
++ /* See the datasheet for this correction. */
++ if (readl(ioaddr + ChipRevReg) == 0x0203) {
++ writew(0x0001, ioaddr + 0xCC);
++ writew(0x18C9, ioaddr + 0xE4);
++ writew(0x0000, ioaddr + 0xFC);
++ writew(0x5040, ioaddr + 0xF4);
++ writew(0x008C, ioaddr + 0xF8);
++ }
++
++ /* Configure the PCI bus bursts and FIFO thresholds. */
++ /* Configure for standard, in-spec Ethernet. */
++
++ if (readl(ioaddr + ChipConfig) & CfgFDX) { /* Full duplex */
++ np->tx_config = 0xD0801002;
++ np->rx_config = 0x10000020;
++ } else {
++ np->tx_config = 0x10801002;
++ np->rx_config = 0x0020;
++ }
++ if (dev->mtu > 1500)
++ np->rx_config |= 0x08000000;
++ writel(np->tx_config, ioaddr + TxConfig);
++ writel(np->rx_config, ioaddr + RxConfig);
++
++ if (dev->if_port == 0)
++ dev->if_port = np->default_port;
++
++ np->in_interrupt = 0;
++
++ check_duplex(dev);
++ set_rx_mode(dev);
++ netif_start_tx_queue(dev);
++
++ /* Enable interrupts by setting the interrupt mask. */
++ np->intr_enable = IntrNormalSummary | IntrAbnormalSummary | 0x1f;
++ writel(np->intr_enable, ioaddr + IntrMask);
++ writel(1, ioaddr + IntrEnable);
++
++ writel(RxOn | TxOn, ioaddr + ChipCmd);
++ writel(4, ioaddr + StatsCtrl); /* Clear Stats */
++
++ if (np->msg_level & NETIF_MSG_IFUP)
++ printk(KERN_DEBUG "%s: Done netdev_open(), status: %x.\n",
++ dev->name, (int)readl(ioaddr + ChipCmd));
++
++ /* Set the timer to check for link beat. */
++ init_timer(&np->timer);
++ np->timer.expires = jiffies + 3*HZ;
++ np->timer.data = (unsigned long)dev;
++ np->timer.function = &netdev_timer; /* timer handler */
++ add_timer(&np->timer);
++
++ return 0;
++}
++
++static void check_duplex(struct net_device *dev)
++{
++ struct netdev_private *np = (struct netdev_private *)dev->priv;
++ long ioaddr = dev->base_addr;
++ int duplex;
++
++ if (np->duplex_lock)
++ return;
++ duplex = readl(ioaddr + ChipConfig) & 0x20000000 ? 1 : 0;
++ if (np->full_duplex != duplex) {
++ np->full_duplex = duplex;
++ if (np->msg_level & NETIF_MSG_LINK)
++ printk(KERN_INFO "%s: Setting %s-duplex based on negotiated link"
++ " capability.\n", dev->name,
++ duplex ? "full" : "half");
++ if (duplex) {
++ np->rx_config |= 0x10000000;
++ np->tx_config |= 0xC0000000;
++ } else {
++ np->rx_config &= ~0x10000000;
++ np->tx_config &= ~0xC0000000;
++ }
++ writel(np->tx_config, ioaddr + TxConfig);
++ writel(np->rx_config, ioaddr + RxConfig);
++ }
++}
++
++static void netdev_timer(unsigned long data)
++{
++ struct net_device *dev = (struct net_device *)data;
++ struct netdev_private *np = (struct netdev_private *)dev->priv;
++ long ioaddr = dev->base_addr;
++ int next_tick = 10*HZ;
++
++ if (np->msg_level & NETIF_MSG_TIMER)
++ printk(KERN_DEBUG "%s: Driver monitor timer tick, status %8.8x.\n",
++ dev->name, (int)readl(ioaddr + IntrStatus));
++ if (np->rx_q_empty) {
++ /* Trigger an interrupt to refill. */
++ writel(SoftIntr, ioaddr + ChipCmd);
++ }
++ /* This will either have a small false-trigger window or will not catch
++ tbusy incorrectly set when the queue is empty. */
++ if (netif_queue_paused(dev) &&
++ np->cur_tx - np->dirty_tx > 1 &&
++ (jiffies - dev->trans_start) > TX_TIMEOUT) {
++ tx_timeout(dev);
++ }
++ check_duplex(dev);
++ np->timer.expires = jiffies + next_tick;
++ add_timer(&np->timer);
++}
++
++static void tx_timeout(struct net_device *dev)
++{
++ struct netdev_private *np = (struct netdev_private *)dev->priv;
++ long ioaddr = dev->base_addr;
++
++ printk(KERN_WARNING "%s: Transmit timed out, status %8.8x,"
++ " resetting...\n", dev->name, (int)readl(ioaddr + TxRingPtr));
++
++ if (np->msg_level & NETIF_MSG_TX_ERR) {
++ int i;
++ printk(KERN_DEBUG " Rx ring %p: ", np->rx_ring);
++ for (i = 0; i < RX_RING_SIZE; i++)
++ printk(" %8.8x", (unsigned int)np->rx_ring[i].cmd_status);
++ printk("\n"KERN_DEBUG" Tx ring %p: ", np->tx_ring);
++ for (i = 0; i < TX_RING_SIZE; i++)
++ printk(" %4.4x", np->tx_ring[i].cmd_status);
++ printk("\n");
++ }
++
++ /* Reinitialize the hardware here. */
++ /* Stop and restart the chip's Tx processes . */
++
++ /* Trigger an immediate transmit demand. */
++
++ dev->trans_start = jiffies;
++ np->stats.tx_errors++;
++ return;
++}
++
++/* Refill the Rx ring buffers, returning non-zero if not full. */
++static int rx_ring_fill(struct net_device *dev)
++{
++ struct netdev_private *np = (struct netdev_private *)dev->priv;
++ unsigned int entry;
++
++ for (; np->cur_rx - np->dirty_rx > 0; np->dirty_rx++) {
++ entry = np->dirty_rx % RX_RING_SIZE;
++ if (np->rx_skbuff[entry] == NULL) {
++ struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz);
++ np->rx_skbuff[entry] = skb;
++ if (skb == NULL)
++ return 1; /* Better luck next time. */
++ skb->dev = dev; /* Mark as being used by this device. */
++ np->rx_ring[entry].buf_addr = virt_to_le32desc(skb->tail);
++ }
++ np->rx_ring[entry].cmd_status = cpu_to_le32(DescIntr | np->rx_buf_sz);
++ }
++ return 0;
++}
++
++/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
++static void init_ring(struct net_device *dev)
++{
++ struct netdev_private *np = (struct netdev_private *)dev->priv;
++ int i;
++
++ np->tx_full = 0;
++ np->cur_rx = np->cur_tx = 0;
++ np->dirty_rx = np->dirty_tx = 0;
++
++ /* MAX(PKT_BUF_SZ, dev->mtu + 8); */
++ /* I know you _want_ to change this without understanding it. Don't. */
++ np->rx_buf_sz = (dev->mtu <= 1532 ? PKT_BUF_SZ : dev->mtu + 8);
++ np->rx_head_desc = &np->rx_ring[0];
++
++ /* Initialize all Rx descriptors. */
++ for (i = 0; i < RX_RING_SIZE; i++) {
++ np->rx_ring[i].next_desc = virt_to_le32desc(&np->rx_ring[i+1]);
++ np->rx_ring[i].cmd_status = cpu_to_le32(DescOwn);
++ np->rx_skbuff[i] = 0;
++ }
++ /* Mark the last entry as wrapping the ring. */
++ np->rx_ring[i-1].next_desc = virt_to_le32desc(&np->rx_ring[0]);
++
++ for (i = 0; i < TX_RING_SIZE; i++) {
++ np->tx_skbuff[i] = 0;
++ np->tx_ring[i].next_desc = virt_to_le32desc(&np->tx_ring[i+1]);
++ np->tx_ring[i].cmd_status = 0;
++ }
++ np->tx_ring[i-1].next_desc = virt_to_le32desc(&np->tx_ring[0]);
++
++ /* Fill in the Rx buffers.
++ Allocation failure just leaves a "negative" np->dirty_rx. */
++ np->dirty_rx = (unsigned int)(0 - RX_RING_SIZE);
++ rx_ring_fill(dev);
++
++ return;
++}
++
++static int start_tx(struct sk_buff *skb, struct net_device *dev)
++{
++ struct netdev_private *np = (struct netdev_private *)dev->priv;
++ unsigned int entry;
++
++ /* Block a timer-based transmit from overlapping. This happens when
++ packets are presumed lost, and we use this check the Tx status. */
++ if (netif_pause_tx_queue(dev) != 0) {
++ /* This watchdog code is redundant with the media monitor timer. */
++ if (jiffies - dev->trans_start > TX_TIMEOUT)
++ tx_timeout(dev);
++ return 1;
++ }
++
++ /* Note: Ordering is important here, set the field with the
++ "ownership" bit last, and only then increment cur_tx.
++ No spinlock is needed for either Tx or Rx.
++ */
++
++ /* Calculate the next Tx descriptor entry. */
++ entry = np->cur_tx % TX_RING_SIZE;
++
++ np->tx_skbuff[entry] = skb;
++
++ np->tx_ring[entry].buf_addr = virt_to_le32desc(skb->data);
++ np->tx_ring[entry].cmd_status = cpu_to_le32(DescOwn|DescIntr | skb->len);
++ np->cur_tx++;
++
++ /* For some architectures explicitly flushing np->tx_ring,sizeof(tx_ring)
++ and skb->data,skb->len improves performance. */
++
++ if (np->cur_tx - np->dirty_tx >= TX_QUEUE_LEN - 1) {
++ np->tx_full = 1;
++ /* Check for a just-cleared queue. */
++ if (np->cur_tx - (volatile unsigned int)np->dirty_tx
++ < TX_QUEUE_LEN - 4) {
++ np->tx_full = 0;
++ netif_unpause_tx_queue(dev);
++ } else
++ netif_stop_tx_queue(dev);
++ } else
++ netif_unpause_tx_queue(dev); /* Typical path */
++ /* Wake the potentially-idle transmit channel. */
++ writel(TxOn, dev->base_addr + ChipCmd);
++
++ dev->trans_start = jiffies;
++
++ if (np->msg_level & NETIF_MSG_TX_QUEUED) {
++ printk(KERN_DEBUG "%s: Transmit frame #%d queued in slot %d.\n",
++ dev->name, np->cur_tx, entry);
++ }
++ return 0;
++}
++
++/* The interrupt handler does all of the Rx thread work and cleans up
++ after the Tx thread. */
++static void intr_handler(int irq, void *dev_instance, struct pt_regs *rgs)
++{
++ struct net_device *dev = (struct net_device *)dev_instance;
++ struct netdev_private *np;
++ long ioaddr;
++ int boguscnt;
++
++#ifndef final_version /* Can never occur. */
++ if (dev == NULL) {
++ printk (KERN_ERR "Netdev interrupt handler(): IRQ %d for unknown "
++ "device.\n", irq);
++ return;
++ }
++#endif
++
++ ioaddr = dev->base_addr;
++ np = (struct netdev_private *)dev->priv;
++ boguscnt = np->max_interrupt_work;
++
++ do {
++ u32 intr_status = readl(ioaddr + IntrStatus);
++
++ if (intr_status == 0 || intr_status == 0xffffffff)
++ break;
++
++ /* Acknowledge all of the current interrupt sources ASAP.
++ Nominally the read above accomplishes this, but... */
++ writel(intr_status & 0x001ffff, ioaddr + IntrStatus);
++
++ if (np->msg_level & NETIF_MSG_INTR)
++ printk(KERN_DEBUG "%s: Interrupt, status %8.8x.\n",
++ dev->name, intr_status);
++
++ if (intr_status & (IntrRxDone | IntrRxIntr)) {
++ netdev_rx(dev);
++ np->rx_q_empty = rx_ring_fill(dev);
++ }
++
++ if (intr_status & (IntrRxIdle | IntrDrv)) {
++ unsigned int old_dirty_rx = np->dirty_rx;
++ if (rx_ring_fill(dev) == 0)
++ np->rx_q_empty = 0;
++ /* Restart Rx engine iff we did add a buffer. */
++ if (np->dirty_rx != old_dirty_rx)
++ writel(RxOn, dev->base_addr + ChipCmd);
++ }
++
++ for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
++ int entry = np->dirty_tx % TX_RING_SIZE;
++ int tx_status = le32_to_cpu(np->tx_ring[entry].cmd_status);
++ if (tx_status & DescOwn)
++ break;
++ if (np->msg_level & NETIF_MSG_TX_DONE)
++ printk(KERN_DEBUG "%s: Transmit done, Tx status %8.8x.\n",
++ dev->name, tx_status);
++ if (tx_status & 0x08000000) {
++ np->stats.tx_packets++;
++#if LINUX_VERSION_CODE > 0x20127
++ np->stats.tx_bytes += np->tx_skbuff[entry]->len;
++#endif
++ } else { /* Various Tx errors */
++ if (np->msg_level & NETIF_MSG_TX_ERR)
++ printk(KERN_DEBUG "%s: Transmit error, Tx status %8.8x.\n",
++ dev->name, tx_status);
++ if (tx_status & 0x04010000) np->stats.tx_aborted_errors++;
++ if (tx_status & 0x02000000) np->stats.tx_fifo_errors++;
++ if (tx_status & 0x01000000) np->stats.tx_carrier_errors++;
++ if (tx_status & 0x00200000) np->stats.tx_window_errors++;
++ np->stats.tx_errors++;
++ }
++ /* Free the original skb. */
++ dev_free_skb_irq(np->tx_skbuff[entry]);
++ np->tx_skbuff[entry] = 0;
++ }
++ /* Note the 4 slot hysteresis to mark the queue non-full. */
++ if (np->tx_full
++ && np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
++ /* The ring is no longer full, allow new TX entries. */
++ np->tx_full = 0;
++ netif_resume_tx_queue(dev);
++ }
++
++ /* Abnormal error summary/uncommon events handlers. */
++ if (intr_status & IntrAbnormalSummary)
++ netdev_error(dev, intr_status);
++
++ if (--boguscnt < 0) {
++ printk(KERN_WARNING "%s: Too much work at interrupt, "
++ "status=0x%4.4x.\n",
++ dev->name, intr_status);
++ np->restore_intr_enable = 1;
++ break;
++ }
++ } while (1);
++
++ if (np->msg_level & NETIF_MSG_INTR)
++ printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
++ dev->name, (int)readl(ioaddr + IntrStatus));
++
++ return;
++}
++
++/* This routine is logically part of the interrupt handler, but separated
++ for clarity and better register allocation. */
++static int netdev_rx(struct net_device *dev)
++{
++ struct netdev_private *np = (struct netdev_private *)dev->priv;
++ int entry = np->cur_rx % RX_RING_SIZE;
++ int boguscnt = np->dirty_rx + RX_RING_SIZE - np->cur_rx;
++ s32 desc_status = le32_to_cpu(np->rx_head_desc->cmd_status);
++
++ /* If the driver owns the next entry it's a new packet. Send it up. */
++ while (desc_status < 0) { /* e.g. & DescOwn */
++ if (np->msg_level & NETIF_MSG_RX_STATUS)
++ printk(KERN_DEBUG " In netdev_rx() entry %d status was %8.8x.\n",
++ entry, desc_status);
++ if (--boguscnt < 0)
++ break;
++ if ((desc_status & (DescMore|DescPktOK|RxTooLong)) != DescPktOK) {
++ if (desc_status & DescMore) {
++ printk(KERN_WARNING "%s: Oversized(?) Ethernet frame spanned "
++ "multiple buffers, entry %#x status %x.\n",
++ dev->name, np->cur_rx, desc_status);
++ np->stats.rx_length_errors++;
++ } else {
++ /* There was a error. */
++ if (np->msg_level & NETIF_MSG_RX_ERR)
++ printk(KERN_DEBUG " netdev_rx() Rx error was %8.8x.\n",
++ desc_status);
++ np->stats.rx_errors++;
++ if (desc_status & 0x06000000) np->stats.rx_over_errors++;
++ if (desc_status & 0x00600000) np->stats.rx_length_errors++;
++ if (desc_status & 0x00140000) np->stats.rx_frame_errors++;
++ if (desc_status & 0x00080000) np->stats.rx_crc_errors++;
++ }
++ } else {
++ struct sk_buff *skb;
++ int pkt_len = (desc_status & 0x0fff) - 4; /* Omit CRC size. */
++ /* Check if the packet is long enough to accept without copying
++ to a minimally-sized skbuff. */
++ if (pkt_len < np->rx_copybreak
++ && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
++ skb->dev = dev;
++ skb_reserve(skb, 2); /* 16 byte align the IP header */
++#if defined(HAS_IP_COPYSUM) || (LINUX_VERSION_CODE >= 0x20100)
++ eth_copy_and_sum(skb, np->rx_skbuff[entry]->tail, pkt_len, 0);
++ skb_put(skb, pkt_len);
++#else
++ memcpy(skb_put(skb, pkt_len), np->rx_skbuff[entry]->tail,
++ pkt_len);
++#endif
++ } else {
++ skb_put(skb = np->rx_skbuff[entry], pkt_len);
++ np->rx_skbuff[entry] = NULL;
++ }
++ skb->protocol = eth_type_trans(skb, dev);
++ /* W/ hardware checksum: skb->ip_summed = CHECKSUM_UNNECESSARY; */
++ netif_rx(skb);
++ dev->last_rx = jiffies;
++ np->stats.rx_packets++;
++#if LINUX_VERSION_CODE > 0x20127
++ np->stats.rx_bytes += pkt_len;
++#endif
++ }
++ entry = (++np->cur_rx) % RX_RING_SIZE;
++ np->rx_head_desc = &np->rx_ring[entry];
++ desc_status = le32_to_cpu(np->rx_head_desc->cmd_status);
++ }
++
++ /* Refill is now done in the main interrupt loop. */
++ return 0;
++}
++
++static void netdev_error(struct net_device *dev, int intr_status)
++{
++ struct netdev_private *np = (struct netdev_private *)dev->priv;
++ long ioaddr = dev->base_addr;
++
++ if (intr_status & LinkChange) {
++ int chip_config = readl(ioaddr + ChipConfig);
++ if (np->msg_level & NETIF_MSG_LINK)
++ printk(KERN_NOTICE "%s: Link changed: Autonegotiation advertising"
++ " %4.4x partner %4.4x.\n", dev->name,
++ (int)readw(ioaddr + NS_MII_Advert),
++ (int)readw(ioaddr + NS_MIILinkPartner));
++ if (chip_config & CfgLinkGood)
++ netif_link_up(dev);
++ else
++ netif_link_down(dev);
++ check_duplex(dev);
++ }
++ if (intr_status & StatsMax) {
++ get_stats(dev);
++ }
++ if (intr_status & IntrTxUnderrun) {
++ /* Increase the Tx threshold, 32 byte units. */
++ if ((np->tx_config & 0x3f) < 62)
++ np->tx_config += 2; /* +64 bytes */
++ writel(np->tx_config, ioaddr + TxConfig);
++ }
++ if (intr_status & WOLPkt) {
++ int wol_status = readl(ioaddr + WOLCmd);
++ printk(KERN_NOTICE "%s: Link wake-up event %8.8x",
++ dev->name, wol_status);
++ }
++ if (intr_status & (RxStatusOverrun | IntrRxOverrun)) {
++ if (np->msg_level & NETIF_MSG_DRV)
++ printk(KERN_ERR "%s: Rx overflow! ns815 %8.8x.\n",
++ dev->name, intr_status);
++ np->stats.rx_fifo_errors++;
++ }
++ if (intr_status & ~(LinkChange|StatsMax|RxResetDone|TxResetDone|
++ RxStatusOverrun|0xA7ff)) {
++ if (np->msg_level & NETIF_MSG_DRV)
++ printk(KERN_ERR "%s: Something Wicked happened! natsemi %8.8x.\n",
++ dev->name, intr_status);
++ }
++ /* Hmmmmm, it's not clear how to recover from PCI faults. */
++ if (intr_status & IntrPCIErr) {
++ np->stats.tx_fifo_errors++;
++ np->stats.rx_fifo_errors++;
++ }
++}
++
++static struct net_device_stats *get_stats(struct net_device *dev)
++{
++ long ioaddr = dev->base_addr;
++ struct netdev_private *np = (struct netdev_private *)dev->priv;
++ int crc_errs = readl(ioaddr + RxCRCErrs);
++
++ if (crc_errs != 0xffffffff) {
++ /* We need not lock this segment of code for SMP.
++ There is no atomic-add vulnerability for most CPUs,
++ and statistics are non-critical. */
++ /* The chip only need report frame silently dropped. */
++ np->stats.rx_crc_errors += crc_errs;
++ np->stats.rx_missed_errors += readl(ioaddr + RxMissed);
++ }
++
++ return &np->stats;
++}
++
++/* The big-endian AUTODIN II ethernet CRC calculations.
++ See ns820.c for how to fill the table on new chips.
++ */
++static unsigned const ethernet_polynomial = 0x04c11db7U;
++static inline u32 ether_crc(int length, unsigned char *data)
++{
++ int crc = -1;
++
++ while(--length >= 0) {
++ unsigned char current_octet = *data++;
++ int bit;
++ for (bit = 0; bit < 8; bit++, current_octet >>= 1)
++ crc = (crc << 1) ^
++ ((crc < 0) ^ (current_octet & 1) ? ethernet_polynomial : 0);
++ }
++ return crc;
++}
++
++static void set_rx_mode(struct net_device *dev)
++{
++ long ioaddr = dev->base_addr;
++ struct netdev_private *np = (struct netdev_private *)dev->priv;
++ u8 mc_filter[64]; /* Multicast hash filter */
++ u32 rx_mode;
++
++ if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
++ /* Unconditionally log net taps. */
++ printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n", dev->name);
++ rx_mode = AcceptBroadcast | AcceptAllMulticast | AcceptAllPhys
++ | AcceptMyPhys;
++ } else if ((dev->mc_count > np->multicast_filter_limit)
++ || (dev->flags & IFF_ALLMULTI)) {
++ rx_mode = AcceptBroadcast | AcceptAllMulticast | AcceptMyPhys;
++ } else {
++ struct dev_mc_list *mclist;
++ int i;
++ memset(mc_filter, 0, sizeof(mc_filter));
++ for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
++ i++, mclist = mclist->next) {
++ int filterbit = ether_crc(ETH_ALEN, mclist->dmi_addr);
++ set_bit(filterbit & 0x1ff, mc_filter);
++ if (np->msg_level & NETIF_MSG_RXFILTER)
++ printk(KERN_INFO "%s: Added filter for %2.2x:%2.2x:%2.2x:"
++ "%2.2x:%2.2x:%2.2x crc %8.8x bit %d.\n", dev->name,
++ mclist->dmi_addr[0], mclist->dmi_addr[1],
++ mclist->dmi_addr[2], mclist->dmi_addr[3],
++ mclist->dmi_addr[4], mclist->dmi_addr[5],
++ filterbit, filterbit & 0x1ff);
++ }
++ rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
++ for (i = 0; i < 64; i += 2) {
++ u16 filterword = (mc_filter[i+1]<<8) + mc_filter[i];
++ if (filterword != np->rx_filter[i>>2]) {
++ writel(0x200 + i, ioaddr + RxFilterAddr);
++ writel(filterword, ioaddr + RxFilterData);
++ np->rx_filter[i>>2] = filterword;
++ }
++ }
++ }
++ writel(rx_mode, ioaddr + RxFilterAddr);
++ np->cur_rx_mode = rx_mode;
++}
++
++static int mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
++{
++ struct netdev_private *np = (struct netdev_private *)dev->priv;
++ u16 *data = (u16 *)&rq->ifr_data;
++ u32 *data32 = (void *)&rq->ifr_data;
++
++ switch(cmd) {
++ case 0x8947: case 0x89F0:
++ /* SIOCGMIIPHY: Get the address of the PHY in use. */
++ data[0] = 1;
++ /* Fall Through */
++ case 0x8948: case 0x89F1:
++ /* SIOCGMIIREG: Read the specified MII register. */
++ data[3] = mdio_read(dev, data[0] & 0x1f, data[1] & 0x1f);
++ return 0;
++ case 0x8949: case 0x89F2:
++ /* SIOCSMIIREG: Write the specified MII register */
++ if (!capable(CAP_NET_ADMIN))
++ return -EPERM;
++ if (data[0] == 1) {
++ u16 miireg = data[1] & 0x1f;
++ u16 value = data[2];
++ mdio_write(dev, 1, miireg, value);
++ switch (miireg) {
++ case 0:
++ /* Check for autonegotiation on or reset. */
++ np->duplex_lock = (value & 0x9000) ? 0 : 1;
++ if (np->duplex_lock)
++ np->full_duplex = (value & 0x0100) ? 1 : 0;
++ break;
++ case 4: np->advertising = value; break;
++ }
++ }
++ return 0;
++ case SIOCGPARAMS:
++ data32[0] = np->msg_level;
++ data32[1] = np->multicast_filter_limit;
++ data32[2] = np->max_interrupt_work;
++ data32[3] = np->rx_copybreak;
++ return 0;
++ case SIOCSPARAMS:
++ if (!capable(CAP_NET_ADMIN))
++ return -EPERM;
++ np->msg_level = data32[0];
++ np->multicast_filter_limit = data32[1];
++ np->max_interrupt_work = data32[2];
++ np->rx_copybreak = data32[3];
++ return 0;
++ default:
++ return -EOPNOTSUPP;
++ }
++}
++
++static int netdev_close(struct net_device *dev)
++{
++ long ioaddr = dev->base_addr;
++ struct netdev_private *np = (struct netdev_private *)dev->priv;
++ int i;
++
++ netif_stop_tx_queue(dev);
++
++ if (np->msg_level & NETIF_MSG_IFDOWN) {
++ printk(KERN_DEBUG "%s: Shutting down ethercard, status was %4.4x "
++ "Int %2.2x.\n",
++ dev->name, (int)readl(ioaddr + ChipCmd),
++ (int)readl(ioaddr + IntrStatus));
++ printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d, Rx %d / %d.\n",
++ dev->name, np->cur_tx, np->dirty_tx, np->cur_rx, np->dirty_rx);
++ }
++
++ /* We don't want the timer to re-start anything. */
++ del_timer(&np->timer);
++
++ /* Disable interrupts using the mask. */
++ writel(0, ioaddr + IntrMask);
++ writel(0, ioaddr + IntrEnable);
++ writel(2, ioaddr + StatsCtrl); /* Freeze Stats */
++
++ /* Stop the chip's Tx and Rx processes. */
++ writel(RxOff | TxOff, ioaddr + ChipCmd);
++
++ get_stats(dev);
++
++#ifdef __i386__
++ if (np->msg_level & NETIF_MSG_IFDOWN) {
++ printk("\n"KERN_DEBUG" Tx ring at %8.8x:\n",
++ (int)virt_to_bus(np->tx_ring));
++ for (i = 0; i < TX_RING_SIZE; i++)
++ printk(" #%d desc. %8.8x %8.8x.\n",
++ i, np->tx_ring[i].cmd_status, (u32)np->tx_ring[i].buf_addr);
++ printk("\n"KERN_DEBUG " Rx ring %8.8x:\n",
++ (int)virt_to_bus(np->rx_ring));
++ for (i = 0; i < RX_RING_SIZE; i++) {
++ printk(KERN_DEBUG " #%d desc. %8.8x %8.8x\n",
++ i, np->rx_ring[i].cmd_status, (u32)np->rx_ring[i].buf_addr);
++ }
++ }
++#endif /* __i386__ debugging only */
++
++ free_irq(dev->irq, dev);
++
++ /* Free all the skbuffs in the Rx queue. */
++ for (i = 0; i < RX_RING_SIZE; i++) {
++ np->rx_ring[i].cmd_status = 0;
++ np->rx_ring[i].buf_addr = 0xBADF00D0; /* An invalid address. */
++ if (np->rx_skbuff[i]) {
++#if LINUX_VERSION_CODE < 0x20100
++ np->rx_skbuff[i]->free = 1;
++#endif
++ dev_free_skb(np->rx_skbuff[i]);
++ }
++ np->rx_skbuff[i] = 0;
++ }
++ for (i = 0; i < TX_RING_SIZE; i++) {
++ if (np->tx_skbuff[i])
++ dev_free_skb(np->tx_skbuff[i]);
++ np->tx_skbuff[i] = 0;
++ }
++
++#if 0
++ writel(0x0200, ioaddr + ChipConfig); /* Power down Xcvr. */
++#endif
++
++ MOD_DEC_USE_COUNT;
++
++ return 0;
++}
++
++static int power_event(void *dev_instance, int event)
++{
++ struct net_device *dev = dev_instance;
++ struct netdev_private *np = (struct netdev_private *)dev->priv;
++ long ioaddr = dev->base_addr;
++
++ if (np->msg_level & NETIF_MSG_LINK)
++ printk(KERN_DEBUG "%s: Handling power event %d.\n", dev->name, event);
++ switch(event) {
++ case DRV_ATTACH:
++ MOD_INC_USE_COUNT;
++ break;
++ case DRV_SUSPEND:
++ /* Disable interrupts, freeze stats, stop Tx and Rx. */
++ writel(0, ioaddr + IntrEnable);
++ writel(2, ioaddr + StatsCtrl);
++ writel(RxOff | TxOff, ioaddr + ChipCmd);
++ break;
++ case DRV_RESUME:
++ /* This is incomplete: the open() actions should be repeated. */
++ set_rx_mode(dev);
++ writel(np->intr_enable, ioaddr + IntrEnable);
++ writel(1, ioaddr + IntrEnable);
++ writel(RxOn | TxOn, ioaddr + ChipCmd);
++ break;
++ case DRV_DETACH: {
++ struct net_device **devp, **next;
++ if (dev->flags & IFF_UP) {
++ /* Some, but not all, kernel versions close automatically. */
++ dev_close(dev);
++ dev->flags &= ~(IFF_UP|IFF_RUNNING);
++ }
++ unregister_netdev(dev);
++ release_region(dev->base_addr, pci_id_tbl[np->chip_id].io_size);
++ for (devp = &root_net_dev; *devp; devp = next) {
++ next = &((struct netdev_private *)(*devp)->priv)->next_module;
++ if (*devp == dev) {
++ *devp = *next;
++ break;
++ }
++ }
++ if (np->priv_addr)
++ kfree(np->priv_addr);
++ kfree(dev);
++ MOD_DEC_USE_COUNT;
++ break;
++ }
++ }
++
++ return 0;
++}
++
++
++#ifdef MODULE
++int init_module(void)
++{
++ if (debug >= NETIF_MSG_DRV) /* Emit version even if no cards detected. */
++ printk(KERN_INFO "%s" KERN_INFO "%s", version1, version2);
++#ifdef CARDBUS
++ register_driver(&etherdev_ops);
++ return 0;
++#else
++ return pci_drv_register(&natsemi_drv_id, NULL);
++#endif
++}
++
++void cleanup_module(void)
++{
++ struct net_device *next_dev;
++
++#ifdef CARDBUS
++ unregister_driver(&etherdev_ops);
++#else
++ pci_drv_unregister(&natsemi_drv_id);
++#endif
++
++ /* No need to check MOD_IN_USE, as sys_delete_module() checks. */
++ while (root_net_dev) {
++ struct netdev_private *np = (void *)(root_net_dev->priv);
++ unregister_netdev(root_net_dev);
++ iounmap((char *)root_net_dev->base_addr);
++ next_dev = np->next_module;
++ if (np->priv_addr)
++ kfree(np->priv_addr);
++ kfree(root_net_dev);
++ root_net_dev = next_dev;
++ }
++}
++
++#endif /* MODULE */
++
++/*
++ * Local variables:
++ * compile-command: "make KERNVER=`uname -r` natsemi.o"
++ * compile-cmd: "gcc -DMODULE -Wall -Wstrict-prototypes -O6 -c natsemi.c"
++ * simple-compile-command: "gcc -DMODULE -O6 -c natsemi.c"
++ * c-indent-level: 4
++ * c-basic-offset: 4
++ * tab-width: 4
++ * End:
++ */
+Index: linux/src/drivers/net/ne2k-pci.c
+===================================================================
+RCS file: /cvsroot/hurd/gnumach/linux/src/drivers/net/Attic/ne2k-pci.c,v
+retrieving revision 1.1
+diff -u -r1.1 ne2k-pci.c
+--- linux/src/drivers/net/ne2k-pci.c 26 Apr 1999 05:52:27 -0000 1.1
++++ linux/src/drivers/net/ne2k-pci.c 20 Aug 2004 10:32:53 -0000
+@@ -2,39 +2,81 @@
+ /*
+ A Linux device driver for PCI NE2000 clones.
+
+- Authorship and other copyrights:
+- 1992-1998 by Donald Becker, NE2000 core and various modifications.
++ Authors and other copyright holders:
++ 1992-2002 by Donald Becker, NE2000 core and various modifications.
+ 1995-1998 by Paul Gortmaker, core modifications and PCI support.
+-
+ Copyright 1993 assigned to the United States Government as represented
+ by the Director, National Security Agency.
+
+- This software may be used and distributed according to the terms
+- of the GNU Public License, incorporated herein by reference.
+-
+- The author may be reached as becker@CESDIS.gsfc.nasa.gov, or C/O
+- Center of Excellence in Space Data and Information Sciences
+- Code 930.5, Goddard Space Flight Center, Greenbelt MD 20771
+-
+- People are making PCI ne2000 clones! Oh the horror, the horror...
++ This software may be used and distributed according to the terms of
++ the GNU General Public License (GPL), incorporated herein by reference.
++ Drivers based on or derived from this code fall under the GPL and must
++ retain the authorship, copyright and license notice. This file is not
++ a complete program and may only be used when the entire operating
++ system is licensed under the GPL.
++
++ The author may be reached as becker@scyld.com, or C/O
++ Scyld Computing Corporation
++ 410 Severn Ave., Suite 210
++ Annapolis MD 21403
+
+ Issues remaining:
+- No full-duplex support.
++ People are making PCI ne2000 clones! Oh the horror, the horror...
++ Limited full-duplex support.
+ */
+
+-/* Our copyright info must remain in the binary. */
+-static const char *version =
+-"ne2k-pci.c:v0.99L 2/7/98 D. Becker/P. Gortmaker http://cesdis.gsfc.nasa.gov/linux/drivers/ne2k-pci.html\n";
++/* These identify the driver base version and may not be removed. */
++static const char version1[] =
++"ne2k-pci.c:v1.05 6/13/2002 D. Becker/P. Gortmaker\n";
++static const char version2[] =
++" http://www.scyld.com/network/ne2k-pci.html\n";
++
++/* The user-configurable values.
++ These may be modified when a driver module is loaded.*/
++
++static int debug = 1; /* 1 normal messages, 0 quiet .. 7 verbose. */
++
++#define MAX_UNITS 8 /* More are supported, limit only on options */
++/* Used to pass the full-duplex flag, etc. */
++static int full_duplex[MAX_UNITS] = {0, };
++static int options[MAX_UNITS] = {0, };
+
+-#ifdef MODVERSIONS
+-#include <linux/modversions.h>
++/* Force a non std. amount of memory. Units are 256 byte pages. */
++/* #define PACKETBUF_MEMSIZE 0x40 */
++
++#ifndef __KERNEL__
++#define __KERNEL__
++#endif
++#if !defined(__OPTIMIZE__)
++#warning You must compile this file with the correct options!
++#warning See the last lines of the source file.
++#error You must compile this driver with "-O".
++#endif
++
++#include <linux/config.h>
++#if defined(CONFIG_SMP) && ! defined(__SMP__)
++#define __SMP__
+ #endif
++#if defined(MODULE) && defined(CONFIG_MODVERSIONS) && ! defined(MODVERSIONS)
++#define MODVERSIONS
++#endif
++
++#include <linux/version.h>
+ #include <linux/module.h>
++#if LINUX_VERSION_CODE < 0x20300 && defined(MODVERSIONS)
++#include <linux/modversions.h>
++#endif
++
+ #include <linux/kernel.h>
+-#include <linux/sched.h>
+ #include <linux/errno.h>
+ #include <linux/pci.h>
+-#include <linux/bios32.h>
++#if LINUX_VERSION_CODE < 0x20200
++#define lock_8390_module()
++#define unlock_8390_module()
++#else
++#include <linux/init.h>
++#endif
++
+ #include <asm/system.h>
+ #include <asm/io.h>
+ #include <asm/irq.h>
+@@ -43,8 +85,18 @@
+ #include <linux/etherdevice.h>
+ #include "8390.h"
+
+-/* Set statically or when loading the driver module. */
+-static int debug = 1;
++#ifdef INLINE_PCISCAN
++#include "k_compat.h"
++#else
++#include "pci-scan.h"
++#include "kern_compat.h"
++#endif
++
++MODULE_AUTHOR("Donald Becker / Paul Gortmaker");
++MODULE_DESCRIPTION("PCI NE2000 clone driver");
++MODULE_PARM(debug, "i");
++MODULE_PARM(options, "1-" __MODULE_STRING(MAX_UNITS) "i");
++MODULE_PARM(full_duplex, "1-" __MODULE_STRING(MAX_UNITS) "i");
+
+ /* Some defines that people can play with if so inclined. */
+
+@@ -52,27 +104,52 @@
+ #ifdef LOAD_8390_BY_KERNELD
+ #include <linux/kerneld.h>
+ #endif
+-/* Use 32 bit data-movement operations instead of 16 bit. */
+-#define USE_LONGIO
+
+-/* Do we implement the read before write bugfix ? */
+-/* #define NE_RW_BUGFIX */
+-
+-/* Do we have a non std. amount of memory? (in units of 256 byte pages) */
+-/* #define PACKETBUF_MEMSIZE 0x40 */
++static void *ne2k_pci_probe1(struct pci_dev *pdev, void *dev,
++ long ioaddr, int irq, int chip_idx, int fnd_cnt);
++/* Flags. We rename an existing ei_status field to store flags! */
++/* Thus only the low 8 bits are usable for non-init-time flags. */
++#define ne2k_flags reg0
++enum {
++ ONLY_16BIT_IO=8, ONLY_32BIT_IO=4, /* Chip can do only 16/32-bit xfers. */
++ FORCE_FDX=0x20, /* User override. */
++ REALTEK_FDX=0x40, HOLTEK_FDX=0x80,
++ STOP_PG_0x60=0x100,
++};
++#define NE_IO_EXTENT 0x20
++#ifndef USE_MEMORY_OPS
++#define PCI_IOTYPE (PCI_USES_IO | PCI_ADDR0)
++#else
++#warning When using PCI memory mode the 8390 core must be compiled for memory
++#warning operations as well.
++#warning Not all PCI NE2000 clones support memory mode access.
++#define PCI_IOTYPE (PCI_USES_MEM | PCI_ADDR1)
++#endif
++
++static struct pci_id_info pci_id_tbl[] = {
++ {"RealTek RTL-8029",{ 0x802910ec, 0xffffffff}, PCI_IOTYPE, NE_IO_EXTENT,
++ REALTEK_FDX },
++ {"Winbond 89C940", { 0x09401050, 0xffffffff}, PCI_IOTYPE, NE_IO_EXTENT, 0},
++ {"Winbond w89c940", { 0x5a5a1050, 0xffffffff}, PCI_IOTYPE, NE_IO_EXTENT, 0},
++ {"KTI ET32P2", { 0x30008e2e, 0xffffffff}, PCI_IOTYPE, NE_IO_EXTENT, 0},
++ {"NetVin NV5000SC", { 0x50004a14, 0xffffffff}, PCI_IOTYPE, NE_IO_EXTENT, 0},
++ {"Via 86C926", { 0x09261106, 0xffffffff},
++ PCI_IOTYPE, NE_IO_EXTENT, ONLY_16BIT_IO},
++ {"SureCom NE34", { 0x0e3410bd, 0xffffffff}, PCI_IOTYPE, NE_IO_EXTENT, 0},
++ {"Holtek HT80232", { 0x005812c3, 0xffffffff},
++ PCI_IOTYPE, NE_IO_EXTENT, ONLY_16BIT_IO | HOLTEK_FDX},
++ {"Holtek HT80229", { 0x559812c3, 0xffffffff},
++ PCI_IOTYPE, NE_IO_EXTENT, ONLY_32BIT_IO | HOLTEK_FDX | STOP_PG_0x60},
++ {"Compex RL2000",
++ { 0x140111f6, 0xffffffff}, PCI_IOTYPE, NE_IO_EXTENT, 0},
++ /* A mutant board: Winbond chip with a RTL format EEPROM. */
++ {"Winbond w89c940 (misprogrammed type 0x1980)", { 0x19808c4a, 0xffffffff},
++ PCI_IOTYPE, NE_IO_EXTENT, 0},
++ {0,}, /* 0 terminated list. */
++};
+
+-static struct {
+- unsigned short vendor, dev_id;
+- char *name;
+-}
+-pci_clone_list[] = {
+- {0x10ec, 0x8029, "RealTek RTL-8029"},
+- {0x1050, 0x0940, "Winbond 89C940"},
+- {0x11f6, 0x1401, "Compex RL2000"},
+- {0x8e2e, 0x3000, "KTI ET32P2"},
+- {0x4a14, 0x5000, "NetVin NV5000SC"},
+- {0x1106, 0x0926, "Via 82C926"},
+- {0,}
++struct drv_id_info ne2k_pci_drv_id = {
++ "ne2k-pci", 0, PCI_CLASS_NETWORK_ETHERNET<<8, pci_id_tbl, ne2k_pci_probe1,
+ };
+
+ /* ---- No user-serviceable parts below ---- */
+@@ -81,41 +158,40 @@
+ #define NE_CMD 0x00
+ #define NE_DATAPORT 0x10 /* NatSemi-defined port window offset. */
+ #define NE_RESET 0x1f /* Issue a read to reset, a write to clear. */
+-#define NE_IO_EXTENT 0x20
+
+ #define NESM_START_PG 0x40 /* First page of TX buffer */
+ #define NESM_STOP_PG 0x80 /* Last page +1 of RX ring */
+
+-int ne2k_pci_probe(struct device *dev);
+-static struct device *ne2k_pci_probe1(struct device *dev, int ioaddr, int irq);
++int ne2k_pci_probe(struct net_device *dev);
+
+-static int ne_open(struct device *dev);
+-static int ne_close(struct device *dev);
++static int ne2k_pci_open(struct net_device *dev);
++static int ne2k_pci_close(struct net_device *dev);
+
+-static void ne_reset_8390(struct device *dev);
+-static void ne_get_8390_hdr(struct device *dev, struct e8390_pkt_hdr *hdr,
++static void ne2k_pci_reset_8390(struct net_device *dev);
++static void ne2k_pci_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr,
+ int ring_page);
+-static void ne_block_input(struct device *dev, int count,
++static void ne2k_pci_block_input(struct net_device *dev, int count,
+ struct sk_buff *skb, int ring_offset);
+-static void ne_block_output(struct device *dev, const int count,
++static void ne2k_pci_block_output(struct net_device *dev, const int count,
+ const unsigned char *buf, const int start_page);
+
+
+
+-/* No room in the standard 8390 structure for extra info we need. */
++/* There is no room in the standard 8390 structure for extra info we need,
++ so we build a meta/outer-wrapper structure.. */
+ struct ne2k_pci_card {
+ struct ne2k_pci_card *next;
+- struct device *dev;
+- unsigned char pci_bus, pci_device_fn;
++ struct net_device *dev;
++ struct pci_dev *pci_dev;
+ };
+ /* A list of all installed devices, for removing the driver module. */
+ static struct ne2k_pci_card *ne2k_card_list = NULL;
+
+ #ifdef LOAD_8390_BY_KERNELD
+-static int (*Lethdev_init)(struct device *dev);
+-static void (*LNS8390_init)(struct device *dev, int startp);
+-static int (*Lei_open)(struct device *dev);
+-static int (*Lei_close)(struct device *dev);
++static int (*Lethdev_init)(struct net_device *dev);
++static void (*LNS8390_init)(struct net_device *dev, int startp);
++static int (*Lei_open)(struct net_device *dev);
++static int (*Lei_close)(struct net_device *dev);
+ static void (*Lei_interrupt)(int irq, void *dev_id, struct pt_regs *regs);
+ #else
+ #define Lethdev_init ethdev_init
+@@ -126,23 +202,28 @@
+ #endif
+
+ #ifdef MODULE
+-
+-int
+-init_module(void)
++int init_module(void)
+ {
+- /* We must emit version information. */
+- if (debug)
+- printk(KERN_INFO "%s", version);
++ int found_cnt;
+
+- return ne2k_pci_probe(0);
++ if (debug) /* Emit version even if no cards detected. */
++ printk(KERN_INFO "%s" KERN_INFO "%s", version1, version2);
++ found_cnt = pci_drv_register(&ne2k_pci_drv_id, NULL);
++ if (found_cnt < 0) {
++ printk(KERN_NOTICE "ne2k-pci.c: No useable cards found, driver NOT installed.\n");
++ return -ENODEV;
++ }
++ lock_8390_module();
++ return 0;
+ }
+
+-void
+-cleanup_module(void)
++void cleanup_module(void)
+ {
+- struct device *dev;
++ struct net_device *dev;
+ struct ne2k_pci_card *this_card;
+
++ pci_drv_unregister(&ne2k_pci_drv_id);
++
+ /* No need to check MOD_IN_USE, as sys_delete_module() checks. */
+ while (ne2k_card_list) {
+ dev = ne2k_card_list->dev;
+@@ -156,131 +237,32 @@
+
+ #ifdef LOAD_8390_BY_KERNELD
+ release_module("8390", 0);
++#else
++ unlock_8390_module();
+ #endif
+ }
+
+-#endif /* MODULE */
+-
+-/*
+- NEx000-clone boards have a Station Address (SA) PROM (SAPROM) in the packet
+- buffer memory space. By-the-spec NE2000 clones have 0x57,0x57 in bytes
+- 0x0e,0x0f of the SAPROM, while other supposed NE2000 clones must be
+- detected by their SA prefix.
+-
+- Reading the SAPROM from a word-wide card with the 8390 set in byte-wide
+- mode results in doubled values, which can be detected and compensated for.
+-
+- The probe is also responsible for initializing the card and filling
+- in the 'dev' and 'ei_status' structures.
+-*/
+-
+-#ifdef HAVE_DEVLIST
+-struct netdev_entry netcard_drv =
+-{"ne2k_pci", ne2k_pci_probe1, NE_IO_EXTENT, 0};
+-#endif
++#else
+
+-int ne2k_pci_probe(struct device *dev)
++int ne2k_pci_probe(struct net_device *dev)
+ {
+- static int pci_index = 0; /* Static, for multiple calls. */
+- int cards_found = 0;
+- int i;
+-
+- if ( ! pcibios_present())
+- return -ENODEV;
+-
+- for (;pci_index < 0xff; pci_index++) {
+- unsigned char pci_bus, pci_device_fn;
+- u8 pci_irq_line;
+- u16 pci_command, new_command, vendor, device;
+- u32 pci_ioaddr;
+-
+- if (pcibios_find_class (PCI_CLASS_NETWORK_ETHERNET << 8, pci_index,
+- &pci_bus, &pci_device_fn)
+- != PCIBIOS_SUCCESSFUL)
+- break;
+- pcibios_read_config_word(pci_bus, pci_device_fn,
+- PCI_VENDOR_ID, &vendor);
+- pcibios_read_config_word(pci_bus, pci_device_fn,
+- PCI_DEVICE_ID, &device);
+-
+- /* Note: some vendor IDs (RealTek) have non-NE2k cards as well. */
+- for (i = 0; pci_clone_list[i].vendor != 0; i++)
+- if (pci_clone_list[i].vendor == vendor
+- && pci_clone_list[i].dev_id == device)
+- break;
+- if (pci_clone_list[i].vendor == 0)
+- continue;
+-
+-#ifndef MODULE
+- {
+- static unsigned version_printed = 0;
+- if (version_printed++ == 0)
+- printk(KERN_INFO "%s", version);
+- }
+-#endif
+-
+- pcibios_read_config_dword(pci_bus, pci_device_fn,
+- PCI_BASE_ADDRESS_0, &pci_ioaddr);
+- pcibios_read_config_byte(pci_bus, pci_device_fn,
+- PCI_INTERRUPT_LINE, &pci_irq_line);
+- pcibios_read_config_word(pci_bus, pci_device_fn,
+- PCI_COMMAND, &pci_command);
+-
+- /* Remove I/O space marker in bit 0. */
+- pci_ioaddr &= PCI_BASE_ADDRESS_IO_MASK;
+-
+- /* Avoid already found cards from previous calls */
+- if (check_region(pci_ioaddr, NE_IO_EXTENT))
+- continue;
+-
+- /* Activate the card: fix for brain-damaged Win98 BIOSes. */
+- new_command = pci_command | PCI_COMMAND_IO;
+- if (pci_command != new_command) {
+- printk(KERN_INFO " The PCI BIOS has not enabled this"
+- " NE2k clone! Updating PCI command %4.4x->%4.4x.\n",
+- pci_command, new_command);
+- pcibios_write_config_word(pci_bus, pci_device_fn,
+- PCI_COMMAND, new_command);
+- }
+-
+- if (pci_irq_line <= 0 || pci_irq_line >= NR_IRQS)
+- printk(KERN_WARNING " WARNING: The PCI BIOS assigned this PCI NE2k"
+- " card to IRQ %d, which is unlikely to work!.\n"
+- KERN_WARNING " You should use the PCI BIOS setup to assign"
+- " a valid IRQ line.\n", pci_irq_line);
+-
+- printk("ne2k-pci.c: PCI NE2000 clone '%s' at I/O %#x, IRQ %d.\n",
+- pci_clone_list[i].name, pci_ioaddr, pci_irq_line);
+- dev = ne2k_pci_probe1(dev, pci_ioaddr, pci_irq_line);
+- if (dev == 0) {
+- /* Should not happen. */
+- printk(KERN_ERR "ne2k-pci: Probe of PCI card at %#x failed.\n",
+- pci_ioaddr);
+- continue;
+- } else {
+- struct ne2k_pci_card *ne2k_card =
+- kmalloc(sizeof(struct ne2k_pci_card), GFP_KERNEL);
+- ne2k_card->next = ne2k_card_list;
+- ne2k_card_list = ne2k_card;
+- ne2k_card->dev = dev;
+- ne2k_card->pci_bus = pci_bus;
+- ne2k_card->pci_device_fn = pci_device_fn;
+- }
+- dev = 0;
+-
+- cards_found++;
+- }
+-
+- return cards_found ? 0 : -ENODEV;
++ int found_cnt = pci_drv_register(&ne2k_pci_drv_id, NULL);
++ if (found_cnt >= 0 && debug)
++ printk(KERN_INFO "%s" KERN_INFO "%s", version1, version2);
++ return found_cnt;
+ }
++#endif /* MODULE */
+
+-static struct device *ne2k_pci_probe1(struct device *dev, int ioaddr, int irq)
++static void *ne2k_pci_probe1(struct pci_dev *pdev, void *init_dev,
++ long ioaddr, int irq, int chip_idx, int fnd_cnt)
+ {
++ struct net_device *dev;
+ int i;
+ unsigned char SA_prom[32];
+- const char *name = NULL;
+ int start_page, stop_page;
+ int reg0 = inb(ioaddr);
++ int flags = pci_id_tbl[chip_idx].drv_flags;
++ struct ne2k_pci_card *ne2k_card;
+
+ if (reg0 == 0xFF)
+ return 0;
+@@ -300,7 +282,18 @@
+ }
+ }
+
+- dev = init_etherdev(dev, 0);
++ dev = init_etherdev(init_dev, 0);
++
++ if (dev == NULL)
++ return 0;
++ ne2k_card = kmalloc(sizeof(struct ne2k_pci_card), GFP_KERNEL);
++ if (ne2k_card == NULL)
++ return 0;
++
++ ne2k_card->next = ne2k_card_list;
++ ne2k_card_list = ne2k_card;
++ ne2k_card->dev = dev;
++ ne2k_card->pci_dev = pdev;
+
+ /* Reset card. Who knows what dain-bramaged state it was left in. */
+ {
+@@ -363,34 +356,23 @@
+
+ }
+
+-#ifdef notdef
+- /* Some broken PCI cards don't respect the byte-wide
+- request in program_seq above, and hence don't have doubled up values.
+- */
+- for(i = 0; i < 32 /*sizeof(SA_prom)*/; i+=2) {
+- SA_prom[i] = inb(ioaddr + NE_DATAPORT);
+- SA_prom[i+1] = inb(ioaddr + NE_DATAPORT);
+- if (SA_prom[i] != SA_prom[i+1])
+- sa_prom_doubled = 0;
+- }
++ /* Note: all PCI cards have at least 16 bit access, so we don't have
++ to check for 8 bit cards. Most cards permit 32 bit access. */
+
+- if (sa_prom_doubled)
+- for (i = 0; i < 16; i++)
+- SA_prom[i] = SA_prom[i+i];
+-#else
+- for(i = 0; i < 32 /*sizeof(SA_prom)*/; i++)
+- SA_prom[i] = inb(ioaddr + NE_DATAPORT);
+-
+-#endif
++ if (flags & ONLY_32BIT_IO) {
++ for (i = 0; i < 8; i++)
++ ((u32 *)SA_prom)[i] = le32_to_cpu(inl(ioaddr + NE_DATAPORT));
++ } else
++ for(i = 0; i < 32 /*sizeof(SA_prom)*/; i++)
++ SA_prom[i] = inb(ioaddr + NE_DATAPORT);
+
+ /* We always set the 8390 registers for word mode. */
+ outb(0x49, ioaddr + EN0_DCFG);
+ start_page = NESM_START_PG;
+- stop_page = NESM_STOP_PG;
+
+- /* Set up the rest of the parameters. */
+- name = "PCI NE2000";
++ stop_page = flags & STOP_PG_0x60 ? 0x60 : NESM_STOP_PG;
+
++ /* Set up the rest of the parameters. */
+ dev->irq = irq;
+ dev->base_addr = ioaddr;
+
+@@ -402,17 +384,24 @@
+
+ request_region(ioaddr, NE_IO_EXTENT, dev->name);
+
+- printk("%s: %s found at %#x, IRQ %d, ",
+- dev->name, name, ioaddr, dev->irq);
++ printk("%s: %s found at %#lx, IRQ %d, ",
++ dev->name, pci_id_tbl[chip_idx].name, ioaddr, dev->irq);
+ for(i = 0; i < 6; i++) {
+ printk("%2.2X%s", SA_prom[i], i == 5 ? ".\n": ":");
+ dev->dev_addr[i] = SA_prom[i];
+ }
+
+- ei_status.name = name;
++ ei_status.name = pci_id_tbl[chip_idx].name;
+ ei_status.tx_start_page = start_page;
+ ei_status.stop_page = stop_page;
+ ei_status.word16 = 1;
++ ei_status.ne2k_flags = flags;
++ if (fnd_cnt < MAX_UNITS) {
++ if (full_duplex[fnd_cnt] > 0 || (options[fnd_cnt] & FORCE_FDX)) {
++ printk("%s: Full duplex set by user option.\n", dev->name);
++ ei_status.ne2k_flags |= FORCE_FDX;
++ }
++ }
+
+ ei_status.rx_start_page = start_page + TX_PAGES;
+ #ifdef PACKETBUF_MEMSIZE
+@@ -420,28 +409,37 @@
+ ei_status.stop_page = ei_status.tx_start_page + PACKETBUF_MEMSIZE;
+ #endif
+
+- ei_status.reset_8390 = &ne_reset_8390;
+- ei_status.block_input = &ne_block_input;
+- ei_status.block_output = &ne_block_output;
+- ei_status.get_8390_hdr = &ne_get_8390_hdr;
+- dev->open = &ne_open;
+- dev->stop = &ne_close;
++ ei_status.reset_8390 = &ne2k_pci_reset_8390;
++ ei_status.block_input = &ne2k_pci_block_input;
++ ei_status.block_output = &ne2k_pci_block_output;
++ ei_status.get_8390_hdr = &ne2k_pci_get_8390_hdr;
++ dev->open = &ne2k_pci_open;
++ dev->stop = &ne2k_pci_close;
+ LNS8390_init(dev, 0);
+ return dev;
+ }
+
+-static int
+-ne_open(struct device *dev)
++static int ne2k_pci_open(struct net_device *dev)
+ {
+- if (request_irq(dev->irq, Lei_interrupt, SA_SHIRQ, dev->name, dev))
++ MOD_INC_USE_COUNT;
++ if (request_irq(dev->irq, Lei_interrupt, SA_SHIRQ, dev->name, dev)) {
++ MOD_DEC_USE_COUNT;
+ return -EAGAIN;
++ }
++ /* Set full duplex for the chips that we know about. */
++ if (ei_status.ne2k_flags & FORCE_FDX) {
++ long ioaddr = dev->base_addr;
++ if (ei_status.ne2k_flags & REALTEK_FDX) {
++ outb(0xC0 + E8390_NODMA, ioaddr + NE_CMD); /* Page 3 */
++ outb(inb(ioaddr + 0x20) | 0x80, ioaddr + 0x20);
++ } else if (ei_status.ne2k_flags & HOLTEK_FDX)
++ outb(inb(ioaddr + 0x20) | 0x80, ioaddr + 0x20);
++ }
+ Lei_open(dev);
+- MOD_INC_USE_COUNT;
+ return 0;
+ }
+
+-static int
+-ne_close(struct device *dev)
++static int ne2k_pci_close(struct net_device *dev)
+ {
+ Lei_close(dev);
+ free_irq(dev->irq, dev);
+@@ -451,8 +449,7 @@
+
+ /* Hard reset the card. This used to pause for the same period that a
+ 8390 reset command required, but that shouldn't be necessary. */
+-static void
+-ne_reset_8390(struct device *dev)
++static void ne2k_pci_reset_8390(struct net_device *dev)
+ {
+ unsigned long reset_start_time = jiffies;
+
+@@ -467,7 +464,7 @@
+ /* This check _should_not_ be necessary, omit eventually. */
+ while ((inb(NE_BASE+EN0_ISR) & ENISR_RESET) == 0)
+ if (jiffies - reset_start_time > 2) {
+- printk("%s: ne_reset_8390() did not complete.\n", dev->name);
++ printk("%s: ne2k_pci_reset_8390() did not complete.\n", dev->name);
+ break;
+ }
+ outb(ENISR_RESET, NE_BASE + EN0_ISR); /* Ack intr. */
+@@ -477,18 +474,18 @@
+ we don't need to be concerned with ring wrap as the header will be at
+ the start of a page, so we optimize accordingly. */
+
+-static void
+-ne_get_8390_hdr(struct device *dev, struct e8390_pkt_hdr *hdr, int ring_page)
++static void ne2k_pci_get_8390_hdr(struct net_device *dev,
++ struct e8390_pkt_hdr *hdr, int ring_page)
+ {
+
+- int nic_base = dev->base_addr;
++ long nic_base = dev->base_addr;
+
+ /* This *shouldn't* happen. If it does, it's the last thing you'll see */
+ if (ei_status.dmaing) {
+- printk("%s: DMAing conflict in ne_get_8390_hdr "
++ printk("%s: DMAing conflict in ne2k_pci_get_8390_hdr "
+ "[DMAstat:%d][irqlock:%d][intr:%d].\n",
+ dev->name, ei_status.dmaing, ei_status.irqlock,
+- dev->interrupt);
++ (int)dev->interrupt);
+ return;
+ }
+
+@@ -500,11 +497,12 @@
+ outb(ring_page, nic_base + EN0_RSARHI);
+ outb(E8390_RREAD+E8390_START, nic_base + NE_CMD);
+
+-#if defined(USE_LONGIO)
+- *(u32*)hdr = inl(NE_BASE + NE_DATAPORT);
+-#else
+- insw(NE_BASE + NE_DATAPORT, hdr, sizeof(struct e8390_pkt_hdr)>>1);
+-#endif
++ if (ei_status.ne2k_flags & ONLY_16BIT_IO) {
++ insw(NE_BASE + NE_DATAPORT, hdr, sizeof(struct e8390_pkt_hdr)>>1);
++ } else {
++ *(u32*)hdr = le32_to_cpu(inl(NE_BASE + NE_DATAPORT));
++ le16_to_cpus(&hdr->count);
++ }
+
+ outb(ENISR_RDC, nic_base + EN0_ISR); /* Ack intr. */
+ ei_status.dmaing &= ~0x01;
+@@ -515,21 +513,23 @@
+ The NEx000 doesn't share the on-board packet memory -- you have to put
+ the packet out through the "remote DMA" dataport using outb. */
+
+-static void
+-ne_block_input(struct device *dev, int count, struct sk_buff *skb, int ring_offset)
++static void ne2k_pci_block_input(struct net_device *dev, int count,
++ struct sk_buff *skb, int ring_offset)
+ {
+- int nic_base = dev->base_addr;
++ long nic_base = dev->base_addr;
+ char *buf = skb->data;
+
+ /* This *shouldn't* happen. If it does, it's the last thing you'll see */
+ if (ei_status.dmaing) {
+- printk("%s: DMAing conflict in ne_block_input "
++ printk("%s: DMAing conflict in ne2k_pci_block_input "
+ "[DMAstat:%d][irqlock:%d][intr:%d].\n",
+ dev->name, ei_status.dmaing, ei_status.irqlock,
+- dev->interrupt);
++ (int)dev->interrupt);
+ return;
+ }
+ ei_status.dmaing |= 0x01;
++ if (ei_status.ne2k_flags & ONLY_32BIT_IO)
++ count = (count + 3) & 0xFFFC;
+ outb(E8390_NODMA+E8390_PAGE0+E8390_START, nic_base+ NE_CMD);
+ outb(count & 0xff, nic_base + EN0_RCNTLO);
+ outb(count >> 8, nic_base + EN0_RCNTHI);
+@@ -537,44 +537,47 @@
+ outb(ring_offset >> 8, nic_base + EN0_RSARHI);
+ outb(E8390_RREAD+E8390_START, nic_base + NE_CMD);
+
+-#if defined(USE_LONGIO)
+- insl(NE_BASE + NE_DATAPORT, buf, count>>2);
+- if (count & 3) {
+- buf += count & ~3;
+- if (count & 2)
+- *((u16*)buf)++ = inw(NE_BASE + NE_DATAPORT);
+- if (count & 1)
+- *buf = inb(NE_BASE + NE_DATAPORT);
+- }
+-#else
+- insw(NE_BASE + NE_DATAPORT,buf,count>>1);
+- if (count & 0x01) {
+- buf[count-1] = inb(NE_BASE + NE_DATAPORT);
++ if (ei_status.ne2k_flags & ONLY_16BIT_IO) {
++ insw(NE_BASE + NE_DATAPORT,buf,count>>1);
++ if (count & 0x01) {
++ buf[count-1] = inb(NE_BASE + NE_DATAPORT);
++ }
++ } else {
++ insl(NE_BASE + NE_DATAPORT, buf, count>>2);
++ if (count & 3) {
++ buf += count & ~3;
++ if (count & 2)
++ *((u16*)buf)++ = le16_to_cpu(inw(NE_BASE + NE_DATAPORT));
++ if (count & 1)
++ *buf = inb(NE_BASE + NE_DATAPORT);
++ }
+ }
+-#endif
+
+ outb(ENISR_RDC, nic_base + EN0_ISR); /* Ack intr. */
+ ei_status.dmaing &= ~0x01;
+ }
+
+-static void
+-ne_block_output(struct device *dev, int count,
+- const unsigned char *buf, const int start_page)
++static void ne2k_pci_block_output(struct net_device *dev, int count,
++ const unsigned char *buf,
++ const int start_page)
+ {
+ int nic_base = NE_BASE;
+ unsigned long dma_start;
+
+ /* On little-endian it's always safe to round the count up for
+ word writes. */
+- if (count & 0x01)
+- count++;
++ if (ei_status.ne2k_flags & ONLY_32BIT_IO)
++ count = (count + 3) & 0xFFFC;
++ else
++ if (count & 0x01)
++ count++;
+
+ /* This *shouldn't* happen. If it does, it's the last thing you'll see */
+ if (ei_status.dmaing) {
+- printk("%s: DMAing conflict in ne_block_output."
++ printk("%s: DMAing conflict in ne2k_pci_block_output."
+ "[DMAstat:%d][irqlock:%d][intr:%d]\n",
+ dev->name, ei_status.dmaing, ei_status.irqlock,
+- dev->interrupt);
++ (int)dev->interrupt);
+ return;
+ }
+ ei_status.dmaing |= 0x01;
+@@ -600,23 +603,23 @@
+ outb(0x00, nic_base + EN0_RSARLO);
+ outb(start_page, nic_base + EN0_RSARHI);
+ outb(E8390_RWRITE+E8390_START, nic_base + NE_CMD);
+-#if defined(USE_LONGIO)
+- outsl(NE_BASE + NE_DATAPORT, buf, count>>2);
+- if (count & 3) {
+- buf += count & ~3;
+- if (count & 2)
+- outw(*((u16*)buf)++, NE_BASE + NE_DATAPORT);
++ if (ei_status.ne2k_flags & ONLY_16BIT_IO) {
++ outsw(NE_BASE + NE_DATAPORT, buf, count>>1);
++ } else {
++ outsl(NE_BASE + NE_DATAPORT, buf, count>>2);
++ if (count & 3) {
++ buf += count & ~3;
++ if (count & 2)
++ outw(cpu_to_le16(*((u16*)buf)++), NE_BASE + NE_DATAPORT);
++ }
+ }
+-#else
+- outsw(NE_BASE + NE_DATAPORT, buf, count>>1);
+-#endif
+
+ dma_start = jiffies;
+
+ while ((inb(nic_base + EN0_ISR) & ENISR_RDC) == 0)
+ if (jiffies - dma_start > 2) { /* Avoid clock roll-over. */
+ printk("%s: timeout waiting for Tx RDC.\n", dev->name);
+- ne_reset_8390(dev);
++ ne2k_pci_reset_8390(dev);
+ LNS8390_init(dev,1);
+ break;
+ }
+@@ -629,8 +632,8 @@
+
+ /*
+ * Local variables:
+- * compile-command: "gcc -DMODVERSIONS -DMODULE -D__KERNEL__ -Wall -Wstrict-prototypes -O6 -fomit-frame-pointer -I/usr/src/linux/drivers/net/ -c ne2k-pci.c"
+- * alt-compile-command: "gcc -DMODULE -D__KERNEL__ -Wall -Wstrict-prototypes -O6 -fomit-frame-pointer -I/usr/src/linux/drivers/net/ -c ne2k-pci.c"
++ * compile-command: "gcc -DMODULE -Wall -Wstrict-prototypes -O6 -c ne2k-pci.c -I/usr/src/linux/drivers/net/"
++ * alt-compile-command: "gcc -DMODULE -O6 -c ne2k-pci.c"
+ * c-indent-level: 4
+ * c-basic-offset: 4
+ * tab-width: 4
+Index: linux/src/drivers/net/ns820.c
+===================================================================
+RCS file: linux/src/drivers/net/ns820.c
+diff -N linux/src/drivers/net/ns820.c
+--- /dev/null 1 Jan 1970 00:00:00 -0000
++++ linux/src/drivers/net/ns820.c 20 Aug 2004 10:32:54 -0000
+@@ -0,0 +1,1547 @@
++/* ns820.c: A Linux Gigabit Ethernet driver for the NatSemi DP83820 series. */
++/*
++ Written/copyright 1999-2003 by Donald Becker.
++ Copyright 2002-2003 by Scyld Computing Corporation.
++
++ This software may be used and distributed according to the terms of
++ the GNU General Public License (GPL), incorporated herein by reference.
++ Drivers based on or derived from this code fall under the GPL and must
++ retain the authorship, copyright and license notice. This file is not
++ a complete program and may only be used when the entire operating
++ system is licensed under the GPL. License for under other terms may be
++ available. Contact the original author for details.
++
++ The original author may be reached as becker@scyld.com, or at
++ Scyld Computing Corporation
++ 914 Bay Ridge Road, Suite 220
++ Annapolis MD 21403
++
++ Support information and updates available at
++ http://www.scyld.com/network/natsemi.html
++ The information and support mailing lists are based at
++ http://www.scyld.com/mailman/listinfo/
++*/
++
++/* These identify the driver base version and may not be removed. */
++static const char version1[] =
++"ns820.c:v1.03a 8/09/2003 Written by Donald Becker <becker@scyld.com>\n";
++static const char version2[] =
++" http://www.scyld.com/network/natsemi.html\n";
++/* Updated to recommendations in pci-skeleton v2.13. */
++
++/* Automatically extracted configuration info:
++probe-func: ns820_probe
++config-in: tristate 'National Semiconductor DP8382x series PCI Ethernet support' CONFIG_NATSEMI820
++
++c-help-name: National Semiconductor DP8382x series PCI Ethernet support
++c-help-symbol: CONFIG_NATSEMI820
++c-help: This driver is for the National Semiconductor DP83820 Gigabit Ethernet
++c-help: adapter series.
++c-help: More specific information and updates are available from
++c-help: http://www.scyld.com/network/natsemi.html
++*/
++
++/* The user-configurable values.
++ These may be modified when a driver module is loaded.*/
++
++/* Message enable level: 0..31 = no..all messages. See NETIF_MSG docs. */
++static int debug = 2;
++
++/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
++static int max_interrupt_work = 20;
++
++/* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
++ This chip uses a 2048 element hash table based on the Ethernet CRC.
++ Previous natsemi chips had unreliable multicast filter circuitry.
++ To work around an observed problem set this value to '0',
++ which will immediately switch to Rx-all-multicast.
++ */
++static int multicast_filter_limit = 100;
++
++/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
++ Setting to > 1518 effectively disables this feature.
++ This chip can only receive into aligned buffers, so architectures such
++ as the Alpha AXP might benefit from a copy-align.
++*/
++static int rx_copybreak = 0;
++
++/* Used to pass the media type, etc.
++ Both 'options[]' and 'full_duplex[]' should exist for driver
++ interoperability, however setting full_duplex[] is deprecated.
++ The media type is usually passed in 'options[]'.
++ The default is autonegotation for speed and duplex.
++ This should rarely be overridden.
++ Use option values 0x10/0x20 for 10Mbps, 0x100,0x200 for 100Mbps.
++ Use option values 0x10 and 0x100 for forcing half duplex fixed speed.
++ Use option values 0x20 and 0x200 for forcing full duplex operation.
++ Use 0x1000 or 0x2000 for gigabit.
++*/
++#define MAX_UNITS 8 /* More are supported, limit only on options */
++static int options[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
++static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
++
++/* Operational parameters that are set at compile time. */
++
++/* Keep the ring sizes a power of two for compile efficiency.
++ Understand the implications before changing these settings!
++ The compiler will convert <unsigned>'%'<2^N> into a bit mask.
++ Making the Tx ring too large decreases the effectiveness of channel
++ bonding and packet priority, confuses the system network buffer limits,
++ and wastes memory.
++ Too-large receive rings waste memory and confound network buffer limits.
++*/
++#define TX_RING_SIZE 16
++#define TX_QUEUE_LEN 10 /* Limit ring entries actually used, min 4. */
++#define RX_RING_SIZE 64
++
++/* Operational parameters that usually are not changed. */
++/* Time in jiffies before concluding the transmitter is hung.
++ Re-autonegotiation may take up to 3 seconds.
++ */
++#define TX_TIMEOUT (6*HZ)
++
++/* Allocation size of Rx buffers with normal sized Ethernet frames.
++ Do not change this value without good reason. This is not a limit,
++ but a way to keep a consistent allocation size among drivers.
++ */
++#define PKT_BUF_SZ 1536
++
++#ifndef __KERNEL__
++#define __KERNEL__
++#endif
++#if !defined(__OPTIMIZE__)
++#warning You must compile this file with the correct options!
++#warning See the last lines of the source file.
++#error You must compile this driver with "-O".
++#endif
++
++/* Include files, designed to support most kernel versions 2.0.0 and later. */
++#include <linux/config.h>
++#if defined(CONFIG_SMP) && ! defined(__SMP__)
++#define __SMP__
++#endif
++#if defined(MODULE) && defined(CONFIG_MODVERSIONS) && ! defined(MODVERSIONS)
++#define MODVERSIONS
++#endif
++
++#include <linux/version.h>
++#if defined(MODVERSIONS)
++#include <linux/modversions.h>
++#endif
++#include <linux/module.h>
++
++#include <linux/kernel.h>
++#include <linux/string.h>
++#include <linux/timer.h>
++#include <linux/errno.h>
++#include <linux/ioport.h>
++#if LINUX_VERSION_CODE >= 0x20400
++#include <linux/slab.h>
++#else
++#include <linux/malloc.h>
++#endif
++#include <linux/interrupt.h>
++#include <linux/pci.h>
++#include <linux/netdevice.h>
++#include <linux/etherdevice.h>
++#include <linux/skbuff.h>
++#include <asm/processor.h> /* Processor type for cache alignment. */
++#include <asm/bitops.h>
++#include <asm/io.h>
++
++#ifdef INLINE_PCISCAN
++#include "k_compat.h"
++#else
++#include "pci-scan.h"
++#include "kern_compat.h"
++#endif
++
++#if (LINUX_VERSION_CODE >= 0x20100) && defined(MODULE)
++char kernel_version[] = UTS_RELEASE;
++#endif
++
++MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
++MODULE_DESCRIPTION("National Semiconductor DP83820 series PCI Ethernet driver");
++MODULE_LICENSE("GPL");
++MODULE_PARM(debug, "i");
++MODULE_PARM(options, "1-" __MODULE_STRING(MAX_UNITS) "i");
++MODULE_PARM(max_interrupt_work, "i");
++MODULE_PARM(full_duplex, "1-" __MODULE_STRING(MAX_UNITS) "i");
++MODULE_PARM(rx_copybreak, "i");
++MODULE_PARM(multicast_filter_limit, "i");
++MODULE_PARM_DESC(debug, "Driver message level (0-31)");
++MODULE_PARM_DESC(options, "Force transceiver type or fixed speed+duplex");
++MODULE_PARM_DESC(max_interrupt_work,
++ "Driver maximum events handled per interrupt");
++MODULE_PARM_DESC(full_duplex,
++ "Non-zero to force full duplex, non-negotiated link "
++ "(deprecated).");
++MODULE_PARM_DESC(rx_copybreak,
++ "Breakpoint in bytes for copy-only-tiny-frames");
++MODULE_PARM_DESC(multicast_filter_limit,
++ "Multicast addresses before switching to Rx-all-multicast");
++
++/*
++ Theory of Operation
++
++I. Board Compatibility
++
++This driver is designed for National Semiconductor DP83820 10/100/1000
++Ethernet NIC. It is superficially similar to the 810 series "natsemi.c"
++driver, however the register layout, descriptor layout and element
++length of the new chip series is different.
++
++II. Board-specific settings
++
++This driver requires the PCI interrupt line to be configured.
++It honors the EEPROM-set values.
++
++III. Driver operation
++
++IIIa. Ring buffers
++
++This driver uses two statically allocated fixed-size descriptor lists
++formed into rings by a branch from the final descriptor to the beginning of
++the list. The ring sizes are set at compile time by RX/TX_RING_SIZE.
++The NatSemi design uses a 'next descriptor' pointer that the driver forms
++into a list, thus rings can be arbitrarily sized. Before changing the
++ring sizes you should understand the flow and cache effects of the
++full/available/empty hysteresis.
++
++IIIb/c. Transmit/Receive Structure
++
++This driver uses a zero-copy receive and transmit scheme.
++The driver allocates full frame size skbuffs for the Rx ring buffers at
++open() time and passes the skb->data field to the chip as receive data
++buffers. When an incoming frame is less than RX_COPYBREAK bytes long,
++a fresh skbuff is allocated and the frame is copied to the new skbuff.
++When the incoming frame is larger, the skbuff is passed directly up the
++protocol stack. Buffers consumed this way are replaced by newly allocated
++skbuffs in a later phase of receives.
++
++The RX_COPYBREAK value is chosen to trade-off the memory wasted by
++using a full-sized skbuff for small frames vs. the copying costs of larger
++frames. New boards are typically used in generously configured machines
++and the underfilled buffers have negligible impact compared to the benefit of
++a single allocation size, so the default value of zero results in never
++copying packets. When copying is done, the cost is usually mitigated by using
++a combined copy/checksum routine. Copying also preloads the cache, which is
++most useful with small frames.
++
++A subtle aspect of the operation is that unaligned buffers are not permitted
++by the hardware. Thus the IP header at offset 14 in an ethernet frame isn't
++longword aligned for further processing. On copies frames are put into the
++skbuff at an offset of "+2", 16-byte aligning the IP header.
++
++IIId. Synchronization
++
++The driver runs as two independent, single-threaded flows of control. One
++is the send-packet routine, which enforces single-threaded use by the
++dev->tbusy flag. The other thread is the interrupt handler, which is single
++threaded by the hardware and interrupt handling software.
++
++The send packet thread has partial control over the Tx ring and 'dev->tbusy'
++flag. It sets the tbusy flag whenever it's queuing a Tx packet. If the next
++queue slot is empty, it clears the tbusy flag when finished otherwise it sets
++the 'lp->tx_full' flag.
++
++The interrupt handler has exclusive control over the Rx ring and records stats
++from the Tx ring. After reaping the stats, it marks the Tx queue entry as
++empty by incrementing the dirty_tx mark. Iff the 'lp->tx_full' flag is set, it
++clears both the tx_full and tbusy flags.
++
++IV. Notes
++
++The NatSemi 820 series PCI gigabit chips are very common on low-cost NICs.
++The '821 appears to be the same as '820 chip, only with pins for the upper
++32 bits marked "N/C".
++
++IVb. References
++
++http://www.scyld.com/expert/100mbps.html
++http://www.scyld.com/expert/NWay.html
++The NatSemi dp83820 datasheet is available: search www.natsemi.com
++
++IVc. Errata
++
++None characterised.
++
++*/
++
++
++
++static void *ns820_probe1(struct pci_dev *pdev, void *init_dev,
++ long ioaddr, int irq, int chip_idx, int find_cnt);
++static int power_event(void *dev_instance, int event);
++enum chip_capability_flags {FDXActiveLow=1, InvertGbXcvrPwr=2, };
++#ifdef USE_IO_OPS
++#define PCI_IOTYPE (PCI_USES_MASTER | PCI_USES_IO | PCI_ADDR0)
++#else
++#define PCI_IOTYPE (PCI_USES_MASTER | PCI_USES_MEM | PCI_ADDR1)
++#endif
++
++static struct pci_id_info pci_id_tbl[] = {
++ { "D-Link DGE-500T (DP83820)",
++ { 0x0022100B, 0xffffffff, 0x49001186, 0xffffffff, },
++ PCI_IOTYPE, 256, FDXActiveLow},
++ {"NatSemi DP83820", { 0x0022100B, 0xffffffff },
++ PCI_IOTYPE, 256, 0},
++ {0,}, /* 0 terminated list. */
++};
++
++struct drv_id_info natsemi_drv_id = {
++ "ns820", PCI_HOTSWAP, PCI_CLASS_NETWORK_ETHERNET<<8, pci_id_tbl,
++ ns820_probe1, power_event };
++
++/* Offsets to the device registers.
++ Unlike software-only systems, device drivers interact with complex hardware.
++ It's not useful to define symbolic names for every register bit in the
++ device. Please do not change these names without good reason.
++*/
++enum register_offsets {
++ ChipCmd=0x00, ChipConfig=0x04, EECtrl=0x08, PCIBusCfg=0x0C,
++ IntrStatus=0x10, IntrMask=0x14, IntrEnable=0x18, IntrHoldoff=0x1C,
++ TxRingPtr=0x20, TxRingPtrHi=0x24, TxConfig=0x28,
++ RxRingPtr=0x30, RxRingPtrHi=0x34, RxConfig=0x38,
++ WOLCmd=0x40, PauseCmd=0x44, RxFilterAddr=0x48, RxFilterData=0x4C,
++ BootRomAddr=0x50, BootRomData=0x54, ChipRevReg=0x58,
++ StatsCtrl=0x5C, RxPktErrs=0x60, RxMissed=0x68, RxCRCErrs=0x64,
++};
++
++/* Bits in ChipCmd. */
++enum ChipCmdBits {
++ ChipReset=0x100, SoftIntr=0x80, RxReset=0x20, TxReset=0x10,
++ RxOff=0x08, RxOn=0x04, TxOff=0x02, TxOn=0x01,
++};
++
++/* Bits in ChipConfig. */
++enum ChipConfigBits {
++ CfgLinkGood=0x80000000, CfgFDX=0x10000000,
++ CfgXcrReset=0x0400, CfgXcrOff=0x0200,
++};
++
++/* Bits in the interrupt status/mask registers. */
++enum intr_status_bits {
++ IntrRxDone=0x0001, IntrRxIntr=0x0002, IntrRxErr=0x0004, IntrRxEarly=0x0008,
++ IntrRxIdle=0x0010, IntrRxOverrun=0x0020,
++ IntrTxDone=0x0040, IntrTxIntr=0x0080, IntrTxErr=0x0100,
++ IntrTxIdle=0x0200, IntrTxUnderrun=0x0400,
++ StatsMax=0x0800, IntrDrv=0x1000, WOLPkt=0x2000, LinkChange=0x4000,
++ RxStatusOverrun=0x10000,
++ RxResetDone=0x00200000, TxResetDone=0x00400000,
++ IntrPCIErr=0x001E0000,
++ IntrNormalSummary=0x0251, IntrAbnormalSummary=0xED20,
++};
++
++/* Bits in the RxMode register. */
++enum rx_mode_bits {
++ AcceptErr=0x20, AcceptRunt=0x10,
++ AcceptBroadcast=0xC0000000,
++ AcceptMulticast=0x00200000, AcceptAllMulticast=0x20000000,
++ AcceptAllPhys=0x10000000, AcceptMyPhys=0x08000000,
++};
++
++/* The Rx and Tx buffer descriptors. */
++/* Note that using only 32 bit fields simplifies conversion to big-endian
++ architectures. */
++struct netdev_desc {
++#if ADDRLEN == 64
++ u64 next_desc;
++ u64 buf_addr;
++#endif
++ u32 next_desc;
++ u32 buf_addr;
++ s32 cmd_status;
++ u32 vlan_status;
++};
++
++/* Bits in network_desc.status */
++enum desc_status_bits {
++ DescOwn=0x80000000, DescMore=0x40000000, DescIntr=0x20000000,
++ DescNoCRC=0x10000000,
++ DescPktOK=0x08000000, RxTooLong=0x00400000,
++};
++
++#define PRIV_ALIGN 15 /* Required alignment mask */
++struct netdev_private {
++ /* Descriptor rings first for alignment. */
++ struct netdev_desc rx_ring[RX_RING_SIZE];
++ struct netdev_desc tx_ring[TX_RING_SIZE];
++ struct net_device *next_module; /* Link for devices of this type. */
++ void *priv_addr; /* Unaligned address for kfree */
++ const char *product_name;
++ /* The addresses of receive-in-place skbuffs. */
++ struct sk_buff* rx_skbuff[RX_RING_SIZE];
++ /* The saved address of a sent-in-place packet/buffer, for later free(). */
++ struct sk_buff* tx_skbuff[TX_RING_SIZE];
++ struct net_device_stats stats;
++ struct timer_list timer; /* Media monitoring timer. */
++ /* Frequently used values: keep some adjacent for cache effect. */
++ int msg_level;
++ int chip_id, drv_flags;
++ struct pci_dev *pci_dev;
++ long in_interrupt; /* Word-long for SMP locks. */
++ int max_interrupt_work;
++ int intr_enable;
++ unsigned int restore_intr_enable:1; /* Set if temporarily masked. */
++ unsigned int rx_q_empty:1; /* Set out-of-skbuffs. */
++
++ struct netdev_desc *rx_head_desc;
++ unsigned int cur_rx, dirty_rx; /* Producer/consumer ring indices */
++ unsigned int rx_buf_sz; /* Based on MTU+slack. */
++ int rx_copybreak;
++
++ unsigned int cur_tx, dirty_tx;
++ unsigned int tx_full:1; /* The Tx queue is full. */
++ /* These values keep track of the transceiver/media in use. */
++ unsigned int full_duplex:1; /* Full-duplex operation requested. */
++ unsigned int duplex_lock:1;
++ unsigned int medialock:1; /* Do not sense media. */
++ unsigned int default_port; /* Last dev->if_port value. */
++ /* Rx filter. */
++ u32 cur_rx_mode;
++ u32 rx_filter[16];
++ int multicast_filter_limit;
++ /* FIFO and PCI burst thresholds. */
++ int tx_config, rx_config;
++ /* MII transceiver section. */
++ u16 advertising; /* NWay media advertisement */
++};
++
++static int eeprom_read(long ioaddr, int location);
++static void mdio_sync(long mdio_addr);
++static int mdio_read(struct net_device *dev, int phy_id, int location);
++static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
++static int netdev_open(struct net_device *dev);
++static void check_duplex(struct net_device *dev);
++static void netdev_timer(unsigned long data);
++static void tx_timeout(struct net_device *dev);
++static int rx_ring_fill(struct net_device *dev);
++static void init_ring(struct net_device *dev);
++static int start_tx(struct sk_buff *skb, struct net_device *dev);
++static void intr_handler(int irq, void *dev_instance, struct pt_regs *regs);
++static void netdev_error(struct net_device *dev, int intr_status);
++static int netdev_rx(struct net_device *dev);
++static void netdev_error(struct net_device *dev, int intr_status);
++static void set_rx_mode(struct net_device *dev);
++static struct net_device_stats *get_stats(struct net_device *dev);
++static int mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
++static int netdev_close(struct net_device *dev);
++
++
++
++/* A list of our installed devices, for removing the driver module. */
++static struct net_device *root_net_dev = NULL;
++
++#ifndef MODULE
++int ns820_probe(struct net_device *dev)
++{
++ if (pci_drv_register(&natsemi_drv_id, dev) < 0)
++ return -ENODEV;
++ printk(KERN_INFO "%s" KERN_INFO "%s", version1, version2);
++ return 0;
++}
++#endif
++
++static void *ns820_probe1(struct pci_dev *pdev, void *init_dev,
++ long ioaddr, int irq, int chip_idx, int card_idx)
++{
++ struct net_device *dev;
++ struct netdev_private *np;
++ void *priv_mem;
++ int i, option = card_idx < MAX_UNITS ? options[card_idx] : 0;
++
++ dev = init_etherdev(init_dev, 0);
++ if (!dev)
++ return NULL;
++
++ /* Perhaps NETIF_MSG_PROBE */
++ printk(KERN_INFO "%s: %s at 0x%lx, ",
++ dev->name, pci_id_tbl[chip_idx].name, ioaddr);
++
++ for (i = 0; i < 3; i++)
++ ((u16 *)dev->dev_addr)[i] = le16_to_cpu(eeprom_read(ioaddr, 12 - i));
++ for (i = 0; i < 5; i++)
++ printk("%2.2x:", dev->dev_addr[i]);
++ printk("%2.2x, IRQ %d.\n", dev->dev_addr[i], irq);
++
++ /* Reset the chip to erase previous misconfiguration. */
++ writel(ChipReset, ioaddr + ChipCmd);
++ /* Power up Xcvr. */
++ writel(~CfgXcrOff & readl(ioaddr + ChipConfig), ioaddr + ChipConfig);
++
++ /* Make certain elements e.g. descriptor lists are aligned. */
++ priv_mem = kmalloc(sizeof(*np) + PRIV_ALIGN, GFP_KERNEL);
++ /* Check for the very unlikely case of no memory. */
++ if (priv_mem == NULL)
++ return NULL;
++
++ dev->base_addr = ioaddr;
++ dev->irq = irq;
++
++ dev->priv = np = (void *)(((long)priv_mem + PRIV_ALIGN) & ~PRIV_ALIGN);
++ memset(np, 0, sizeof(*np));
++ np->priv_addr = priv_mem;
++
++ np->next_module = root_net_dev;
++ root_net_dev = dev;
++
++ np->pci_dev = pdev;
++ np->chip_id = chip_idx;
++ np->drv_flags = pci_id_tbl[chip_idx].drv_flags;
++ np->msg_level = (1 << debug) - 1;
++ np->rx_copybreak = rx_copybreak;
++ np->max_interrupt_work = max_interrupt_work;
++ np->multicast_filter_limit = multicast_filter_limit;
++
++ if (dev->mem_start)
++ option = dev->mem_start;
++
++ /* The lower four bits are the media type. */
++ if (option > 0) {
++ if (option & 0x220)
++ np->full_duplex = 1;
++ np->default_port = option & 0x33ff;
++ if (np->default_port & 0x330)
++ np->medialock = 1;
++ }
++ if (card_idx < MAX_UNITS && full_duplex[card_idx] > 0)
++ np->full_duplex = 1;
++
++ if (np->full_duplex) {
++ if (np->msg_level & NETIF_MSG_PROBE)
++ printk(KERN_INFO "%s: Set to forced full duplex, autonegotiation"
++ " disabled.\n", dev->name);
++ np->duplex_lock = 1;
++ }
++
++ /* The chip-specific entries in the device structure. */
++ dev->open = &netdev_open;
++ dev->hard_start_xmit = &start_tx;
++ dev->stop = &netdev_close;
++ dev->get_stats = &get_stats;
++ dev->set_multicast_list = &set_rx_mode;
++ dev->do_ioctl = &mii_ioctl;
++
++ /* Allow forcing the media type. */
++ if (option > 0) {
++ if (option & 0x220)
++ np->full_duplex = 1;
++ np->default_port = option & 0x3ff;
++ if (np->default_port & 0x330) {
++ np->medialock = 1;
++ if (np->msg_level & NETIF_MSG_PROBE)
++ printk(KERN_INFO " Forcing %dMbs %s-duplex operation.\n",
++ (option & 0x300 ? 100 : 10),
++ (np->full_duplex ? "full" : "half"));
++ mdio_write(dev, 1, 0,
++ ((option & 0x300) ? 0x2000 : 0) | /* 100mbps? */
++ (np->full_duplex ? 0x0100 : 0)); /* Full duplex? */
++ }
++ }
++
++ return dev;
++}
++
++
++/* Read the EEPROM and MII Management Data I/O (MDIO) interfaces.
++ The EEPROM code is for the common 93c06/46 EEPROMs with 6 bit addresses.
++ Update to the code in other drivers for 8/10 bit addresses.
++*/
++
++/* Delay between EEPROM clock transitions.
++ This "delay" forces out buffered PCI writes, which is sufficient to meet
++ the timing requirements of most EEPROMs.
++*/
++#define eeprom_delay(ee_addr) readl(ee_addr)
++
++enum EEPROM_Ctrl_Bits {
++ EE_ShiftClk=0x04, EE_DataIn=0x01, EE_ChipSelect=0x08, EE_DataOut=0x02,
++};
++#define EE_Write0 (EE_ChipSelect)
++#define EE_Write1 (EE_ChipSelect | EE_DataIn)
++
++/* The EEPROM commands include the 01 preamble. */
++enum EEPROM_Cmds {
++ EE_WriteCmd=5, EE_ReadCmd=6, EE_EraseCmd=7,
++};
++
++static int eeprom_read(long addr, int location)
++{
++ long eeprom_addr = addr + EECtrl;
++ int read_cmd = (EE_ReadCmd << 6) | location;
++ int retval = 0;
++ int i;
++
++ writel(EE_Write0, eeprom_addr);
++
++ /* Shift the read command bits out. */
++ for (i = 10; i >= 0; i--) {
++ int dataval = (read_cmd & (1 << i)) ? EE_Write1 : EE_Write0;
++ writel(dataval, eeprom_addr);
++ eeprom_delay(eeprom_addr);
++ writel(dataval | EE_ShiftClk, eeprom_addr);
++ eeprom_delay(eeprom_addr);
++ }
++ writel(EE_ChipSelect, eeprom_addr);
++ eeprom_delay(eeprom_addr);
++
++ for (i = 15; i >= 0; i--) {
++ writel(EE_ChipSelect | EE_ShiftClk, eeprom_addr);
++ eeprom_delay(eeprom_addr);
++ retval |= (readl(eeprom_addr) & EE_DataOut) ? 1 << i : 0;
++ writel(EE_ChipSelect, eeprom_addr);
++ eeprom_delay(eeprom_addr);
++ }
++
++ /* Terminate the EEPROM access. */
++ writel(EE_Write0, eeprom_addr);
++ writel(0, eeprom_addr);
++ return retval;
++}
++
++/* MII transceiver control section.
++ Read and write MII registers using software-generated serial MDIO
++ protocol. See the MII specifications or DP83840A data sheet for details.
++
++ The maximum data clock rate is 2.5 Mhz. To meet minimum timing we
++ must flush writes to the PCI bus with a PCI read. */
++#define mdio_delay(mdio_addr) readl(mdio_addr)
++
++/* Set iff a MII transceiver on any interface requires mdio preamble.
++ This only set with older tranceivers, so the extra
++ code size of a per-interface flag is not worthwhile. */
++static char mii_preamble_required = 0;
++
++enum mii_reg_bits {
++ MDIO_ShiftClk=0x0040, MDIO_Data=0x0010, MDIO_EnbOutput=0x0020,
++};
++#define MDIO_EnbIn (0)
++#define MDIO_WRITE0 (MDIO_EnbOutput)
++#define MDIO_WRITE1 (MDIO_Data | MDIO_EnbOutput)
++
++/* Generate the preamble required for initial synchronization and
++ a few older transceivers. */
++static void mdio_sync(long mdio_addr)
++{
++ int bits = 32;
++
++ /* Establish sync by sending at least 32 logic ones. */
++ while (--bits >= 0) {
++ writel(MDIO_WRITE1, mdio_addr);
++ mdio_delay(mdio_addr);
++ writel(MDIO_WRITE1 | MDIO_ShiftClk, mdio_addr);
++ mdio_delay(mdio_addr);
++ }
++}
++
++static int mdio_read(struct net_device *dev, int phy_id, int location)
++{
++ long mdio_addr = dev->base_addr + EECtrl;
++ int mii_cmd = (0xf6 << 10) | (phy_id << 5) | location;
++ int i, retval = 0;
++
++ if (mii_preamble_required)
++ mdio_sync(mdio_addr);
++
++ /* Shift the read command bits out. */
++ for (i = 15; i >= 0; i--) {
++ int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;
++
++ writel(dataval, mdio_addr);
++ mdio_delay(mdio_addr);
++ writel(dataval | MDIO_ShiftClk, mdio_addr);
++ mdio_delay(mdio_addr);
++ }
++ /* Read the two transition, 16 data, and wire-idle bits. */
++ for (i = 19; i > 0; i--) {
++ writel(MDIO_EnbIn, mdio_addr);
++ mdio_delay(mdio_addr);
++ retval = (retval << 1) | ((readl(mdio_addr) & MDIO_Data) ? 1 : 0);
++ writel(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
++ mdio_delay(mdio_addr);
++ }
++ return (retval>>1) & 0xffff;
++}
++
++static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
++{
++ long mdio_addr = dev->base_addr + EECtrl;
++ int mii_cmd = (0x5002 << 16) | (phy_id << 23) | (location<<18) | value;
++ int i;
++
++ if (mii_preamble_required)
++ mdio_sync(mdio_addr);
++
++ /* Shift the command bits out. */
++ for (i = 31; i >= 0; i--) {
++ int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;
++
++ writel(dataval, mdio_addr);
++ mdio_delay(mdio_addr);
++ writel(dataval | MDIO_ShiftClk, mdio_addr);
++ mdio_delay(mdio_addr);
++ }
++ /* Clear out extra bits. */
++ for (i = 2; i > 0; i--) {
++ writel(MDIO_EnbIn, mdio_addr);
++ mdio_delay(mdio_addr);
++ writel(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
++ mdio_delay(mdio_addr);
++ }
++ return;
++}
++
++static int netdev_open(struct net_device *dev)
++{
++ struct netdev_private *np = (struct netdev_private *)dev->priv;
++ long ioaddr = dev->base_addr;
++ int i;
++ u32 intr_status = readl(ioaddr + IntrStatus);
++
++ /* We have not yet encountered a case where we need to reset the chip. */
++
++ MOD_INC_USE_COUNT;
++
++ if (request_irq(dev->irq, &intr_handler, SA_SHIRQ, dev->name, dev)) {
++ MOD_DEC_USE_COUNT;
++ return -EAGAIN;
++ }
++
++ /* Power up Xcvr. */
++ writel((~CfgXcrOff & readl(ioaddr + ChipConfig)) | 0x00400000,
++ ioaddr + ChipConfig);
++ if (np->msg_level & NETIF_MSG_IFUP)
++ printk(KERN_DEBUG "%s: netdev_open() irq %d intr_status %8.8x.\n",
++ dev->name, dev->irq, intr_status);
++
++ init_ring(dev);
++
++#if defined(ADDR_64BITS) && defined(__alpha__)
++ writel(virt_to_bus(np->rx_ring) >> 32, ioaddr + RxRingPtrHi);
++ writel(virt_to_bus(np->tx_ring) >> 32, ioaddr + TxRingPtrHi);
++#else
++ writel(0, ioaddr + RxRingPtrHi);
++ writel(0, ioaddr + TxRingPtrHi);
++#endif
++ writel(virt_to_bus(np->rx_ring), ioaddr + RxRingPtr);
++ writel(virt_to_bus(np->tx_ring), ioaddr + TxRingPtr);
++
++ for (i = 0; i < 6; i += 2) {
++ writel(i, ioaddr + RxFilterAddr);
++ writel(dev->dev_addr[i] + (dev->dev_addr[i+1] << 8),
++ ioaddr + RxFilterData);
++ }
++
++ /* Initialize other registers. */
++ /* Configure the PCI bus bursts and FIFO thresholds. */
++ /* Configure for standard, in-spec Ethernet. */
++
++ if (np->full_duplex ||
++ ((readl(ioaddr + ChipConfig) & CfgFDX) == 0) ^
++ ((np->drv_flags & FDXActiveLow) != 0)) {
++ np->tx_config = 0xD0801002;
++ np->rx_config = 0x10000020;
++ } else {
++ np->tx_config = 0x10801002;
++ np->rx_config = 0x0020;
++ }
++ if (dev->mtu > 1500)
++ np->rx_config |= 0x08000000;
++ writel(np->tx_config, ioaddr + TxConfig);
++ writel(np->rx_config, ioaddr + RxConfig);
++ if (np->msg_level & NETIF_MSG_IFUP)
++ printk(KERN_DEBUG "%s: Setting TxConfig to %8.8x.\n",
++ dev->name, (int)readl(ioaddr + TxConfig));
++
++ if (dev->if_port == 0)
++ dev->if_port = np->default_port;
++
++ np->in_interrupt = 0;
++
++ check_duplex(dev);
++ set_rx_mode(dev);
++ netif_start_tx_queue(dev);
++
++ /* Enable interrupts by setting the interrupt mask. */
++ np->intr_enable = IntrNormalSummary | IntrAbnormalSummary | 0x1f;
++ writel(np->intr_enable, ioaddr + IntrMask);
++ writel(1, ioaddr + IntrEnable);
++
++ writel(RxOn | TxOn, ioaddr + ChipCmd);
++ writel(4, ioaddr + StatsCtrl); /* Clear Stats */
++
++ if (np->msg_level & NETIF_MSG_IFUP)
++ printk(KERN_DEBUG "%s: Done netdev_open(), status: %x.\n",
++ dev->name, (int)readl(ioaddr + ChipCmd));
++
++ /* Set the timer to check for link beat. */
++ init_timer(&np->timer);
++ np->timer.expires = jiffies + 3*HZ;
++ np->timer.data = (unsigned long)dev;
++ np->timer.function = &netdev_timer; /* timer handler */
++ add_timer(&np->timer);
++
++ return 0;
++}
++
++static void check_duplex(struct net_device *dev)
++{
++ struct netdev_private *np = (struct netdev_private *)dev->priv;
++ long ioaddr = dev->base_addr;
++ int duplex;
++
++ if (np->duplex_lock)
++ return;
++ duplex = readl(ioaddr + ChipConfig) & CfgFDX ? 1 : 0;
++ if (np->full_duplex != duplex) {
++ np->full_duplex = duplex;
++ if (np->msg_level & NETIF_MSG_LINK)
++ printk(KERN_INFO "%s: Setting %s-duplex based on negotiated link"
++ " capability.\n", dev->name,
++ duplex ? "full" : "half");
++ if (duplex) {
++ np->rx_config |= 0x10000000;
++ np->tx_config |= 0xC0000000;
++ } else {
++ np->rx_config &= ~0x10000000;
++ np->tx_config &= ~0xC0000000;
++ }
++ writel(np->tx_config, ioaddr + TxConfig);
++ writel(np->rx_config, ioaddr + RxConfig);
++ if (np->msg_level & NETIF_MSG_LINK)
++ printk(KERN_DEBUG "%s: Setting TxConfig to %8.8x (%8.8x).\n",
++ dev->name, np->tx_config, (int)readl(ioaddr + TxConfig));
++ }
++}
++
++static void netdev_timer(unsigned long data)
++{
++ struct net_device *dev = (struct net_device *)data;
++ struct netdev_private *np = (struct netdev_private *)dev->priv;
++ long ioaddr = dev->base_addr;
++ int next_tick = 10*HZ;
++
++ if (np->msg_level & NETIF_MSG_TIMER)
++ printk(KERN_DEBUG "%s: Driver monitor timer tick, status %8.8x.\n",
++ dev->name, (int)readl(ioaddr + ChipConfig));
++ if (np->rx_q_empty) {
++ /* Trigger an interrupt to refill. */
++ writel(SoftIntr, ioaddr + ChipCmd);
++ }
++ if (netif_queue_paused(dev) &&
++ np->cur_tx - np->dirty_tx > 1 &&
++ (jiffies - dev->trans_start) > TX_TIMEOUT) {
++ tx_timeout(dev);
++ }
++ check_duplex(dev);
++ np->timer.expires = jiffies + next_tick;
++ add_timer(&np->timer);
++}
++
++static void tx_timeout(struct net_device *dev)
++{
++ struct netdev_private *np = (struct netdev_private *)dev->priv;
++ long ioaddr = dev->base_addr;
++
++ printk(KERN_WARNING "%s: Transmit timed out, status %8.8x,"
++ " resetting...\n", dev->name, (int)readl(ioaddr + TxRingPtr));
++
++ if (np->msg_level & NETIF_MSG_TX_ERR) {
++ int i;
++ printk(KERN_DEBUG " Rx ring %p: ", np->rx_ring);
++ for (i = 0; i < RX_RING_SIZE; i++)
++ printk(" %8.8x", (unsigned int)np->rx_ring[i].cmd_status);
++ printk("\n"KERN_DEBUG" Tx ring %p: ", np->tx_ring);
++ for (i = 0; i < TX_RING_SIZE; i++)
++ printk(" %4.4x", np->tx_ring[i].cmd_status);
++ printk("\n");
++ }
++
++ /* Perhaps we should reinitialize the hardware here. */
++ dev->if_port = 0;
++ /* Stop and restart the chip's Tx processes . */
++
++ /* Trigger an immediate transmit demand. */
++
++ dev->trans_start = jiffies;
++ np->stats.tx_errors++;
++ return;
++}
++
++/* Refill the Rx ring buffers, returning non-zero if not full. */
++static int rx_ring_fill(struct net_device *dev)
++{
++ struct netdev_private *np = (struct netdev_private *)dev->priv;
++ unsigned int entry;
++
++ for (; np->cur_rx - np->dirty_rx > 0; np->dirty_rx++) {
++ entry = np->dirty_rx % RX_RING_SIZE;
++ if (np->rx_skbuff[entry] == NULL) {
++ struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz);
++ np->rx_skbuff[entry] = skb;
++ if (skb == NULL)
++ return 1; /* Better luck next time. */
++ skb->dev = dev; /* Mark as being used by this device. */
++ np->rx_ring[entry].buf_addr = virt_to_bus(skb->tail);
++ }
++ np->rx_ring[entry].cmd_status = cpu_to_le32(DescIntr | np->rx_buf_sz);
++ }
++ return 0;
++}
++
++/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
++static void init_ring(struct net_device *dev)
++{
++ struct netdev_private *np = (struct netdev_private *)dev->priv;
++ int i;
++
++ np->tx_full = 0;
++ np->cur_rx = np->cur_tx = 0;
++ np->dirty_rx = np->dirty_tx = 0;
++
++ /* MAX(PKT_BUF_SZ, dev->mtu + 8); */
++ /* I know you _want_ to change this without understanding it. Don't. */
++ np->rx_buf_sz = (dev->mtu <= 1532 ? PKT_BUF_SZ : dev->mtu + 8);
++ np->rx_head_desc = &np->rx_ring[0];
++
++ /* Initialize all Rx descriptors. */
++ for (i = 0; i < RX_RING_SIZE; i++) {
++ np->rx_ring[i].next_desc = virt_to_bus(&np->rx_ring[i+1]);
++ np->rx_ring[i].cmd_status = cpu_to_le32(DescOwn);
++ np->rx_skbuff[i] = 0;
++ }
++ /* Mark the last entry as wrapping the ring. */
++ np->rx_ring[i-1].next_desc = virt_to_bus(&np->rx_ring[0]);
++
++ for (i = 0; i < TX_RING_SIZE; i++) {
++ np->tx_skbuff[i] = 0;
++ np->tx_ring[i].next_desc = virt_to_bus(&np->tx_ring[i+1]);
++ np->tx_ring[i].cmd_status = 0;
++ }
++ np->tx_ring[i-1].next_desc = virt_to_bus(&np->tx_ring[0]);
++
++ /* Fill in the Rx buffers.
++ Allocation failure just leaves a "negative" np->dirty_rx. */
++ np->dirty_rx = (unsigned int)(0 - RX_RING_SIZE);
++ rx_ring_fill(dev);
++
++ return;
++}
++
++static int start_tx(struct sk_buff *skb, struct net_device *dev)
++{
++ struct netdev_private *np = (struct netdev_private *)dev->priv;
++ unsigned int entry;
++
++ /* Block a timer-based transmit from overlapping. This happens when
++ packets are presumed lost, and we use this check the Tx status. */
++ if (netif_pause_tx_queue(dev) != 0) {
++ /* This watchdog code is redundant with the media monitor timer. */
++ if (jiffies - dev->trans_start > TX_TIMEOUT)
++ tx_timeout(dev);
++ return 1;
++ }
++
++ /* Note: Ordering is important here, set the field with the
++ "ownership" bit last, and only then increment cur_tx.
++ No spinlock is needed for either Tx or Rx.
++ */
++
++ /* Calculate the next Tx descriptor entry. */
++ entry = np->cur_tx % TX_RING_SIZE;
++
++ np->tx_skbuff[entry] = skb;
++
++ np->tx_ring[entry].buf_addr = virt_to_bus(skb->data);
++ np->tx_ring[entry].cmd_status = cpu_to_le32(DescOwn|DescIntr | skb->len);
++ np->cur_tx++;
++
++ /* StrongARM: Explicitly cache flush np->tx_ring and skb->data,skb->len. */
++
++ if (np->cur_tx - np->dirty_tx >= TX_QUEUE_LEN - 1) {
++ np->tx_full = 1;
++ /* Check for a just-cleared queue. */
++ if (np->cur_tx - (volatile unsigned int)np->dirty_tx
++ < TX_QUEUE_LEN - 4) {
++ np->tx_full = 0;
++ netif_unpause_tx_queue(dev);
++ } else
++ netif_stop_tx_queue(dev);
++ } else
++ netif_unpause_tx_queue(dev); /* Typical path */
++ /* Wake the potentially-idle transmit channel. */
++ writel(TxOn, dev->base_addr + ChipCmd);
++
++ dev->trans_start = jiffies;
++
++ if (np->msg_level & NETIF_MSG_TX_QUEUED) {
++ printk(KERN_DEBUG "%s: Transmit frame #%d queued in slot %d.\n",
++ dev->name, np->cur_tx, entry);
++ }
++ return 0;
++}
++
++/* The interrupt handler does all of the Rx thread work and cleans up
++ after the Tx thread. */
++static void intr_handler(int irq, void *dev_instance, struct pt_regs *rgs)
++{
++ struct net_device *dev = (struct net_device *)dev_instance;
++ struct netdev_private *np;
++ long ioaddr;
++ int boguscnt;
++
++#ifndef final_version /* Can never occur. */
++ if (dev == NULL) {
++ printk (KERN_ERR "Netdev interrupt handler(): IRQ %d for unknown "
++ "device.\n", irq);
++ return;
++ }
++#endif
++
++ ioaddr = dev->base_addr;
++ np = (struct netdev_private *)dev->priv;
++ boguscnt = np->max_interrupt_work;
++
++#if defined(__i386__) && LINUX_VERSION_CODE < 0x020300
++ /* A lock to prevent simultaneous entry bug on Intel SMP machines. */
++ if (test_and_set_bit(0, (void*)&dev->interrupt)) {
++ printk(KERN_ERR"%s: SMP simultaneous entry of an interrupt handler.\n",
++ dev->name);
++ dev->interrupt = 0; /* Avoid halting machine. */
++ return;
++ }
++#endif
++
++ do {
++ u32 intr_status = readl(ioaddr + IntrStatus);
++
++ if (np->msg_level & NETIF_MSG_INTR)
++ printk(KERN_DEBUG "%s: Interrupt, status %8.8x.\n",
++ dev->name, intr_status);
++
++ if (intr_status == 0 || intr_status == 0xffffffff)
++ break;
++
++ /* Acknowledge all of the current interrupt sources ASAP.
++ Nominally the read above accomplishes this, but... */
++ writel(intr_status & 0x001ffff, ioaddr + IntrStatus);
++
++ if (intr_status & (IntrRxDone | IntrRxIntr)) {
++ netdev_rx(dev);
++ np->rx_q_empty = rx_ring_fill(dev);
++ }
++
++ if (intr_status & (IntrRxIdle | IntrDrv)) {
++ unsigned int old_dirty_rx = np->dirty_rx;
++ if (rx_ring_fill(dev) == 0)
++ np->rx_q_empty = 0;
++ /* Restart Rx engine iff we did add a buffer. */
++ if (np->dirty_rx != old_dirty_rx)
++ writel(RxOn, dev->base_addr + ChipCmd);
++ }
++
++ for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
++ int entry = np->dirty_tx % TX_RING_SIZE;
++ if (np->msg_level & NETIF_MSG_INTR)
++ printk(KERN_DEBUG "%s: Tx entry %d @%p status %8.8x.\n",
++ dev->name, entry, &np->tx_ring[entry],
++ np->tx_ring[entry].cmd_status);
++ if (np->tx_ring[entry].cmd_status & cpu_to_le32(DescOwn))
++ break;
++ if (np->tx_ring[entry].cmd_status & cpu_to_le32(0x08000000)) {
++ if (np->msg_level & NETIF_MSG_TX_DONE)
++ printk(KERN_DEBUG "%s: Transmit done, Tx status %8.8x.\n",
++ dev->name, np->tx_ring[entry].cmd_status);
++ np->stats.tx_packets++;
++#if LINUX_VERSION_CODE > 0x20127
++ np->stats.tx_bytes += np->tx_skbuff[entry]->len;
++#endif
++ } else { /* Various Tx errors */
++ int tx_status = le32_to_cpu(np->tx_ring[entry].cmd_status);
++ if (tx_status & 0x04010000) np->stats.tx_aborted_errors++;
++ if (tx_status & 0x02000000) np->stats.tx_fifo_errors++;
++ if (tx_status & 0x01000000) np->stats.tx_carrier_errors++;
++ if (tx_status & 0x00200000) np->stats.tx_window_errors++;
++ if (np->msg_level & NETIF_MSG_TX_ERR)
++ printk(KERN_DEBUG "%s: Transmit error, Tx status %8.8x.\n",
++ dev->name, tx_status);
++ np->stats.tx_errors++;
++ }
++ /* Free the original skb. */
++ dev_free_skb_irq(np->tx_skbuff[entry]);
++ np->tx_skbuff[entry] = 0;
++ }
++ /* Note the 4 slot hysteresis to mark the queue non-full. */
++ if (np->tx_full
++ && np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
++ /* The ring is no longer full, allow new TX entries. */
++ np->tx_full = 0;
++ netif_resume_tx_queue(dev);
++ }
++
++ /* Abnormal error summary/uncommon events handlers. */
++ if (intr_status & IntrAbnormalSummary)
++ netdev_error(dev, intr_status);
++
++ if (--boguscnt < 0) {
++ printk(KERN_WARNING "%s: Too much work at interrupt, "
++ "status=0x%4.4x.\n",
++ dev->name, intr_status);
++ np->restore_intr_enable = 1;
++ break;
++ }
++ } while (1);
++
++ if (np->msg_level & NETIF_MSG_INTR)
++ printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
++ dev->name, (int)readl(ioaddr + IntrStatus));
++
++#if defined(__i386__) && LINUX_VERSION_CODE < 0x020300
++ clear_bit(0, (void*)&dev->interrupt);
++#endif
++ return;
++}
++
++/* This routine is logically part of the interrupt handler, but separated
++ for clarity and better register allocation. */
++static int netdev_rx(struct net_device *dev)
++{
++ struct netdev_private *np = (struct netdev_private *)dev->priv;
++ int entry = np->cur_rx % RX_RING_SIZE;
++ int boguscnt = np->dirty_rx + RX_RING_SIZE - np->cur_rx;
++ s32 desc_status = le32_to_cpu(np->rx_head_desc->cmd_status);
++
++ /* If the driver owns the next entry it's a new packet. Send it up. */
++ while (desc_status < 0) { /* e.g. & DescOwn */
++ if (np->msg_level & NETIF_MSG_RX_STATUS)
++ printk(KERN_DEBUG " In netdev_rx() entry %d status was %8.8x.\n",
++ entry, desc_status);
++ if (--boguscnt < 0)
++ break;
++ if ((desc_status & (DescMore|DescPktOK|RxTooLong)) != DescPktOK) {
++ if (desc_status & DescMore) {
++ printk(KERN_WARNING "%s: Oversized(?) Ethernet frame spanned "
++ "multiple buffers, entry %#x status %x.\n",
++ dev->name, np->cur_rx, desc_status);
++ np->stats.rx_length_errors++;
++ } else {
++ /* There was a error. */
++ if (np->msg_level & NETIF_MSG_RX_ERR)
++ printk(KERN_DEBUG " netdev_rx() Rx error was %8.8x.\n",
++ desc_status);
++ np->stats.rx_errors++;
++ if (desc_status & 0x06000000) np->stats.rx_over_errors++;
++ if (desc_status & 0x00600000) np->stats.rx_length_errors++;
++ if (desc_status & 0x00140000) np->stats.rx_frame_errors++;
++ if (desc_status & 0x00080000) np->stats.rx_crc_errors++;
++ }
++ } else {
++ struct sk_buff *skb;
++ int pkt_len = (desc_status & 0x0fff) - 4; /* Omit CRC size. */
++ /* Check if the packet is long enough to accept without copying
++ to a minimally-sized skbuff. */
++ if (pkt_len < np->rx_copybreak
++ && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
++ skb->dev = dev;
++ skb_reserve(skb, 2); /* 16 byte align the IP header */
++#if HAS_IP_COPYSUM
++ eth_copy_and_sum(skb, np->rx_skbuff[entry]->tail, pkt_len, 0);
++ skb_put(skb, pkt_len);
++#else
++ memcpy(skb_put(skb, pkt_len), np->rx_skbuff[entry]->tail,
++ pkt_len);
++#endif
++ } else {
++ skb_put(skb = np->rx_skbuff[entry], pkt_len);
++ np->rx_skbuff[entry] = NULL;
++ }
++#ifndef final_version /* Remove after testing. */
++ /* You will want this info for the initial debug. */
++ if (np->msg_level & NETIF_MSG_PKTDATA)
++ printk(KERN_DEBUG " Rx data %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:"
++ "%2.2x %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x %2.2x%2.2x "
++ "%d.%d.%d.%d.\n",
++ skb->data[0], skb->data[1], skb->data[2], skb->data[3],
++ skb->data[4], skb->data[5], skb->data[6], skb->data[7],
++ skb->data[8], skb->data[9], skb->data[10],
++ skb->data[11], skb->data[12], skb->data[13],
++ skb->data[14], skb->data[15], skb->data[16],
++ skb->data[17]);
++#endif
++ skb->protocol = eth_type_trans(skb, dev);
++ /* W/ hardware checksum: skb->ip_summed = CHECKSUM_UNNECESSARY; */
++ netif_rx(skb);
++ dev->last_rx = jiffies;
++ np->stats.rx_packets++;
++#if LINUX_VERSION_CODE > 0x20127
++ np->stats.rx_bytes += pkt_len;
++#endif
++ }
++ entry = (++np->cur_rx) % RX_RING_SIZE;
++ np->rx_head_desc = &np->rx_ring[entry];
++ desc_status = le32_to_cpu(np->rx_head_desc->cmd_status);
++ }
++
++ /* Refill is now done in the main interrupt loop. */
++ return 0;
++}
++
++static void netdev_error(struct net_device *dev, int intr_status)
++{
++ struct netdev_private *np = (struct netdev_private *)dev->priv;
++ long ioaddr = dev->base_addr;
++
++ if (intr_status & LinkChange) {
++ int chip_config = readl(ioaddr + ChipConfig);
++ if (np->msg_level & NETIF_MSG_LINK)
++ printk(KERN_NOTICE "%s: Link changed: Autonegotiation advertising"
++ " %4.4x partner %4.4x.\n", dev->name,
++ (int)readl(ioaddr + 0x90), (int)readl(ioaddr + 0x94));
++ if (chip_config & CfgLinkGood)
++ netif_link_up(dev);
++ else
++ netif_link_down(dev);
++ check_duplex(dev);
++ }
++ if (intr_status & StatsMax) {
++ get_stats(dev);
++ }
++ if (intr_status & IntrTxUnderrun) {
++ /* Increase the Tx threshold, 32 byte units. */
++ if ((np->tx_config & 0x3f) < 62)
++ np->tx_config += 2; /* +64 bytes */
++ writel(np->tx_config, ioaddr + TxConfig);
++ }
++ if (intr_status & WOLPkt) {
++ int wol_status = readl(ioaddr + WOLCmd);
++ printk(KERN_NOTICE "%s: Link wake-up event %8.8x",
++ dev->name, wol_status);
++ }
++ if (intr_status & (RxStatusOverrun | IntrRxOverrun)) {
++ if (np->msg_level & NETIF_MSG_DRV)
++ printk(KERN_ERR "%s: Rx overflow! ns820 %8.8x.\n",
++ dev->name, intr_status);
++ np->stats.rx_fifo_errors++;
++ }
++ if (intr_status & ~(LinkChange|StatsMax|RxResetDone|TxResetDone|
++ RxStatusOverrun|0xA7ff)) {
++ if (np->msg_level & NETIF_MSG_DRV)
++ printk(KERN_ERR "%s: Something Wicked happened! ns820 %8.8x.\n",
++ dev->name, intr_status);
++ }
++ /* Hmmmmm, it's not clear how to recover from PCI faults. */
++ if (intr_status & IntrPCIErr) {
++ np->stats.tx_fifo_errors++;
++ np->stats.rx_fifo_errors++;
++ }
++}
++
++static struct net_device_stats *get_stats(struct net_device *dev)
++{
++ long ioaddr = dev->base_addr;
++ struct netdev_private *np = (struct netdev_private *)dev->priv;
++ int crc_errs = readl(ioaddr + RxCRCErrs);
++
++ if (crc_errs != 0xffffffff) {
++ /* We need not lock this segment of code for SMP.
++ There is no atomic-add vulnerability for most CPUs,
++ and statistics are non-critical. */
++ /* The chip only need report frame silently dropped. */
++ np->stats.rx_crc_errors += crc_errs;
++ np->stats.rx_missed_errors += readl(ioaddr + RxMissed);
++ }
++
++ return &np->stats;
++}
++
++/* The little-endian AUTODIN II ethernet CRC calculations.
++ A big-endian version is also available.
++ This is slow but compact code. Do not use this routine for bulk data,
++ use a table-based routine instead.
++ This is common code and should be moved to net/core/crc.c.
++ Chips may use the upper or lower CRC bits, and may reverse and/or invert
++ them. Select the endian-ness that results in minimal calculations.
++*/
++static unsigned const ethernet_polynomial_le = 0xedb88320U;
++static inline unsigned ether_crc_le(int length, unsigned char *data)
++{
++ unsigned int crc = 0xffffffff; /* Initial value. */
++ while(--length >= 0) {
++ unsigned char current_octet = *data++;
++ int bit;
++ for (bit = 8; --bit >= 0; current_octet >>= 1) {
++ if ((crc ^ current_octet) & 1) {
++ crc >>= 1;
++ crc ^= ethernet_polynomial_le;
++ } else
++ crc >>= 1;
++ }
++ }
++ return crc;
++}
++
++static void set_rx_mode(struct net_device *dev)
++{
++ long ioaddr = dev->base_addr;
++ struct netdev_private *np = (struct netdev_private *)dev->priv;
++ u8 mc_filter[64]; /* Multicast hash filter */
++ u32 rx_mode;
++
++ if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
++ /* Unconditionally log net taps. */
++ printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n", dev->name);
++ rx_mode = AcceptBroadcast | AcceptAllMulticast | AcceptAllPhys
++ | AcceptMyPhys;
++ } else if ((dev->mc_count > np->multicast_filter_limit)
++ || (dev->flags & IFF_ALLMULTI)) {
++ rx_mode = AcceptBroadcast | AcceptAllMulticast | AcceptMyPhys;
++ } else {
++ struct dev_mc_list *mclist;
++ int i;
++ memset(mc_filter, 0, sizeof(mc_filter));
++ for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
++ i++, mclist = mclist->next) {
++ set_bit(ether_crc_le(ETH_ALEN, mclist->dmi_addr) & 0x7ff,
++ mc_filter);
++ }
++ rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
++ for (i = 0; i < 64; i += 2) {
++ writel(rx_mode + 0x200 + i, ioaddr + RxFilterAddr);
++ writel((mc_filter[i+1]<<8) + mc_filter[i], ioaddr + RxFilterData);
++ }
++ }
++ writel(rx_mode, ioaddr + RxFilterAddr);
++ np->cur_rx_mode = rx_mode;
++}
++
++static int mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
++{
++ struct netdev_private *np = (struct netdev_private *)dev->priv;
++ u16 *data = (u16 *)&rq->ifr_data;
++ u32 *data32 = (void *)&rq->ifr_data;
++
++ switch(cmd) {
++ case 0x8947: case 0x89F0:
++ /* SIOCGMIIPHY: Get the address of the PHY in use. */
++ data[0] = 1;
++ /* Fall Through */
++ case 0x8948: case 0x89F1:
++ /* SIOCGMIIREG: Read the specified MII register. */
++ data[3] = mdio_read(dev, data[0] & 0x1f, data[1] & 0x1f);
++ return 0;
++ case 0x8949: case 0x89F2:
++ /* SIOCSMIIREG: Write the specified MII register */
++ if (!capable(CAP_NET_ADMIN))
++ return -EPERM;
++ if (data[0] == 1) {
++ u16 miireg = data[1] & 0x1f;
++ u16 value = data[2];
++ switch (miireg) {
++ case 0:
++ /* Check for autonegotiation on or reset. */
++ np->duplex_lock = (value & 0x9000) ? 0 : 1;
++ if (np->duplex_lock)
++ np->full_duplex = (value & 0x0100) ? 1 : 0;
++ break;
++ case 4: np->advertising = value; break;
++ }
++ }
++ mdio_write(dev, data[0] & 0x1f, data[1] & 0x1f, data[2]);
++ return 0;
++ case SIOCGPARAMS:
++ data32[0] = np->msg_level;
++ data32[1] = np->multicast_filter_limit;
++ data32[2] = np->max_interrupt_work;
++ data32[3] = np->rx_copybreak;
++ return 0;
++ case SIOCSPARAMS:
++ if (!capable(CAP_NET_ADMIN))
++ return -EPERM;
++ np->msg_level = data32[0];
++ np->multicast_filter_limit = data32[1];
++ np->max_interrupt_work = data32[2];
++ np->rx_copybreak = data32[3];
++ return 0;
++ default:
++ return -EOPNOTSUPP;
++ }
++}
++
++static int netdev_close(struct net_device *dev)
++{
++ long ioaddr = dev->base_addr;
++ struct netdev_private *np = (struct netdev_private *)dev->priv;
++ int i;
++
++ netif_stop_tx_queue(dev);
++
++ if (np->msg_level & NETIF_MSG_IFDOWN) {
++ printk(KERN_DEBUG "%s: Shutting down ethercard, status was %4.4x "
++ "Int %2.2x.\n",
++ dev->name, (int)readl(ioaddr + ChipCmd),
++ (int)readl(ioaddr + IntrStatus));
++ printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d, Rx %d / %d.\n",
++ dev->name, np->cur_tx, np->dirty_tx, np->cur_rx, np->dirty_rx);
++ }
++
++ /* We don't want the timer to re-start anything. */
++ del_timer(&np->timer);
++
++ /* Disable interrupts using the mask. */
++ writel(0, ioaddr + IntrMask);
++ writel(0, ioaddr + IntrEnable);
++ writel(2, ioaddr + StatsCtrl); /* Freeze Stats */
++
++ /* Stop the chip's Tx and Rx processes. */
++ writel(RxOff | TxOff, ioaddr + ChipCmd);
++
++ get_stats(dev);
++
++#ifdef __i386__
++ if (np->msg_level & NETIF_MSG_IFDOWN) {
++ printk("\n"KERN_DEBUG" Tx ring at %8.8x:\n",
++ (int)virt_to_bus(np->tx_ring));
++ for (i = 0; i < TX_RING_SIZE; i++)
++ printk(" #%d desc. %8.8x %8.8x.\n",
++ i, np->tx_ring[i].cmd_status, (u32)np->tx_ring[i].buf_addr);
++ printk("\n"KERN_DEBUG " Rx ring %8.8x:\n",
++ (int)virt_to_bus(np->rx_ring));
++ for (i = 0; i < RX_RING_SIZE; i++) {
++ printk(KERN_DEBUG " #%d desc. %8.8x %8.8x\n",
++ i, np->rx_ring[i].cmd_status, (u32)np->rx_ring[i].buf_addr);
++ }
++ }
++#endif /* __i386__ debugging only */
++
++ free_irq(dev->irq, dev);
++
++ /* Free all the skbuffs in the Rx queue. */
++ for (i = 0; i < RX_RING_SIZE; i++) {
++ np->rx_ring[i].cmd_status = 0;
++ np->rx_ring[i].buf_addr = 0xBADF00D0; /* An invalid address. */
++ if (np->rx_skbuff[i]) {
++#if LINUX_VERSION_CODE < 0x20100
++ np->rx_skbuff[i]->free = 1;
++#endif
++ dev_free_skb(np->rx_skbuff[i]);
++ }
++ np->rx_skbuff[i] = 0;
++ }
++ for (i = 0; i < TX_RING_SIZE; i++) {
++ if (np->tx_skbuff[i])
++ dev_free_skb(np->tx_skbuff[i]);
++ np->tx_skbuff[i] = 0;
++ }
++
++ /* Power down Xcvr. */
++ writel(CfgXcrOff | readl(ioaddr + ChipConfig), ioaddr + ChipConfig);
++
++ MOD_DEC_USE_COUNT;
++
++ return 0;
++}
++
++static int power_event(void *dev_instance, int event)
++{
++ struct net_device *dev = dev_instance;
++ struct netdev_private *np = (struct netdev_private *)dev->priv;
++ long ioaddr = dev->base_addr;
++
++ if (np->msg_level & NETIF_MSG_LINK)
++ printk(KERN_DEBUG "%s: Handling power event %d.\n", dev->name, event);
++ switch(event) {
++ case DRV_ATTACH:
++ MOD_INC_USE_COUNT;
++ break;
++ case DRV_SUSPEND:
++ /* Disable interrupts, freeze stats, stop Tx and Rx. */
++ writel(0, ioaddr + IntrEnable);
++ writel(2, ioaddr + StatsCtrl);
++ writel(RxOff | TxOff, ioaddr + ChipCmd);
++ writel(CfgXcrOff | readl(ioaddr + ChipConfig), ioaddr + ChipConfig);
++ break;
++ case DRV_RESUME:
++ /* This is incomplete: the open() actions should be repeated. */
++ writel(~CfgXcrOff & readl(ioaddr + ChipConfig), ioaddr + ChipConfig);
++ set_rx_mode(dev);
++ writel(np->intr_enable, ioaddr + IntrEnable);
++ writel(1, ioaddr + IntrEnable);
++ writel(RxOn | TxOn, ioaddr + ChipCmd);
++ break;
++ case DRV_DETACH: {
++ struct net_device **devp, **next;
++ if (dev->flags & IFF_UP) {
++ /* Some, but not all, kernel versions close automatically. */
++ dev_close(dev);
++ dev->flags &= ~(IFF_UP|IFF_RUNNING);
++ }
++ unregister_netdev(dev);
++ release_region(dev->base_addr, pci_id_tbl[np->chip_id].io_size);
++ for (devp = &root_net_dev; *devp; devp = next) {
++ next = &((struct netdev_private *)(*devp)->priv)->next_module;
++ if (*devp == dev) {
++ *devp = *next;
++ break;
++ }
++ }
++ if (np->priv_addr)
++ kfree(np->priv_addr);
++ kfree(dev);
++ MOD_DEC_USE_COUNT;
++ break;
++ }
++ }
++
++ return 0;
++}
++
++
++#ifdef MODULE
++int init_module(void)
++{
++ /* Emit version even if no cards detected. */
++ printk(KERN_INFO "%s" KERN_INFO "%s", version1, version2);
++#ifdef CARDBUS
++ register_driver(&etherdev_ops);
++ return 0;
++#else
++ return pci_drv_register(&natsemi_drv_id, NULL);
++#endif
++}
++
++void cleanup_module(void)
++{
++ struct net_device *next_dev;
++
++#ifdef CARDBUS
++ unregister_driver(&etherdev_ops);
++#else
++ pci_drv_unregister(&natsemi_drv_id);
++#endif
++
++ /* No need to check MOD_IN_USE, as sys_delete_module() checks. */
++ while (root_net_dev) {
++ struct netdev_private *np = (void *)(root_net_dev->priv);
++ unregister_netdev(root_net_dev);
++ iounmap((char *)root_net_dev->base_addr);
++ next_dev = np->next_module;
++ if (np->priv_addr)
++ kfree(np->priv_addr);
++ kfree(root_net_dev);
++ root_net_dev = next_dev;
++ }
++}
++
++#endif /* MODULE */
++
++/*
++ * Local variables:
++ * compile-command: "make KERNVER=`uname -r` ns820.o"
++ * compile-cmd: "gcc -DMODULE -Wall -Wstrict-prototypes -O6 -c ns820.c"
++ * simple-compile-command: "gcc -DMODULE -O6 -c ns820.c"
++ * c-indent-level: 4
++ * c-basic-offset: 4
++ * tab-width: 4
++ * End:
++ */
+Index: linux/src/drivers/net/pci-scan.c
+===================================================================
+RCS file: linux/src/drivers/net/pci-scan.c
+diff -N linux/src/drivers/net/pci-scan.c
+--- /dev/null 1 Jan 1970 00:00:00 -0000
++++ linux/src/drivers/net/pci-scan.c 20 Aug 2004 10:32:54 -0000
+@@ -0,0 +1,659 @@
++/* pci-scan.c: Linux PCI network adapter support code. */
++/*
++ Originally written 1999-2003 by Donald Becker.
++
++ This software may be used and distributed according to the terms
++ of the GNU General Public License (GPL), incorporated herein by
++ reference. Drivers interacting with these functions are derivative
++ works and thus also must be licensed under the GPL and include an explicit
++ GPL notice.
++
++ This code provides common scan and activate functions for PCI network
++ interfaces.
++
++ The author may be reached as becker@scyld.com, or
++ Donald Becker
++ Scyld Computing Corporation
++ 914 Bay Ridge Road, Suite 220
++ Annapolis MD 21403
++
++ Other contributers:
++*/
++static const char version[] =
++"pci-scan.c:v1.12 7/30/2003 Donald Becker <becker@scyld.com>"
++" http://www.scyld.com/linux/drivers.html\n";
++
++/* A few user-configurable values that may be modified when a module. */
++
++static int msg_level = 1; /* 1 normal messages, 0 quiet .. 7 verbose. */
++static int min_pci_latency = 32;
++
++#if ! defined(__KERNEL__)
++#define __KERNEL__ 1
++#endif
++#if !defined(__OPTIMIZE__)
++#warning You must compile this file with the correct options!
++#warning See the last lines of the source file.
++#error You must compile this driver with the proper options, including "-O".
++#endif
++
++#if defined(MODULE) && ! defined(EXPORT_SYMTAB)
++#define EXPORT_SYMTAB
++#endif
++
++#include <linux/config.h>
++#if defined(CONFIG_SMP) && ! defined(__SMP__)
++#define __SMP__
++#endif
++#if defined(MODULE) && defined(CONFIG_MODVERSIONS) && ! defined(MODVERSIONS)
++#define MODVERSIONS
++#endif
++
++#include <linux/version.h>
++#if defined(MODVERSIONS)
++#include <linux/modversions.h>
++#endif
++#if LINUX_VERSION_CODE < 0x20500 && defined(MODVERSIONS)
++/* Another interface semantics screw-up. */
++#include <linux/module.h>
++#include <linux/modversions.h>
++#else
++#include <linux/module.h>
++#endif
++
++#include <linux/kernel.h>
++#include <linux/mm.h>
++#include <linux/errno.h>
++#include <linux/types.h>
++#include <linux/pci.h>
++#include <linux/ioport.h>
++#if LINUX_VERSION_CODE >= 0x20300
++/* Bogus change in the middle of a "stable" kernel series.
++ Also, in 2.4.7+ slab must come before interrupt.h to avoid breakage. */
++#include <linux/slab.h>
++#else
++#include <linux/malloc.h>
++#endif
++#include <asm/io.h>
++#include "pci-scan.h"
++#include "kern_compat.h"
++#if defined(CONFIG_APM) && LINUX_VERSION_CODE < 0x20400
++#include <linux/apm_bios.h>
++#endif
++#ifdef CONFIG_PM
++/* New in 2.4 kernels, pointlessly incompatible with earlier APM. */
++#include <linux/pm.h>
++#endif
++
++#if (LINUX_VERSION_CODE >= 0x20100) && defined(MODULE)
++char kernel_version[] = UTS_RELEASE;
++#endif
++#if (LINUX_VERSION_CODE < 0x20100)
++#define PCI_CAPABILITY_LIST 0x34 /* Offset of first capability list entry */
++#define PCI_STATUS_CAP_LIST 0x10 /* Support Capability List */
++#define PCI_CAP_ID_PM 0x01 /* Power Management */
++#endif
++
++int (*register_hotswap_hook)(struct drv_id_info *did);
++void (*unregister_hotswap_hook)(struct drv_id_info *did);
++
++#if LINUX_VERSION_CODE > 0x20118 && defined(MODULE)
++MODULE_LICENSE("GPL");
++MODULE_PARM(msg_level, "i");
++MODULE_PARM(min_pci_latency, "i");
++MODULE_PARM_DESC(msg_level, "Enable additional status messages (0-7)");
++MODULE_PARM_DESC(min_pci_latency,
++ "Minimum value for the PCI Latency Timer settings");
++#if defined(EXPORT_SYMTAB)
++EXPORT_SYMBOL_NOVERS(pci_drv_register);
++EXPORT_SYMBOL_NOVERS(pci_drv_unregister);
++EXPORT_SYMBOL_NOVERS(acpi_wake);
++EXPORT_SYMBOL_NOVERS(acpi_set_pwr_state);
++EXPORT_SYMBOL_NOVERS(register_hotswap_hook);
++EXPORT_SYMBOL_NOVERS(unregister_hotswap_hook);
++#endif
++#endif
++
++/* List of registered drivers. */
++static struct drv_id_info *drv_list;
++/* List of detected PCI devices, for APM events. */
++static struct dev_info {
++ struct dev_info *next;
++ void *dev;
++ struct drv_id_info *drv_id;
++ int flags;
++} *dev_list;
++
++/*
++ This code is not intended to support every configuration.
++ It is intended to minimize duplicated code by providing the functions
++ needed in almost every PCI driver.
++
++ The "no kitchen sink" policy:
++ Additional features and code will be added to this module only if more
++ than half of the drivers for common hardware would benefit from the feature.
++*/
++
++/*
++ Ideally we would detect and number all cards of a type (e.g. network) in
++ PCI slot order.
++ But that does not work with hot-swap card, CardBus cards and added drivers.
++ So instead we detect just the each chip table in slot order.
++
++ This routine takes a PCI ID table, scans the PCI bus, and calls the
++ associated attach/probe1 routine with the hardware already activated and
++ single I/O or memory address already mapped.
++
++ This routine will later be supplemented with CardBus and hot-swap PCI
++ support using the same table. Thus the pci_chip_tbl[] should not be
++ marked as __initdata.
++*/
++
++#if LINUX_VERSION_CODE >= 0x20200
++/* Grrrr.. complex abstaction layers with negative benefit. */
++int pci_drv_register(struct drv_id_info *drv_id, void *initial_device)
++{
++ int chip_idx, cards_found = 0;
++ struct pci_dev *pdev = NULL;
++ struct pci_id_info *pci_tbl = drv_id->pci_dev_tbl;
++ struct drv_id_info *drv;
++ void *newdev;
++
++
++ /* Ignore a double-register attempt. */
++ for (drv = drv_list; drv; drv = drv->next)
++ if (drv == drv_id)
++ return -EBUSY;
++
++ while ((pdev = pci_find_class(drv_id->pci_class, pdev)) != 0) {
++ u32 pci_id, pci_subsys_id, pci_class_rev;
++ u16 pci_command, new_command;
++ int pci_flags;
++ long pciaddr; /* Bus address. */
++ long ioaddr; /* Mapped address for this processor. */
++
++ pci_read_config_dword(pdev, PCI_VENDOR_ID, &pci_id);
++ /* Offset 0x2c is PCI_SUBSYSTEM_ID aka PCI_SUBSYSTEM_VENDOR_ID. */
++ pci_read_config_dword(pdev, 0x2c, &pci_subsys_id);
++ pci_read_config_dword(pdev, PCI_REVISION_ID, &pci_class_rev);
++
++ if (msg_level > 3)
++ printk(KERN_DEBUG "PCI ID %8.8x subsystem ID is %8.8x.\n",
++ pci_id, pci_subsys_id);
++ for (chip_idx = 0; pci_tbl[chip_idx].name; chip_idx++) {
++ struct pci_id_info *chip = &pci_tbl[chip_idx];
++ if ((pci_id & chip->id.pci_mask) == chip->id.pci
++ && (pci_subsys_id&chip->id.subsystem_mask) == chip->id.subsystem
++ && (pci_class_rev&chip->id.revision_mask) == chip->id.revision)
++ break;
++ }
++ if (pci_tbl[chip_idx].name == 0) /* Compiled out! */
++ continue;
++
++ pci_flags = pci_tbl[chip_idx].pci_flags;
++#if LINUX_VERSION_CODE >= 0x2030C
++ /* Wow. A oversized, hard-to-use abstraction. Bogus. */
++ pciaddr = pdev->resource[(pci_flags >> 4) & 7].start;
++#else
++ pciaddr = pdev->base_address[(pci_flags >> 4) & 7];
++#if defined(__alpha__) /* Really any machine with 64 bit addressing. */
++ if (pci_flags & PCI_ADDR_64BITS)
++ pciaddr |= ((long)pdev->base_address[((pci_flags>>4)&7)+ 1]) << 32;
++#endif
++#endif
++ if (msg_level > 2)
++ printk(KERN_INFO "Found %s at PCI address %#lx, mapped IRQ %d.\n",
++ pci_tbl[chip_idx].name, pciaddr, pdev->irq);
++
++ if ( ! (pci_flags & PCI_UNUSED_IRQ) &&
++ (pdev->irq == 0 || pdev->irq == 255)) {
++ if (pdev->bus->number == 32) /* Broken CardBus activation. */
++ printk(KERN_WARNING "Resources for CardBus device '%s' have"
++ " not been allocated.\n"
++ KERN_WARNING "Activation has been delayed.\n",
++ pci_tbl[chip_idx].name);
++ else
++ printk(KERN_WARNING "PCI device '%s' was not assigned an "
++ "IRQ.\n"
++ KERN_WARNING "It will not be activated.\n",
++ pci_tbl[chip_idx].name);
++ continue;
++ }
++ if ((pci_flags & PCI_BASE_ADDRESS_SPACE_IO)) {
++ ioaddr = pciaddr & PCI_BASE_ADDRESS_IO_MASK;
++ if (check_region(ioaddr, pci_tbl[chip_idx].io_size))
++ continue;
++ } else if ((ioaddr = (long)ioremap(pciaddr & PCI_BASE_ADDRESS_MEM_MASK,
++ pci_tbl[chip_idx].io_size)) == 0) {
++ printk(KERN_INFO "Failed to map PCI address %#lx for device "
++ "'%s'.\n", pciaddr, pci_tbl[chip_idx].name);
++ continue;
++ }
++ if ( ! (pci_flags & PCI_NO_ACPI_WAKE))
++ acpi_wake(pdev);
++ pci_read_config_word(pdev, PCI_COMMAND, &pci_command);
++ new_command = pci_command | (pci_flags & 7);
++ if (pci_command != new_command) {
++ printk(KERN_INFO " The PCI BIOS has not enabled the"
++ " device at %d/%d! Updating PCI command %4.4x->%4.4x.\n",
++ pdev->bus->number, pdev->devfn, pci_command, new_command);
++ pci_write_config_word(pdev, PCI_COMMAND, new_command);
++ }
++
++ newdev = drv_id->probe1(pdev, initial_device,
++ ioaddr, pdev->irq, chip_idx, cards_found);
++ if (newdev == NULL)
++ continue;
++ initial_device = 0;
++ cards_found++;
++ if (pci_flags & PCI_COMMAND_MASTER) {
++ pci_set_master(pdev);
++ if ( ! (pci_flags & PCI_NO_MIN_LATENCY)) {
++ u8 pci_latency;
++ pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &pci_latency);
++ if (pci_latency < min_pci_latency) {
++ printk(KERN_INFO " PCI latency timer (CFLT) is "
++ "unreasonably low at %d. Setting to %d clocks.\n",
++ pci_latency, min_pci_latency);
++ pci_write_config_byte(pdev, PCI_LATENCY_TIMER,
++ min_pci_latency);
++ }
++ }
++ }
++ {
++ struct dev_info *devp =
++ kmalloc(sizeof(struct dev_info), GFP_KERNEL);
++ if (devp == 0)
++ continue;
++ devp->next = dev_list;
++ devp->dev = newdev;
++ devp->drv_id = drv_id;
++ dev_list = devp;
++ }
++ }
++
++ if (((drv_id->flags & PCI_HOTSWAP)
++ && register_hotswap_hook && (*register_hotswap_hook)(drv_id) == 0)
++ || cards_found) {
++ MOD_INC_USE_COUNT;
++ drv_id->next = drv_list;
++ drv_list = drv_id;
++ return 0;
++ } else
++ return -ENODEV;
++}
++#else
++int pci_drv_register(struct drv_id_info *drv_id, void *initial_device)
++{
++ int pci_index, cards_found = 0;
++ unsigned char pci_bus, pci_device_fn;
++ struct pci_dev *pdev;
++ struct pci_id_info *pci_tbl = drv_id->pci_dev_tbl;
++ void *newdev;
++
++ if ( ! pcibios_present())
++ return -ENODEV;
++
++ for (pci_index = 0; pci_index < 0xff; pci_index++) {
++ u32 pci_id, subsys_id, pci_class_rev;
++ u16 pci_command, new_command;
++ int chip_idx, irq, pci_flags;
++ long pciaddr;
++ long ioaddr;
++ u32 pci_busaddr;
++ u8 pci_irq_line;
++
++ if (pcibios_find_class (drv_id->pci_class, pci_index,
++ &pci_bus, &pci_device_fn)
++ != PCIBIOS_SUCCESSFUL)
++ break;
++ pcibios_read_config_dword(pci_bus, pci_device_fn,
++ PCI_VENDOR_ID, &pci_id);
++ /* Offset 0x2c is PCI_SUBSYSTEM_ID aka PCI_SUBSYSTEM_VENDOR_ID. */
++ pcibios_read_config_dword(pci_bus, pci_device_fn, 0x2c, &subsys_id);
++ pcibios_read_config_dword(pci_bus, pci_device_fn,
++ PCI_REVISION_ID, &pci_class_rev);
++
++ for (chip_idx = 0; pci_tbl[chip_idx].name; chip_idx++) {
++ struct pci_id_info *chip = &pci_tbl[chip_idx];
++ if ((pci_id & chip->id.pci_mask) == chip->id.pci
++ && (subsys_id & chip->id.subsystem_mask) == chip->id.subsystem
++ && (pci_class_rev&chip->id.revision_mask) == chip->id.revision)
++ break;
++ }
++ if (pci_tbl[chip_idx].name == 0) /* Compiled out! */
++ continue;
++
++ pci_flags = pci_tbl[chip_idx].pci_flags;
++ pdev = pci_find_slot(pci_bus, pci_device_fn);
++ pcibios_read_config_byte(pci_bus, pci_device_fn,
++ PCI_INTERRUPT_LINE, &pci_irq_line);
++ irq = pci_irq_line;
++ pcibios_read_config_dword(pci_bus, pci_device_fn,
++ ((pci_flags >> 2) & 0x1C) + 0x10,
++ &pci_busaddr);
++ pciaddr = pci_busaddr;
++#if defined(__alpha__)
++ if (pci_flags & PCI_ADDR_64BITS) {
++ pcibios_read_config_dword(pci_bus, pci_device_fn,
++ ((pci_flags >> 2) & 0x1C) + 0x14,
++ &pci_busaddr);
++ pciaddr |= ((long)pci_busaddr)<<32;
++ }
++#endif
++
++ if (msg_level > 2)
++ printk(KERN_INFO "Found %s at PCI address %#lx, IRQ %d.\n",
++ pci_tbl[chip_idx].name, pciaddr, irq);
++
++ if ( ! (pci_flags & PCI_UNUSED_IRQ) &&
++ (irq == 0 || irq == 255)) {
++ if (pci_bus == 32) /* Broken CardBus activation. */
++ printk(KERN_WARNING "Resources for CardBus device '%s' have"
++ " not been allocated.\n"
++ KERN_WARNING "It will not be activated.\n",
++ pci_tbl[chip_idx].name);
++ else
++ printk(KERN_WARNING "PCI device '%s' was not assigned an "
++ "IRQ.\n"
++ KERN_WARNING "It will not be activated.\n",
++ pci_tbl[chip_idx].name);
++ continue;
++ }
++
++ if ((pciaddr & PCI_BASE_ADDRESS_SPACE_IO)) {
++ ioaddr = pciaddr & PCI_BASE_ADDRESS_IO_MASK;
++ if (check_region(ioaddr, pci_tbl[chip_idx].io_size))
++ continue;
++ } else if ((ioaddr = (long)ioremap(pciaddr & PCI_BASE_ADDRESS_MEM_MASK,
++ pci_tbl[chip_idx].io_size)) == 0) {
++ printk(KERN_INFO "Failed to map PCI address %#lx.\n",
++ pciaddr);
++ continue;
++ }
++
++ if ( ! (pci_flags & PCI_NO_ACPI_WAKE))
++ acpi_wake(pdev);
++ pcibios_read_config_word(pci_bus, pci_device_fn,
++ PCI_COMMAND, &pci_command);
++ new_command = pci_command | (pci_flags & 7);
++ if (pci_command != new_command) {
++ printk(KERN_INFO " The PCI BIOS has not enabled the"
++ " device at %d/%d! Updating PCI command %4.4x->%4.4x.\n",
++ pci_bus, pci_device_fn, pci_command, new_command);
++ pcibios_write_config_word(pci_bus, pci_device_fn,
++ PCI_COMMAND, new_command);
++ }
++
++ newdev = drv_id->probe1(pdev, initial_device,
++ ioaddr, irq, chip_idx, cards_found);
++
++ if (newdev && (pci_flags & PCI_COMMAND_MASTER) &&
++ ! (pci_flags & PCI_NO_MIN_LATENCY)) {
++ u8 pci_latency;
++ pcibios_read_config_byte(pci_bus, pci_device_fn,
++ PCI_LATENCY_TIMER, &pci_latency);
++ if (pci_latency < min_pci_latency) {
++ printk(KERN_INFO " PCI latency timer (CFLT) is "
++ "unreasonably low at %d. Setting to %d clocks.\n",
++ pci_latency, min_pci_latency);
++ pcibios_write_config_byte(pci_bus, pci_device_fn,
++ PCI_LATENCY_TIMER, min_pci_latency);
++ }
++ }
++ if (newdev) {
++ struct dev_info *devp =
++ kmalloc(sizeof(struct dev_info), GFP_KERNEL);
++ if (devp) {
++ devp->next = dev_list;
++ devp->dev = newdev;
++ devp->drv_id = drv_id;
++ dev_list = devp;
++ }
++ }
++ initial_device = 0;
++ cards_found++;
++ }
++
++ if (((drv_id->flags & PCI_HOTSWAP)
++ && register_hotswap_hook && (*register_hotswap_hook)(drv_id) == 0)
++ || cards_found) {
++ MOD_INC_USE_COUNT;
++ drv_id->next = drv_list;
++ drv_list = drv_id;
++ return 0;
++ } else
++ return cards_found ? 0 : -ENODEV;
++}
++#endif
++
++void pci_drv_unregister(struct drv_id_info *drv_id)
++{
++ struct drv_id_info **drvp;
++ struct dev_info **devip = &dev_list;
++
++ if (unregister_hotswap_hook)
++ (*unregister_hotswap_hook)(drv_id);
++
++ for (drvp = &drv_list; *drvp; drvp = &(*drvp)->next)
++ if (*drvp == drv_id) {
++ *drvp = (*drvp)->next;
++ MOD_DEC_USE_COUNT;
++ break;
++ }
++ while (*devip) {
++ struct dev_info *thisdevi = *devip;
++ if (thisdevi->drv_id == drv_id) {
++ *devip = thisdevi->next;
++ kfree(thisdevi);
++ } else
++ devip = &(*devip)->next;
++ }
++
++ return;
++}
++
++#if LINUX_VERSION_CODE < 0x20400
++/*
++ Search PCI configuration space for the specified capability registers.
++ Return the index, or 0 on failure.
++ The 2.4 kernel now includes this function.
++*/
++int pci_find_capability(struct pci_dev *pdev, int findtype)
++{
++ u16 pci_status, cap_type;
++ u8 pci_cap_idx;
++ int cap_idx;
++
++ pci_read_config_word(pdev, PCI_STATUS, &pci_status);
++ if ( ! (pci_status & PCI_STATUS_CAP_LIST))
++ return 0;
++ pci_read_config_byte(pdev, PCI_CAPABILITY_LIST, &pci_cap_idx);
++ cap_idx = pci_cap_idx;
++ for (cap_idx = pci_cap_idx; cap_idx; cap_idx = (cap_type >> 8) & 0xff) {
++ pci_read_config_word(pdev, cap_idx, &cap_type);
++ if ((cap_type & 0xff) == findtype)
++ return cap_idx;
++ }
++ return 0;
++}
++#endif
++
++/* Change a device from D3 (sleep) to D0 (active).
++ Return the old power state.
++ This is more complicated than you might first expect since most cards
++ forget all PCI config info during the transition! */
++int acpi_wake(struct pci_dev *pdev)
++{
++ u32 base[5], romaddr;
++ u16 pci_command, pwr_command;
++ u8 pci_latency, pci_cacheline, irq;
++ int i, pwr_cmd_idx = pci_find_capability(pdev, PCI_CAP_ID_PM);
++
++ if (pwr_cmd_idx == 0)
++ return 0;
++ pci_read_config_word(pdev, pwr_cmd_idx + 4, &pwr_command);
++ if ((pwr_command & 3) == 0)
++ return 0;
++ pci_read_config_word(pdev, PCI_COMMAND, &pci_command);
++ for (i = 0; i < 5; i++)
++ pci_read_config_dword(pdev, PCI_BASE_ADDRESS_0 + i*4,
++ &base[i]);
++ pci_read_config_dword(pdev, PCI_ROM_ADDRESS, &romaddr);
++ pci_read_config_byte( pdev, PCI_LATENCY_TIMER, &pci_latency);
++ pci_read_config_byte( pdev, PCI_CACHE_LINE_SIZE, &pci_cacheline);
++ pci_read_config_byte( pdev, PCI_INTERRUPT_LINE, &irq);
++
++ pci_write_config_word(pdev, pwr_cmd_idx + 4, 0x0000);
++ for (i = 0; i < 5; i++)
++ if (base[i])
++ pci_write_config_dword(pdev, PCI_BASE_ADDRESS_0 + i*4,
++ base[i]);
++ pci_write_config_dword(pdev, PCI_ROM_ADDRESS, romaddr);
++ pci_write_config_byte( pdev, PCI_INTERRUPT_LINE, irq);
++ pci_write_config_byte( pdev, PCI_CACHE_LINE_SIZE, pci_cacheline);
++ pci_write_config_byte( pdev, PCI_LATENCY_TIMER, pci_latency);
++ pci_write_config_word( pdev, PCI_COMMAND, pci_command | 5);
++ return pwr_command & 3;
++}
++
++int acpi_set_pwr_state(struct pci_dev *pdev, enum acpi_pwr_state new_state)
++{
++ u16 pwr_command;
++ int pwr_cmd_idx = pci_find_capability(pdev, PCI_CAP_ID_PM);
++
++ if (pwr_cmd_idx == 0)
++ return 0;
++ pci_read_config_word(pdev, pwr_cmd_idx + 4, &pwr_command);
++ if ((pwr_command & 3) == ACPI_D3 && new_state != ACPI_D3)
++ acpi_wake(pdev); /* The complicated sequence. */
++ pci_write_config_word(pdev, pwr_cmd_idx + 4,
++ (pwr_command & ~3) | new_state);
++ return pwr_command & 3;
++}
++
++#if defined(CONFIG_PM)
++static int handle_pm_event(struct pm_dev *dev, int event, void *data)
++{
++ static int down = 0;
++ struct dev_info *devi;
++ int pwr_cmd = -1;
++
++ if (msg_level > 1)
++ printk(KERN_DEBUG "pci-scan: Handling power event %d for driver "
++ "list %s...\n",
++ event, drv_list->name);
++ switch (event) {
++ case PM_SUSPEND:
++ if (down) {
++ printk(KERN_DEBUG "pci-scan: Received extra suspend event\n");
++ break;
++ }
++ down = 1;
++ for (devi = dev_list; devi; devi = devi->next)
++ if (devi->drv_id->pwr_event)
++ devi->drv_id->pwr_event(devi->dev, DRV_SUSPEND);
++ break;
++ case PM_RESUME:
++ if (!down) {
++ printk(KERN_DEBUG "pci-scan: Received bogus resume event\n");
++ break;
++ }
++ for (devi = dev_list; devi; devi = devi->next) {
++ if (devi->drv_id->pwr_event) {
++ if (msg_level > 3)
++ printk(KERN_DEBUG "pci-scan: Calling resume for %s "
++ "device.\n", devi->drv_id->name);
++ devi->drv_id->pwr_event(devi->dev, DRV_RESUME);
++ }
++ }
++ down = 0;
++ break;
++ case PM_SET_WAKEUP: pwr_cmd = DRV_PWR_WakeOn; break;
++ case PM_EJECT: pwr_cmd = DRV_DETACH; break;
++ default:
++ printk(KERN_DEBUG "pci-scan: Unknown power management event %d.\n",
++ event);
++ }
++ if (pwr_cmd >= 0)
++ for (devi = dev_list; devi; devi = devi->next)
++ if (devi->drv_id->pwr_event)
++ devi->drv_id->pwr_event(devi->dev, pwr_cmd);
++
++ return 0;
++}
++
++#elif defined(CONFIG_APM) && LINUX_VERSION_CODE < 0x20400
++static int handle_apm_event(apm_event_t event)
++{
++ static int down = 0;
++ struct dev_info *devi;
++
++ if (msg_level > 1)
++ printk(KERN_DEBUG "pci-scan: Handling APM event %d for driver "
++ "list %s...\n",
++ event, drv_list->name);
++ return 0;
++ switch (event) {
++ case APM_SYS_SUSPEND:
++ case APM_USER_SUSPEND:
++ if (down) {
++ printk(KERN_DEBUG "pci-scan: Received extra suspend event\n");
++ break;
++ }
++ down = 1;
++ for (devi = dev_list; devi; devi = devi->next)
++ if (devi->drv_id->pwr_event)
++ devi->drv_id->pwr_event(devi->dev, DRV_SUSPEND);
++ break;
++ case APM_NORMAL_RESUME:
++ case APM_CRITICAL_RESUME:
++ if (!down) {
++ printk(KERN_DEBUG "pci-scan: Received bogus resume event\n");
++ break;
++ }
++ for (devi = dev_list; devi; devi = devi->next)
++ if (devi->drv_id->pwr_event)
++ devi->drv_id->pwr_event(devi->dev, DRV_RESUME);
++ down = 0;
++ break;
++ }
++ return 0;
++}
++#endif /* CONFIG_APM */
++
++#ifdef MODULE
++int init_module(void)
++{
++ if (msg_level) /* Emit version even if no cards detected. */
++ printk(KERN_INFO "%s", version);
++
++#if defined(CONFIG_PM)
++ pm_register(PM_PCI_DEV, 0, &handle_pm_event);
++#elif defined(CONFIG_APM) && LINUX_VERSION_CODE < 0x20400
++ apm_register_callback(&handle_apm_event);
++#endif
++ return 0;
++}
++void cleanup_module(void)
++{
++#if defined(CONFIG_PM)
++ pm_unregister_all(&handle_pm_event);
++#elif defined(CONFIG_APM) && LINUX_VERSION_CODE < 0x20400
++ apm_unregister_callback(&handle_apm_event);
++#endif
++ if (dev_list != NULL)
++ printk(KERN_WARNING "pci-scan: Unfreed device references.\n");
++ return;
++}
++#endif
++
++
++/*
++ * Local variables:
++ * compile-command: "gcc -DMODULE -D__KERNEL__ -DEXPORT_SYMTAB -Wall -Wstrict-prototypes -O6 -c pci-scan.c"
++ * c-indent-level: 4
++ * c-basic-offset: 4
++ * tab-width: 4
++ * End:
++ */
+Index: linux/src/drivers/net/pci-scan.h
+===================================================================
+RCS file: linux/src/drivers/net/pci-scan.h
+diff -N linux/src/drivers/net/pci-scan.h
+--- /dev/null 1 Jan 1970 00:00:00 -0000
++++ linux/src/drivers/net/pci-scan.h 20 Aug 2004 10:32:54 -0000
+@@ -0,0 +1,90 @@
++#ifndef _PCI_SCAN_H
++#define _PCI_SCAN_H
++/*
++ version 1.02 $Version:$ $Date: 2001/03/18 21:35:59 $
++ Copyright 1999-2001 Donald Becker / Scyld Computing Corporation
++ This software is part of the Linux kernel. It may be used and
++ distributed according to the terms of the GNU Public License,
++ incorporated herein by reference.
++*/
++
++/*
++ These are the structures in the table that drives the PCI probe routines.
++ Note the matching code uses a bitmask: more specific table entries should
++ be placed before "catch-all" entries.
++
++ The table must be zero terminated.
++*/
++enum pci_id_flags_bits {
++ /* Set PCI command register bits before calling probe1(). */
++ PCI_USES_IO=1, PCI_USES_MEM=2, PCI_USES_MASTER=4,
++ /* Read and map the single following PCI BAR. */
++ PCI_ADDR0=0<<4, PCI_ADDR1=1<<4, PCI_ADDR2=2<<4, PCI_ADDR3=3<<4,
++ PCI_ADDR_64BITS=0x100, PCI_NO_ACPI_WAKE=0x200, PCI_NO_MIN_LATENCY=0x400,
++ PCI_UNUSED_IRQ=0x800,
++};
++
++struct pci_id_info {
++ const char *name;
++ struct match_info {
++ int pci, pci_mask, subsystem, subsystem_mask;
++ int revision, revision_mask; /* Only 8 bits. */
++ } id;
++ enum pci_id_flags_bits pci_flags;
++ int io_size; /* Needed for I/O region check or ioremap(). */
++ int drv_flags; /* Driver use, intended as capability flags. */
++};
++
++enum drv_id_flags {
++ PCI_HOTSWAP=1, /* Leave module loaded for Cardbus-like chips. */
++};
++enum drv_pwr_action {
++ DRV_NOOP, /* No action. */
++ DRV_ATTACH, /* The driver may expect power ops. */
++ DRV_SUSPEND, /* Machine suspending, next event RESUME or DETACH. */
++ DRV_RESUME, /* Resume from previous SUSPEND */
++ DRV_DETACH, /* Card will-be/is gone. Valid from SUSPEND! */
++ DRV_PWR_WakeOn, /* Put device in e.g. Wake-On-LAN mode. */
++ DRV_PWR_DOWN, /* Go to lowest power mode. */
++ DRV_PWR_UP, /* Go to normal power mode. */
++};
++
++struct drv_id_info {
++ const char *name; /* Single-word driver name. */
++ int flags;
++ int pci_class; /* Typically PCI_CLASS_NETWORK_ETHERNET<<8. */
++ struct pci_id_info *pci_dev_tbl;
++ void *(*probe1)(struct pci_dev *pdev, void *dev_ptr,
++ long ioaddr, int irq, int table_idx, int fnd_cnt);
++ /* Optional, called for suspend, resume and detach. */
++ int (*pwr_event)(void *dev, int event);
++ /* Internal values. */
++ struct drv_id_info *next;
++ void *cb_ops;
++};
++
++/* PCI scan and activate.
++ Scan PCI-like hardware, calling probe1(..,dev,..) on devices that match.
++ Returns -ENODEV, a negative number, if no cards are found. */
++
++extern int pci_drv_register(struct drv_id_info *drv_id, void *initial_device);
++extern void pci_drv_unregister(struct drv_id_info *drv_id);
++
++
++/* ACPI routines.
++ Wake (change to ACPI D0 state) or set the ACPI power level of a sleeping
++ ACPI device. Returns the old power state. */
++
++int acpi_wake(struct pci_dev *pdev);
++enum acpi_pwr_state {ACPI_D0, ACPI_D1, ACPI_D2, ACPI_D3};
++int acpi_set_pwr_state(struct pci_dev *pdev, enum acpi_pwr_state state);
++
++
++/*
++ * Local variables:
++ * c-indent-level: 4
++ * c-basic-offset: 4
++ * tab-width: 4
++ * End:
++ */
++#endif
+Index: linux/src/drivers/net/pci-serial.c
+===================================================================
+RCS file: linux/src/drivers/net/pci-serial.c
+diff -N linux/src/drivers/net/pci-serial.c
+--- /dev/null 1 Jan 1970 00:00:00 -0000
++++ linux/src/drivers/net/pci-serial.c 20 Aug 2004 10:32:54 -0000
+@@ -0,0 +1,258 @@
++/* pci-serial.c: A PCI serial port (e.g. modem) activator for Linux. */
++/*
++ This driver is an activator for PCI serial devices.
++
++ Written/copyright 1999-2002 by Donald Becker.
++
++ This software may be used and distributed according to the terms of
++ the GNU General Public License (GPL), incorporated herein by reference.
++ Drivers based on or derived from this code fall under the GPL and must
++ retain the authorship, copyright and license notice. This file is not
++ a complete program and may only be used when the entire operating
++ system is licensed under the GPL.
++
++ The author may be reached as becker@scyld.com, or C/O
++ Scyld Computing Corporation
++ 410 Severn Ave., Suite 210
++ Annapolis MD 21403
++
++ Support information and updates available at
++ http://www.scyld.com/network/updates.html
++*/
++
++static const char *version =
++"pci-serial.c:v1.03 7/30/2002 Donald Becker http://www.scyld.com/index.html\n";
++
++/* A few user-configurable values. */
++
++/* Message enable level: 0..31 = no..all messages. See NETIF_MSG docs. */
++static int debug = 1;
++
++/* Operational parameters that usually are not changed. */
++
++#if !defined(__OPTIMIZE__) || !defined(__KERNEL__)
++#warning You must compile this file with the correct options!
++#warning See the last lines of the source file.
++#error You must compile this driver with "-O".
++#endif
++
++#include <linux/config.h>
++#if defined(MODULE) && defined(CONFIG_MODVERSIONS) && ! defined(MODVERSIONS)
++#define MODVERSIONS
++#endif
++
++#include <linux/version.h>
++#if defined(MODVERSIONS)
++#include <linux/modversions.h>
++#endif
++#include <linux/module.h>
++
++#include <linux/kernel.h>
++#if LINUX_VERSION_CODE >= 0x20400
++#include <linux/slab.h>
++#else
++#include <linux/malloc.h>
++#endif
++#include <linux/pci.h>
++#if LINUX_VERSION_CODE < 0x20155
++#include <linux/bios32.h>
++#define PCI_SUPPORT 1
++#else
++#define PCI_SUPPORT 2
++#endif
++#include <linux/major.h>
++#include <linux/serial.h>
++
++#include <asm/io.h>
++#include "kern_compat.h"
++
++#if ! defined (LINUX_VERSION_CODE) || LINUX_VERSION_CODE < 0x20000
++#warning This driver version is only for kernel versions 2.0.0 and later.
++#endif
++
++MODULE_AUTHOR("Donald Becker <becker@cesdis.gsfc.nasa.gov>");
++MODULE_DESCRIPTION("PCI hot-swap serial port activator");
++MODULE_LICENSE("GPL");
++MODULE_PARM(debug, "i");
++MODULE_PARM_DESC(debug, "Driver message level (0-31)");
++
++#if LINUX_VERSION_CODE < 0x20123
++#define test_and_set_bit(val, addr) set_bit(val, addr)
++#endif
++#if LINUX_VERSION_CODE < 0x20155
++#define PCI_SUPPORT_VER1
++#define pci_present pcibios_present
++#endif
++
++/*
++ Theory of Operation
++
++I. Board Compatibility
++
++This device driver is designed for PCI serial ports.
++
++
++II. Board-specific settings
++
++N/A
++
++III. Operation
++
++IVb. References
++
++IVc. Errata
++
++*/
++
++/* The rest of these values should never change. */
++
++static struct cb_serial_info {
++ struct cb_serial_info *next;
++ long ioaddr;
++ int major, minor;
++ char dev_name[8];
++ u32 subsystem_id;
++ u8 pci_bus, pci_devfn, irq;
++} *cb_serial_list;
++
++int serial_attach(int bus, int devfn)
++{
++ struct serial_struct serial;
++ int line;
++ u16 device_id, vendor_id, pci_cmd;
++ u32 addr0, subsystem_id, pwr_cmd;
++ u8 irq;
++ long ioaddr;
++
++ if (debug) {
++ printk(KERN_INFO "serial_attach(bus %d, function %d).\n",
++ bus, devfn);
++ }
++ pcibios_read_config_dword(bus, devfn, PCI_BASE_ADDRESS_0, &addr0);
++ if ( ! (addr0 & 1))
++ pcibios_read_config_dword(bus, devfn, PCI_BASE_ADDRESS_1, &addr0);
++ pcibios_read_config_byte(bus, devfn, PCI_INTERRUPT_LINE, &irq);
++ pcibios_read_config_word(bus, devfn, PCI_VENDOR_ID, &vendor_id);
++ pcibios_read_config_word(bus, devfn, PCI_DEVICE_ID, &device_id);
++ pcibios_read_config_dword(bus, devfn, PCI_SUBSYSTEM_ID, &subsystem_id);
++ pcibios_read_config_dword(bus, devfn, 0x44, &pwr_cmd);
++ pcibios_write_config_dword(bus, devfn, 0x44, pwr_cmd & ~3);
++ pcibios_read_config_word(bus, devfn, PCI_COMMAND, &pci_cmd);
++ ioaddr = addr0 & ~3;
++ if (ioaddr == 0 || irq == 0) {
++ printk(KERN_ERR "A CardBus serial port was not assigned an %s.\n",
++ ioaddr == 0 ? "I/O address" : "IRQ");
++ return 0;
++ }
++ if (debug > 1) {
++ printk(KERN_INFO " PCI command register was %4.4x.\n", pci_cmd);
++ printk(KERN_INFO "serial_attach(bus %d, function %d), device %4.4x "
++ "IRQ %d IO %lx subsystem ID %8.8x.\n", bus, devfn, device_id,
++ irq, ioaddr, subsystem_id);
++ }
++ /* Insert vendor-specific magic here. */
++ serial.port = ioaddr;
++ serial.irq = irq;
++ serial.flags = ASYNC_SHARE_IRQ;
++ line = register_serial(&serial);
++
++ if (debug > 2) {
++ int i;
++ printk(KERN_DEBUG "pci-serial: Register dump at %#lx:", ioaddr);
++ for (i = 0; i < 8; i++)
++ printk(" %2.2x", inb(ioaddr + i));
++ printk(".\n");
++ }
++
++ if (line < 0) {
++ printk(KERN_NOTICE "serial_cb: register_serial() at 0x%04x, "
++ "irq %d failed, status %d\n", serial.port, serial.irq, line);
++ } else {
++ struct cb_serial_info *info =
++ kmalloc(sizeof(struct cb_serial_info), GFP_KERNEL);
++ memset(info, 0, sizeof(struct cb_serial_info));
++ sprintf(info->dev_name, "ttyS%d", line);
++ info->major = TTY_MAJOR;
++ info->minor = 0x40 + line;
++ info->pci_bus = bus;
++ info->pci_devfn = devfn;
++ info->ioaddr = ioaddr;
++ info->subsystem_id = subsystem_id;
++ info->next = cb_serial_list;
++ cb_serial_list = info;
++ MOD_INC_USE_COUNT;
++ return 1;
++ }
++ return 0;
++}
++
++static void serial_detach(void)
++{
++ struct cb_serial_info *info, **infop;
++ if (debug)
++ printk(KERN_INFO "serial_detach()\n");
++ for (infop = &cb_serial_list; *infop; *infop = (*infop)->next)
++ if (1)
++ break;
++ info = *infop;
++ if (info == NULL)
++ return;
++#if 0
++ unregister_serial(node->minor - 0x40);
++#endif
++ *infop = info->next;
++ kfree(info);
++ MOD_DEC_USE_COUNT;
++ if (debug)
++ printk(KERN_INFO "serial_detach() done.\n");
++}
++
++
++#ifdef MODULE
++
++int init_module(void)
++{
++ int cards_found = 0;
++ int pci_index;
++ unsigned char pci_bus, pci_device_fn;
++
++ printk(KERN_INFO "%s", version);
++
++ if ( ! pcibios_present())
++ return -ENODEV;
++
++ for (pci_index = 0; pci_index < 0xff; pci_index++) {
++ if (pcibios_find_class (PCI_CLASS_COMMUNICATION_OTHER << 8, pci_index,
++ &pci_bus, &pci_device_fn)
++ != PCIBIOS_SUCCESSFUL)
++ break;
++ cards_found++;
++ serial_attach(pci_bus, pci_device_fn);
++ }
++ for (pci_index = 0; pci_index < 0xff; pci_index++) {
++ if (pcibios_find_class((PCI_CLASS_COMMUNICATION_SERIAL <<8) | 0x02,
++ pci_index, &pci_bus, &pci_device_fn)
++ != PCIBIOS_SUCCESSFUL)
++ break;
++ cards_found++;
++ serial_attach(pci_bus, pci_device_fn);
++ }
++ return cards_found ? 0 : -ENODEV;
++}
++
++void cleanup_module(void)
++{
++ return;
++}
++
++#endif /* MODULE */
++
++/*
++ * Local variables:
++ * compile-command: "make pci-serial.o"
++ * alt-compile-command: "gcc -DMODULE -D__KERNEL__ -Wall -Wstrict-prototypes -O6 -c pci-serial.c"
++ * c-indent-level: 4
++ * c-basic-offset: 4
++ * tab-width: 4
++ * End:
++ */
+Index: linux/src/drivers/net/pci-skeleton.c
+===================================================================
+RCS file: linux/src/drivers/net/pci-skeleton.c
+diff -N linux/src/drivers/net/pci-skeleton.c
+--- /dev/null 1 Jan 1970 00:00:00 -0000
++++ linux/src/drivers/net/pci-skeleton.c 20 Aug 2004 10:32:54 -0000
+@@ -0,0 +1,1694 @@
++/* pci-skeleton.c: A Linux PCI network adapter skeleton device driver. */
++/*
++ Written 1998-2003 by Donald Becker.
++
++ This software may be used and distributed according to the terms of
++ the GNU General Public License (GPL), incorporated herein by reference.
++ Drivers based on or derived from this code fall under the GPL and must
++ retain the authorship, copyright and license notice. This file is not
++ a complete program and may only be used when the entire operating
++ system is licensed under the GPL.
++
++ The author may be reached as becker@scyld.com, or C/O
++ Scyld Computing Corporation
++ 914 Bay Ridge Road, Suite 220
++ Annapolis MD 21403
++
++ Support information and updates available at
++ http://www.scyld.com/network/pci-skeleton.html
++ The information and support mailing lists are based at
++ http://www.scyld.com/mailman/listinfo/
++*/
++
++/* These identify the driver base version and may not be removed. */
++static const char version1[] =
++"pci-skeleton.c:v2.13a 6/3/2003 Written by Donald Becker <becker@scyld.com>\n";
++static const char version2[] =
++" http://www.scyld.com/network/drivers.html\n";
++
++/* The user-configurable values.
++ These may be modified when a driver module is loaded.*/
++
++/* Message enable level: 0..31 = no..all messages. See NETIF_MSG docs. */
++static int debug = 2;
++
++/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
++static int max_interrupt_work = 20;
++
++/* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
++ Typical is a 64 element hash table based on the Ethernet CRC. */
++static int multicast_filter_limit = 32;
++
++/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
++ Setting to > 1518 effectively disables this feature. */
++static int rx_copybreak = 0;
++
++/* Used to pass the media type, etc.
++ Both 'options[]' and 'full_duplex[]' should exist for driver
++ interoperability, however setting full_duplex[] is deprecated.
++ The media type is usually passed in 'options[]'.
++ The default is autonegotiation for speed and duplex.
++ This should rarely be overridden.
++ Use option values 0x10/0x20 for 10Mbps, 0x100,0x200 for 100Mbps.
++ Use option values 0x10 and 0x100 for forcing half duplex fixed speed.
++ Use option values 0x20 and 0x200 for forcing full duplex operation.
++*/
++#define MAX_UNITS 8 /* More are supported, limit only on options */
++static int options[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
++static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
++
++/* Operational parameters that are set at compile time. */
++
++/* Keep the ring sizes a power of two for compile efficiency.
++ The compiler will convert <unsigned>'%'<2^N> into a bit mask.
++ Making the Tx ring too large decreases the effectiveness of channel
++ bonding and packet priority, confuses the system network buffer limits,
++ and wastes memory.
++ Larger receive rings merely waste memory.
++*/
++#define TX_RING_SIZE 16
++#define TX_QUEUE_LEN 10 /* Limit ring entries actually used, min 4. */
++#define RX_RING_SIZE 32
++
++/* Operational parameters that usually are not changed. */
++/* Time in jiffies before concluding the transmitter is hung.
++ Re-autonegotiation may take up to 3 seconds.
++ */
++#define TX_TIMEOUT (6*HZ)
++
++/* Allocation size of Rx buffers with normal sized Ethernet frames.
++ Do not change this value without good reason. This is not a limit,
++ but a way to keep a consistent allocation size among drivers.
++ */
++#define PKT_BUF_SZ 1536
++
++/* Set iff a MII transceiver on any interface requires mdio preamble.
++ This only set with older tranceivers, so the extra
++ code size of a per-interface flag is not worthwhile. */
++static char mii_preamble_required = 0;
++
++#ifndef __KERNEL__
++#define __KERNEL__
++#endif
++#if !defined(__OPTIMIZE__)
++#warning You must compile this file with the correct options!
++#warning See the last lines of the source file.
++#error You must compile this driver with "-O".
++#endif
++
++/* Include files, designed to support most kernel versions 2.0.0 and later. */
++#include <linux/config.h>
++#if defined(CONFIG_SMP) && ! defined(__SMP__)
++#define __SMP__
++#endif
++#if defined(MODULE) && defined(CONFIG_MODVERSIONS) && ! defined(MODVERSIONS)
++#define MODVERSIONS
++#endif
++
++#include <linux/version.h>
++#if defined(MODVERSIONS)
++#include <linux/modversions.h>
++#endif
++#include <linux/module.h>
++
++#include <linux/kernel.h>
++#include <linux/string.h>
++#include <linux/timer.h>
++#include <linux/errno.h>
++#include <linux/ioport.h>
++#if LINUX_VERSION_CODE >= 0x20400
++/* Bogus change in the middle of a "stable" kernel series.
++ In 2.4.7+ slab must come before interrupt.h to avoid mystery breakage. */
++#include <linux/slab.h>
++#else
++#include <linux/malloc.h>
++#endif
++#include <linux/interrupt.h>
++#include <linux/pci.h>
++#include <linux/netdevice.h>
++#include <linux/etherdevice.h>
++#include <linux/skbuff.h>
++#include <asm/processor.h> /* Processor type for cache alignment. */
++#include <asm/bitops.h>
++#include <asm/io.h>
++
++#if LINUX_VERSION_CODE >= 0x20300
++#include <linux/spinlock.h>
++#elif LINUX_VERSION_CODE >= 0x20200
++#include <asm/spinlock.h>
++#endif
++
++#ifdef INLINE_PCISCAN
++#include "k_compat.h"
++#else
++#include "pci-scan.h"
++#include "kern_compat.h"
++#endif
++
++/* Condensed operations for readability. */
++#define virt_to_le32desc(addr) cpu_to_le32(virt_to_bus(addr))
++#define le32desc_to_virt(addr) bus_to_virt(le32_to_cpu(addr))
++
++#if (LINUX_VERSION_CODE >= 0x20100) && defined(MODULE)
++char kernel_version[] = UTS_RELEASE;
++#endif
++
++/* Kernels before 2.1.0 cannot map the high addrs assigned by some BIOSes. */
++#if (LINUX_VERSION_CODE < 0x20100) || ! defined(MODULE)
++#define USE_IO_OPS
++#endif
++
++MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
++MODULE_DESCRIPTION("PCI network skeleton Ethernet driver");
++MODULE_LICENSE("GPL");
++MODULE_PARM(debug, "i");
++MODULE_PARM(options, "1-" __MODULE_STRING(MAX_UNITS) "i");
++MODULE_PARM(rx_copybreak, "i");
++MODULE_PARM(full_duplex, "1-" __MODULE_STRING(MAX_UNITS) "i");
++MODULE_PARM(multicast_filter_limit, "i");
++MODULE_PARM(max_interrupt_work, "i");
++MODULE_PARM_DESC(debug, "Driver message level (0-31)");
++MODULE_PARM_DESC(options, "Force transceiver type or fixed speed+duplex");
++MODULE_PARM_DESC(max_interrupt_work,
++ "Driver maximum events handled per interrupt");
++MODULE_PARM_DESC(full_duplex,
++ "Non-zero to force full duplex, non-negotiated link "
++ "(deprecated).");
++MODULE_PARM_DESC(rx_copybreak,
++ "Breakpoint in bytes for copy-only-tiny-frames");
++MODULE_PARM_DESC(multicast_filter_limit,
++ "Multicast addresses before switching to Rx-all-multicast");
++
++/*
++ Theory of Operation
++
++I. Board Compatibility
++
++State the chips and boards this driver is known to work with.
++Note any similar chips or boards that will not work.
++
++This driver skeleton demonstrates the driver for an idealized
++descriptor-based bus-master PCI chip.
++
++II. Board-specific settings
++
++No jumpers exist on most PCI boards, so this section is usually empty.
++
++III. Driver operation
++
++IIIa. Ring buffers
++
++This driver uses two statically allocated fixed-size descriptor lists
++formed into rings by a branch from the final descriptor to the beginning of
++the list. The ring sizes are set at compile time by RX/TX_RING_SIZE.
++Some chips explicitly use only 2^N sized rings, while others use a
++'next descriptor' pointer that the driver forms into rings.
++
++IIIb/c. Transmit/Receive Structure
++
++This driver uses a zero-copy receive and transmit scheme.
++The driver allocates full frame size skbuffs for the Rx ring buffers at
++open() time and passes the skb->data field to the chip as receive data
++buffers. When an incoming frame is less than RX_COPYBREAK bytes long,
++a fresh skbuff is allocated and the frame is copied to the new skbuff.
++When the incoming frame is larger, the skbuff is passed directly up the
++protocol stack. Buffers consumed this way are replaced by newly allocated
++skbuffs in a later phase of receives.
++
++The RX_COPYBREAK value is chosen to trade-off the memory wasted by
++using a full-sized skbuff for small frames vs. the copying costs of larger
++frames. New boards are typically used in generously configured machines
++and the underfilled buffers have negligible impact compared to the benefit of
++a single allocation size, so the default value of zero results in never
++copying packets. When copying is done, the cost is usually mitigated by using
++a combined copy/checksum routine. Copying also preloads the cache, which is
++most useful with small frames.
++
++A subtle aspect of the operation is that the IP header at offset 14 in an
++ethernet frame isn't longword aligned for further processing.
++When unaligned buffers are permitted by the hardware (and always on copies)
++frames are put into the skbuff at an offset of "+2", 16-byte aligning
++the IP header.
++
++IIId. Synchronization
++
++The driver runs as two independent, single-threaded flows of control. One
++is the send-packet routine, which enforces single-threaded use by the
++dev->tbusy flag. The other thread is the interrupt handler, which is single
++threaded by the hardware and interrupt handling software.
++
++The send packet thread has partial control over the Tx ring and 'dev->tbusy'
++flag. It sets the tbusy flag whenever it's queuing a Tx packet. If the next
++queue slot is empty, it clears the tbusy flag when finished otherwise it sets
++the 'lp->tx_full' flag.
++
++The interrupt handler has exclusive control over the Rx ring and records stats
++from the Tx ring. After reaping the stats, it marks the Tx queue entry as
++empty by incrementing the dirty_tx mark. Iff the 'lp->tx_full' flag is set, it
++clears both the tx_full and tbusy flags.
++
++IIId. SMP semantics
++
++The following are serialized with respect to each other via the "xmit_lock".
++ dev->hard_start_xmit() Transmit a packet
++ dev->tx_timeout() Transmit watchdog for stuck Tx
++ dev->set_multicast_list() Set the receive filter.
++Note: The Tx timeout watchdog code is implemented by the timer routine in
++kernels up to 2.2.*. In 2.4.* and later the timeout code is part of the
++driver interface.
++
++The following fall under the global kernel lock. The module will not be
++unloaded during the call, unless a call with a potential reschedule e.g.
++kmalloc() is called. No other synchronization assertion is made.
++ dev->open()
++ dev->do_ioctl()
++ dev->get_stats()
++Caution: The lock for dev->open() is commonly broken with request_irq() or
++kmalloc(). It is best to avoid any lock-breaking call in do_ioctl() and
++get_stats(), or additional module locking code must be implemented.
++
++The following is self-serialized (no simultaneous entry)
++ A handler registered with request_irq().
++
++IV. Notes
++
++There are few hard rules about writing device drivers, but I have read some
++amazingly unwise code. Bad code often stems from the mistaken belief that
++this device driver is the most important code the machine is running.
++
++Remember that this is a real OS, not DOS. Never mess with system hardware
++(the timer chip, DMA channels, IRQ mapping): use the hardware-independent
++kernel services instead.
++
++While there is a udelay() function, use it sparingly and only with tiny
++delays. It is not for having the kernel wait three seconds while
++autonegotiation completes! At boot time or module insertion time this rule
++can be relaxed somewhat, but even then the total delay should be under a
++timer tick (10msec).
++
++All loops should be checked with a 'boguscnt' limit. That includes the
++interrupt handler, which should limit the work it does with a tunable
++parameter. Loops that check for hardware completion should have a typical
++completion count in a comment. An exception is traversing software
++maintained lists, most of which should be designed to grow arbitrarily long.
++
++The device driver source code file should be self-contained, and as compact
++as readability permits. It should not be spread out over multiple source
++files, and there should only be a driver.h file in special circumstances.
++
++Finally, always support multiple devices. That means few, if any, global
++variables. All driver variables should be 'static'.
++
++IVb. References
++
++http://www.scyld.com/expert/100mbps.html
++http://scyld.com/expert/NWay.html
++
++List the documentation used to write the driver. Note any proprietary or
++trade secret information, and the agreement you have to release the same.
++
++IVc. Errata
++
++Note any known bugs or limitations.
++*/
++
++
++
++/* This table drives the PCI probe routines.
++ Note the matching code -- the first table entry matches only the 5678 card,
++ the second all remaining 56** cards.
++*/
++
++static void *netfin_probe1(struct pci_dev *pdev, void *init_dev,
++ long ioaddr, int irq, int chip_idx, int find_cnt);
++static int netfin_pwr_event(void *dev_instance, int event);
++enum chip_capability_flags {CanHaveMII=1, };
++#ifdef USE_IO_OPS
++#define PCI_IOTYPE (PCI_USES_MASTER | PCI_USES_IO | PCI_ADDR0)
++#else
++#define PCI_IOTYPE (PCI_USES_MASTER | PCI_USES_MEM | PCI_ADDR1)
++#endif
++
++static struct pci_id_info pci_id_tbl[] = {
++ {"NetTechCom 5678 adapter", {0x56781234, 0xffffffff, },
++ PCI_IOTYPE, 128, CanHaveMII},
++ {"NetTechCom 5600 series", {0x56001234, 0xff00ffff, },
++ PCI_IOTYPE, 128, CanHaveMII},
++ {0,}, /* 0 terminated list. */
++};
++
++struct drv_id_info netfin_drv_id = {
++ "netfin", PCI_HOTSWAP, PCI_CLASS_NETWORK_ETHERNET<<8, pci_id_tbl,
++ netfin_probe1, netfin_pwr_event };
++
++/* This driver was written to use PCI memory space, however x86-oriented
++ hardware sometimes works only with I/O space accesses. */
++#ifdef USE_IO_OPS
++#undef readb
++#undef readw
++#undef readl
++#undef writeb
++#undef writew
++#undef writel
++#define readb inb
++#define readw inw
++#define readl inl
++#define writeb outb
++#define writew outw
++#define writel outl
++#endif
++
++/* Offsets to the device registers.
++ Unlike software-only systems, device drivers interact with complex hardware.
++ It's not useful to define symbolic names for every register bit in the
++ device. The name can only partially document the semantics and make
++ the driver longer and more difficult to read.
++ In general, only the important configuration values or bits changed
++ multiple times should be defined symbolically.
++*/
++enum register_offsets {
++ ChipCmd=0x00, IntrStatus=0x04, IntrEnable=0x08,
++ TxStatus=0x10, TxCmd=0x14, TxRingPtr=0x18,
++ RxStatus=0x20, RxCmd=0x24, RxRingPtr=0x28,
++ EECtrl=0x40, MIICtrl=0x44, LEDCtrl=0x48,
++ StationAddr=0x50, RxMode=0x58, TxMode=0x5C,
++ RxMissed=0x60, RxCRCErrs=0x64, MulticastFilter0=0x68,MulticastFilter1=0x6C,
++ PCIBusCfg=0x70, FIFOCfg=0x74, ChipReset=0x78,
++};
++
++/* Bits in the interrupt status/mask registers. */
++enum intr_status_bits {
++ IntrRxDone=0x01, IntrRxEmpty=0x02, IntrRxPCIErr=0x04,
++ IntrTxDone=0x10, IntrTxEmpty=0x20, IntrTxPCIErr=0x40,
++ StatsMax=0x0100, LinkChange=0x0200, TxUnderrun=0x0400, RxOverflow=0x0800,
++ IntrNormalSummary=0x8000, IntrAbnormalSummary=0x4000,
++};
++
++/* Bits in the RxMode register. */
++enum rx_mode_bits {
++ AcceptErr=0x20, AcceptRunt=0x10,
++ AcceptBroadcast=0x08, AcceptMulticast=0x04,
++ AcceptAllPhys=0x02, AcceptMyPhys=0x01,
++};
++
++/* Misc. bits. Symbolic names so that may be searched for. */
++enum misc_bits {
++ ChipResetCmd=1, RxEnable=1, RxPoll=2, RxDisable=4,
++ TxEnable=1, TxPoll=2, TxDisable=4,
++ TxModeFDX=1, TxThresholdField=0x0ff0, TxThresholdInc=0x0010,
++};
++
++/* The Rx and Tx buffer descriptors. */
++/* Note that using only 32 bit fields simplifies conversion to big-endian
++ architectures. */
++struct netdev_desc {
++ u32 status;
++ u32 length;
++ u32 addr;
++ u32 next_desc;
++};
++
++/* Bits in network_desc.status */
++enum desc_status_bits {
++ DescOwn=0x80000000, DescEndPacket=0x40000000, DescEndRing=0x20000000,
++ DescIntr=0x10000000,
++ RxDescWholePkt=0x60000000,
++ RxDescErrSum=0x80, RxErrRunt=0x40, RxErrLong=0x20, RxErrFrame=0x10,
++ RxErrCRC=0x08, RxErrCode=0x04,
++ TxErrAbort=0x2000, TxErrCarrier=0x1000, TxErrLate=0x0800,
++ TxErr16Colls=0x0400, TxErrDefer=0x0200, TxErrHeartbeat=0x0100,
++ TxColls=0x00ff,
++};
++
++#define PRIV_ALIGN 15 /* Required alignment mask */
++/* Use __attribute__((aligned (L1_CACHE_BYTES))) to maintain alignment
++ within the structure. */
++struct netdev_private {
++ /* Descriptor rings first for alignment. */
++ struct netdev_desc rx_ring[RX_RING_SIZE];
++ struct netdev_desc tx_ring[TX_RING_SIZE];
++ struct net_device *next_module; /* Link for devices of this type. */
++ void *priv_addr; /* Unaligned address for kfree */
++ const char *product_name;
++ /* The addresses of receive-in-place skbuffs. */
++ struct sk_buff* rx_skbuff[RX_RING_SIZE];
++ /* The saved address of a sent-in-place packet/buffer, for later free(). */
++ struct sk_buff* tx_skbuff[TX_RING_SIZE];
++ struct net_device_stats stats;
++ struct timer_list timer; /* Media monitoring timer. */
++ /* Frequently used values: keep some adjacent for cache effect. */
++ int msg_level;
++ int chip_id, drv_flags;
++ struct pci_dev *pci_dev;
++ long in_interrupt; /* Word-long for SMP locks. */
++ int max_interrupt_work;
++ int intr_enable;
++ unsigned int restore_intr_enable:1; /* Set if temporarily masked. */
++
++ struct netdev_desc *rx_head_desc;
++ unsigned int cur_rx, dirty_rx; /* Producer/consumer ring indices */
++ unsigned int rx_buf_sz; /* Based on MTU+slack. */
++ int rx_copybreak;
++
++ unsigned int cur_tx, dirty_tx;
++ unsigned int tx_config;
++ unsigned int tx_full:1; /* The Tx queue is full. */
++
++ /* These values keep track of the transceiver/media in use. */
++ unsigned int full_duplex:1; /* Full-duplex operation requested. */
++ unsigned int duplex_lock:1;
++ unsigned int medialock:1; /* Do not sense media. */
++ unsigned int default_port; /* Last dev->if_port value. */
++ /* Rx filter. */
++ u32 cur_rx_mode;
++ u32 rx_filter[2];
++ int multicast_filter_limit;
++
++ /* MII transceiver section. */
++ int mii_cnt; /* MII device addresses. */
++ u16 advertising; /* NWay media advertisement */
++ unsigned char phys[2]; /* MII device addresses. */
++};
++
++/* The station address location in the EEPROM. */
++#define EEPROM_SA_OFFSET 0x10
++
++static int eeprom_read(long ioaddr, int location);
++static int mdio_read(struct net_device *dev, int phy_id,
++ unsigned int location);
++static void mdio_write(struct net_device *dev, int phy_id,
++ unsigned int location, int value);
++static int netdev_open(struct net_device *dev);
++static int change_mtu(struct net_device *dev, int new_mtu);
++static void check_duplex(struct net_device *dev);
++static void netdev_timer(unsigned long data);
++static void tx_timeout(struct net_device *dev);
++static void init_ring(struct net_device *dev);
++static int start_tx(struct sk_buff *skb, struct net_device *dev);
++static void intr_handler(int irq, void *dev_instance, struct pt_regs *regs);
++static void netdev_error(struct net_device *dev, int intr_status);
++static int netdev_rx(struct net_device *dev);
++static void netdev_error(struct net_device *dev, int intr_status);
++static void set_rx_mode(struct net_device *dev);
++static struct net_device_stats *get_stats(struct net_device *dev);
++static int mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
++static int netdev_close(struct net_device *dev);
++
++
++
++/* A list of our installed devices, for removing the driver module. */
++static struct net_device *root_net_dev = NULL;
++
++#ifndef MODULE
++/* You *must* rename this! */
++int skel_netdev_probe(struct net_device *dev)
++{
++ if (pci_drv_register(&netfin_drv_id, dev) < 0)
++ return -ENODEV;
++ if (debug >= NETIF_MSG_DRV) /* Emit version even if no cards detected. */
++ printk(KERN_INFO "%s" KERN_INFO "%s", version1, version2);
++ return 0;
++}
++#endif
++
++static void *netfin_probe1(struct pci_dev *pdev, void *init_dev,
++ long ioaddr, int irq, int chip_idx, int card_idx)
++{
++ struct net_device *dev;
++ struct netdev_private *np;
++ void *priv_mem;
++ int i, option = card_idx < MAX_UNITS ? options[card_idx] : 0;
++
++ dev = init_etherdev(init_dev, 0);
++ if (!dev)
++ return NULL;
++
++ /* Perhaps NETIF_MSG_PROBE */
++ printk(KERN_INFO "%s: %s at 0x%lx, ",
++ dev->name, pci_id_tbl[chip_idx].name, ioaddr);
++
++ for (i = 0; i < 3; i++)
++ ((u16 *)dev->dev_addr)[i] =
++ le16_to_cpu(eeprom_read(ioaddr, i + EEPROM_SA_OFFSET));
++ if (memcmp(dev->dev_addr, "\0\0\0\0\0", 6) == 0) {
++ printk(" (MISSING EEPROM ADDRESS)");
++ /* Fill a temp addr with the "locally administered" bit set. */
++ memcpy(dev->dev_addr, ">Linux", 6);
++ }
++ for (i = 0; i < 5; i++)
++ printk("%2.2x:", dev->dev_addr[i]);
++ printk("%2.2x, IRQ %d.\n", dev->dev_addr[i], irq);
++
++#if ! defined(final_version) /* Dump the EEPROM contents during development. */
++ if (debug > 4)
++ for (i = 0; i < 0x40; i++)
++ printk("%4.4x%s",
++ eeprom_read(ioaddr, i), i % 16 != 15 ? " " : "\n");
++#endif
++
++ /* Make certain elements e.g. descriptor lists are aligned. */
++ priv_mem = kmalloc(sizeof(*np) + PRIV_ALIGN, GFP_KERNEL);
++ /* Check for the very unlikely case of no memory. */
++ if (priv_mem == NULL)
++ return NULL;
++
++ /* Do bogusness checks before this point.
++ We do a request_region() only to register /proc/ioports info. */
++#ifdef USE_IO_OPS
++ request_region(ioaddr, pci_id_tbl[chip_idx].io_size, dev->name);
++#endif
++
++ /* Reset the chip to erase previous misconfiguration. */
++ writel(ChipResetCmd, ioaddr + ChipReset);
++
++ dev->base_addr = ioaddr;
++ dev->irq = irq;
++
++ dev->priv = np = (void *)(((long)priv_mem + PRIV_ALIGN) & ~PRIV_ALIGN);
++ memset(np, 0, sizeof(*np));
++ np->priv_addr = priv_mem;
++
++ np->next_module = root_net_dev;
++ root_net_dev = dev;
++
++ np->pci_dev = pdev;
++ np->chip_id = chip_idx;
++ np->drv_flags = pci_id_tbl[chip_idx].drv_flags;
++ np->msg_level = (1 << debug) - 1;
++ np->rx_copybreak = rx_copybreak;
++ np->max_interrupt_work = max_interrupt_work;
++ np->multicast_filter_limit = multicast_filter_limit;
++
++ if (dev->mem_start)
++ option = dev->mem_start;
++
++ /* The lower four bits are the media type. */
++ if (option > 0) {
++ if (option & 0x220)
++ np->full_duplex = 1;
++ np->default_port = option & 0x3ff;
++ if (np->default_port & 0x330)
++ np->medialock = 1;
++ }
++ if (card_idx < MAX_UNITS && full_duplex[card_idx] > 0)
++ np->full_duplex = 1;
++
++ if (np->full_duplex) {
++ if (np->msg_level & NETIF_MSG_PROBE)
++ printk(KERN_INFO "%s: Set to forced full duplex, autonegotiation"
++ " disabled.\n", dev->name);
++ np->duplex_lock = 1;
++ }
++
++ /* The chip-specific entries in the device structure. */
++ dev->open = &netdev_open;
++ dev->hard_start_xmit = &start_tx;
++ dev->stop = &netdev_close;
++ dev->get_stats = &get_stats;
++ dev->set_multicast_list = &set_rx_mode;
++ dev->do_ioctl = &mii_ioctl;
++ dev->change_mtu = &change_mtu;
++
++ if (np->drv_flags & CanHaveMII) {
++ int phy, phy_idx = 0;
++ mii_preamble_required++;
++ /* In some cases the search should begin with #0. */
++ for (phy = 1; phy < 32 && phy_idx < 4; phy++) {
++ int mii_status = mdio_read(dev, phy, 1);
++ if (mii_status != 0xffff && mii_status != 0x0000) {
++ np->phys[phy_idx++] = phy;
++ np->advertising = mdio_read(dev, phy, 4);
++ if ((mii_status & 0x0040) == 0)
++ mii_preamble_required++;
++ if (np->msg_level & NETIF_MSG_PROBE)
++ printk(KERN_INFO "%s: MII PHY found at address %d, status "
++ "0x%4.4x advertising %4.4x.\n",
++ dev->name, phy, mii_status, np->advertising);
++ }
++ }
++ mii_preamble_required--;
++ np->mii_cnt = phy_idx;
++ }
++
++ /* Allow forcing the media type. */
++ if (option > 0) {
++ if (option & 0x220)
++ np->full_duplex = 1;
++ np->default_port = option & 0x3ff;
++ if (np->default_port & 0x330) {
++ np->medialock = 1;
++ if (np->msg_level & NETIF_MSG_PROBE)
++ printk(KERN_INFO " Forcing %dMbs %s-duplex operation.\n",
++ (option & 0x300 ? 100 : 10),
++ (np->full_duplex ? "full" : "half"));
++ if (np->mii_cnt)
++ mdio_write(dev, np->phys[0], 0,
++ ((option & 0x300) ? 0x2000 : 0) | /* 100mbps? */
++ (np->full_duplex ? 0x0100 : 0)); /* Full duplex? */
++ }
++ }
++
++ return dev;
++}
++
++
++/* Read the EEPROM and MII Management Data I/O (MDIO) interfaces.
++ These are often serial bit streams generated by the host processor.
++ The example below is for the common 93c46 EEPROM, 64 16 bit words. */
++
++/* Delay between EEPROM clock transitions.
++ This "delay" forces out buffered PCI writes.
++ Typically no extra delay is needed.
++ Note that pre-2.0.34 kernels had a cache-alignment bug that made
++ udelay() unreliable.
++*/
++#define eeprom_delay(ee_addr) readl(ee_addr)
++
++/* Note carefully if "DataIn" refers to the NIC or EEPROM viewpoint. */
++enum EEPROM_Ctrl_Bits {
++ EE_ShiftClk=0x01, EE_DataBit=0x02, EE_ChipSelect=0x04, EE_DataDir=0x08,
++};
++#define EE_Write0 (EE_DataDir | EE_ChipSelect)
++#define EE_Write1 (EE_DataDir | EE_ChipSelect | EE_DataBit)
++
++/* The EEPROM commands always start with 01.. preamble bits.
++ Commands are prepended to the variable-length address. */
++enum EEPROM_Cmds { EE_WriteCmd=5, EE_ReadCmd=6, EE_EraseCmd=7, };
++
++static int eeprom_read(long addr, int location)
++{
++ int i;
++ int retval = 0;
++ long ee_addr = addr + EECtrl;
++ int read_cmd = location | (EE_ReadCmd<<6);
++
++ writel(EE_DataDir, ee_addr);
++ /* Shift the read command bits out. */
++ for (i = 10; i >= 0; i--) {
++ short dataval = (read_cmd & (1 << i)) ? EE_Write1 : EE_Write0;
++ writel(dataval, ee_addr);
++ eeprom_delay(ee_addr);
++ writel(dataval | EE_ShiftClk, ee_addr);
++ eeprom_delay(ee_addr);
++ }
++ writel(EE_ChipSelect, ee_addr);
++ eeprom_delay(ee_addr);
++
++ for (i = 16; i > 0; i--) {
++ writel(EE_ChipSelect | EE_ShiftClk, ee_addr);
++ eeprom_delay(ee_addr);
++ retval = (retval << 1) | ((readl(ee_addr) & EE_DataBit) ? 1 : 0);
++ writel(EE_ChipSelect, ee_addr);
++ eeprom_delay(ee_addr);
++ }
++
++ /* Terminate the EEPROM access. */
++ writel(EE_DataDir, ee_addr);
++ writel(0, ee_addr);
++ return retval;
++}
++
++/* MII transceiver control section.
++ Read and write the MII registers using software-generated serial
++ MDIO protocol. See the MII specifications or DP83840A data sheet
++ for details.
++
++ The maximum data clock rate is 2.5 Mhz.
++ The timing is decoupled from the processor clock by flushing the write
++ from the CPU write buffer with a following read, and using PCI
++ transaction time. */
++#define mdio_in(mdio_addr) readl(mdio_addr)
++#define mdio_out(value, mdio_addr) writel(value, mdio_addr)
++#define mdio_delay(mdio_addr) readl(mdio_addr)
++
++enum mii_reg_bits {
++ MDIO_ShiftClk=0x0001, MDIO_Data=0x0002, MDIO_EnbOutput=0x0004,
++};
++#define MDIO_EnbIn (0)
++#define MDIO_WRITE0 (MDIO_EnbOutput)
++#define MDIO_WRITE1 (MDIO_Data | MDIO_EnbOutput)
++
++/* Generate the preamble required for initial synchronization and
++ a few older transceivers. */
++static void mdio_sync(long mdio_addr)
++{
++ int bits = 32;
++
++ /* Establish sync by sending at least 32 logic ones. */
++ while (--bits >= 0) {
++ mdio_out(MDIO_WRITE1, mdio_addr);
++ mdio_delay(mdio_addr);
++ mdio_out(MDIO_WRITE1 | MDIO_ShiftClk, mdio_addr);
++ mdio_delay(mdio_addr);
++ }
++}
++
++static int mdio_read(struct net_device *dev, int phy_id, unsigned int location)
++{
++ long mdio_addr = dev->base_addr + MIICtrl;
++ int mii_cmd = (0xf6 << 10) | (phy_id << 5) | location;
++ int i, retval = 0;
++
++ if (mii_preamble_required)
++ mdio_sync(mdio_addr);
++
++ /* Shift the read command bits out. */
++ for (i = 15; i >= 0; i--) {
++ int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;
++
++ mdio_out(dataval, mdio_addr);
++ mdio_delay(mdio_addr);
++ mdio_out(dataval | MDIO_ShiftClk, mdio_addr);
++ mdio_delay(mdio_addr);
++ }
++ /* Read the two transition, 16 data, and wire-idle bits. */
++ for (i = 19; i > 0; i--) {
++ mdio_out(MDIO_EnbIn, mdio_addr);
++ mdio_delay(mdio_addr);
++ retval = (retval << 1) | ((mdio_in(mdio_addr) & MDIO_Data) ? 1 : 0);
++ mdio_out(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
++ mdio_delay(mdio_addr);
++ }
++ return (retval>>1) & 0xffff;
++}
++
++static void mdio_write(struct net_device *dev, int phy_id,
++ unsigned int location, int value)
++{
++ long mdio_addr = dev->base_addr + MIICtrl;
++ int mii_cmd = (0x5002 << 16) | (phy_id << 23) | (location<<18) | value;
++ int i;
++
++ if (mii_preamble_required)
++ mdio_sync(mdio_addr);
++
++ /* Shift the command bits out. */
++ for (i = 31; i >= 0; i--) {
++ int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;
++
++ mdio_out(dataval, mdio_addr);
++ mdio_delay(mdio_addr);
++ mdio_out(dataval | MDIO_ShiftClk, mdio_addr);
++ mdio_delay(mdio_addr);
++ }
++ /* Clear out extra bits. */
++ for (i = 2; i > 0; i--) {
++ mdio_out(MDIO_EnbIn, mdio_addr);
++ mdio_delay(mdio_addr);
++ mdio_out(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
++ mdio_delay(mdio_addr);
++ }
++ return;
++}
++
++
++static int netdev_open(struct net_device *dev)
++{
++ struct netdev_private *np = (struct netdev_private *)dev->priv;
++ long ioaddr = dev->base_addr;
++ int i;
++
++ /* Some chips may need to be reset. */
++
++ MOD_INC_USE_COUNT;
++
++ /* Note that both request_irq() and init_ring() call kmalloc(), which
++ break the global kernel lock protecting this routine. */
++ if (request_irq(dev->irq, &intr_handler, SA_SHIRQ, dev->name, dev)) {
++ MOD_DEC_USE_COUNT;
++ return -EAGAIN;
++ }
++
++ if (np->msg_level & NETIF_MSG_IFUP)
++ printk(KERN_DEBUG "%s: netdev_open() irq %d.\n",
++ dev->name, dev->irq);
++
++ init_ring(dev);
++
++#if ADDRLEN == 64
++ writel(virt_to_bus(np->rx_ring) >> 32, ioaddr + RxRingPtr + 4);
++ writel(virt_to_bus(np->tx_ring) >> 32, ioaddr + TxRingPtr + 4);
++#endif
++ writel(virt_to_bus(np->rx_ring), ioaddr + RxRingPtr);
++ writel(virt_to_bus(np->tx_ring), ioaddr + TxRingPtr);
++
++ for (i = 0; i < 6; i++)
++ writeb(dev->dev_addr[i], ioaddr + StationAddr + i);
++#if 0
++ /* Or, if Address register must be written as words. */
++ writel(cpu_to_le32(cpu_to_le32(get_unaligned((u32 *)dev->dev_addr))),
++ ioaddr + StationAddr);
++ writel(cpu_to_le16(cpu_to_le16(get_unaligned((u16 *)(dev->dev_addr+4)))),
++ ioaddr + StationAddr + 4);
++#endif
++
++ /* Initialize other registers. */
++ /* Configure the PCI bus bursts and FIFO thresholds. */
++ writel(0x0000, ioaddr + PCIBusCfg);
++ writel(0x0000, ioaddr + FIFOCfg);
++
++ if (dev->if_port == 0)
++ dev->if_port = np->default_port;
++
++ np->in_interrupt = 0;
++
++ set_rx_mode(dev);
++ netif_start_tx_queue(dev);
++
++ /* Enable interrupts by setting the interrupt mask. */
++ np->intr_enable = IntrRxDone | IntrRxEmpty | IntrRxPCIErr |
++ IntrTxDone | IntrTxEmpty | IntrTxPCIErr | StatsMax | LinkChange;
++ writel(np->intr_enable, ioaddr + IntrEnable);
++
++ writel(RxEnable, dev->base_addr + RxCmd);
++
++ if (np->msg_level & NETIF_MSG_IFUP)
++ printk(KERN_DEBUG "%s: Done netdev_open(), status: Rx %x Tx %x.\n",
++ dev->name, (int)readl(ioaddr + RxStatus),
++ (int)readl(ioaddr + TxStatus));
++
++ /* Set the timer to check for link beat. */
++ init_timer(&np->timer);
++ np->timer.expires = jiffies + 3*HZ;
++ np->timer.data = (unsigned long)dev;
++ np->timer.function = &netdev_timer; /* timer handler */
++ add_timer(&np->timer);
++
++ return 0;
++}
++
++/* This is only needed if the chip supports >1500 byte frames.
++ Changing the MTU while active is usually race prone or impossible, thus
++ no configuration relies on the capability.
++ */
++static int change_mtu(struct net_device *dev, int new_mtu)
++{
++ if ((new_mtu < 68) || (new_mtu > 1500))
++ return -EINVAL;
++ if (netif_running(dev))
++ return -EBUSY;
++ dev->mtu = new_mtu;
++ return 0;
++}
++
++static void check_duplex(struct net_device *dev)
++{
++ struct netdev_private *np = (struct netdev_private *)dev->priv;
++ long ioaddr = dev->base_addr;
++ int old_tx_mode = np->tx_config;
++
++ if (np->medialock) {
++ if (np->full_duplex)
++ np->tx_config |= 1;
++ } else {
++ int mii_reg5 = mdio_read(dev, np->phys[0], 5);
++ int negotiated = mii_reg5 & np->advertising;
++ int duplex = (negotiated & 0x0100) || (negotiated & 0x01C0) == 0x0040;
++ if (np->duplex_lock || mii_reg5 == 0xffff)
++ return;
++ if (duplex)
++ np->tx_config |= TxModeFDX;
++ else
++ np->tx_config &= ~TxModeFDX;
++ if (np->full_duplex != duplex) {
++ np->full_duplex = duplex;
++ if (np->msg_level & NETIF_MSG_LINK)
++ printk(KERN_INFO "%s: Setting %s-duplex based on MII #%d"
++ " negotiated capability %4.4x.\n", dev->name,
++ duplex ? "full" : "half", np->phys[0], negotiated);
++ }
++ }
++ if (old_tx_mode != np->tx_config)
++ writew(np->tx_config, ioaddr + TxMode);
++}
++
++static void netdev_timer(unsigned long data)
++{
++ struct net_device *dev = (struct net_device *)data;
++ struct netdev_private *np = (struct netdev_private *)dev->priv;
++ long ioaddr = dev->base_addr;
++ int next_tick = 10*HZ;
++
++ if (np->msg_level & NETIF_MSG_TIMER) {
++ printk(KERN_DEBUG "%s: Media selection timer tick, status %8.8x, "
++ "Tx %x Rx %x.\n",
++ dev->name, (int)readl(ioaddr + IntrStatus),
++ (int)readl(ioaddr + TxStatus), (int)readl(ioaddr + RxStatus));
++ }
++ /* This will either have a small false-trigger window or will not catch
++ tbusy incorrectly set when the queue is empty. */
++ if (netif_queue_paused(dev) &&
++ np->cur_tx - np->dirty_tx > 1 &&
++ (jiffies - dev->trans_start) > TX_TIMEOUT) {
++ tx_timeout(dev);
++ }
++ check_duplex(dev);
++ np->timer.expires = jiffies + next_tick;
++ add_timer(&np->timer);
++}
++
++static void tx_timeout(struct net_device *dev)
++{
++ struct netdev_private *np = (struct netdev_private *)dev->priv;
++ long ioaddr = dev->base_addr;
++
++ printk(KERN_WARNING "%s: Transmit timed out, status %8.8x,"
++ " resetting...\n", dev->name, (int)readl(ioaddr + TxStatus));
++
++#ifndef __alpha__
++ if (np->msg_level & NETIF_MSG_TX_ERR) {
++ int i;
++ printk(KERN_DEBUG " Rx ring %p: ", np->rx_ring);
++ for (i = 0; i < RX_RING_SIZE; i++)
++ printk(" %8.8x", (unsigned int)np->rx_ring[i].status);
++ printk("\n"KERN_DEBUG" Tx ring %p: ", np->tx_ring);
++ for (i = 0; i < TX_RING_SIZE; i++)
++ printk(" %8.8x", np->tx_ring[i].status);
++ printk("\n");
++ }
++#endif
++
++ /* Perhaps we should reinitialize the hardware here. */
++ dev->if_port = 0;
++ /* Stop and restart the chip's Tx processes . */
++
++ /* Trigger an immediate transmit demand. */
++
++ dev->trans_start = jiffies;
++ np->stats.tx_errors++;
++ return;
++}
++
++
++/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
++static void init_ring(struct net_device *dev)
++{
++ struct netdev_private *np = (struct netdev_private *)dev->priv;
++ int i;
++
++ np->tx_full = 0;
++ np->cur_rx = np->cur_tx = 0;
++ np->dirty_rx = np->dirty_tx = 0;
++
++ /* Use 1518/+18 if the CRC is transferred. */
++ np->rx_buf_sz = (dev->mtu <= 1522 ? PKT_BUF_SZ : dev->mtu + 14);
++ np->rx_head_desc = &np->rx_ring[0];
++
++ /* Initialize all Rx descriptors. */
++ for (i = 0; i < RX_RING_SIZE; i++) {
++ np->rx_ring[i].length = cpu_to_le32(np->rx_buf_sz);
++ np->rx_ring[i].status = 0;
++ /* np->rx_ring[i].next_desc = virt_to_le32desc(&np->rx_ring[i+1]);*/
++ np->rx_skbuff[i] = 0;
++ }
++ /* Mark the last entry as wrapping the ring. */
++ np->rx_ring[i-1].status |= cpu_to_le32(DescEndRing);
++ /* Or np->rx_ring[i-1].next_desc = virt_to_le32desc(&np->rx_ring[0]);*/
++
++ /* Fill in the Rx buffers. Handle allocation failure gracefully. */
++ for (i = 0; i < RX_RING_SIZE; i++) {
++ struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz);
++ np->rx_skbuff[i] = skb;
++ if (skb == NULL)
++ break;
++ skb->dev = dev; /* Mark as being used by this device. */
++ skb_reserve(skb, 2); /* 16 byte align the IP header. */
++ np->rx_ring[i].addr = virt_to_le32desc(skb->tail);
++ np->rx_ring[i].status = cpu_to_le32(DescOwn | DescIntr);
++ }
++ np->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
++
++ for (i = 0; i < TX_RING_SIZE; i++) {
++ np->tx_skbuff[i] = 0;
++ np->tx_ring[i].status = 0;
++ /* Or np->tx_ring[i].next_desc = virt_to_le32desc(&np->tx_ring[i+1]);*/
++ }
++ /* Or np->tx_ring[i-1].next_desc = virt_to_le32desc(&np->tx_ring[0]); */
++ return;
++}
++
++static int start_tx(struct sk_buff *skb, struct net_device *dev)
++{
++ struct netdev_private *np = (struct netdev_private *)dev->priv;
++ unsigned entry;
++
++ /* Block a timer-based transmit from overlapping. This happens when
++ packets are presumed lost, and we use this check the Tx status. */
++ if (netif_pause_tx_queue(dev) != 0) {
++ /* This watchdog code is redundant with the media monitor timer. */
++ if (jiffies - dev->trans_start > TX_TIMEOUT)
++ tx_timeout(dev);
++ return 1;
++ }
++
++ /* Note: Ordering is important here, set the field with the
++ "ownership" bit last, and only then increment cur_tx. */
++
++ /* Calculate the next Tx descriptor entry. */
++ entry = np->cur_tx % TX_RING_SIZE;
++
++ np->tx_skbuff[entry] = skb;
++
++ np->tx_ring[entry].addr = virt_to_le32desc(skb->data);
++ np->tx_ring[entry].length = cpu_to_le32(skb->len);
++ if (entry >= TX_RING_SIZE-1) /* Wrap ring */
++ np->tx_ring[entry].status =
++ cpu_to_le32(DescOwn|DescEndPacket|DescEndRing);
++ else
++ np->tx_ring[entry].status = cpu_to_le32(DescOwn|DescEndPacket);
++ np->cur_tx++;
++
++ /* On some architectures: explicitly flush cache lines here. */
++
++ if (np->cur_tx - np->dirty_tx >= TX_QUEUE_LEN - 1) {
++ np->tx_full = 1;
++ /* Check for a just-cleared queue. */
++ if (np->cur_tx - (volatile unsigned int)np->dirty_tx
++ < TX_QUEUE_LEN - 2) {
++ np->tx_full = 0;
++ netif_unpause_tx_queue(dev);
++ } else
++ netif_stop_tx_queue(dev);
++ } else
++ netif_unpause_tx_queue(dev); /* Typical path */
++ /* Wake the potentially-idle transmit channel. */
++ writel(TxPoll, dev->base_addr + TxCmd);
++
++ dev->trans_start = jiffies;
++
++ if (np->msg_level & NETIF_MSG_TX_QUEUED) {
++ printk(KERN_DEBUG "%s: Transmit frame #%d queued in slot %d.\n",
++ dev->name, np->cur_tx, entry);
++ }
++ return 0;
++}
++
++/* The interrupt handler does all of the Rx thread work and cleans up
++ after the Tx thread. */
++static void intr_handler(int irq, void *dev_instance, struct pt_regs *rgs)
++{
++ struct net_device *dev = (struct net_device *)dev_instance;
++ struct netdev_private *np;
++ long ioaddr;
++ int boguscnt;
++
++#ifndef final_version /* Can never occur. */
++ if (dev == NULL) {
++ printk (KERN_ERR "Netdev interrupt handler(): IRQ %d for unknown "
++ "device.\n", irq);
++ return;
++ }
++#endif
++
++ ioaddr = dev->base_addr;
++ np = (struct netdev_private *)dev->priv;
++ boguscnt = np->max_interrupt_work;
++
++#if defined(__i386__) && LINUX_VERSION_CODE < 0x020300
++ /* A lock to prevent simultaneous entry bug on Intel SMP machines. */
++ if (test_and_set_bit(0, (void*)&dev->interrupt)) {
++ printk(KERN_ERR"%s: SMP simultaneous entry of an interrupt handler.\n",
++ dev->name);
++ dev->interrupt = 0; /* Avoid halting machine. */
++ return;
++ }
++#endif
++
++ do {
++ u32 intr_status = readl(ioaddr + IntrStatus);
++
++ /* Acknowledge all of the current interrupt sources ASAP. */
++ writel(intr_status & 0x0000ffff, ioaddr + IntrStatus);
++
++ if (np->msg_level & NETIF_MSG_INTR)
++ printk(KERN_DEBUG "%s: Interrupt, status %4.4x.\n",
++ dev->name, intr_status);
++
++ if (intr_status == 0 || intr_status == 0xffffffff)
++ break;
++
++ if (intr_status & IntrRxDone)
++ netdev_rx(dev);
++
++ for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
++ int entry = np->dirty_tx % TX_RING_SIZE;
++ int tx_status = le32_to_cpu(np->tx_ring[entry].status);
++ if (tx_status & DescOwn)
++ break;
++ if (np->msg_level & NETIF_MSG_TX_DONE)
++ printk(KERN_DEBUG "%s: Transmit done, Tx status %8.8x.\n",
++ dev->name, tx_status);
++ if (tx_status & (TxErrAbort | TxErrCarrier | TxErrLate
++ | TxErr16Colls | TxErrHeartbeat)) {
++ if (np->msg_level & NETIF_MSG_TX_ERR)
++ printk(KERN_DEBUG "%s: Transmit error, Tx status %8.8x.\n",
++ dev->name, tx_status);
++ np->stats.tx_errors++;
++ if (tx_status & TxErrCarrier) np->stats.tx_carrier_errors++;
++ if (tx_status & TxErrLate) np->stats.tx_window_errors++;
++ if (tx_status & TxErrHeartbeat) np->stats.tx_heartbeat_errors++;
++#ifdef ETHER_STATS
++ if (tx_status & TxErr16Colls) np->stats.collisions16++;
++ if (tx_status & TxErrAbort) np->stats.tx_aborted_errors++;
++#else
++ if (tx_status & (TxErr16Colls|TxErrAbort))
++ np->stats.tx_aborted_errors++;
++#endif
++ } else {
++ np->stats.tx_packets++;
++ np->stats.collisions += tx_status & TxColls;
++#if LINUX_VERSION_CODE > 0x20127
++ np->stats.tx_bytes += np->tx_skbuff[entry]->len;
++#endif
++#ifdef ETHER_STATS
++ if (tx_status & TxErrDefer) np->stats.tx_deferred++;
++#endif
++ }
++ /* Free the original skb. */
++ dev_free_skb_irq(np->tx_skbuff[entry]);
++ np->tx_skbuff[entry] = 0;
++ }
++ /* Note the 4 slot hysteresis to mark the queue non-full. */
++ if (np->tx_full && np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
++ /* The ring is no longer full, allow new TX entries. */
++ np->tx_full = 0;
++ netif_resume_tx_queue(dev);
++ }
++
++ /* Abnormal error summary/uncommon events handlers. */
++ if (intr_status & (IntrTxPCIErr | IntrRxPCIErr | LinkChange | StatsMax))
++ netdev_error(dev, intr_status);
++
++ if (--boguscnt < 0) {
++ printk(KERN_WARNING "%s: Too much work at interrupt, "
++ "status=0x%4.4x.\n",
++ dev->name, intr_status);
++ np->restore_intr_enable = 1;
++ break;
++ }
++ } while (1);
++
++ if (np->msg_level & NETIF_MSG_INTR)
++ printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
++ dev->name, (int)readl(ioaddr + IntrStatus));
++
++#if defined(__i386__) && LINUX_VERSION_CODE < 0x020300
++ clear_bit(0, (void*)&dev->interrupt);
++#endif
++ return;
++}
++
++/* This routine is logically part of the interrupt handler, but separated
++ for clarity and better register allocation. */
++static int netdev_rx(struct net_device *dev)
++{
++ struct netdev_private *np = (struct netdev_private *)dev->priv;
++ int entry = np->cur_rx % RX_RING_SIZE;
++ int boguscnt = np->dirty_rx + RX_RING_SIZE - np->cur_rx;
++
++ if (np->msg_level & NETIF_MSG_RX_STATUS) {
++ printk(KERN_DEBUG " In netdev_rx(), entry %d status %4.4x.\n",
++ entry, np->rx_ring[entry].status);
++ }
++
++ /* If EOP is set on the next entry, it's a new packet. Send it up. */
++ while ( ! (np->rx_head_desc->status & cpu_to_le32(DescOwn))) {
++ struct netdev_desc *desc = np->rx_head_desc;
++ u32 desc_status = le32_to_cpu(desc->status);
++ int data_size = le32_to_cpu(desc->length);
++
++ if (np->msg_level & NETIF_MSG_RX_STATUS)
++ printk(KERN_DEBUG " netdev_rx() status was %8.8x.\n",
++ desc_status);
++ if (--boguscnt < 0)
++ break;
++ if ((desc_status & RxDescWholePkt) != RxDescWholePkt) {
++ /* Select a message. */
++ printk(KERN_WARNING "%s: Oversized Ethernet frame spanned "
++ "multiple buffers, entry %#x length %d status %4.4x!\n",
++ dev->name, np->cur_rx, data_size, desc_status);
++ printk(KERN_WARNING "%s: Oversized Ethernet frame %p vs %p.\n",
++ dev->name, np->rx_head_desc,
++ &np->rx_ring[np->cur_rx % RX_RING_SIZE]);
++ printk(KERN_WARNING "%s: Oversized Ethernet frame -- next status %x/%x last status %x.\n",
++ dev->name,
++ np->rx_ring[(np->cur_rx+1) % RX_RING_SIZE].status,
++ np->rx_ring[(np->cur_rx+1) % RX_RING_SIZE].length,
++ np->rx_ring[(np->cur_rx-1) % RX_RING_SIZE].status);
++ np->stats.rx_length_errors++;
++ } else if (desc_status & RxDescErrSum) {
++ /* There was a error. */
++ if (np->msg_level & NETIF_MSG_RX_ERR)
++ printk(KERN_DEBUG " netdev_rx() Rx error was %8.8x.\n",
++ desc_status);
++ np->stats.rx_errors++;
++ if (desc_status & (RxErrLong|RxErrRunt))
++ np->stats.rx_length_errors++;
++ if (desc_status & (RxErrFrame|RxErrCode))
++ np->stats.rx_frame_errors++;
++ if (desc_status & RxErrCRC)
++ np->stats.rx_crc_errors++;
++ } else {
++ struct sk_buff *skb;
++ /* Reported length should omit the CRC. */
++ u16 pkt_len = data_size - 4;
++
++#ifndef final_version
++ if (np->msg_level & NETIF_MSG_RX_STATUS)
++ printk(KERN_DEBUG " netdev_rx() normal Rx pkt length %d"
++ " of %d, bogus_cnt %d.\n",
++ pkt_len, data_size, boguscnt);
++#endif
++ /* Check if the packet is long enough to accept without copying
++ to a minimally-sized skbuff. */
++ if (pkt_len < np->rx_copybreak
++ && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
++ skb->dev = dev;
++ skb_reserve(skb, 2); /* 16 byte align the IP header */
++#if (LINUX_VERSION_CODE >= 0x20100)
++ /* Use combined copy + cksum if available. */
++ eth_copy_and_sum(skb, np->rx_skbuff[entry]->tail, pkt_len, 0);
++ skb_put(skb, pkt_len);
++#else
++ memcpy(skb_put(skb, pkt_len), np->rx_skbuff[entry]->tail,
++ pkt_len);
++#endif
++ } else {
++ char *temp = skb_put(skb = np->rx_skbuff[entry], pkt_len);
++ np->rx_skbuff[entry] = NULL;
++#ifndef final_version /* Remove after testing. */
++ if (le32desc_to_virt(np->rx_ring[entry].addr) != temp)
++ printk(KERN_ERR "%s: Internal fault: The skbuff addresses "
++ "do not match in netdev_rx: %p vs. %p / %p.\n",
++ dev->name,
++ le32desc_to_virt(np->rx_ring[entry].addr),
++ skb->head, temp);
++#endif
++ }
++#ifndef final_version /* Remove after testing. */
++ /* You will want this info for the initial debug. */
++ if (np->msg_level & NETIF_MSG_PKTDATA)
++ printk(KERN_DEBUG " Rx data %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:"
++ "%2.2x %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x %2.2x%2.2x "
++ "%d.%d.%d.%d.\n",
++ skb->data[0], skb->data[1], skb->data[2], skb->data[3],
++ skb->data[4], skb->data[5], skb->data[6], skb->data[7],
++ skb->data[8], skb->data[9], skb->data[10],
++ skb->data[11], skb->data[12], skb->data[13],
++ skb->data[14], skb->data[15], skb->data[16],
++ skb->data[17]);
++#endif
++ skb->protocol = eth_type_trans(skb, dev);
++ /* Note: checksum -> skb->ip_summed = CHECKSUM_UNNECESSARY; */
++ netif_rx(skb);
++ dev->last_rx = jiffies;
++ np->stats.rx_packets++;
++#if LINUX_VERSION_CODE > 0x20127
++ np->stats.rx_bytes += pkt_len;
++#endif
++ }
++ entry = (++np->cur_rx) % RX_RING_SIZE;
++ np->rx_head_desc = &np->rx_ring[entry];
++ }
++
++ /* Refill the Rx ring buffers. */
++ for (; np->cur_rx - np->dirty_rx > 0; np->dirty_rx++) {
++ struct sk_buff *skb;
++ entry = np->dirty_rx % RX_RING_SIZE;
++ if (np->rx_skbuff[entry] == NULL) {
++ skb = dev_alloc_skb(np->rx_buf_sz);
++ np->rx_skbuff[entry] = skb;
++ if (skb == NULL)
++ break; /* Better luck next round. */
++ skb->dev = dev; /* Mark as being used by this device. */
++ skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
++ np->rx_ring[entry].addr = virt_to_le32desc(skb->tail);
++ }
++ np->rx_ring[entry].length = cpu_to_le32(np->rx_buf_sz);
++ np->rx_ring[entry].status = (entry == RX_RING_SIZE - 1)
++ ? cpu_to_le32(DescOwn | DescEndPacket | DescEndRing | DescIntr)
++ : cpu_to_le32(DescOwn | DescEndPacket | DescIntr);
++ }
++
++ /* Restart Rx engine if stopped. */
++ writel(RxPoll, dev->base_addr + RxCmd);
++ return 0;
++}
++
++static void netdev_error(struct net_device *dev, int intr_status)
++{
++ struct netdev_private *np = (struct netdev_private *)dev->priv;
++
++ if (intr_status & LinkChange) {
++ int phy_num = np->phys[0];
++ if (np->msg_level & NETIF_MSG_LINK)
++ printk(KERN_NOTICE "%s: Link changed: Autonegotiation advertising"
++ " %4.4x partner %4.4x.\n", dev->name,
++ mdio_read(dev, phy_num, 4),
++ mdio_read(dev, phy_num, 5));
++ /* Clear sticky bit. */
++ mdio_read(dev, phy_num, 1);
++ /* If link beat has returned... */
++ if (mdio_read(dev, phy_num, 1) & 0x0004)
++ netif_link_up(dev);
++ else
++ netif_link_down(dev);
++ check_duplex(dev);
++ }
++ if ((intr_status & TxUnderrun)
++ && (np->tx_config & TxThresholdField) != TxThresholdField) {
++ long ioaddr = dev->base_addr;
++ np->tx_config += TxThresholdInc;
++ writel(np->tx_config, ioaddr + TxMode);
++ np->stats.tx_fifo_errors++;
++ }
++ if (intr_status & RxOverflow) {
++ printk(KERN_WARNING "%s: Receiver overflow.\n", dev->name);
++ np->stats.rx_over_errors++;
++ netdev_rx(dev); /* Refill */
++ get_stats(dev); /* Empty dropped counter. */
++ }
++ if (intr_status & StatsMax) {
++ get_stats(dev);
++ }
++ if ((intr_status & ~(LinkChange|StatsMax|TxUnderrun|RxOverflow))
++ && (np->msg_level & NETIF_MSG_DRV))
++ printk(KERN_ERR "%s: Something Wicked happened! %4.4x.\n",
++ dev->name, intr_status);
++ /* Hmmmmm, it's not clear how to recover from PCI faults. */
++ if (intr_status & IntrTxPCIErr)
++ np->stats.tx_fifo_errors++;
++ if (intr_status & IntrRxPCIErr)
++ np->stats.rx_fifo_errors++;
++}
++
++/* We frequently do not bother to spinlock statistics.
++ A window only exists if we have non-atomic adds, the error counts are
++ typically zero, and statistics are non-critical. */
++static struct net_device_stats *get_stats(struct net_device *dev)
++{
++ long ioaddr = dev->base_addr;
++ struct netdev_private *np = (struct netdev_private *)dev->priv;
++
++ /* We should lock this segment of code for SMP eventually, although
++ the vulnerability window is very small and statistics are
++ non-critical. */
++ /* The chip only need report frame silently dropped. */
++ np->stats.rx_crc_errors += readl(ioaddr + RxCRCErrs);
++ np->stats.rx_missed_errors += readl(ioaddr + RxMissed);
++
++ return &np->stats;
++}
++
++/* The little-endian AUTODIN II ethernet CRC calculations.
++ A big-endian version is also available.
++ This is slow but compact code. Do not use this routine for bulk data,
++ use a table-based routine instead.
++ This is common code and should be moved to net/core/crc.c.
++ Chips may use the upper or lower CRC bits, and may reverse and/or invert
++ them. Select the endian-ness that results in minimal calculations.
++*/
++static unsigned const ethernet_polynomial_le = 0xedb88320U;
++static inline unsigned ether_crc_le(int length, unsigned char *data)
++{
++ unsigned int crc = ~0; /* Initial value. */
++ while(--length >= 0) {
++ unsigned char current_octet = *data++;
++ int bit;
++ for (bit = 8; --bit >= 0; current_octet >>= 1) {
++ if ((crc ^ current_octet) & 1) {
++ crc >>= 1;
++ crc ^= ethernet_polynomial_le;
++ } else
++ crc >>= 1;
++ }
++ }
++ return crc;
++}
++
++/*
++ Set the receiver mode, including the multicast filter.
++ The driver must not sleep here. If it must allocate memory use
++ GFP_ATOMIC and recover from allocation failure in the timer code.
++*/
++static void set_rx_mode(struct net_device *dev)
++{
++ struct netdev_private *np = (struct netdev_private *)dev->priv;
++ long ioaddr = dev->base_addr;
++ u32 mc_filter[2]; /* Multicast hash filter */
++ u32 rx_mode;
++
++ if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
++ /* Unconditionally log net taps. */
++ printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n", dev->name);
++ memset(mc_filter, ~0, sizeof(mc_filter));
++ rx_mode = AcceptBroadcast | AcceptMulticast | AcceptAllPhys
++ | AcceptMyPhys;
++ } else if ((dev->mc_count > np->multicast_filter_limit)
++ || (dev->flags & IFF_ALLMULTI)) {
++ /* Too many to match, or accept all multicasts. */
++ memset(mc_filter, 0xff, sizeof(mc_filter));
++ rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
++ } else {
++ struct dev_mc_list *mclist;
++ int i;
++ memset(mc_filter, 0, sizeof(mc_filter));
++ for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
++ i++, mclist = mclist->next) {
++ set_bit(ether_crc_le(ETH_ALEN, mclist->dmi_addr) & 0x3f,
++ mc_filter);
++ }
++ rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
++ }
++ writel(mc_filter[0], ioaddr + MulticastFilter0);
++ writel(mc_filter[1], ioaddr + MulticastFilter1);
++ writel(rx_mode, ioaddr + RxMode);
++}
++
++/*
++ Handle user-level ioctl() calls.
++ We must use two numeric constants as the key because some clueless person
++ changed the value for the symbolic name.
++*/
++static int mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
++{
++ struct netdev_private *np = (struct netdev_private *)dev->priv;
++ u16 *data = (u16 *)&rq->ifr_data;
++ u32 *data32 = (void *)&rq->ifr_data;
++
++ switch(cmd) {
++ case 0x8947: case 0x89F0:
++ /* SIOCGMIIPHY: Get the address of the PHY in use. */
++ data[0] = np->phys[0] & 0x1f;
++ /* Fall Through */
++ case 0x8948: case 0x89F1:
++ /* SIOCGMIIREG: Read the specified MII register. */
++ data[3] = mdio_read(dev, data[0] & 0x1f, data[1] & 0x1f);
++ return 0;
++ case 0x8949: case 0x89F2:
++ /* SIOCSMIIREG: Write the specified MII register */
++ if (!capable(CAP_NET_ADMIN))
++ return -EPERM;
++ if (data[0] == np->phys[0]) {
++ u16 value = data[2];
++ switch (data[1]) {
++ case 0:
++ /* Check for autonegotiation on or reset. */
++ np->medialock = (value & 0x9000) ? 0 : 1;
++ if (np->medialock)
++ np->full_duplex = (value & 0x0100) ? 1 : 0;
++ break;
++ case 4: np->advertising = value; break;
++ }
++ /* Perhaps check_duplex(dev), depending on chip semantics. */
++ }
++ mdio_write(dev, data[0] & 0x1f, data[1] & 0x1f, data[2]);
++ return 0;
++ case SIOCGPARAMS:
++ data32[0] = np->msg_level;
++ data32[1] = np->multicast_filter_limit;
++ data32[2] = np->max_interrupt_work;
++ data32[3] = np->rx_copybreak;
++ return 0;
++ case SIOCSPARAMS:
++ if (!capable(CAP_NET_ADMIN))
++ return -EPERM;
++ np->msg_level = data32[0];
++ np->multicast_filter_limit = data32[1];
++ np->max_interrupt_work = data32[2];
++ np->rx_copybreak = data32[3];
++ return 0;
++ default:
++ return -EOPNOTSUPP;
++ }
++}
++
++static int netdev_close(struct net_device *dev)
++{
++ long ioaddr = dev->base_addr;
++ struct netdev_private *np = (struct netdev_private *)dev->priv;
++ int i;
++
++ netif_stop_tx_queue(dev);
++
++ if (np->msg_level & NETIF_MSG_IFDOWN) {
++ printk(KERN_DEBUG "%s: Shutting down ethercard, status was Tx %4.4x "
++ "Rx %4.4x Int %2.2x.\n",
++ dev->name, (int)readl(ioaddr + TxStatus),
++ (int)readl(ioaddr + RxStatus), (int)readl(ioaddr + IntrStatus));
++ printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d, Rx %d / %d.\n",
++ dev->name, np->cur_tx, np->dirty_tx, np->cur_rx, np->dirty_rx);
++ }
++
++ /* Disable interrupts by clearing the interrupt mask. */
++ writel(0x0000, ioaddr + IntrEnable);
++
++ /* Stop the chip's Tx and Rx processes. */
++ writel(RxDisable, ioaddr + RxCmd);
++ writew(TxDisable, ioaddr + TxCmd);
++
++ del_timer(&np->timer);
++
++#ifdef __i386__
++ if (np->msg_level & NETIF_MSG_IFDOWN) {
++ printk("\n"KERN_DEBUG" Tx ring at %8.8x:\n",
++ (int)virt_to_bus(np->tx_ring));
++ for (i = 0; i < TX_RING_SIZE; i++)
++ printk(KERN_DEBUG " #%d desc. %4.4x %8.8x %8.8x.\n",
++ i, np->tx_ring[i].length,
++ np->tx_ring[i].status, np->tx_ring[i].addr);
++ printk(KERN_DEBUG "\n" KERN_DEBUG " Rx ring %8.8x:\n",
++ (int)virt_to_bus(np->rx_ring));
++ for (i = 0; i < RX_RING_SIZE; i++) {
++ printk(KERN_DEBUG " #%d desc. %4.4x %4.4x %8.8x\n",
++ i, np->rx_ring[i].length,
++ np->rx_ring[i].status, np->rx_ring[i].addr);
++ }
++ }
++#endif /* __i386__ debugging only */
++
++ free_irq(dev->irq, dev);
++
++ /* Free all the skbuffs in the Rx queue. */
++ for (i = 0; i < RX_RING_SIZE; i++) {
++ np->rx_ring[i].status = 0;
++ np->rx_ring[i].addr = 0xBADF00D0; /* An invalid address. */
++ if (np->rx_skbuff[i]) {
++#if LINUX_VERSION_CODE < 0x20100
++ np->rx_skbuff[i]->free = 1;
++#endif
++ dev_free_skb(np->rx_skbuff[i]);
++ }
++ np->rx_skbuff[i] = 0;
++ }
++ for (i = 0; i < TX_RING_SIZE; i++) {
++ if (np->tx_skbuff[i])
++ dev_free_skb(np->tx_skbuff[i]);
++ np->tx_skbuff[i] = 0;
++ }
++
++ writel(0x00, ioaddr + LEDCtrl);
++
++ MOD_DEC_USE_COUNT;
++
++ return 0;
++}
++
++static int netfin_pwr_event(void *dev_instance, int event)
++{
++ struct net_device *dev = dev_instance;
++ struct netdev_private *np = (struct netdev_private *)dev->priv;
++ long ioaddr = dev->base_addr;
++
++ if (np->msg_level & NETIF_MSG_LINK)
++ printk(KERN_DEBUG "%s: Handling power event %d.\n", dev->name, event);
++ switch(event) {
++ case DRV_ATTACH:
++ MOD_INC_USE_COUNT;
++ break;
++ case DRV_SUSPEND:
++ /* Disable interrupts, stop Tx and Rx. */
++ writel(0x0000, ioaddr + IntrEnable);
++ writel(RxDisable, ioaddr + RxCmd);
++ writew(TxDisable, ioaddr + TxCmd);
++ break;
++ case DRV_RESUME:
++ /* This is incomplete: the actions are very chip specific. */
++ set_rx_mode(dev);
++ writel(np->intr_enable, ioaddr + IntrEnable);
++ break;
++ case DRV_DETACH: {
++ struct net_device **devp, **next;
++ if (dev->flags & IFF_UP) {
++ /* Some, but not all, kernel versions close automatically. */
++ dev_close(dev);
++ dev->flags &= ~(IFF_UP|IFF_RUNNING);
++ }
++ unregister_netdev(dev);
++ release_region(dev->base_addr, pci_id_tbl[np->chip_id].io_size);
++#ifndef USE_IO_OPS
++ iounmap((char *)dev->base_addr);
++#endif
++ for (devp = &root_net_dev; *devp; devp = next) {
++ next = &((struct netdev_private *)(*devp)->priv)->next_module;
++ if (*devp == dev) {
++ *devp = *next;
++ break;
++ }
++ }
++ if (np->priv_addr)
++ kfree(np->priv_addr);
++ kfree(dev);
++ MOD_DEC_USE_COUNT;
++ break;
++ }
++ }
++
++ return 0;
++}
++
++
++#ifdef MODULE
++int init_module(void)
++{
++ if (debug >= NETIF_MSG_DRV) /* Emit version even if no cards detected. */
++ printk(KERN_INFO "%s" KERN_INFO "%s", version1, version2);
++ return pci_drv_register(&netfin_drv_id, NULL);
++}
++
++void cleanup_module(void)
++{
++ struct net_device *next_dev;
++
++ pci_drv_unregister(&netfin_drv_id);
++
++ /* No need to check MOD_IN_USE, as sys_delete_module() checks. */
++ while (root_net_dev) {
++ struct netdev_private *np = (void *)(root_net_dev->priv);
++ unregister_netdev(root_net_dev);
++#ifdef USE_IO_OPS
++ release_region(root_net_dev->base_addr,
++ pci_id_tbl[np->chip_id].io_size);
++#else
++ iounmap((char *)(root_net_dev->base_addr));
++#endif
++ next_dev = np->next_module;
++ if (np->priv_addr)
++ kfree(np->priv_addr);
++ kfree(root_net_dev);
++ root_net_dev = next_dev;
++ }
++}
++
++#endif /* MODULE */
++
++/*
++ * Local variables:
++ * compile-command: "make KERNVER=`uname -r` pci-skeleton.o"
++ * compile-cmd: "gcc -DMODULE -Wall -Wstrict-prototypes -O6 -c pci-skeleton.c"
++ * simple-compile-command: "gcc -DMODULE -O6 -c pci-skeleton.c"
++ * c-indent-level: 4
++ * c-basic-offset: 4
++ * tab-width: 4
++ * End:
++ */
+Index: linux/src/drivers/net/rtl8139.c
+===================================================================
+RCS file: /cvsroot/hurd/gnumach/linux/src/drivers/net/Attic/rtl8139.c,v
+retrieving revision 1.2
+diff -u -r1.2 rtl8139.c
+--- linux/src/drivers/net/rtl8139.c 9 Sep 1999 06:30:37 -0000 1.2
++++ linux/src/drivers/net/rtl8139.c 20 Aug 2004 10:32:54 -0000
+@@ -1,34 +1,62 @@
+ /* rtl8139.c: A RealTek RTL8129/8139 Fast Ethernet driver for Linux. */
+ /*
+- Written 1997-1998 by Donald Becker.
+-
+- This software may be used and distributed according to the terms
+- of the GNU Public License, incorporated herein by reference.
+- All other rights reserved.
++ Written and Copyright 1997-2003 by Donald Becker.
++ This software may be used and distributed according to the terms of
++ the GNU General Public License (GPL), incorporated herein by reference.
++ Drivers based on or derived from this code fall under the GPL and must
++ retain the authorship, copyright and license notice. This file is not
++ a complete program and may only be used when the entire operating
++ system is licensed under the GPL.
+
+ This driver is for boards based on the RTL8129 and RTL8139 PCI ethernet
+ chips.
+
+- The author may be reached as becker@CESDIS.gsfc.nasa.gov, or C/O
+- Center of Excellence in Space Data and Information Sciences
+- Code 930.5, Goddard Space Flight Center, Greenbelt MD 20771
++ The author may be reached as becker@scyld.com, or C/O
++ Scyld Computing Corporation
++ 410 Severn Ave., Suite 210
++ Annapolis MD 21403
+
+ Support and updates available at
+- http://cesdis.gsfc.nasa.gov/linux/drivers/rtl8139.html
++ http://www.scyld.com/network/rtl8139.html
+
+- Twister-tuning code contributed by Kinston <shangh@realtek.com.tw>.
++ Twister-tuning table provided by Kinston <shangh@realtek.com.tw>.
+ */
+
+-static const char *version =
+-"rtl8139.c:v0.99B 4/7/98 Donald Becker http://cesdis.gsfc.nasa.gov/linux/drivers/rtl8139.html\n";
++/* These identify the driver base version and may not be removed. */
++static const char versionA[] =
++"rtl8139.c:v1.23a 8/24/2003 Donald Becker, becker@scyld.com.\n";
++static const char versionB[] =
++" http://www.scyld.com/network/rtl8139.html\n";
++
++#ifndef USE_MEM_OPS
++/* Note: Register access width and timing restrictions apply in MMIO mode.
++ This updated driver should nominally work, but I/O mode is better tested. */
++#define USE_IO_OPS
++#endif
++
++/* The user-configurable values.
++ These may be modified when a driver module is loaded.*/
++/* Message enable level: 0..31 = no..all messages. See NETIF_MSG docs. */
++static int debug = 2;
+
+-/* A few user-configurable values. */
+ /* Maximum events (Rx packets, etc.) to handle at each interrupt. */
+-static int max_interrupt_work = 10;
++static int max_interrupt_work = 20;
++
++/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
++ The RTL chips use a 64 element hash table based on the Ethernet CRC. It
++ is efficient to update the hardware filter, but recalculating the table
++ for a long filter list is painful. */
++static int multicast_filter_limit = 32;
++
++/* Used to pass the full-duplex flag, etc. */
++#define MAX_UNITS 8 /* More are supported, limit only on options */
++static int options[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
++static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
++
++/* Operational parameters that are set at compile time. */
+
+-/* Size of the in-memory receive ring. */
++/* Maximum size of the in-memory receive ring (smaller if no memory). */
+ #define RX_BUF_LEN_IDX 2 /* 0==8K, 1==16K, 2==32K, 3==64K */
+-#define RX_BUF_LEN (8192 << RX_BUF_LEN_IDX)
+ /* Size of the Tx bounce buffers -- must be at least (dev->mtu+14+4). */
+ #define TX_BUF_SIZE 1536
+
+@@ -39,68 +67,86 @@
+ /* The following settings are log_2(bytes)-4: 0 == 16 bytes .. 6==1024. */
+ #define RX_FIFO_THRESH 4 /* Rx buffer level before first PCI xfer. */
+ #define RX_DMA_BURST 4 /* Maximum PCI burst, '4' is 256 bytes */
+-#define TX_DMA_BURST 4
++#define TX_DMA_BURST 4 /* Calculate as 16<<val. */
+
+ /* Operational parameters that usually are not changed. */
+ /* Time in jiffies before concluding the transmitter is hung. */
+-#define TX_TIMEOUT ((4000*HZ)/1000)
++#define TX_TIMEOUT (6*HZ)
+
+-#ifdef MODULE
+-#ifdef MODVERSIONS
+-#include <linux/modversions.h>
++/* Allocation size of Rx buffers with full-sized Ethernet frames.
++ This is a cross-driver value that is not a limit,
++ but a way to keep a consistent allocation size among drivers.
++ */
++#define PKT_BUF_SZ 1536
++
++
++#ifndef __KERNEL__
++#define __KERNEL__
+ #endif
+-#include <linux/module.h>
++#if !defined(__OPTIMIZE__)
++#warning You must compile this file with the correct options!
++#warning See the last lines of the source file.
++#error You must compile this driver with "-O".
++#endif
++
++#include <linux/config.h>
++#if defined(CONFIG_SMP) && ! defined(__SMP__)
++#define __SMP__
++#endif
++#if defined(MODULE) && defined(CONFIG_MODVERSIONS) && ! defined(MODVERSIONS)
++#define MODVERSIONS
++#endif
++
+ #include <linux/version.h>
+-#else
+-#define MOD_INC_USE_COUNT
+-#define MOD_DEC_USE_COUNT
++#if defined(MODVERSIONS)
++#include <linux/modversions.h>
+ #endif
++#include <linux/module.h>
+
+ #include <linux/kernel.h>
+-#include <linux/sched.h>
+ #include <linux/string.h>
+ #include <linux/timer.h>
+-#include <linux/ptrace.h>
+ #include <linux/errno.h>
+ #include <linux/ioport.h>
++#if LINUX_VERSION_CODE >= 0x20400
++#include <linux/slab.h>
++#else
+ #include <linux/malloc.h>
++#endif
+ #include <linux/interrupt.h>
+ #include <linux/pci.h>
+-#include <linux/bios32.h>
+-#include <asm/processor.h> /* Processor type for cache alignment. */
+-#include <asm/bitops.h>
+-#include <asm/io.h>
+-#include <asm/dma.h>
+-
+ #include <linux/netdevice.h>
+ #include <linux/etherdevice.h>
+ #include <linux/skbuff.h>
++#include <asm/processor.h> /* Processor type for cache alignment. */
++#include <asm/bitops.h>
++#include <asm/io.h>
+
+-#define RUN_AT(x) (jiffies + (x))
+-
+-#include <linux/delay.h>
+-
+-#if (LINUX_VERSION_CODE < 0x20123)
+-#define test_and_set_bit(val, addr) set_bit(val, addr)
++#if LINUX_VERSION_CODE >= 0x20300
++#include <linux/spinlock.h>
++#elif LINUX_VERSION_CODE >= 0x20200
++#include <asm/spinlock.h>
+ #endif
+
+-/* The I/O extent. */
+-#define RTL8129_TOTAL_SIZE 0x80
+-
+-#ifdef HAVE_DEVLIST
+-struct netdev_entry rtl8139_drv =
+-{"RTL8139", rtl8139_probe, RTL8129_TOTAL_SIZE, NULL};
++#ifdef INLINE_PCISCAN
++#include "k_compat.h"
++#else
++#include "pci-scan.h"
++#include "kern_compat.h"
+ #endif
+
+-static int rtl8129_debug = 1;
++#if (LINUX_VERSION_CODE >= 0x20100) && defined(MODULE)
++char kernel_version[] = UTS_RELEASE;
++#endif
+
+ /*
+ Theory of Operation
+
+ I. Board Compatibility
+
+-This device driver is designed for the RealTek RTL8129, the RealTek Fast
+-Ethernet controllers for PCI. This chip is used on a few clone boards.
++This device driver is designed for the RealTek RTL8129 series, the RealTek
++Fast Ethernet controllers for PCI and CardBus. This chip is used on many
++low-end boards, sometimes with custom chip labels.
+
+
+ II. Board-specific settings
+@@ -121,15 +167,17 @@
+ skbuffs.
+
+ Comment: While it is theoretically possible to process many frames in place,
+-any delay in Rx processing would cause us to drop frames. More importantly,
+-the Linux protocol stack is not designed to operate in this manner.
++any delay in Rx processing would block the Rx ring and cause us to drop
++frames. It would be difficult to design a protocol stack where the data
++buffer could be recalled by the device driver.
+
+ IIIb. Tx operation
+
+-The RTL8129 uses a fixed set of four Tx descriptors in register space.
+-In a stunningly bad design choice, Tx frames must be 32 bit aligned. Linux
+-aligns the IP header on word boundaries, and 14 byte ethernet header means
+-that almost all frames will need to be copied to an alignment buffer.
++The RTL8129 uses a fixed set of four Tx descriptors in register space. Tx
++frames must be 32 bit aligned. Linux aligns the IP header on word
++boundaries, and 14 byte ethernet header means that almost all frames will
++need to be copied to an alignment buffer. The driver statically allocates
++alignment the four alignment buffers at open() time.
+
+ IVb. References
+
+@@ -139,15 +187,74 @@
+ IVc. Errata
+
+ */
++
+
+-#ifndef PCI_VENDOR_ID_REALTEK
+-#define PCI_VENDOR_ID_REALTEK 0x10ec
+-#endif
+-#ifndef PCI_DEVICE_ID_REALTEK_8129
+-#define PCI_DEVICE_ID_REALTEK_8129 0x8129
++static void *rtl8139_probe1(struct pci_dev *pdev, void *init_dev,
++ long ioaddr, int irq, int chip_idx, int find_cnt);
++static int rtl_pwr_event(void *dev_instance, int event);
++
++enum chip_capability_flags {HAS_MII_XCVR=0x01, HAS_CHIP_XCVR=0x02,
++ HAS_LNK_CHNG=0x04, HAS_DESC=0x08};
++#ifdef USE_IO_OPS
++#define RTL8139_IOTYPE PCI_USES_MASTER|PCI_USES_IO |PCI_ADDR0
++#else
++#define RTL8139_IOTYPE PCI_USES_MASTER|PCI_USES_MEM|PCI_ADDR1
+ #endif
+-#ifndef PCI_DEVICE_ID_REALTEK_8139
+-#define PCI_DEVICE_ID_REALTEK_8139 0x8139
++#define RTL8129_CAPS HAS_MII_XCVR
++#define RTL8139_CAPS HAS_CHIP_XCVR|HAS_LNK_CHNG
++#define RTL8139D_CAPS HAS_CHIP_XCVR|HAS_LNK_CHNG|HAS_DESC
++
++/* Note: Update the marked constant in _attach() if the RTL8139B entry moves.*/
++static struct pci_id_info pci_tbl[] = {
++ {"RealTek RTL8139C+, 64 bit high performance",
++ { 0x813910ec, 0xffffffff, 0,0, 0x20, 0xff},
++ RTL8139_IOTYPE, 0x80, RTL8139D_CAPS, },
++ {"RealTek RTL8139C Fast Ethernet",
++ { 0x813910ec, 0xffffffff, 0,0, 0x10, 0xff},
++ RTL8139_IOTYPE, 0x80, RTL8139_CAPS, },
++ {"RealTek RTL8129 Fast Ethernet", { 0x812910ec, 0xffffffff,},
++ RTL8139_IOTYPE, 0x80, RTL8129_CAPS, },
++ {"RealTek RTL8139 Fast Ethernet", { 0x813910ec, 0xffffffff,},
++ RTL8139_IOTYPE, 0x80, RTL8139_CAPS, },
++ {"RealTek RTL8139B PCI/CardBus", { 0x813810ec, 0xffffffff,},
++ RTL8139_IOTYPE, 0x80, RTL8139_CAPS, },
++ {"SMC1211TX EZCard 10/100 (RealTek RTL8139)", { 0x12111113, 0xffffffff,},
++ RTL8139_IOTYPE, 0x80, RTL8139_CAPS, },
++ {"Accton MPX5030 (RealTek RTL8139)", { 0x12111113, 0xffffffff,},
++ RTL8139_IOTYPE, 0x80, RTL8139_CAPS, },
++ {"D-Link DFE-530TX+ (RealTek RTL8139C)",
++ { 0x13001186, 0xffffffff, 0x13011186, 0xffffffff,},
++ RTL8139_IOTYPE, 0x100, RTL8139_CAPS, },
++ {"D-Link DFE-538TX (RealTek RTL8139)", { 0x13001186, 0xffffffff,},
++ RTL8139_IOTYPE, 0x80, RTL8139_CAPS, },
++ {"LevelOne FPC-0106Tx (RealTek RTL8139)", { 0x0106018a, 0xffffffff,},
++ RTL8139_IOTYPE, 0x80, RTL8139_CAPS, },
++ {"Compaq HNE-300 (RealTek RTL8139c)", { 0x8139021b, 0xffffffff,},
++ RTL8139_IOTYPE, 0x80, RTL8139_CAPS, },
++ {"Edimax EP-4103DL CardBus (RealTek RTL8139c)", { 0xab0613d1, 0xffffffff,},
++ RTL8139_IOTYPE, 0x80, RTL8139_CAPS, },
++ {"Siemens 1012v2 CardBus (RealTek RTL8139c)", { 0x101202ac, 0xffffffff,},
++ RTL8139_IOTYPE, 0x80, RTL8139_CAPS, },
++ {0,}, /* 0 terminated list. */
++};
++
++struct drv_id_info rtl8139_drv_id = {
++ "realtek", PCI_HOTSWAP, PCI_CLASS_NETWORK_ETHERNET<<8, pci_tbl,
++ rtl8139_probe1, rtl_pwr_event };
++
++#ifndef USE_IO_OPS
++#undef inb
++#undef inw
++#undef inl
++#undef outb
++#undef outw
++#undef outl
++#define inb readb
++#define inw readw
++#define inl readl
++#define outb writeb
++#define outw writew
++#define outl writel
+ #endif
+
+ /* The rest of these values should never change. */
+@@ -157,7 +264,7 @@
+ enum RTL8129_registers {
+ MAC0=0, /* Ethernet hardware address. */
+ MAR0=8, /* Multicast filter. */
+- TxStat0=0x10, /* Transmit status (Four 32bit registers). */
++ TxStatus0=0x10, /* Transmit status (Four 32bit registers). */
+ TxAddr0=0x20, /* Tx descriptors (also four 32bit). */
+ RxBuf=0x30, RxEarlyCnt=0x34, RxEarlyStatus=0x36,
+ ChipCmd=0x37, RxBufPtr=0x38, RxBufAddr=0x3A,
+@@ -168,9 +275,10 @@
+ Cfg9346=0x50, Config0=0x51, Config1=0x52,
+ FlashReg=0x54, GPPinData=0x58, GPPinDir=0x59, MII_SMI=0x5A, HltClk=0x5B,
+ MultiIntr=0x5C, TxSummary=0x60,
+- BMCR=0x62, BMSR=0x64, NWayAdvert=0x66, NWayLPAR=0x68, NWayExpansion=0x6A,
++ MII_BMCR=0x62, MII_BMSR=0x64, NWayAdvert=0x66, NWayLPAR=0x68,
++ NWayExpansion=0x6A,
+ /* Undocumented registers, but required for proper operation. */
+- FIFOTMS=0x70, /* FIFO Test Mode Select */
++ FIFOTMS=0x70, /* FIFO Control and test. */
+ CSCR=0x74, /* Chip Status and Configuration Register. */
+ PARA78=0x78, PARA7c=0x7c, /* Magic transceiver parameter register. */
+ };
+@@ -194,281 +302,240 @@
+ RxBadAlign=0x0002, RxStatusOK=0x0001,
+ };
+
++/* Twister tuning parameters from RealTek.
++ Completely undocumented, but required to tune bad links. */
+ enum CSCRBits {
+ CSCR_LinkOKBit=0x0400, CSCR_LinkChangeBit=0x0800,
+ CSCR_LinkStatusBits=0x0f000, CSCR_LinkDownOffCmd=0x003c0,
+ CSCR_LinkDownCmd=0x0f3c0,
+-};
+-
+-/* Twister tuning parameters from RealTek. Completely undocumented. */
++};
++#define PARA78_default 0x78fa8388
++#define PARA7c_default 0xcb38de43 /* param[0][3] */
++#define PARA7c_xxx 0xcb38de43
+ unsigned long param[4][4]={
+- {0x0cb39de43,0x0cb39ce43,0x0fb38de03,0x0cb38de43},
+- {0x0cb39de43,0x0cb39ce43,0x0cb39ce83,0x0cb39ce83},
+- {0x0cb39de43,0x0cb39ce43,0x0cb39ce83,0x0cb39ce83},
+- {0x0bb39de43,0x0bb39ce43,0x0bb39ce83,0x0bb39ce83}
++ {0xcb39de43, 0xcb39ce43, 0xfb38de03, 0xcb38de43},
++ {0xcb39de43, 0xcb39ce43, 0xcb39ce83, 0xcb39ce83},
++ {0xcb39de43, 0xcb39ce43, 0xcb39ce83, 0xcb39ce83},
++ {0xbb39de43, 0xbb39ce43, 0xbb39ce83, 0xbb39ce83}
+ };
+
++#define PRIV_ALIGN 15 /* Desired alignment mask */
+ struct rtl8129_private {
+- char devname[8]; /* Used only for kernel debugging. */
+- const char *product_name;
+- struct device *next_module;
+- int chip_id;
+- int chip_revision;
+-#if LINUX_VERSION_CODE > 0x20139
++ struct net_device *next_module;
++ void *priv_addr; /* Unaligned address for kfree */
++
++ int chip_id, drv_flags;
++ struct pci_dev *pci_dev;
+ struct net_device_stats stats;
+-#else
+- struct enet_statistics stats;
+-#endif
+ struct timer_list timer; /* Media selection timer. */
+- unsigned int cur_rx, cur_tx; /* The next free and used entries */
+- unsigned int dirty_rx, dirty_tx;
++ int msg_level;
++ int max_interrupt_work;
++
++ /* Receive state. */
++ unsigned char *rx_ring;
++ unsigned int cur_rx; /* Index into the Rx buffer of next Rx pkt. */
++ unsigned int rx_buf_len; /* Size (8K 16K 32K or 64KB) of the Rx ring */
++
++ /* Transmit state. */
++ unsigned int cur_tx, dirty_tx, tx_flag;
++ unsigned long tx_full; /* The Tx queue is full. */
+ /* The saved address of a sent-in-place packet/buffer, for skfree(). */
+ struct sk_buff* tx_skbuff[NUM_TX_DESC];
+ unsigned char *tx_buf[NUM_TX_DESC]; /* Tx bounce buffers */
+- unsigned char *rx_ring;
+ unsigned char *tx_bufs; /* Tx bounce buffer region. */
+- unsigned char mc_filter[8]; /* Current multicast filter. */
++
++ /* Receive filter state. */
++ unsigned int rx_config;
++ u32 mc_filter[2]; /* Multicast hash filter */
++ int cur_rx_mode;
++ int multicast_filter_limit;
++
++ /* Transceiver state. */
+ char phys[4]; /* MII device addresses. */
+- int in_interrupt; /* Alpha needs word-wide lock. */
+- unsigned int tx_full:1; /* The Tx queue is full. */
++ u16 advertising; /* NWay media advertisement */
++ char twistie, twist_row, twist_col; /* Twister tune state. */
++ u8 config1;
+ unsigned int full_duplex:1; /* Full-duplex operation requested. */
+- unsigned int default_port:4; /* Last dev->if_port value. */
++ unsigned int duplex_lock:1;
+ unsigned int media2:4; /* Secondary monitored media port. */
+ unsigned int medialock:1; /* Don't sense media type. */
+ unsigned int mediasense:1; /* Media sensing in progress. */
++ unsigned int default_port; /* Last dev->if_port value. */
+ };
+
+-#ifdef MODULE
+-/* Used to pass the full-duplex flag, etc. */
+-static int options[] = {-1, -1, -1, -1, -1, -1, -1, -1};
+-static int full_duplex[] = {-1, -1, -1, -1, -1, -1, -1, -1};
+-#if LINUX_VERSION_CODE > 0x20118
+-MODULE_AUTHOR("Donald Becker <becker@cesdis.gsfc.nasa.gov>");
++MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
+ MODULE_DESCRIPTION("RealTek RTL8129/8139 Fast Ethernet driver");
+-MODULE_PARM(debug, "i");
+-MODULE_PARM(options, "1-" __MODULE_STRING(8) "i");
+-MODULE_PARM(full_duplex, "1-" __MODULE_STRING(8) "i");
++MODULE_LICENSE("GPL");
++MODULE_PARM(options, "1-" __MODULE_STRING(MAX_UNITS) "i");
++MODULE_PARM(full_duplex, "1-" __MODULE_STRING(MAX_UNITS) "i");
++MODULE_PARM(multicast_filter_limit, "i");
+ MODULE_PARM(max_interrupt_work, "i");
+-#endif
+-#endif
+-
+-static struct device *rtl8129_probe1(struct device *dev, int ioaddr, int irq,
+- int chip_id, int options, int card_idx);
+-static int rtl8129_open(struct device *dev);
+-static int read_eeprom(int ioaddr, int location);
+-static int mdio_read(int ioaddr, int phy_id, int location);
++MODULE_PARM(debug, "i");
++MODULE_PARM_DESC(debug, "Driver message level (0-31)");
++MODULE_PARM_DESC(options, "Force transceiver type or fixed speed+duplex");
++MODULE_PARM_DESC(full_duplex, "Non-zero to set forced full duplex.");
++MODULE_PARM_DESC(multicast_filter_limit,
++ "Multicast addresses before switching to Rx-all-multicast");
++MODULE_PARM_DESC(max_interrupt_work,
++ "Driver maximum events handled per interrupt");
++
++static int rtl8129_open(struct net_device *dev);
++static void rtl_hw_start(struct net_device *dev);
++static int read_eeprom(long ioaddr, int location, int addr_len);
++static int mdio_read(struct net_device *dev, int phy_id, int location);
++static void mdio_write(struct net_device *dev, int phy_id, int location, int val);
+ static void rtl8129_timer(unsigned long data);
+-static void rtl8129_tx_timeout(struct device *dev);
+-static void rtl8129_init_ring(struct device *dev);
+-static int rtl8129_start_xmit(struct sk_buff *skb, struct device *dev);
+-static int rtl8129_rx(struct device *dev);
++static void rtl8129_tx_timeout(struct net_device *dev);
++static void rtl8129_init_ring(struct net_device *dev);
++static int rtl8129_start_xmit(struct sk_buff *skb, struct net_device *dev);
++static int rtl8129_rx(struct net_device *dev);
+ static void rtl8129_interrupt(int irq, void *dev_instance, struct pt_regs *regs);
+-static int rtl8129_close(struct device *dev);
+-static struct enet_statistics *rtl8129_get_stats(struct device *dev);
+-static void set_rx_mode(struct device *dev);
++static void rtl_error(struct net_device *dev, int status, int link_status);
++static int rtl8129_close(struct net_device *dev);
++static int mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
++static struct net_device_stats *rtl8129_get_stats(struct net_device *dev);
++static inline u32 ether_crc(int length, unsigned char *data);
++static void set_rx_mode(struct net_device *dev);
+
+
+-#ifdef MODULE
+ /* A list of all installed RTL8129 devices, for removing the driver module. */
+-static struct device *root_rtl8129_dev = NULL;
+-#endif
++static struct net_device *root_rtl8129_dev = NULL;
+
+-int rtl8139_probe(struct device *dev)
++#ifndef MODULE
++int rtl8139_probe(struct net_device *dev)
+ {
+- int cards_found = 0;
+- static int pci_index = 0; /* Static, for multiple probe calls. */
+-
+- /* Ideally we would detect all network cards in slot order. That would
+- be best done a central PCI probe dispatch, which wouldn't work
+- well with the current structure. So instead we detect just the
+- Rtl81*9 cards in slot order. */
+-
+- if (pcibios_present()) {
+- unsigned char pci_bus, pci_device_fn;
+-
+- for (;pci_index < 0xff; pci_index++) {
+- u8 pci_irq_line, pci_latency;
+- u16 pci_command, new_command, vendor, device;
+- u32 pci_ioaddr;
+-
+- if (pcibios_find_class (PCI_CLASS_NETWORK_ETHERNET << 8,
+-#ifdef REVERSE_PROBE_ORDER
+- 0xff - pci_index,
+-#else
+- pci_index,
+-#endif
+- &pci_bus, &pci_device_fn)
+- != PCIBIOS_SUCCESSFUL)
+- break;
+- pcibios_read_config_word(pci_bus, pci_device_fn,
+- PCI_VENDOR_ID, &vendor);
+- if (vendor != PCI_VENDOR_ID_REALTEK)
+- continue;
+-
+- pcibios_read_config_word(pci_bus, pci_device_fn,
+- PCI_DEVICE_ID, &device);
+- pcibios_read_config_byte(pci_bus, pci_device_fn,
+- PCI_INTERRUPT_LINE, &pci_irq_line);
+- pcibios_read_config_dword(pci_bus, pci_device_fn,
+- PCI_BASE_ADDRESS_0, &pci_ioaddr);
+- /* Remove I/O space marker in bit 0. */
+- pci_ioaddr &= ~3;
+-
+- if (device != PCI_DEVICE_ID_REALTEK_8129
+- && device != PCI_DEVICE_ID_REALTEK_8139) {
+- printk(KERN_NOTICE "Unknown RealTek PCI ethernet chip type "
+- "%4.4x detected: not configured.\n", device);
+- continue;
+- }
+- if (check_region(pci_ioaddr, RTL8129_TOTAL_SIZE))
+- continue;
+-
+- /* Activate the card: fix for brain-damaged Win98 BIOSes. */
+- pcibios_read_config_word(pci_bus, pci_device_fn,
+- PCI_COMMAND, &pci_command);
+- new_command = pci_command | PCI_COMMAND_MASTER|PCI_COMMAND_IO;
+- if (pci_command != new_command) {
+- printk(KERN_INFO " The PCI BIOS has not enabled this"
+- " device! Updating PCI config %4.4x->%4.4x.\n",
+- pci_command, new_command);
+- pcibios_write_config_word(pci_bus, pci_device_fn,
+- PCI_COMMAND, new_command);
+- }
+-
+-#ifdef MODULE
+- dev = rtl8129_probe1(dev, pci_ioaddr, pci_irq_line, device,
+- options[cards_found], cards_found);
+-#else
+- dev = rtl8129_probe1(dev, pci_ioaddr, pci_irq_line, device,
+- dev ? dev->mem_start : 0, -1);
+-#endif
+-
+- if (dev) {
+- pcibios_read_config_byte(pci_bus, pci_device_fn,
+- PCI_LATENCY_TIMER, &pci_latency);
+- if (pci_latency < 32) {
+- printk(KERN_NOTICE" PCI latency timer (CFLT) is "
+- "unreasonably low at %d. Setting to 64 clocks.\n",
+- pci_latency);
+- pcibios_write_config_byte(pci_bus, pci_device_fn,
+- PCI_LATENCY_TIMER, 64);
+- } else if (rtl8129_debug > 1)
+- printk(KERN_INFO" PCI latency timer (CFLT) is %#x.\n",
+- pci_latency);
+- dev = 0;
+- cards_found++;
+- }
+- }
+- }
++ static int did_version = 0; /* Already printed version info. */
+
+-#if defined (MODULE)
+- return cards_found;
+-#else
+- return cards_found ? 0 : -ENODEV;
+-#endif
++ if (debug >= NETIF_MSG_DRV /* Emit version even if no cards detected. */
++ && did_version++ == 0)
++ printk(KERN_INFO "%s" KERN_INFO "%s", versionA, versionB);
++ return pci_drv_register(&rtl8139_drv_id, dev);
+ }
++#endif
+
+-static struct device *rtl8129_probe1(struct device *dev, int ioaddr, int irq,
+- int chip_id, int options, int card_idx)
++static void *rtl8139_probe1(struct pci_dev *pdev, void *init_dev,
++ long ioaddr, int irq, int chip_idx, int found_cnt)
+ {
+- static int did_version = 0; /* Already printed version info. */
+- struct rtl8129_private *tp;
+- int i;
+-
+- if (rtl8129_debug > 0 && did_version++ == 0)
+- printk(KERN_INFO "%s", version);
++ struct net_device *dev;
++ struct rtl8129_private *np;
++ void *priv_mem;
++ int i, option = found_cnt < MAX_UNITS ? options[found_cnt] : 0;
++ int config1;
++
++ dev = init_etherdev(init_dev, 0);
++ if (!dev)
++ return NULL;
+
+- dev = init_etherdev(dev, 0);
+-
+- printk(KERN_INFO "%s: RealTek RTL%x at %#3x, IRQ %d, ",
+- dev->name, chip_id, ioaddr, irq);
++ printk(KERN_INFO "%s: %s at %#lx, IRQ %d, ",
++ dev->name, pci_tbl[chip_idx].name, ioaddr, irq);
+
+ /* Bring the chip out of low-power mode. */
+- outb(0x00, ioaddr + Config1);
+-
+- /* Perhaps this should be read from the EEPROM? */
+- for (i = 0; i < 6; i++)
+- dev->dev_addr[i] = inb(ioaddr + MAC0 + i);
++ config1 = inb(ioaddr + Config1);
++ if (pci_tbl[chip_idx].drv_flags & HAS_MII_XCVR) /* rtl8129 chip */
++ outb(config1 & ~0x03, ioaddr + Config1);
++
++ {
++ int addr_len = read_eeprom(ioaddr, 0, 8) == 0x8129 ? 8 : 6;
++ for (i = 0; i < 3; i++)
++ ((u16 *)(dev->dev_addr))[i] =
++ le16_to_cpu(read_eeprom(ioaddr, i+7, addr_len));
++ }
+
+ for (i = 0; i < 5; i++)
+ printk("%2.2x:", dev->dev_addr[i]);
+ printk("%2.2x.\n", dev->dev_addr[i]);
+
+- if (rtl8129_debug > 1) {
+- printk(KERN_INFO "%s: EEPROM contents\n", dev->name);
+- for (i = 0; i < 64; i++)
+- printk(" %4.4x%s", read_eeprom(ioaddr, i),
+- i%16 == 15 ? "\n"KERN_INFO : "");
+- }
++ /* Make certain elements e.g. descriptor lists are aligned. */
++ priv_mem = kmalloc(sizeof(*np) + PRIV_ALIGN, GFP_KERNEL);
++ /* Check for the very unlikely case of no memory. */
++ if (priv_mem == NULL)
++ return NULL;
+
+ /* We do a request_region() to register /proc/ioports info. */
+- request_region(ioaddr, RTL8129_TOTAL_SIZE, "RealTek RTL8129/39 Fast Ethernet");
++ request_region(ioaddr, pci_tbl[chip_idx].io_size, dev->name);
+
+ dev->base_addr = ioaddr;
+ dev->irq = irq;
+
+- /* Some data structures must be quadword aligned. */
+- tp = kmalloc(sizeof(*tp), GFP_KERNEL | GFP_DMA);
+- memset(tp, 0, sizeof(*tp));
+- dev->priv = tp;
++ dev->priv = np = (void *)(((long)priv_mem + PRIV_ALIGN) & ~PRIV_ALIGN);
++ memset(np, 0, sizeof(*np));
++ np->priv_addr = priv_mem;
+
+-#ifdef MODULE
+- tp->next_module = root_rtl8129_dev;
++ np->next_module = root_rtl8129_dev;
+ root_rtl8129_dev = dev;
+-#endif
+
+- tp->chip_id = chip_id;
++ np->pci_dev = pdev;
++ np->chip_id = chip_idx;
++ np->drv_flags = pci_tbl[chip_idx].drv_flags;
++ np->msg_level = (1 << debug) - 1;
++ np->max_interrupt_work = max_interrupt_work;
++ np->multicast_filter_limit = multicast_filter_limit;
++
++ np->config1 = config1;
+
+ /* Find the connected MII xcvrs.
+ Doing this in open() would allow detecting external xcvrs later, but
+ takes too much time. */
+- if (chip_id == 0x8129) {
+- int phy, phy_idx;
+- for (phy = 0, phy_idx = 0; phy < 32 && phy_idx < sizeof(tp->phys);
+- phy++) {
+- int mii_status = mdio_read(ioaddr, phy, 1);
+-
+- if (mii_status != 0xffff && mii_status != 0x0000) {
+- tp->phys[phy_idx++] = phy;
+- printk(KERN_INFO "%s: MII transceiver found at address %d.\n",
+- dev->name, phy);
++ if (np->drv_flags & HAS_MII_XCVR) {
++ int phy, phy_idx = 0;
++ for (phy = 0; phy < 32 && phy_idx < sizeof(np->phys); phy++) {
++ int mii_status = mdio_read(dev, phy, 1);
++ if (mii_status != 0xffff && mii_status != 0x0000) {
++ np->phys[phy_idx++] = phy;
++ np->advertising = mdio_read(dev, phy, 4);
++ printk(KERN_INFO "%s: MII transceiver %d status 0x%4.4x "
++ "advertising %4.4x.\n",
++ dev->name, phy, mii_status, np->advertising);
+ }
+ }
+ if (phy_idx == 0) {
+ printk(KERN_INFO "%s: No MII transceivers found! Assuming SYM "
+ "transceiver.\n",
+ dev->name);
+- tp->phys[0] = -1;
++ np->phys[0] = 32;
+ }
+- } else {
+- tp->phys[0] = -1;
+- }
++ } else
++ np->phys[0] = 32;
+
+ /* Put the chip into low-power mode. */
+ outb(0xC0, ioaddr + Cfg9346);
+- outb(0x03, ioaddr + Config1);
++ if (np->drv_flags & HAS_MII_XCVR) /* rtl8129 chip */
++ outb(0x03, ioaddr + Config1);
++
+ outb('H', ioaddr + HltClk); /* 'R' would leave the clock running. */
+
+ /* The lower four bits are the media type. */
+- if (options > 0) {
+- tp->full_duplex = (options & 16) ? 1 : 0;
+- tp->default_port = options & 15;
+- if (tp->default_port)
+- tp->medialock = 1;
+- }
+-#ifdef MODULE
+- if (card_idx >= 0) {
+- if (full_duplex[card_idx] >= 0)
+- tp->full_duplex = full_duplex[card_idx];
++ if (option > 0) {
++ np->full_duplex = (option & 0x220) ? 1 : 0;
++ np->default_port = option & 0x330;
++ if (np->default_port)
++ np->medialock = 1;
++ }
++
++ if (found_cnt < MAX_UNITS && full_duplex[found_cnt] > 0)
++ np->full_duplex = full_duplex[found_cnt];
++
++ if (np->full_duplex) {
++ printk(KERN_INFO "%s: Media type forced to Full Duplex.\n", dev->name);
++ /* Changing the MII-advertised media might prevent re-connection. */
++ np->duplex_lock = 1;
++ }
++ if (np->default_port) {
++ printk(KERN_INFO " Forcing %dMbs %s-duplex operation.\n",
++ (option & 0x300 ? 100 : 10),
++ (option & 0x220 ? "full" : "half"));
++ mdio_write(dev, np->phys[0], 0,
++ ((option & 0x300) ? 0x2000 : 0) | /* 100mbps? */
++ ((option & 0x220) ? 0x0100 : 0)); /* Full duplex? */
+ }
+-#endif
+
+- /* The Rtl8129-specific entries in the device structure. */
++ /* The rtl81x9-specific entries in the device structure. */
+ dev->open = &rtl8129_open;
+ dev->hard_start_xmit = &rtl8129_start_xmit;
+ dev->stop = &rtl8129_close;
+ dev->get_stats = &rtl8129_get_stats;
+ dev->set_multicast_list = &set_rx_mode;
++ dev->do_ioctl = &mii_ioctl;
+
+ return dev;
+ }
+@@ -485,48 +552,43 @@
+ #define EE_ENB (0x80 | EE_CS)
+
+ /* Delay between EEPROM clock transitions.
+- No extra delay is needed with 33Mhz PCI, but 66Mhz is untested.
++ No extra delay is needed with 33Mhz PCI, but 66Mhz may change this.
+ */
+
+-#ifdef _LINUX_DELAY_H
+-#define eeprom_delay(nanosec) udelay(1)
+-#else
+-#define eeprom_delay(nanosec) do { ; } while (0)
+-#endif
++#define eeprom_delay() inl(ee_addr)
+
+ /* The EEPROM commands include the alway-set leading bit. */
+-#define EE_WRITE_CMD (5 << 6)
+-#define EE_READ_CMD (6 << 6)
+-#define EE_ERASE_CMD (7 << 6)
++#define EE_WRITE_CMD (5)
++#define EE_READ_CMD (6)
++#define EE_ERASE_CMD (7)
+
+-static int read_eeprom(int ioaddr, int location)
++static int read_eeprom(long ioaddr, int location, int addr_len)
+ {
+ int i;
+ unsigned retval = 0;
+- int ee_addr = ioaddr + Cfg9346;
+- int read_cmd = location | EE_READ_CMD;
++ long ee_addr = ioaddr + Cfg9346;
++ int read_cmd = location | (EE_READ_CMD << addr_len);
+
+ outb(EE_ENB & ~EE_CS, ee_addr);
+ outb(EE_ENB, ee_addr);
+
+ /* Shift the read command bits out. */
+- for (i = 10; i >= 0; i--) {
++ for (i = 4 + addr_len; i >= 0; i--) {
+ int dataval = (read_cmd & (1 << i)) ? EE_DATA_WRITE : 0;
+ outb(EE_ENB | dataval, ee_addr);
+- eeprom_delay(100);
++ eeprom_delay();
+ outb(EE_ENB | dataval | EE_SHIFT_CLK, ee_addr);
+- eeprom_delay(150);
+- outb(EE_ENB | dataval, ee_addr); /* Finish EEPROM a clock tick. */
+- eeprom_delay(250);
++ eeprom_delay();
+ }
+ outb(EE_ENB, ee_addr);
++ eeprom_delay();
+
+ for (i = 16; i > 0; i--) {
+ outb(EE_ENB | EE_SHIFT_CLK, ee_addr);
+- eeprom_delay(100);
++ eeprom_delay();
+ retval = (retval << 1) | ((inb(ee_addr) & EE_DATA_READ) ? 1 : 0);
+ outb(EE_ENB, ee_addr);
+- eeprom_delay(100);
++ eeprom_delay();
+ }
+
+ /* Terminate the EEPROM access. */
+@@ -544,288 +606,402 @@
+ #define MDIO_DATA_OUT 0x04
+ #define MDIO_DATA_IN 0x02
+ #define MDIO_CLK 0x01
+-#ifdef _LINUX_DELAY_H
+-#define mdio_delay() udelay(1) /* Really 400ns. */
+-#else
+-#define mdio_delay() do { ; } while (0)
+-#endif
++#define MDIO_WRITE0 (MDIO_DIR)
++#define MDIO_WRITE1 (MDIO_DIR | MDIO_DATA_OUT)
++
++#define mdio_delay(mdio_addr) inb(mdio_addr)
++
++static char mii_2_8139_map[8] = {MII_BMCR, MII_BMSR, 0, 0, NWayAdvert,
++ NWayLPAR, NWayExpansion, 0 };
+
+ /* Syncronize the MII management interface by shifting 32 one bits out. */
+-static void mdio_sync(int ioaddr)
++static void mdio_sync(long mdio_addr)
+ {
+ int i;
+- int mdio_addr = ioaddr + MII_SMI;
+
+ for (i = 32; i >= 0; i--) {
+- outb(MDIO_DIR | MDIO_DATA_OUT, mdio_addr);
+- mdio_delay();
+- outb(MDIO_DIR | MDIO_DATA_OUT | MDIO_CLK, mdio_addr);
+- mdio_delay();
++ outb(MDIO_WRITE1, mdio_addr);
++ mdio_delay(mdio_addr);
++ outb(MDIO_WRITE1 | MDIO_CLK, mdio_addr);
++ mdio_delay(mdio_addr);
+ }
+ return;
+ }
+-static int mdio_read(int ioaddr, int phy_id, int location)
++static int mdio_read(struct net_device *dev, int phy_id, int location)
+ {
+- int i;
+- int read_cmd = (0xf6 << 10) | (phy_id << 5) | location;
++ long mdio_addr = dev->base_addr + MII_SMI;
++ int mii_cmd = (0xf6 << 10) | (phy_id << 5) | location;
+ int retval = 0;
+- int mdio_addr = ioaddr + MII_SMI;
++ int i;
+
+- mdio_sync(ioaddr);
++ if (phy_id > 31) { /* Really a 8139. Use internal registers. */
++ return location < 8 && mii_2_8139_map[location] ?
++ inw(dev->base_addr + mii_2_8139_map[location]) : 0;
++ }
++ mdio_sync(mdio_addr);
+ /* Shift the read command bits out. */
+ for (i = 15; i >= 0; i--) {
+- int dataval =
+- (read_cmd & (1 << i)) ? MDIO_DATA_OUT : 0;
++ int dataval = (mii_cmd & (1 << i)) ? MDIO_DATA_OUT : 0;
+
+ outb(MDIO_DIR | dataval, mdio_addr);
+- mdio_delay();
++ mdio_delay(mdio_addr);
+ outb(MDIO_DIR | dataval | MDIO_CLK, mdio_addr);
+- mdio_delay();
++ mdio_delay(mdio_addr);
+ }
+
+ /* Read the two transition, 16 data, and wire-idle bits. */
+ for (i = 19; i > 0; i--) {
+ outb(0, mdio_addr);
+- mdio_delay();
++ mdio_delay(mdio_addr);
+ retval = (retval << 1) | ((inb(mdio_addr) & MDIO_DATA_IN) ? 1 : 0);
+ outb(MDIO_CLK, mdio_addr);
+- mdio_delay();
++ mdio_delay(mdio_addr);
+ }
+ return (retval>>1) & 0xffff;
+ }
+-
+-static int
+-rtl8129_open(struct device *dev)
++
++static void mdio_write(struct net_device *dev, int phy_id, int location,
++ int value)
+ {
+- struct rtl8129_private *tp = (struct rtl8129_private *)dev->priv;
+- int ioaddr = dev->base_addr;
++ long mdio_addr = dev->base_addr + MII_SMI;
++ int mii_cmd = (0x5002 << 16) | (phy_id << 23) | (location<<18) | value;
+ int i;
+- int full_duplex = 0;
+
+- /* Soft reset the chip. */
+- outb(CmdReset, ioaddr + ChipCmd);
++ if (phy_id > 31) { /* Really a 8139. Use internal registers. */
++ long ioaddr = dev->base_addr;
++ if (location == 0) {
++ outb(0xC0, ioaddr + Cfg9346);
++ outw(value, ioaddr + MII_BMCR);
++ outb(0x00, ioaddr + Cfg9346);
++ } else if (location < 8 && mii_2_8139_map[location])
++ outw(value, ioaddr + mii_2_8139_map[location]);
++ return;
++ }
++ mdio_sync(mdio_addr);
++
++ /* Shift the command bits out. */
++ for (i = 31; i >= 0; i--) {
++ int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;
++ outb(dataval, mdio_addr);
++ mdio_delay(mdio_addr);
++ outb(dataval | MDIO_CLK, mdio_addr);
++ mdio_delay(mdio_addr);
++ }
++ /* Clear out extra bits. */
++ for (i = 2; i > 0; i--) {
++ outb(0, mdio_addr);
++ mdio_delay(mdio_addr);
++ outb(MDIO_CLK, mdio_addr);
++ mdio_delay(mdio_addr);
++ }
++ return;
++}
++
++
++static int rtl8129_open(struct net_device *dev)
++{
++ struct rtl8129_private *tp = (struct rtl8129_private *)dev->priv;
++ long ioaddr = dev->base_addr;
++ int rx_buf_len_idx;
+
++ MOD_INC_USE_COUNT;
+ if (request_irq(dev->irq, &rtl8129_interrupt, SA_SHIRQ, dev->name, dev)) {
++ MOD_DEC_USE_COUNT;
+ return -EAGAIN;
+ }
+
+- MOD_INC_USE_COUNT;
++ /* The Rx ring allocation size is 2^N + delta, which is worst-case for
++ the kernel binary-buddy allocation. We allocate the Tx bounce buffers
++ at the same time to use some of the otherwise wasted space.
++ The delta of +16 is required for dribble-over because the receiver does
++ not wrap when the packet terminates just beyond the end of the ring. */
++ rx_buf_len_idx = RX_BUF_LEN_IDX;
++ do {
++ tp->rx_buf_len = 8192 << rx_buf_len_idx;
++ tp->rx_ring = kmalloc(tp->rx_buf_len + 16 +
++ (TX_BUF_SIZE * NUM_TX_DESC), GFP_KERNEL);
++ } while (tp->rx_ring == NULL && --rx_buf_len_idx >= 0);
+
+- tp->tx_bufs = kmalloc(TX_BUF_SIZE * NUM_TX_DESC, GFP_KERNEL);
+- tp->rx_ring = kmalloc(RX_BUF_LEN + 16, GFP_KERNEL);
+- if (tp->tx_bufs == NULL || tp->rx_ring == NULL) {
+- free_irq (dev->irq, dev);
+- if (tp->tx_bufs)
+- kfree(tp->tx_bufs);
+- if (rtl8129_debug > 0)
++ if (tp->rx_ring == NULL) {
++ if (debug > 0)
+ printk(KERN_ERR "%s: Couldn't allocate a %d byte receive ring.\n",
+- dev->name, RX_BUF_LEN);
++ dev->name, tp->rx_buf_len);
++ MOD_DEC_USE_COUNT;
+ return -ENOMEM;
+ }
++ tp->tx_bufs = tp->rx_ring + tp->rx_buf_len + 16;
++
+ rtl8129_init_ring(dev);
++ tp->full_duplex = tp->duplex_lock;
++ tp->tx_flag = (TX_FIFO_THRESH<<11) & 0x003f0000;
++ tp->rx_config =
++ (RX_FIFO_THRESH << 13) | (rx_buf_len_idx << 11) | (RX_DMA_BURST<<8);
++
++ rtl_hw_start(dev);
++ netif_start_tx_queue(dev);
++
++ if (tp->msg_level & NETIF_MSG_IFUP)
++ printk(KERN_DEBUG"%s: rtl8129_open() ioaddr %#lx IRQ %d"
++ " GP Pins %2.2x %s-duplex.\n",
++ dev->name, ioaddr, dev->irq, inb(ioaddr + GPPinData),
++ tp->full_duplex ? "full" : "half");
+
++ /* Set the timer to switch to check for link beat and perhaps switch
++ to an alternate media type. */
++ init_timer(&tp->timer);
++ tp->timer.expires = jiffies + 3*HZ;
++ tp->timer.data = (unsigned long)dev;
++ tp->timer.function = &rtl8129_timer;
++ add_timer(&tp->timer);
++
++ return 0;
++}
++
++/* Start the hardware at open or resume. */
++static void rtl_hw_start(struct net_device *dev)
++{
++ struct rtl8129_private *tp = (struct rtl8129_private *)dev->priv;
++ long ioaddr = dev->base_addr;
++ int i;
++
++ /* Soft reset the chip. */
++ outb(CmdReset, ioaddr + ChipCmd);
+ /* Check that the chip has finished the reset. */
+ for (i = 1000; i > 0; i--)
+ if ((inb(ioaddr + ChipCmd) & CmdReset) == 0)
+ break;
++ /* Restore our idea of the MAC address. */
++ outb(0xC0, ioaddr + Cfg9346);
++ outl(cpu_to_le32(*(u32*)(dev->dev_addr + 0)), ioaddr + MAC0 + 0);
++ outl(cpu_to_le32(*(u32*)(dev->dev_addr + 4)), ioaddr + MAC0 + 4);
+
+- for (i = 0; i < 6; i++)
+- outb(dev->dev_addr[i], ioaddr + MAC0 + i);
++ /* Hmmm, do these belong here? */
++ tp->cur_rx = 0;
+
+ /* Must enable Tx/Rx before setting transfer thresholds! */
+ outb(CmdRxEnb | CmdTxEnb, ioaddr + ChipCmd);
+- outl((RX_FIFO_THRESH << 13) | (RX_BUF_LEN_IDX << 11) | (RX_DMA_BURST<<8),
+- ioaddr + RxConfig);
+- outl((TX_DMA_BURST<<8)|0x03000000, ioaddr + TxConfig);
+-
+- full_duplex = tp->full_duplex;
+- if (tp->phys[0] >= 0 || tp->chip_id == 0x8139) {
+- u16 mii_reg5;
+- if (tp->chip_id == 0x8139)
+- mii_reg5 = inw(ioaddr + NWayLPAR);
+- else
+- mii_reg5 = mdio_read(ioaddr, tp->phys[0], 5);
++ outl(tp->rx_config, ioaddr + RxConfig);
++ /* Check this value: the documentation contradicts ifself. Is the
++ IFG correct with bit 28:27 zero, or with |0x03000000 ? */
++ outl((TX_DMA_BURST<<8), ioaddr + TxConfig);
++
++ /* This is check_duplex() */
++ if (tp->phys[0] >= 0 || (tp->drv_flags & HAS_MII_XCVR)) {
++ u16 mii_reg5 = mdio_read(dev, tp->phys[0], 5);
+ if (mii_reg5 == 0xffff)
+ ; /* Not there */
+ else if ((mii_reg5 & 0x0100) == 0x0100
+ || (mii_reg5 & 0x00C0) == 0x0040)
+- full_duplex = 1;
+- if (rtl8129_debug > 1)
++ tp->full_duplex = 1;
++ if (tp->msg_level & NETIF_MSG_LINK)
+ printk(KERN_INFO"%s: Setting %s%s-duplex based on"
+ " auto-negotiated partner ability %4.4x.\n", dev->name,
+ mii_reg5 == 0 ? "" :
+ (mii_reg5 & 0x0180) ? "100mbps " : "10mbps ",
+- full_duplex ? "full" : "half", mii_reg5);
++ tp->full_duplex ? "full" : "half", mii_reg5);
+ }
+
+- outb(0xC0, ioaddr + Cfg9346);
+- outb(full_duplex ? 0x60 : 0x20, ioaddr + Config1);
++ if (tp->drv_flags & HAS_MII_XCVR) /* rtl8129 chip */
++ outb(tp->full_duplex ? 0x60 : 0x20, ioaddr + Config1);
+ outb(0x00, ioaddr + Cfg9346);
+
+ outl(virt_to_bus(tp->rx_ring), ioaddr + RxBuf);
+-
+ /* Start the chip's Tx and Rx process. */
+ outl(0, ioaddr + RxMissed);
+ set_rx_mode(dev);
+-
+ outb(CmdRxEnb | CmdTxEnb, ioaddr + ChipCmd);
+-
+- dev->tbusy = 0;
+- dev->interrupt = 0;
+- dev->start = 1;
+-
+ /* Enable all known interrupts by setting the interrupt mask. */
+ outw(PCIErr | PCSTimeout | RxUnderrun | RxOverflow | RxFIFOOver
+- | TxErr | TxOK | RxErr | RxOK, ioaddr + IntrMask);
+-
+- if (rtl8129_debug > 1)
+- printk(KERN_DEBUG"%s: rtl8129_open() ioaddr %4.4x IRQ %d"
+- " GP Pins %2.2x %s-duplex.\n",
+- dev->name, ioaddr, dev->irq, inb(ioaddr + GPPinData),
+- full_duplex ? "full" : "half");
+-
+- /* Set the timer to switch to check for link beat and perhaps switch
+- to an alternate media type. */
+- init_timer(&tp->timer);
+- tp->timer.expires = RUN_AT((24*HZ)/10); /* 2.4 sec. */
+- tp->timer.data = (unsigned long)dev;
+- tp->timer.function = &rtl8129_timer; /* timer handler */
+- add_timer(&tp->timer);
++ | TxErr | TxOK | RxErr | RxOK, ioaddr + IntrMask);
+
+- return 0;
+ }
+
+ static void rtl8129_timer(unsigned long data)
+ {
+- struct device *dev = (struct device *)data;
+- struct rtl8129_private *tp = (struct rtl8129_private *)dev->priv;
+- int ioaddr = dev->base_addr;
+- int next_tick = 0;
+-
+- if (tp->chip_id == 0x8139) {
+- u16 mii_reg5 = inw(ioaddr + NWayLPAR);
+- if ((mii_reg5 & 0x0100) == 0x0100
+- || (mii_reg5 & 0x00C0) == 0x0040)
+- if ( ! tp->full_duplex) {
+- tp->full_duplex = 1;
+- if (rtl8129_debug > 0)
+- printk(KERN_INFO "%s: Switching to full-duplex based on "
+- "link partner ability of %4.4x.\n",
+- dev->name, mii_reg5);
++ struct net_device *dev = (struct net_device *)data;
++ struct rtl8129_private *np = (struct rtl8129_private *)dev->priv;
++ long ioaddr = dev->base_addr;
++ int next_tick = 60*HZ;
++ int mii_reg5 = mdio_read(dev, np->phys[0], 5);
++
++ if (! np->duplex_lock && mii_reg5 != 0xffff) {
++ int duplex = (mii_reg5&0x0100) || (mii_reg5 & 0x01C0) == 0x0040;
++ if (np->full_duplex != duplex) {
++ np->full_duplex = duplex;
++ printk(KERN_INFO "%s: Using %s-duplex based on MII #%d link"
++ " partner ability of %4.4x.\n", dev->name,
++ np->full_duplex ? "full" : "half", np->phys[0], mii_reg5);
++ if (np->drv_flags & HAS_MII_XCVR) {
+ outb(0xC0, ioaddr + Cfg9346);
+- outb(tp->full_duplex ? 0x60 : 0x20, ioaddr + Config1);
++ outb(np->full_duplex ? 0x60 : 0x20, ioaddr + Config1);
+ outb(0x00, ioaddr + Cfg9346);
+ }
++ }
+ }
+- if (rtl8129_debug > 2) {
+- if (tp->chip_id == 0x8129)
++#if LINUX_VERSION_CODE < 0x20300
++ /* Check for bogusness. */
++ if (inw(ioaddr + IntrStatus) & (TxOK | RxOK)) {
++ int status = inw(ioaddr + IntrStatus); /* Double check */
++ if (status & (TxOK | RxOK) && ! dev->interrupt) {
++ printk(KERN_ERR "%s: RTL8139 Interrupt line blocked, status %x.\n",
++ dev->name, status);
++ rtl8129_interrupt(dev->irq, dev, 0);
++ }
++ }
++ if (dev->tbusy && jiffies - dev->trans_start >= 2*TX_TIMEOUT)
++ rtl8129_tx_timeout(dev);
++#else
++ if (netif_queue_paused(dev) &&
++ np->cur_tx - np->dirty_tx > 1 &&
++ (jiffies - dev->trans_start) > TX_TIMEOUT) {
++ rtl8129_tx_timeout(dev);
++ }
++#endif
++
++#if defined(RTL_TUNE_TWISTER)
++ /* This is a complicated state machine to configure the "twister" for
++ impedance/echos based on the cable length.
++ All of this is magic and undocumented.
++ */
++ if (np->twistie) switch(np->twistie) {
++ case 1: {
++ if (inw(ioaddr + CSCR) & CSCR_LinkOKBit) {
++ /* We have link beat, let us tune the twister. */
++ outw(CSCR_LinkDownOffCmd, ioaddr + CSCR);
++ np->twistie = 2; /* Change to state 2. */
++ next_tick = HZ/10;
++ } else {
++ /* Just put in some reasonable defaults for when beat returns. */
++ outw(CSCR_LinkDownCmd, ioaddr + CSCR);
++ outl(0x20,ioaddr + FIFOTMS); /* Turn on cable test mode. */
++ outl(PARA78_default ,ioaddr + PARA78);
++ outl(PARA7c_default ,ioaddr + PARA7c);
++ np->twistie = 0; /* Bail from future actions. */
++ }
++ } break;
++ case 2: {
++ /* Read how long it took to hear the echo. */
++ int linkcase = inw(ioaddr + CSCR) & CSCR_LinkStatusBits;
++ if (linkcase == 0x7000) np->twist_row = 3;
++ else if (linkcase == 0x3000) np->twist_row = 2;
++ else if (linkcase == 0x1000) np->twist_row = 1;
++ else np->twist_row = 0;
++ np->twist_col = 0;
++ np->twistie = 3; /* Change to state 2. */
++ next_tick = HZ/10;
++ } break;
++ case 3: {
++ /* Put out four tuning parameters, one per 100msec. */
++ if (np->twist_col == 0) outw(0, ioaddr + FIFOTMS);
++ outl(param[(int)np->twist_row][(int)np->twist_col], ioaddr + PARA7c);
++ next_tick = HZ/10;
++ if (++np->twist_col >= 4) {
++ /* For short cables we are done.
++ For long cables (row == 3) check for mistune. */
++ np->twistie = (np->twist_row == 3) ? 4 : 0;
++ }
++ } break;
++ case 4: {
++ /* Special case for long cables: check for mistune. */
++ if ((inw(ioaddr + CSCR) & CSCR_LinkStatusBits) == 0x7000) {
++ np->twistie = 0;
++ break;
++ } else {
++ outl(0xfb38de03, ioaddr + PARA7c);
++ np->twistie = 5;
++ next_tick = HZ/10;
++ }
++ } break;
++ case 5: {
++ /* Retune for shorter cable (column 2). */
++ outl(0x20,ioaddr + FIFOTMS);
++ outl(PARA78_default, ioaddr + PARA78);
++ outl(PARA7c_default, ioaddr + PARA7c);
++ outl(0x00,ioaddr + FIFOTMS);
++ np->twist_row = 2;
++ np->twist_col = 0;
++ np->twistie = 3;
++ next_tick = HZ/10;
++ } break;
++ }
++#endif
++
++ if (np->msg_level & NETIF_MSG_TIMER) {
++ if (np->drv_flags & HAS_MII_XCVR)
+ printk(KERN_DEBUG"%s: Media selection tick, GP pins %2.2x.\n",
+ dev->name, inb(ioaddr + GPPinData));
+ else
+ printk(KERN_DEBUG"%s: Media selection tick, Link partner %4.4x.\n",
+ dev->name, inw(ioaddr + NWayLPAR));
+- printk(KERN_DEBUG"%s: Other registers are IntMask %4.4x IntStatus %4.4x"
+- " RxStatus %4.4x.\n",
++ printk(KERN_DEBUG"%s: Other registers are IntMask %4.4x "
++ "IntStatus %4.4x RxStatus %4.4x.\n",
+ dev->name, inw(ioaddr + IntrMask), inw(ioaddr + IntrStatus),
+- inl(ioaddr + RxEarlyStatus));
++ (int)inl(ioaddr + RxEarlyStatus));
+ printk(KERN_DEBUG"%s: Chip config %2.2x %2.2x.\n",
+ dev->name, inb(ioaddr + Config0), inb(ioaddr + Config1));
+ }
+
+- if (next_tick) {
+- tp->timer.expires = RUN_AT(next_tick);
+- add_timer(&tp->timer);
+- }
++ np->timer.expires = jiffies + next_tick;
++ add_timer(&np->timer);
+ }
+
+-static void rtl8129_tx_timeout(struct device *dev)
++static void rtl8129_tx_timeout(struct net_device *dev)
+ {
+ struct rtl8129_private *tp = (struct rtl8129_private *)dev->priv;
+- int ioaddr = dev->base_addr;
+- int i;
++ long ioaddr = dev->base_addr;
++ int status = inw(ioaddr + IntrStatus);
++ int mii_reg, i;
++
++ /* Could be wrapped with if (tp->msg_level & NETIF_MSG_TX_ERR) */
++ printk(KERN_ERR "%s: Transmit timeout, status %2.2x %4.4x "
++ "media %2.2x.\n",
++ dev->name, inb(ioaddr + ChipCmd), status, inb(ioaddr + GPPinData));
++
++ if (status & (TxOK | RxOK)) {
++ printk(KERN_ERR "%s: RTL8139 Interrupt line blocked, status %x.\n",
++ dev->name, status);
++ }
+
+ /* Disable interrupts by clearing the interrupt mask. */
+ outw(0x0000, ioaddr + IntrMask);
+-
+- if (rtl8129_debug > 0)
+- printk(KERN_WARNING "%s: Transmit timeout, status %2.2x %4.4x.\n",
+- dev->name, inb(ioaddr + ChipCmd), inw(ioaddr + IntrStatus));
+ /* Emit info to figure out what went wrong. */
++ printk(KERN_DEBUG "%s: Tx queue start entry %d dirty entry %d%s.\n",
++ dev->name, tp->cur_tx, tp->dirty_tx, tp->tx_full ? ", full" : "");
+ for (i = 0; i < NUM_TX_DESC; i++)
+- printk(KERN_DEBUG"%s: Tx descriptor %d is %8.8x.%s\n",
+- dev->name, i, inl(ioaddr + TxStat0 + i*4),
++ printk(KERN_DEBUG "%s: Tx descriptor %d is %8.8x.%s\n",
++ dev->name, i, (int)inl(ioaddr + TxStatus0 + i*4),
+ i == tp->dirty_tx % NUM_TX_DESC ? " (queue head)" : "");
+- if (tp->chip_id == 0x8129) {
+- int mii_reg;
+- printk(KERN_DEBUG"%s: MII #%d registers are:", dev->name, tp->phys[0]);
+- for (mii_reg = 0; mii_reg < 8; mii_reg++)
+- printk(" %4.4x", mdio_read(ioaddr, tp->phys[0], mii_reg));
+- printk(".\n");
+- } else {
+- printk(KERN_DEBUG"%s: MII status register is %4.4x.\n",
+- dev->name, inw(ioaddr + BMSR));
+- }
+-
+- /* Soft reset the chip. */
+- outb(CmdReset, ioaddr + ChipCmd);
+- for (i = 0; i < 6; i++)
+- outb(dev->dev_addr[i], ioaddr + MAC0 + i);
+-
+- { /* Save the unsent Tx packets. */
+- struct sk_buff *saved_skb[NUM_TX_DESC], *skb;
+- int j = 0;
+- for (; tp->cur_tx - tp->dirty_tx > 0 ; tp->dirty_tx++)
+- saved_skb[j++] = tp->tx_skbuff[tp->dirty_tx % NUM_TX_DESC];
+- tp->dirty_tx = tp->cur_tx = 0;
+-
+- for (i = 0; i < j; i++) {
+- skb = tp->tx_skbuff[i] = saved_skb[i];
+- if ((long)skb->data & 3) { /* Must use alignment buffer. */
+- memcpy(tp->tx_buf[i], skb->data, skb->len);
+- outl(virt_to_bus(tp->tx_buf[i]), ioaddr + TxAddr0 + i*4);
+- } else
+- outl(virt_to_bus(skb->data), ioaddr + TxAddr0 + i*4);
+- /* Note: the chip doesn't have auto-pad! */
+- outl(((TX_FIFO_THRESH<<11) & 0x003f0000) |
+- (skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN),
+- ioaddr + TxStat0 + i*4);
+- }
+- tp->cur_tx = i;
+- while (i < NUM_TX_DESC)
++ printk(KERN_DEBUG "%s: MII #%d registers are:", dev->name, tp->phys[0]);
++ for (mii_reg = 0; mii_reg < 8; mii_reg++)
++ printk(" %4.4x", mdio_read(dev, tp->phys[0], mii_reg));
++ printk(".\n");
++
++ /* Stop a shared interrupt from scavenging while we are. */
++ tp->dirty_tx = tp->cur_tx = 0;
++ /* Dump the unsent Tx packets. */
++ for (i = 0; i < NUM_TX_DESC; i++) {
++ if (tp->tx_skbuff[i]) {
++ dev_free_skb(tp->tx_skbuff[i]);
+ tp->tx_skbuff[i] = 0;
+- if (tp->cur_tx - tp->dirty_tx < NUM_TX_DESC) {/* Typical path */
+- dev->tbusy = 0;
+- } else {
+- tp->tx_full = 1;
++ tp->stats.tx_dropped++;
+ }
+ }
+-
+- /* Must enable Tx/Rx before setting transfer thresholds! */
+- set_rx_mode(dev);
+- outb(CmdRxEnb | CmdTxEnb, ioaddr + ChipCmd);
+- outl((RX_FIFO_THRESH << 13) | (RX_BUF_LEN_IDX << 11) | (RX_DMA_BURST<<8),
+- ioaddr + RxConfig);
+- outl((TX_DMA_BURST<<8), ioaddr + TxConfig);
+-
+- dev->trans_start = jiffies;
+- tp->stats.tx_errors++;
+- /* Enable all known interrupts by setting the interrupt mask. */
+- outw(PCIErr | PCSTimeout | RxUnderrun | RxOverflow | RxFIFOOver
+- | TxErr | TxOK | RxErr | RxOK, ioaddr + IntrMask);
++ rtl_hw_start(dev);
++ netif_unpause_tx_queue(dev);
++ tp->tx_full = 0;
+ return;
+ }
+
+
+ /* Initialize the Rx and Tx rings, along with various 'dev' bits. */
+ static void
+-rtl8129_init_ring(struct device *dev)
++rtl8129_init_ring(struct net_device *dev)
+ {
+ struct rtl8129_private *tp = (struct rtl8129_private *)dev->priv;
+ int i;
+
+ tp->tx_full = 0;
+- tp->cur_rx = tp->cur_tx = 0;
+- tp->dirty_rx = tp->dirty_tx = 0;
++ tp->dirty_tx = tp->cur_tx = 0;
+
+ for (i = 0; i < NUM_TX_DESC; i++) {
+ tp->tx_skbuff[i] = 0;
+@@ -834,18 +1010,16 @@
+ }
+
+ static int
+-rtl8129_start_xmit(struct sk_buff *skb, struct device *dev)
++rtl8129_start_xmit(struct sk_buff *skb, struct net_device *dev)
+ {
+ struct rtl8129_private *tp = (struct rtl8129_private *)dev->priv;
+- int ioaddr = dev->base_addr;
++ long ioaddr = dev->base_addr;
+ int entry;
+
+- /* Block a timer-based transmit from overlapping. This could better be
+- done with atomic_swap(1, dev->tbusy), but set_bit() works as well. */
+- if (test_and_set_bit(0, (void*)&dev->tbusy) != 0) {
+- if (jiffies - dev->trans_start < TX_TIMEOUT)
+- return 1;
+- rtl8129_tx_timeout(dev);
++ if (netif_pause_tx_queue(dev) != 0) {
++ /* This watchdog code is redundant with the media monitor timer. */
++ if (jiffies - dev->trans_start > TX_TIMEOUT)
++ rtl8129_tx_timeout(dev);
+ return 1;
+ }
+
+@@ -859,20 +1033,26 @@
+ } else
+ outl(virt_to_bus(skb->data), ioaddr + TxAddr0 + entry*4);
+ /* Note: the chip doesn't have auto-pad! */
+- outl(((TX_FIFO_THRESH<<11) & 0x003f0000) |
+- (skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN),
+- ioaddr + TxStat0 + entry*4);
++ outl(tp->tx_flag | (skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN),
++ ioaddr + TxStatus0 + entry*4);
+
+- if (++tp->cur_tx - tp->dirty_tx < NUM_TX_DESC) {/* Typical path */
+- dev->tbusy = 0;
+- } else {
+- tp->tx_full = 1;
+- }
++ /* There is a race condition here -- we might read dirty_tx, take an
++ interrupt that clears the Tx queue, and only then set tx_full.
++ So we do this in two phases. */
++ if (++tp->cur_tx - tp->dirty_tx >= NUM_TX_DESC) {
++ set_bit(0, &tp->tx_full);
++ if (tp->cur_tx - (volatile unsigned int)tp->dirty_tx < NUM_TX_DESC) {
++ clear_bit(0, &tp->tx_full);
++ netif_unpause_tx_queue(dev);
++ } else
++ netif_stop_tx_queue(dev);
++ } else
++ netif_unpause_tx_queue(dev);
+
+ dev->trans_start = jiffies;
+- if (rtl8129_debug > 4)
+- printk(KERN_DEBUG"%s: Queued Tx packet at %p size %ld to slot %d.\n",
+- dev->name, skb->data, skb->len, entry);
++ if (tp->msg_level & NETIF_MSG_TX_QUEUED)
++ printk(KERN_DEBUG"%s: Queued Tx packet at %p size %d to slot %d.\n",
++ dev->name, skb->data, (int)skb->len, entry);
+
+ return 0;
+ }
+@@ -881,31 +1061,32 @@
+ after the Tx thread. */
+ static void rtl8129_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
+ {
+- struct device *dev = (struct device *)dev_instance;
+- struct rtl8129_private *tp;
+- int ioaddr, boguscnt = max_interrupt_work;
+- int status;
+-
+- if (dev == NULL) {
+- printk (KERN_ERR"rtl8139_interrupt(): IRQ %d for unknown device.\n",
+- irq);
+- return;
+- }
+-
+- ioaddr = dev->base_addr;
+- tp = (struct rtl8129_private *)dev->priv;
+- if (test_and_set_bit(0, (void*)&tp->in_interrupt)) {
+- printk(KERN_ERR "%s: Re-entering the interrupt handler.\n", dev->name);
++ struct net_device *dev = (struct net_device *)dev_instance;
++ struct rtl8129_private *np = (struct rtl8129_private *)dev->priv;
++ struct rtl8129_private *tp = np;
++ int boguscnt = np->max_interrupt_work;
++ long ioaddr = dev->base_addr;
++ int link_changed = 0; /* Grrr, avoid bogus "uninitialized" warning */
++
++#if defined(__i386__) && LINUX_VERSION_CODE < 0x20123
++ /* A lock to prevent simultaneous entry bug on Intel SMP machines. */
++ if (test_and_set_bit(0, (void*)&dev->interrupt)) {
++ printk(KERN_ERR"%s: SMP simultaneous entry of an interrupt handler.\n",
++ dev->name);
++ dev->interrupt = 0; /* Avoid halting machine. */
+ return;
+ }
+- dev->interrupt = 1;
++#endif
+
+ do {
+- status = inw(ioaddr + IntrStatus);
+- /* Acknowledge all of the current interrupt sources ASAP. */
++ int status = inw(ioaddr + IntrStatus);
++ /* Acknowledge all of the current interrupt sources ASAP, but
++ an first get an additional status bit from CSCR. */
++ if (status & RxUnderrun)
++ link_changed = inw(ioaddr+CSCR) & CSCR_LinkChangeBit;
+ outw(status, ioaddr + IntrStatus);
+
+- if (rtl8129_debug > 4)
++ if (tp->msg_level & NETIF_MSG_INTR)
+ printk(KERN_DEBUG"%s: interrupt status=%#4.4x new intstat=%#4.4x.\n",
+ dev->name, status, inw(ioaddr + IntrStatus));
+
+@@ -917,27 +1098,25 @@
+ rtl8129_rx(dev);
+
+ if (status & (TxOK | TxErr)) {
+- unsigned int dirty_tx;
++ unsigned int dirty_tx = tp->dirty_tx;
+
+- for (dirty_tx = tp->dirty_tx; dirty_tx < tp->cur_tx; dirty_tx++) {
++ while (tp->cur_tx - dirty_tx > 0) {
+ int entry = dirty_tx % NUM_TX_DESC;
+- int txstatus = inl(ioaddr + TxStat0 + entry*4);
++ int txstatus = inl(ioaddr + TxStatus0 + entry*4);
+
+- if ( ! (txstatus & TxHostOwns))
++ if ( ! (txstatus & (TxStatOK | TxUnderrun | TxAborted)))
+ break; /* It still hasn't been Txed */
+
+ /* Note: TxCarrierLost is always asserted at 100mbps. */
+ if (txstatus & (TxOutOfWindow | TxAborted)) {
+ /* There was an major error, log it. */
+-#ifndef final_version
+- if (rtl8129_debug > 1)
++ if (tp->msg_level & NETIF_MSG_TX_ERR)
+ printk(KERN_NOTICE"%s: Transmit error, Tx status %8.8x.\n",
+ dev->name, txstatus);
+-#endif
+ tp->stats.tx_errors++;
+ if (txstatus&TxAborted) {
+ tp->stats.tx_aborted_errors++;
+- outl((TX_DMA_BURST<<8)|0x03000001, ioaddr + TxConfig);
++ outl(TX_DMA_BURST << 8, ioaddr + TxConfig);
+ }
+ if (txstatus&TxCarrierLost) tp->stats.tx_carrier_errors++;
+ if (txstatus&TxOutOfWindow) tp->stats.tx_window_errors++;
+@@ -946,11 +1125,13 @@
+ tp->stats.collisions16++;
+ #endif
+ } else {
+-#ifdef ETHER_STATS
+- /* No count for tp->stats.tx_deferred */
+-#endif
++ if (tp->msg_level & NETIF_MSG_TX_DONE)
++ printk(KERN_DEBUG "%s: Transmit done, Tx status"
++ " %8.8x.\n", dev->name, txstatus);
+ if (txstatus & TxUnderrun) {
+- /* Todo: increase the Tx FIFO threshold. */
++ /* Add 64 to the Tx FIFO threshold. */
++ if (tp->tx_flag < 0x00300000)
++ tp->tx_flag += 0x00020000;
+ tp->stats.tx_fifo_errors++;
+ }
+ tp->stats.collisions += (txstatus >> 24) & 15;
+@@ -961,48 +1142,34 @@
+ }
+
+ /* Free the original skb. */
+- dev_kfree_skb(tp->tx_skbuff[entry], FREE_WRITE);
++ dev_free_skb_irq(tp->tx_skbuff[entry]);
+ tp->tx_skbuff[entry] = 0;
++ if (test_bit(0, &tp->tx_full)) {
++ /* The ring is no longer full, clear tbusy. */
++ clear_bit(0, &tp->tx_full);
++ netif_resume_tx_queue(dev);
++ }
++ dirty_tx++;
+ }
+
+ #ifndef final_version
+ if (tp->cur_tx - dirty_tx > NUM_TX_DESC) {
+ printk(KERN_ERR"%s: Out-of-sync dirty pointer, %d vs. %d, full=%d.\n",
+- dev->name, dirty_tx, tp->cur_tx, tp->tx_full);
++ dev->name, dirty_tx, tp->cur_tx, (int)tp->tx_full);
+ dirty_tx += NUM_TX_DESC;
+ }
+ #endif
+-
+- if (tp->tx_full && dev->tbusy
+- && dirty_tx > tp->cur_tx - NUM_TX_DESC) {
+- /* The ring is no longer full, clear tbusy. */
+- tp->tx_full = 0;
+- dev->tbusy = 0;
+- mark_bh(NET_BH);
+- }
+-
+ tp->dirty_tx = dirty_tx;
+ }
+
+ /* Check uncommon events with one test. */
+ if (status & (PCIErr|PCSTimeout |RxUnderrun|RxOverflow|RxFIFOOver
+ |TxErr|RxErr)) {
+- /* Update the error count. */
+- tp->stats.rx_missed_errors += inl(ioaddr + RxMissed);
+- outl(0, ioaddr + RxMissed);
+-
+- if (status & (RxUnderrun | RxOverflow | RxErr | RxFIFOOver))
+- tp->stats.rx_errors++;
+-
+- if (status & (PCSTimeout)) tp->stats.rx_length_errors++;
+- if (status & (RxUnderrun|RxFIFOOver)) tp->stats.rx_fifo_errors++;
+- if (status & RxOverflow) {
+- tp->stats.rx_over_errors++;
+- tp->cur_rx = inw(ioaddr + RxBufAddr) % RX_BUF_LEN;
+- outw(tp->cur_rx - 16, ioaddr + RxBufPtr);
+- }
+- /* Error sources cleared above. */
++ if (status == 0xffff) /* Missing chip! */
++ break;
++ rtl_error(dev, status, link_changed);
+ }
++
+ if (--boguscnt < 0) {
+ printk(KERN_WARNING"%s: Too much work at interrupt, "
+ "IntrStatus=0x%4.4x.\n",
+@@ -1013,55 +1180,65 @@
+ }
+ } while (1);
+
+- if (rtl8129_debug > 3)
++ if (tp->msg_level & NETIF_MSG_INTR)
+ printk(KERN_DEBUG"%s: exiting interrupt, intr_status=%#4.4x.\n",
+- dev->name, inl(ioaddr + IntrStatus));
++ dev->name, inw(ioaddr + IntrStatus));
+
+- dev->interrupt = 0;
+- clear_bit(0, (void*)&tp->in_interrupt);
++#if defined(__i386__) && LINUX_VERSION_CODE < 0x20123
++ clear_bit(0, (void*)&dev->interrupt);
++#endif
+ return;
+ }
+
+ /* The data sheet doesn't describe the Rx ring at all, so I'm guessing at the
+ field alignments and semantics. */
+-static int
+-rtl8129_rx(struct device *dev)
++static int rtl8129_rx(struct net_device *dev)
+ {
+ struct rtl8129_private *tp = (struct rtl8129_private *)dev->priv;
+- int ioaddr = dev->base_addr;
++ long ioaddr = dev->base_addr;
+ unsigned char *rx_ring = tp->rx_ring;
+ u16 cur_rx = tp->cur_rx;
+
+- if (rtl8129_debug > 4)
++ if (tp->msg_level & NETIF_MSG_RX_STATUS)
+ printk(KERN_DEBUG"%s: In rtl8129_rx(), current %4.4x BufAddr %4.4x,"
+ " free to %4.4x, Cmd %2.2x.\n",
+ dev->name, cur_rx, inw(ioaddr + RxBufAddr),
+ inw(ioaddr + RxBufPtr), inb(ioaddr + ChipCmd));
+
+- while ((inb(ioaddr + ChipCmd) & 1) == 0) {
+- int ring_offset = cur_rx % RX_BUF_LEN;
+- u32 rx_status = *(u32*)(rx_ring + ring_offset);
+- int rx_size = rx_status >> 16;
++ while ((inb(ioaddr + ChipCmd) & RxBufEmpty) == 0) {
++ int ring_offset = cur_rx % tp->rx_buf_len;
++ u32 rx_status = le32_to_cpu(*(u32*)(rx_ring + ring_offset));
++ int rx_size = rx_status >> 16; /* Includes the CRC. */
+
+- if (rtl8129_debug > 4) {
++ if (tp->msg_level & NETIF_MSG_RX_STATUS) {
+ int i;
+- printk(KERN_DEBUG"%s: rtl8129_rx() status %4.4x, size %4.4x, cur %4.4x.\n",
++ printk(KERN_DEBUG"%s: rtl8129_rx() status %4.4x, size %4.4x,"
++ " cur %4.4x.\n",
+ dev->name, rx_status, rx_size, cur_rx);
+ printk(KERN_DEBUG"%s: Frame contents ", dev->name);
+ for (i = 0; i < 70; i++)
+ printk(" %2.2x", rx_ring[ring_offset + i]);
+ printk(".\n");
+ }
+- if (rx_status & RxTooLong) {
+- if (rtl8129_debug > 0)
+- printk(KERN_NOTICE"%s: Oversized Ethernet frame, status %4.4x!\n",
+- dev->name, rx_status);
+- tp->stats.rx_length_errors++;
+- } else if (rx_status &
+- (RxBadSymbol|RxRunt|RxTooLong|RxCRCErr|RxBadAlign)) {
+- if (rtl8129_debug > 1)
++ if (rx_status & (RxBadSymbol|RxRunt|RxTooLong|RxCRCErr|RxBadAlign)) {
++ if (tp->msg_level & NETIF_MSG_RX_ERR)
+ printk(KERN_DEBUG"%s: Ethernet frame had errors,"
+- " status %4.4x.\n", dev->name, rx_status);
++ " status %8.8x.\n", dev->name, rx_status);
++ if (rx_status == 0xffffffff) {
++ printk(KERN_NOTICE"%s: Invalid receive status at ring "
++ "offset %4.4x\n", dev->name, ring_offset);
++ rx_status = 0;
++ }
++ if (rx_status & RxTooLong) {
++ if (tp->msg_level & NETIF_MSG_DRV)
++ printk(KERN_NOTICE"%s: Oversized Ethernet frame, status"
++ " %4.4x!\n",
++ dev->name, rx_status);
++ /* A.C.: The chip hangs here.
++ This should never occur, which means that we are screwed
++ when it does.
++ */
++ }
+ tp->stats.rx_errors++;
+ if (rx_status & (RxBadSymbol|RxBadAlign))
+ tp->stats.rx_frame_errors++;
+@@ -1070,15 +1247,18 @@
+ /* Reset the receiver, based on RealTek recommendation. (Bug?) */
+ tp->cur_rx = 0;
+ outb(CmdTxEnb, ioaddr + ChipCmd);
++ /* A.C.: Reset the multicast list. */
++ set_rx_mode(dev);
+ outb(CmdRxEnb | CmdTxEnb, ioaddr + ChipCmd);
+- outl((RX_FIFO_THRESH << 13) | (RX_BUF_LEN_IDX << 11) |
+- (RX_DMA_BURST<<8), ioaddr + RxConfig);
+ } else {
+ /* Malloc up new buffer, compatible with net-2e. */
+ /* Omit the four octet CRC from the length. */
+ struct sk_buff *skb;
++ int pkt_size = rx_size - 4;
+
+- skb = dev_alloc_skb(rx_size + 2);
++ /* Allocate a common-sized skbuff if we are close. */
++ skb = dev_alloc_skb(1400 < pkt_size && pkt_size < PKT_BUF_SZ-2 ?
++ PKT_BUF_SZ : pkt_size + 2);
+ if (skb == NULL) {
+ printk(KERN_WARNING"%s: Memory squeeze, deferring packet.\n",
+ dev->name);
+@@ -1089,13 +1269,14 @@
+ }
+ skb->dev = dev;
+ skb_reserve(skb, 2); /* 16 byte align the IP fields. */
+- if (ring_offset+rx_size+4 > RX_BUF_LEN) {
+- int semi_count = RX_BUF_LEN - ring_offset - 4;
++ if (ring_offset + rx_size > tp->rx_buf_len) {
++ int semi_count = tp->rx_buf_len - ring_offset - 4;
++ /* This could presumably use two calls to copy_and_sum()? */
+ memcpy(skb_put(skb, semi_count), &rx_ring[ring_offset + 4],
+ semi_count);
+- memcpy(skb_put(skb, rx_size-semi_count), rx_ring,
+- rx_size-semi_count);
+- if (rtl8129_debug > 4) {
++ memcpy(skb_put(skb, pkt_size-semi_count), rx_ring,
++ pkt_size-semi_count);
++ if (tp->msg_level & NETIF_MSG_PKTDATA) {
+ int i;
+ printk(KERN_DEBUG"%s: Frame wrap @%d",
+ dev->name, semi_count);
+@@ -1104,22 +1285,23 @@
+ printk(".\n");
+ memset(rx_ring, 0xcc, 16);
+ }
+- } else
+- memcpy(skb_put(skb, rx_size), &rx_ring[ring_offset + 4],
+- rx_size);
++ } else {
++ eth_copy_and_sum(skb, &rx_ring[ring_offset + 4],
++ pkt_size, 0);
++ skb_put(skb, pkt_size);
++ }
+ skb->protocol = eth_type_trans(skb, dev);
+ netif_rx(skb);
+ #if LINUX_VERSION_CODE > 0x20119
+- tp->stats.rx_bytes += rx_size;
++ tp->stats.rx_bytes += pkt_size;
+ #endif
+ tp->stats.rx_packets++;
+ }
+
+- cur_rx += rx_size + 4;
+- cur_rx = (cur_rx + 3) & ~3;
++ cur_rx = (cur_rx + rx_size + 4 + 3) & ~3;
+ outw(cur_rx - 16, ioaddr + RxBufPtr);
+ }
+- if (rtl8129_debug > 4)
++ if (tp->msg_level & NETIF_MSG_RX_STATUS)
+ printk(KERN_DEBUG"%s: Done rtl8129_rx(), current %4.4x BufAddr %4.4x,"
+ " free to %4.4x, Cmd %2.2x.\n",
+ dev->name, cur_rx, inw(ioaddr + RxBufAddr),
+@@ -1128,17 +1310,77 @@
+ return 0;
+ }
+
++/* Error and abnormal or uncommon events handlers. */
++static void rtl_error(struct net_device *dev, int status, int link_changed)
++{
++ struct rtl8129_private *tp = (struct rtl8129_private *)dev->priv;
++ long ioaddr = dev->base_addr;
++
++ if (tp->msg_level & NETIF_MSG_LINK)
++ printk(KERN_NOTICE"%s: Abnormal interrupt, status %8.8x.\n",
++ dev->name, status);
++
++ /* Update the error count. */
++ tp->stats.rx_missed_errors += inl(ioaddr + RxMissed);
++ outl(0, ioaddr + RxMissed);
++
++ if (status & RxUnderrun){
++ /* This might actually be a link change event. */
++ if ((tp->drv_flags & HAS_LNK_CHNG) && link_changed) {
++ /* Really link-change on new chips. */
++ int lpar = inw(ioaddr + NWayLPAR);
++ int duplex = (lpar&0x0100) || (lpar & 0x01C0) == 0x0040
++ || tp->duplex_lock;
++ /* Do not use MII_BMSR as that clears sticky bit. */
++ if (inw(ioaddr + GPPinData) & 0x0004) {
++ netif_link_down(dev);
++ } else
++ netif_link_up(dev);
++ if (tp->msg_level & NETIF_MSG_LINK)
++ printk(KERN_DEBUG "%s: Link changed, link partner "
++ "%4.4x new duplex %d.\n",
++ dev->name, lpar, duplex);
++ tp->full_duplex = duplex;
++ /* Only count as errors with no link change. */
++ status &= ~RxUnderrun;
++ } else {
++ /* If this does not work, we will do rtl_hw_start(dev); */
++ outb(CmdTxEnb, ioaddr + ChipCmd);
++ set_rx_mode(dev); /* Reset the multicast list. */
++ outb(CmdRxEnb | CmdTxEnb, ioaddr + ChipCmd);
++
++ tp->stats.rx_errors++;
++ tp->stats.rx_fifo_errors++;
++ }
++ }
++
++ if (status & (RxOverflow | RxErr | RxFIFOOver)) tp->stats.rx_errors++;
++ if (status & (PCSTimeout)) tp->stats.rx_length_errors++;
++ if (status & RxFIFOOver) tp->stats.rx_fifo_errors++;
++ if (status & RxOverflow) {
++ tp->stats.rx_over_errors++;
++ tp->cur_rx = inw(ioaddr + RxBufAddr) % tp->rx_buf_len;
++ outw(tp->cur_rx - 16, ioaddr + RxBufPtr);
++ }
++ if (status & PCIErr) {
++ u32 pci_cmd_status;
++ pci_read_config_dword(tp->pci_dev, PCI_COMMAND, &pci_cmd_status);
++
++ printk(KERN_ERR "%s: PCI Bus error %4.4x.\n",
++ dev->name, pci_cmd_status);
++ }
++}
++
+ static int
+-rtl8129_close(struct device *dev)
++rtl8129_close(struct net_device *dev)
+ {
+- int ioaddr = dev->base_addr;
++ long ioaddr = dev->base_addr;
+ struct rtl8129_private *tp = (struct rtl8129_private *)dev->priv;
+ int i;
+
+- dev->start = 0;
+- dev->tbusy = 1;
++ netif_stop_tx_queue(dev);
+
+- if (rtl8129_debug > 1)
++ if (tp->msg_level & NETIF_MSG_IFDOWN)
+ printk(KERN_DEBUG"%s: Shutting down ethercard, status was 0x%4.4x.\n",
+ dev->name, inw(ioaddr + IntrStatus));
+
+@@ -1158,15 +1400,15 @@
+
+ for (i = 0; i < NUM_TX_DESC; i++) {
+ if (tp->tx_skbuff[i])
+- dev_kfree_skb(tp->tx_skbuff[i], FREE_WRITE);
++ dev_free_skb(tp->tx_skbuff[i]);
+ tp->tx_skbuff[i] = 0;
+ }
+ kfree(tp->rx_ring);
+- kfree(tp->tx_bufs);
++ tp->rx_ring = 0;
+
+ /* Green! Put the chip in low-power mode. */
+ outb(0xC0, ioaddr + Cfg9346);
+- outb(0x03, ioaddr + Config1);
++ outb(tp->config1 | 0x03, ioaddr + Config1);
+ outb('H', ioaddr + HltClk); /* 'R' would leave the clock running. */
+
+ MOD_DEC_USE_COUNT;
+@@ -1174,13 +1416,69 @@
+ return 0;
+ }
+
+-static struct enet_statistics *
+-rtl8129_get_stats(struct device *dev)
++/*
++ Handle user-level ioctl() calls.
++ We must use two numeric constants as the key because some clueless person
++ changed value for the symbolic name.
++*/
++static int mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
++{
++ struct rtl8129_private *np = (struct rtl8129_private *)dev->priv;
++ u16 *data = (u16 *)&rq->ifr_data;
++ u32 *data32 = (void *)&rq->ifr_data;
++
++ switch(cmd) {
++ case 0x8947: case 0x89F0:
++ /* SIOCGMIIPHY: Get the address of the PHY in use. */
++ data[0] = np->phys[0] & 0x3f;
++ /* Fall Through */
++ case 0x8948: case 0x89F1:
++ /* SIOCGMIIREG: Read the specified MII register. */
++ data[3] = mdio_read(dev, data[0], data[1] & 0x1f);
++ return 0;
++ case 0x8949: case 0x89F2:
++ /* SIOCSMIIREG: Write the specified MII register */
++ if (!capable(CAP_NET_ADMIN))
++ return -EPERM;
++ if (data[0] == np->phys[0]) {
++ u16 value = data[2];
++ switch (data[1]) {
++ case 0:
++ /* Check for autonegotiation on or reset. */
++ np->medialock = (value & 0x9000) ? 0 : 1;
++ if (np->medialock)
++ np->full_duplex = (value & 0x0100) ? 1 : 0;
++ break;
++ case 4: np->advertising = value; break;
++ }
++ }
++ mdio_write(dev, data[0], data[1] & 0x1f, data[2]);
++ return 0;
++ case SIOCGPARAMS:
++ data32[0] = np->msg_level;
++ data32[1] = np->multicast_filter_limit;
++ data32[2] = np->max_interrupt_work;
++ data32[3] = 0; /* No rx_copybreak, always copy. */
++ return 0;
++ case SIOCSPARAMS:
++ if (!capable(CAP_NET_ADMIN))
++ return -EPERM;
++ np->msg_level = data32[0];
++ np->multicast_filter_limit = data32[1];
++ np->max_interrupt_work = data32[2];
++ return 0;
++ default:
++ return -EOPNOTSUPP;
++ }
++}
++
++static struct net_device_stats *
++rtl8129_get_stats(struct net_device *dev)
+ {
+ struct rtl8129_private *tp = (struct rtl8129_private *)dev->priv;
+- int ioaddr = dev->base_addr;
++ long ioaddr = dev->base_addr;
+
+- if (dev->start) {
++ if (netif_running(dev)) {
+ tp->stats.rx_missed_errors += inl(ioaddr + RxMissed);
+ outl(0, ioaddr + RxMissed);
+ }
+@@ -1189,100 +1487,236 @@
+ }
+
+ /* Set or clear the multicast filter for this adaptor.
+- Note that we only use exclusion around actually queueing the
+- new frame, not around filling tp->setup_frame. This is non-deterministic
+- when re-entered but still correct. */
+-
+-/* The little-endian AUTODIN II ethernet CRC calculation.
+- N.B. Do not use for bulk data, use a table-based routine instead.
+- This is common code and should be moved to net/core/crc.c */
+-static unsigned const ethernet_polynomial_le = 0xedb88320U;
+-static inline unsigned ether_crc_le(int length, unsigned char *data)
++ This routine is not state sensitive and need not be SMP locked. */
++
++static unsigned const ethernet_polynomial = 0x04c11db7U;
++static inline u32 ether_crc(int length, unsigned char *data)
+ {
+- unsigned int crc = 0xffffffff; /* Initial value. */
+- while(--length >= 0) {
++ int crc = -1;
++
++ while (--length >= 0) {
+ unsigned char current_octet = *data++;
+ int bit;
+- for (bit = 8; --bit >= 0; current_octet >>= 1) {
+- if ((crc ^ current_octet) & 1) {
+- crc >>= 1;
+- crc ^= ethernet_polynomial_le;
+- } else
+- crc >>= 1;
+- }
++ for (bit = 0; bit < 8; bit++, current_octet >>= 1)
++ crc = (crc << 1) ^
++ ((crc < 0) ^ (current_octet & 1) ? ethernet_polynomial : 0);
+ }
+ return crc;
+ }
+
+-static void set_rx_mode(struct device *dev)
++/* Bits in RxConfig. */
++enum rx_mode_bits {
++ AcceptErr=0x20, AcceptRunt=0x10, AcceptBroadcast=0x08,
++ AcceptMulticast=0x04, AcceptMyPhys=0x02, AcceptAllPhys=0x01,
++};
++
++static void set_rx_mode(struct net_device *dev)
+ {
+- int ioaddr = dev->base_addr;
+ struct rtl8129_private *tp = (struct rtl8129_private *)dev->priv;
+- unsigned char mc_filter[8]; /* Multicast hash filter */
+- int i;
++ long ioaddr = dev->base_addr;
++ u32 mc_filter[2]; /* Multicast hash filter */
++ int i, rx_mode;
+
+- if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
++ if (tp->msg_level & NETIF_MSG_RXFILTER)
++ printk(KERN_DEBUG"%s: set_rx_mode(%4.4x) done -- Rx config %8.8x.\n",
++ dev->name, dev->flags, (int)inl(ioaddr + RxConfig));
++
++ /* Note: do not reorder, GCC is clever about common statements. */
++ if (dev->flags & IFF_PROMISC) {
+ /* Unconditionally log net taps. */
+ printk(KERN_NOTICE"%s: Promiscuous mode enabled.\n", dev->name);
+- memset(mc_filter, 0xff, sizeof(mc_filter));
+- outb(0x0F, ioaddr + RxConfig);
+- } else if ((dev->mc_count > 1000) || (dev->flags & IFF_ALLMULTI)) {
++ rx_mode = AcceptBroadcast|AcceptMulticast|AcceptMyPhys|AcceptAllPhys;
++ mc_filter[1] = mc_filter[0] = 0xffffffff;
++ } else if ((dev->mc_count > tp->multicast_filter_limit)
++ || (dev->flags & IFF_ALLMULTI)) {
+ /* Too many to filter perfectly -- accept all multicasts. */
+- memset(mc_filter, 0xff, sizeof(mc_filter));
+- outb(0x0E, ioaddr + RxConfig);
+- } else if (dev->mc_count == 0) {
+- outb(0x0A, ioaddr + RxConfig);
+- return;
++ rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
++ mc_filter[1] = mc_filter[0] = 0xffffffff;
+ } else {
+ struct dev_mc_list *mclist;
+-
+- memset(mc_filter, 0, sizeof(mc_filter));
++ rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
++ mc_filter[1] = mc_filter[0] = 0;
+ for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
+ i++, mclist = mclist->next)
+- set_bit(ether_crc_le(ETH_ALEN, mclist->dmi_addr) & 0x3f,
+- mc_filter);
+- }
+- /* ToDo: perhaps we need to stop the Tx and Rx process here? */
+- if (memcmp(mc_filter, tp->mc_filter, sizeof(mc_filter))) {
+- for (i = 0; i < 2; i++)
+- outl(((u32 *)mc_filter)[i], ioaddr + MAR0 + i*4);
+- memcpy(tp->mc_filter, mc_filter, sizeof(mc_filter));
++ set_bit(ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26, mc_filter);
+ }
+- if (rtl8129_debug > 3)
+- printk(KERN_DEBUG"%s: set_rx_mode(%4.4x) done -- Rx config %8.8x.\n",
+- dev->name, dev->flags, inl(ioaddr + RxConfig));
++ /* We can safely update without stopping the chip. */
++ outl(tp->rx_config | rx_mode, ioaddr + RxConfig);
++ tp->mc_filter[0] = mc_filter[0];
++ tp->mc_filter[1] = mc_filter[1];
++ outl(mc_filter[0], ioaddr + MAR0 + 0);
++ outl(mc_filter[1], ioaddr + MAR0 + 4);
+ return;
+ }
+
+-#ifdef MODULE
+
+-/* An additional parameter that may be passed in... */
+-static int debug = -1;
++static int rtl_pwr_event(void *dev_instance, int event)
++{
++ struct net_device *dev = dev_instance;
++ struct rtl8129_private *np = (struct rtl8129_private *)dev->priv;
++ long ioaddr = dev->base_addr;
++
++ if (np->msg_level & NETIF_MSG_LINK)
++ printk("%s: Handling power event %d.\n", dev->name, event);
++ switch(event) {
++ case DRV_ATTACH:
++ MOD_INC_USE_COUNT;
++ break;
++ case DRV_SUSPEND:
++ netif_device_detach(dev);
++ /* Disable interrupts, stop Tx and Rx. */
++ outw(0x0000, ioaddr + IntrMask);
++ outb(0x00, ioaddr + ChipCmd);
++ /* Update the error counts. */
++ np->stats.rx_missed_errors += inl(ioaddr + RxMissed);
++ outl(0, ioaddr + RxMissed);
++ break;
++ case DRV_RESUME:
++ netif_device_attach(dev);
++ rtl_hw_start(dev);
++ break;
++ case DRV_DETACH: {
++ struct net_device **devp, **next;
++ if (dev->flags & IFF_UP) {
++ dev_close(dev);
++ dev->flags &= ~(IFF_UP|IFF_RUNNING);
++ }
++ unregister_netdev(dev);
++ release_region(dev->base_addr, pci_tbl[np->chip_id].io_size);
++#ifndef USE_IO_OPS
++ iounmap((char *)dev->base_addr);
++#endif
++ for (devp = &root_rtl8129_dev; *devp; devp = next) {
++ next = &((struct rtl8129_private *)(*devp)->priv)->next_module;
++ if (*devp == dev) {
++ *devp = *next;
++ break;
++ }
++ }
++ if (np->priv_addr)
++ kfree(np->priv_addr);
++ kfree(dev);
++ MOD_DEC_USE_COUNT;
++ break;
++ }
++ }
++
++ return 0;
++}
++
++#ifdef CARDBUS
++
++#include <pcmcia/driver_ops.h>
+
+-int
+-init_module(void)
++static dev_node_t *rtl8139_attach(dev_locator_t *loc)
+ {
+- int cards_found;
++ struct net_device *dev;
++ u16 dev_id;
++ u32 pciaddr;
++ u8 bus, devfn, irq;
++ long hostaddr;
++ /* Note: the chip index should match the 8139B pci_tbl[] entry. */
++ int chip_idx = 2;
++
++ if (loc->bus != LOC_PCI) return NULL;
++ bus = loc->b.pci.bus; devfn = loc->b.pci.devfn;
++ printk(KERN_DEBUG "rtl8139_attach(bus %d, function %d)\n", bus, devfn);
++#ifdef USE_IO_OPS
++ pcibios_read_config_dword(bus, devfn, PCI_BASE_ADDRESS_0, &pciaddr);
++ hostaddr = pciaddr & PCI_BASE_ADDRESS_IO_MASK;
++#else
++ pcibios_read_config_dword(bus, devfn, PCI_BASE_ADDRESS_1, &pciaddr);
++ hostaddr = (long)ioremap(pciaddr & PCI_BASE_ADDRESS_MEM_MASK,
++ pci_tbl[chip_idx].io_size);
++#endif
++ pcibios_read_config_byte(bus, devfn, PCI_INTERRUPT_LINE, &irq);
++ pcibios_read_config_word(bus, devfn, PCI_DEVICE_ID, &dev_id);
++ if (hostaddr == 0 || irq == 0) {
++ printk(KERN_ERR "The %s interface at %d/%d was not assigned an %s.\n"
++ KERN_ERR " It will not be activated.\n",
++ pci_tbl[chip_idx].name, bus, devfn,
++ hostaddr == 0 ? "address" : "IRQ");
++ return NULL;
++ }
++ dev = rtl8139_probe1(pci_find_slot(bus, devfn), NULL,
++ hostaddr, irq, chip_idx, 0);
++ if (dev) {
++ dev_node_t *node = kmalloc(sizeof(dev_node_t), GFP_KERNEL);
++ strcpy(node->dev_name, dev->name);
++ node->major = node->minor = 0;
++ node->next = NULL;
++ MOD_INC_USE_COUNT;
++ return node;
++ }
++ return NULL;
++}
+
+- if (debug >= 0)
+- rtl8129_debug = debug;
++static void rtl8139_detach(dev_node_t *node)
++{
++ struct net_device **devp, **next;
++ printk(KERN_INFO "rtl8139_detach(%s)\n", node->dev_name);
++ for (devp = &root_rtl8129_dev; *devp; devp = next) {
++ next = &((struct rtl8129_private *)(*devp)->priv)->next_module;
++ if (strcmp((*devp)->name, node->dev_name) == 0) break;
++ }
++ if (*devp) {
++ struct rtl8129_private *np =
++ (struct rtl8129_private *)(*devp)->priv;
++ unregister_netdev(*devp);
++ release_region((*devp)->base_addr, pci_tbl[np->chip_id].io_size);
++#ifndef USE_IO_OPS
++ iounmap((char *)(*devp)->base_addr);
++#endif
++ kfree(*devp);
++ if (np->priv_addr)
++ kfree(np->priv_addr);
++ *devp = *next;
++ kfree(node);
++ MOD_DEC_USE_COUNT;
++ }
++}
+
+- root_rtl8129_dev = NULL;
+- cards_found = rtl8139_probe(0);
++struct driver_operations realtek_ops = {
++ "realtek_cb",
++ rtl8139_attach, /*rtl8139_suspend*/0, /*rtl8139_resume*/0, rtl8139_detach
++};
+
+- return cards_found ? 0 : -ENODEV;
++#endif /* Cardbus support */
++
++#ifdef MODULE
++int init_module(void)
++{
++ if (debug >= NETIF_MSG_DRV) /* Emit version even if no cards detected. */
++ printk(KERN_INFO "%s" KERN_INFO "%s", versionA, versionB);
++#ifdef CARDBUS
++ register_driver(&realtek_ops);
++ return 0;
++#else
++ return pci_drv_register(&rtl8139_drv_id, NULL);
++#endif
+ }
+
+-void
+-cleanup_module(void)
++void cleanup_module(void)
+ {
+- struct device *next_dev;
++ struct net_device *next_dev;
++
++#ifdef CARDBUS
++ unregister_driver(&realtek_ops);
++#else
++ pci_drv_unregister(&rtl8139_drv_id);
++#endif
+
+- /* No need to check MOD_IN_USE, as sys_delete_module() checks. */
+ while (root_rtl8129_dev) {
+- next_dev = ((struct rtl8129_private *)root_rtl8129_dev->priv)->next_module;
++ struct rtl8129_private *np = (void *)(root_rtl8129_dev->priv);
+ unregister_netdev(root_rtl8129_dev);
+- release_region(root_rtl8129_dev->base_addr, RTL8129_TOTAL_SIZE);
++ release_region(root_rtl8129_dev->base_addr,
++ pci_tbl[np->chip_id].io_size);
++#ifndef USE_IO_OPS
++ iounmap((char *)(root_rtl8129_dev->base_addr));
++#endif
++ next_dev = np->next_module;
++ if (np->priv_addr)
++ kfree(np->priv_addr);
+ kfree(root_rtl8129_dev);
+ root_rtl8129_dev = next_dev;
+ }
+@@ -1292,8 +1726,9 @@
+
+ /*
+ * Local variables:
+- * compile-command: "gcc -DMODULE -D__KERNEL__ -Wall -Wstrict-prototypes -O6 -c rtl8139.c `[ -f /usr/include/linux/modversions.h ] && echo -DMODVERSIONS`"
+- * SMP-compile-command: "gcc -D__SMP__ -DMODULE -D__KERNEL__ -Wall -Wstrict-prototypes -O6 -c rtl8139.c `[ -f /usr/include/linux/modversions.h ] && echo -DMODVERSIONS`"
++ * compile-command: "make KERNVER=`uname -r` rtl8139.o"
++ * compile-cmd: "gcc -DMODULE -Wall -Wstrict-prototypes -O6 -c rtl8139.c"
++ * cardbus-compile-command: "gcc -DCARDBUS -DMODULE -Wall -Wstrict-prototypes -O6 -c rtl8139.c -o realtek_cb.o -I/usr/src/pcmcia/include/"
+ * c-indent-level: 4
+ * c-basic-offset: 4
+ * tab-width: 4
+Index: linux/src/drivers/net/starfire.c
+===================================================================
+RCS file: linux/src/drivers/net/starfire.c
+diff -N linux/src/drivers/net/starfire.c
+--- /dev/null 1 Jan 1970 00:00:00 -0000
++++ linux/src/drivers/net/starfire.c 20 Aug 2004 10:32:54 -0000
+@@ -0,0 +1,1535 @@
++/* starfire.c: Linux device driver for the Adaptec Starfire network adapter. */
++/*
++ Written/Copyright 1998-2003 by Donald Becker.
++
++ This software may be used and distributed according to the terms of
++ the GNU General Public License (GPL), incorporated herein by reference.
++ Drivers based on or derived from this code fall under the GPL and must
++ retain the authorship, copyright and license notice. This file is not
++ a complete program and may only be used when the entire operating
++ system is licensed under the GPL.
++
++ The author may be reached as becker@scyld.com, or C/O
++ Scyld Computing Corporation
++ 914 Bay Ridge Road, Suite 220
++ Annapolis MD 21403
++
++ Support information and updates available at
++ http://www.scyld.com/network/starfire.html
++*/
++
++/* These identify the driver base version and may not be removed. */
++static const char version1[] =
++"starfire.c:v1.09 7/22/2003 Copyright by Donald Becker <becker@scyld.com>\n";
++static const char version2[] =
++" Updates and info at http://www.scyld.com/network/starfire.html\n";
++
++/* The user-configurable values.
++ These may be modified when a driver module is loaded.*/
++
++/* Used for tuning interrupt latency vs. overhead. */
++static int interrupt_mitigation = 0x0;
++
++/* Message enable level: 0..31 = no..all messages. See NETIF_MSG docs. */
++static int debug = 2;
++
++/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
++static int max_interrupt_work = 20;
++
++/* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
++ The Starfire has a 512 element hash table based on the Ethernet CRC. */
++static int multicast_filter_limit = 32;
++
++/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
++ Setting to > 1518 effectively disables this feature. */
++static int rx_copybreak = 0;
++
++/* Used to pass the media type, etc.
++ Both 'options[]' and 'full_duplex[]' exist for driver interoperability,
++ however full_duplex[] should never be used in new configurations.
++ The media type is usually passed in 'options[]'.
++ The default is autonegotation for speed and duplex.
++ This should rarely be overridden.
++ Use option values 0x10/0x20 for 10Mbps, 0x100,0x200 for 100Mbps.
++ Use option values 0x10 and 0x100 for forcing half duplex fixed speed.
++ Use option values 0x20 and 0x200 for forcing full duplex operation.
++*/
++#define MAX_UNITS 8 /* More are supported, limit only on options */
++static int options[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
++static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
++
++/* Automatically extracted configuration info:
++probe-func: starfire_probe
++config-in: tristate 'Adaptec DuraLAN ("starfire") series PCI Ethernet support' CONFIG_DURLAN
++
++c-help-name: Adaptec DuraLAN ("starfire") series PCI Ethernet support
++c-help-symbol: CONFIG_DURALAN
++c-help: This driver is for the Adaptec DuraLAN series, the 6915, 62022
++c-help: and 62044 boards.
++c-help: Design information, usage details and updates are available from
++c-help: http://www.scyld.com/network/starfire.html
++*/
++
++/* Operational parameters that are set at compile time. */
++
++/* The "native" ring sizes are either 256 or 2048.
++ However in some modes a descriptor may be marked to wrap the ring earlier.
++ The driver allocates a single page for each descriptor ring, constraining
++ the maximum size in an architecture-dependent way.
++*/
++#define RX_RING_SIZE 256
++#define TX_RING_SIZE 32
++/* The completion queues are fixed at 1024 entries i.e. 4K or 8KB. */
++#define DONE_Q_SIZE 1024
++
++/* Operational parameters that usually are not changed. */
++/* Time in jiffies before concluding the transmitter is hung. */
++#define TX_TIMEOUT (6*HZ)
++
++/* Allocation size of Rx buffers with normal sized Ethernet frames.
++ Do not change this value without good reason. This is not a limit,
++ but a way to keep a consistent allocation size among drivers.
++ */
++#define PKT_BUF_SZ 1536
++
++#ifndef __KERNEL__
++#define __KERNEL__
++#endif
++#if !defined(__OPTIMIZE__)
++#warning You must compile this file with the correct options!
++#warning See the last lines of the source file.
++#error You must compile this driver with "-O".
++#endif
++
++/* Include files, designed to support most kernel versions 2.0.0 and later. */
++#include <linux/config.h>
++#if defined(CONFIG_SMP) && ! defined(__SMP__)
++#define __SMP__
++#endif
++#if defined(MODULE) && defined(CONFIG_MODVERSIONS) && ! defined(MODVERSIONS)
++#define MODVERSIONS
++#endif
++
++#include <linux/version.h>
++#if defined(MODVERSIONS)
++#include <linux/modversions.h>
++#endif
++#include <linux/module.h>
++
++#include <linux/kernel.h>
++#include <linux/string.h>
++#include <linux/timer.h>
++#include <linux/errno.h>
++#include <linux/ioport.h>
++#if LINUX_VERSION_CODE >= 0x20400
++#include <linux/slab.h>
++#else
++#include <linux/malloc.h>
++#endif
++#include <linux/interrupt.h>
++#include <linux/pci.h>
++#include <linux/netdevice.h>
++#include <linux/etherdevice.h>
++#include <linux/skbuff.h>
++#include <asm/processor.h> /* Processor type for cache alignment. */
++#include <asm/bitops.h>
++#include <asm/io.h>
++
++#ifdef INLINE_PCISCAN
++#include "k_compat.h"
++#else
++#include "pci-scan.h"
++#include "kern_compat.h"
++#endif
++
++/* Condensed operations for readability.
++ Compatibility defines are in kern_compat.h */
++
++#define virt_to_le32desc(addr) cpu_to_le32(virt_to_bus(addr))
++#define le32desc_to_virt(addr) bus_to_virt(le32_to_cpu(addr))
++
++#if (LINUX_VERSION_CODE >= 0x20100) && defined(MODULE)
++char kernel_version[] = UTS_RELEASE;
++#endif
++
++MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
++MODULE_DESCRIPTION("Adaptec Starfire Ethernet driver");
++MODULE_LICENSE("GPL");
++MODULE_PARM(debug, "i");
++MODULE_PARM(options, "1-" __MODULE_STRING(MAX_UNITS) "i");
++MODULE_PARM(rx_copybreak, "i");
++MODULE_PARM(full_duplex, "1-" __MODULE_STRING(MAX_UNITS) "i");
++MODULE_PARM(multicast_filter_limit, "i");
++MODULE_PARM(max_interrupt_work, "i");
++MODULE_PARM_DESC(debug, "Driver message enable level (0-31)");
++MODULE_PARM_DESC(options, "Force transceiver type or fixed speed+duplex");
++MODULE_PARM_DESC(max_interrupt_work,
++ "Driver maximum events handled per interrupt");
++MODULE_PARM_DESC(full_duplex,
++ "Non-zero to set forced full duplex (deprecated).");
++MODULE_PARM_DESC(rx_copybreak,
++ "Breakpoint in bytes for copy-only-tiny-frames");
++MODULE_PARM_DESC(multicast_filter_limit,
++ "Multicast addresses before switching to Rx-all-multicast");
++
++/*
++ Theory of Operation
++
++I. Board Compatibility
++
++This driver is for the Adaptec 6915 DuraLAN "Starfire" 64 bit PCI Ethernet
++adapter, and the multiport boards using the same chip.
++
++II. Board-specific settings
++
++III. Driver operation
++
++IIIa. Ring buffers
++
++The Starfire hardware uses multiple fixed-size descriptor queues/rings. The
++ring sizes are set fixed by the hardware, but may optionally be wrapped
++earlier by the END bit in the descriptor.
++This driver uses that hardware queue size for the Rx ring, where a large
++number of entries has no ill effect beyond increases the potential backlog.
++The Tx ring is wrapped with the END bit, since a large hardware Tx queue
++disables the queue layer priority ordering and we have no mechanism to
++utilize the hardware two-level priority queue. When modifying the
++RX/TX_RING_SIZE pay close attention to page sizes and the ring-empty warning
++levels.
++
++IIIb/c. Transmit/Receive Structure
++
++See the Adaptec manual for the many possible structures, and options for
++each structure. There are far too many to document here.
++
++For transmit this driver uses type 1 transmit descriptors, and relies on
++automatic minimum-length padding. It does not use the completion queue
++consumer index, but instead checks for non-zero status entries.
++
++For receive this driver uses type 0 receive descriptors. The driver
++allocates full frame size skbuffs for the Rx ring buffers, so all frames
++should fit in a single descriptor. The driver does not use the completion
++queue consumer index, but instead checks for non-zero status entries.
++
++When an incoming frame is less than RX_COPYBREAK bytes long, a fresh skbuff
++is allocated and the frame is copied to the new skbuff. When the incoming
++frame is larger, the skbuff is passed directly up the protocol stack.
++Buffers consumed this way are replaced by newly allocated skbuffs in a later
++phase of receive.
++
++A notable aspect of operation is that unaligned buffers are not permitted by
++the Starfire hardware. The IP header at offset 14 in an ethernet frame thus
++isn't longword aligned, which may cause problems on some machine
++e.g. Alphas. Copied frames are put into the skbuff at an offset of "+2",
++16-byte aligning the IP header.
++
++IIId. Synchronization
++
++The driver runs as two independent, single-threaded flows of control. One
++is the send-packet routine, which enforces single-threaded use by the
++dev->tbusy flag. The other thread is the interrupt handler, which is single
++threaded by the hardware and interrupt handling software.
++
++The send packet thread has partial control over the Tx ring and 'dev->tbusy'
++flag. It sets the tbusy flag whenever it's queuing a Tx packet. If the next
++queue slot is empty, it clears the tbusy flag when finished otherwise it sets
++the 'lp->tx_full' flag.
++
++The interrupt handler has exclusive control over the Rx ring and records stats
++from the Tx ring. After reaping the stats, it marks the Tx queue entry as
++empty by incrementing the dirty_tx mark. Iff the 'lp->tx_full' flag is set, it
++clears both the tx_full and tbusy flags.
++
++IV. Notes
++
++IVb. References
++
++The Adaptec Starfire manuals, available only from Adaptec.
++http://www.scyld.com/expert/100mbps.html
++http://www.scyld.com/expert/NWay.html
++
++IVc. Errata
++
++*/
++
++
++
++static void *starfire_probe1(struct pci_dev *pdev, void *init_dev,
++ long ioaddr, int irq, int chip_idx, int find_cnt);
++static int starfire_pwr_event(void *dev_instance, int event);
++enum chip_capability_flags {CanHaveMII=1, };
++#define PCI_IOTYPE (PCI_USES_MASTER | PCI_USES_MEM | PCI_ADDR0)
++/* And maps in 0.5MB(!) -- no I/O mapping here! */
++#define MEM_ADDR_SZ 0x80000
++
++#if 0 && (defined(__x86_64) || defined(__alpha__))
++/* Enable 64 bit address modes. */
++#define STARFIRE_ADDR_64BITS 1
++#endif
++
++static struct pci_id_info pci_id_tbl[] = {
++ {"Adaptec Starfire 6915", { 0x69159004, 0xffffffff, },
++ PCI_IOTYPE, MEM_ADDR_SZ, CanHaveMII},
++ {0,}, /* 0 terminated list. */
++};
++
++struct drv_id_info starfire_drv_id = {
++ "starfire", PCI_HOTSWAP, PCI_CLASS_NETWORK_ETHERNET<<8, pci_id_tbl,
++ starfire_probe1, starfire_pwr_event };
++
++/* Offsets to the device registers.
++ Unlike software-only systems, device drivers interact with complex hardware.
++ It's not useful to define symbolic names for every register bit in the
++ device. The name can only partially document the semantics and make
++ the driver longer and more difficult to read.
++ In general, only the important configuration values or bits changed
++ multiple times should be defined symbolically.
++*/
++enum register_offsets {
++ PCIDeviceConfig=0x50040, GenCtrl=0x50070, IntrTimerCtrl=0x50074,
++ IntrClear=0x50080, IntrStatus=0x50084, IntrEnable=0x50088,
++ MIICtrl=0x52000, StationAddr=0x50120, EEPROMCtrl=0x51000,
++ TxDescCtrl=0x50090,
++ TxRingPtr=0x50098, HiPriTxRingPtr=0x50094, /* Low and High priority. */
++ TxRingHiAddr=0x5009C, /* 64 bit address extension. */
++ TxProducerIdx=0x500A0, TxConsumerIdx=0x500A4,
++ TxThreshold=0x500B0,
++ CompletionHiAddr=0x500B4, TxCompletionAddr=0x500B8,
++ RxCompletionAddr=0x500BC, RxCompletionQ2Addr=0x500C0,
++ CompletionQConsumerIdx=0x500C4,
++ RxDescQCtrl=0x500D4, RxDescQHiAddr=0x500DC, RxDescQAddr=0x500E0,
++ RxDescQIdx=0x500E8, RxDMAStatus=0x500F0, RxFilterMode=0x500F4,
++ TxMode=0x55000,
++};
++
++/* Bits in the interrupt status/mask registers. */
++enum intr_status_bits {
++ IntrNormalSummary=0x8000, IntrAbnormalSummary=0x02000000,
++ IntrRxDone=0x0300, IntrRxEmpty=0x10040, IntrRxPCIErr=0x80000,
++ IntrTxDone=0x4000, IntrTxEmpty=0x1000, IntrTxPCIErr=0x80000,
++ StatsMax=0x08000000, LinkChange=0xf0000000,
++ IntrTxDataLow=0x00040000,
++ IntrPCIPin=0x01,
++};
++
++/* Bits in the RxFilterMode register. */
++enum rx_mode_bits {
++ AcceptBroadcast=0x04, AcceptAllMulticast=0x02, AcceptAll=0x01,
++ AcceptMulticast=0x10, AcceptMyPhys=0xE040,
++};
++
++/* Misc. bits. Symbolic names so that may be searched for. */
++enum misc_bits {
++ ChipResetCmd=1, /* PCIDeviceConfig */
++ PCIIntEnb=0x00800000, /* PCIDeviceConfig */
++ TxEnable=0x0A, RxEnable=0x05, SoftIntr=0x100, /* GenCtrl */
++};
++
++/* The Rx and Tx buffer descriptors. */
++struct starfire_rx_desc {
++ u32 rxaddr; /* Optionally 64 bits. */
++#if defined(STARFIRE_ADDR_64BITS)
++ u32 rxaddr_hi; /* Optionally 64 bits. */
++#endif
++};
++enum rx_desc_bits {
++ RxDescValid=1, RxDescEndRing=2,
++};
++
++/* Completion queue entry.
++ You must update the page allocation, init_ring and the shift count in rx()
++ if using a larger format. */
++struct rx_done_desc {
++ u32 status; /* Low 16 bits is length. */
++#ifdef full_rx_status
++ u32 status2;
++ u16 vlanid;
++ u16 csum; /* partial checksum */
++ u32 timestamp;
++#endif
++};
++enum rx_done_bits {
++ RxOK=0x20000000, RxFIFOErr=0x10000000, RxBufQ2=0x08000000,
++};
++
++/* Type 1 Tx descriptor. */
++struct starfire_tx_desc {
++ u32 status; /* Upper bits are status, lower 16 length. */
++ u32 addr;
++};
++enum tx_desc_bits {
++ TxDescID=0xB1010000, /* Also marks single fragment, add CRC. */
++ TxDescIntr=0x08000000, TxRingWrap=0x04000000,
++};
++struct tx_done_report {
++ u32 status; /* timestamp, index. */
++#if 0
++ u32 intrstatus; /* interrupt status */
++#endif
++};
++
++#define PRIV_ALIGN 15 /* Required alignment mask */
++struct netdev_private {
++ /* Descriptor rings first for alignment. */
++ struct starfire_rx_desc *rx_ring;
++ struct starfire_tx_desc *tx_ring;
++ struct net_device *next_module; /* Link for devices of this type. */
++ void *priv_addr; /* Unaligned address for kfree */
++ const char *product_name;
++ /* The addresses of rx/tx-in-place skbuffs. */
++ struct sk_buff* rx_skbuff[RX_RING_SIZE];
++ struct sk_buff* tx_skbuff[TX_RING_SIZE];
++ u8 pad0[100]; /* Impact padding */
++ /* Pointers to completion queues (full pages). Cache line pad.. */
++ struct rx_done_desc *rx_done_q __attribute__((aligned (L1_CACHE_BYTES)));
++ unsigned int rx_done;
++ struct tx_done_report *tx_done_q __attribute__((aligned (L1_CACHE_BYTES)));
++ unsigned int tx_done;
++
++ struct net_device_stats stats;
++ struct timer_list timer; /* Media monitoring timer. */
++ int msg_level;
++ int chip_id, drv_flags;
++ struct pci_dev *pci_dev;
++ /* Frequently used values: keep some adjacent for cache effect. */
++ int max_interrupt_work;
++ int intr_enable;
++ unsigned int restore_intr_enable:1; /* Set if temporarily masked. */
++ unsigned int polling:1; /* Erk, IRQ err. */
++
++ unsigned int cur_rx, dirty_rx; /* Producer/consumer ring indices */
++ unsigned int rx_buf_sz; /* Based on MTU+slack. */
++ int rx_copybreak;
++
++ unsigned int cur_tx, dirty_tx;
++ unsigned int tx_full:1; /* The Tx queue is full. */
++ /* These values keep track of the transceiver/media in use. */
++ unsigned int full_duplex:1, /* Full-duplex operation requested. */
++ medialock:1, /* Xcvr set to fixed speed/duplex. */
++ rx_flowctrl:1,
++ tx_flowctrl:1; /* Use 802.3x flow control. */
++ unsigned int default_port; /* Last dev->if_port value. */
++ u32 tx_mode;
++ u8 tx_threshold;
++ u32 cur_rx_mode;
++ u16 mc_filter[32];
++ int multicast_filter_limit;
++
++ /* MII transceiver section. */
++ int mii_cnt; /* MII device addresses. */
++ u16 advertising; /* NWay media advertisement */
++ unsigned char phys[2]; /* MII device addresses. */
++};
++
++static int mdio_read(struct net_device *dev, int phy_id, int location);
++static void mdio_write(struct net_device *dev, int phy_id, int location,
++ int value);
++static int netdev_open(struct net_device *dev);
++static int change_mtu(struct net_device *dev, int new_mtu);
++static void check_duplex(struct net_device *dev);
++static void netdev_timer(unsigned long data);
++static void tx_timeout(struct net_device *dev);
++static void init_ring(struct net_device *dev);
++static int start_tx(struct sk_buff *skb, struct net_device *dev);
++static void intr_handler(int irq, void *dev_instance, struct pt_regs *regs);
++static void netdev_error(struct net_device *dev, int intr_status);
++static int netdev_rx(struct net_device *dev);
++static void netdev_error(struct net_device *dev, int intr_status);
++static void set_rx_mode(struct net_device *dev);
++static struct net_device_stats *get_stats(struct net_device *dev);
++static int mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
++static int netdev_close(struct net_device *dev);
++
++
++
++/* A list of our installed devices, for removing the driver module. */
++static struct net_device *root_net_dev = NULL;
++
++#ifndef MODULE
++int starfire_probe(struct net_device *dev)
++{
++ if (pci_drv_register(&starfire_drv_id, dev) < 0)
++ return -ENODEV;
++ printk(KERN_INFO "%s" KERN_INFO "%s", version1, version2);
++ return 0;
++}
++#endif
++
++static void *starfire_probe1(struct pci_dev *pdev, void *init_dev,
++ long ioaddr, int irq, int chip_idx, int card_idx)
++{
++ struct net_device *dev;
++ struct netdev_private *np;
++ void *priv_mem;
++ int i, option = card_idx < MAX_UNITS ? options[card_idx] : 0;
++
++ dev = init_etherdev(init_dev, 0);
++ if (!dev)
++ return NULL;
++
++ printk(KERN_INFO "%s: %s at 0x%lx, ",
++ dev->name, pci_id_tbl[chip_idx].name, ioaddr);
++
++ /* Serial EEPROM reads are hidden by the hardware. */
++ for (i = 0; i < 6; i++)
++ dev->dev_addr[i] = readb(ioaddr + EEPROMCtrl + 20-i);
++ for (i = 0; i < 5; i++)
++ printk("%2.2x:", dev->dev_addr[i]);
++ printk("%2.2x, IRQ %d.\n", dev->dev_addr[i], irq);
++
++ /* Make certain elements e.g. descriptor lists are aligned. */
++ priv_mem = kmalloc(sizeof(*np) + PRIV_ALIGN, GFP_KERNEL);
++ /* Check for the very unlikely case of no memory. */
++ if (priv_mem == NULL)
++ return NULL;
++
++ /* Reset the chip to erase previous misconfiguration. */
++ writel(ChipResetCmd, ioaddr + PCIDeviceConfig);
++
++ dev->base_addr = ioaddr;
++ dev->irq = irq;
++
++ dev->priv = np = (void *)(((long)priv_mem + PRIV_ALIGN) & ~PRIV_ALIGN);
++ memset(np, 0, sizeof(*np));
++ np->priv_addr = priv_mem;
++
++ np->next_module = root_net_dev;
++ root_net_dev = dev;
++
++ np->pci_dev = pdev;
++ np->chip_id = chip_idx;
++ np->drv_flags = pci_id_tbl[chip_idx].drv_flags;
++ np->msg_level = (1 << debug) - 1;
++ np->rx_copybreak = rx_copybreak;
++ np->max_interrupt_work = max_interrupt_work;
++ np->multicast_filter_limit = multicast_filter_limit;
++
++ if (dev->mem_start)
++ option = dev->mem_start;
++
++ if (card_idx < MAX_UNITS && full_duplex[card_idx] > 0)
++ np->full_duplex = 1;
++
++ if (np->full_duplex) {
++ if (np->msg_level & NETIF_MSG_PROBE)
++ printk(KERN_INFO "%s: Set to forced full duplex, autonegotiation"
++ " disabled.\n", dev->name);
++ np->medialock = 1;
++ }
++
++ /* The chip-specific entries in the device structure. */
++ dev->open = &netdev_open;
++ dev->hard_start_xmit = &start_tx;
++ dev->stop = &netdev_close;
++ dev->get_stats = &get_stats;
++ dev->set_multicast_list = &set_rx_mode;
++ dev->do_ioctl = &mii_ioctl;
++ dev->change_mtu = &change_mtu;
++
++ if (np->drv_flags & CanHaveMII) {
++ int phy, phy_idx = 0;
++ for (phy = 0; phy < 32 && phy_idx < 4; phy++) {
++ int mii_status = mdio_read(dev, phy, 1);
++ if (mii_status != 0xffff && mii_status != 0x0000) {
++ np->phys[phy_idx++] = phy;
++ np->advertising = mdio_read(dev, phy, 4);
++ if (np->msg_level & NETIF_MSG_PROBE)
++ printk(KERN_INFO "%s: MII PHY found at address %d, status "
++ "0x%4.4x advertising %4.4x.\n",
++ dev->name, phy, mii_status, np->advertising);
++ }
++ }
++ np->mii_cnt = phy_idx;
++ }
++
++ /* Force the media type after detecting the transceiver. */
++ if (option > 0) {
++ if (option & 0x220)
++ np->full_duplex = 1;
++ np->default_port = option & 0x3ff;
++ if (np->default_port & 0x330) {
++ np->medialock = 1;
++ if (np->msg_level & NETIF_MSG_PROBE)
++ printk(KERN_INFO " Forcing %dMbs %s-duplex operation.\n",
++ (option & 0x300 ? 100 : 10),
++ (np->full_duplex ? "full" : "half"));
++ mdio_write(dev, np->phys[0], 0,
++ ((option & 0x300) ? 0x2000 : 0) | /* 100mbps? */
++ (np->full_duplex ? 0x0100 : 0)); /* Full duplex? */
++ }
++ }
++
++ return dev;
++}
++
++
++/* Read the MII Management Data I/O (MDIO) interfaces. */
++
++static int mdio_read(struct net_device *dev, int phy_id, int location)
++{
++ long mdio_addr = dev->base_addr + MIICtrl + (phy_id<<7) + (location<<2);
++ int result, boguscnt=1000;
++ /* ??? Should we add a busy-wait here? */
++ do
++ result = readl(mdio_addr);
++ while ((result & 0xC0000000) != 0x80000000 && --boguscnt >= 0);
++ return result & 0xffff;
++}
++
++static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
++{
++ long mdio_addr = dev->base_addr + MIICtrl + (phy_id<<7) + (location<<2);
++ writel(value, mdio_addr);
++ /* The busy-wait will occur before a read. */
++ return;
++}
++
++
++static int netdev_open(struct net_device *dev)
++{
++ struct netdev_private *np = (struct netdev_private *)dev->priv;
++ long ioaddr = dev->base_addr;
++ int i;
++
++ MOD_INC_USE_COUNT;
++
++ if (request_irq(dev->irq, &intr_handler, SA_SHIRQ, dev->name, dev)) {
++ MOD_DEC_USE_COUNT;
++ return -EAGAIN;
++ }
++
++ /* We have no reports that indicate we need to reset the chip.
++ But to be on the safe side... */
++ /* Disable the Rx and Tx, and reset the chip. */
++ writel(0, ioaddr + GenCtrl);
++ writel(ChipResetCmd, ioaddr + PCIDeviceConfig);
++ if (np->msg_level & NETIF_MSG_IFUP)
++ printk(KERN_DEBUG "%s: netdev_open() irq %d.\n",
++ dev->name, dev->irq);
++ /* Allocate the various queues, failing gracefully. */
++ if (np->tx_done_q == 0)
++ np->tx_done_q = (struct tx_done_report *)get_free_page(GFP_KERNEL);
++ if (np->rx_done_q == 0)
++ np->rx_done_q = (struct rx_done_desc *)get_free_page(GFP_KERNEL);
++ if (np->tx_ring == 0)
++ np->tx_ring = (struct starfire_tx_desc *)get_free_page(GFP_KERNEL);
++ if (np->rx_ring == 0)
++ np->rx_ring = (struct starfire_rx_desc *)get_free_page(GFP_KERNEL);
++ if (np->tx_done_q == 0 || np->rx_done_q == 0
++ || np->rx_ring == 0 || np->tx_ring == 0) {
++ /* Retain the pages to increase our chances next time. */
++ MOD_DEC_USE_COUNT;
++ return -ENOMEM;
++ }
++
++ init_ring(dev);
++ /* Set the size of the Rx buffers. */
++ writel((np->rx_buf_sz<<16) | 0xA000, ioaddr + RxDescQCtrl);
++
++ /* Set Tx descriptor to type 1 and padding to 0 bytes. */
++ writel(0x02000401, ioaddr + TxDescCtrl);
++
++#if defined(STARFIRE_ADDR_64BITS)
++ writel(virt_to_bus(np->rx_ring) >> 32, ioaddr + RxDescQHiAddr);
++ writel(virt_to_bus(np->tx_ring) >> 32, ioaddr + TxRingHiAddr);
++#else
++ writel(0, ioaddr + RxDescQHiAddr);
++ writel(0, ioaddr + TxRingHiAddr);
++ writel(0, ioaddr + CompletionHiAddr);
++#endif
++ writel(virt_to_bus(np->rx_ring), ioaddr + RxDescQAddr);
++ writel(virt_to_bus(np->tx_ring), ioaddr + TxRingPtr);
++
++ writel(virt_to_bus(np->tx_done_q), ioaddr + TxCompletionAddr);
++ writel(virt_to_bus(np->rx_done_q), ioaddr + RxCompletionAddr);
++
++ if (np->msg_level & NETIF_MSG_IFUP)
++ printk(KERN_DEBUG "%s: Filling in the station address.\n", dev->name);
++
++ /* Fill both the unused Tx SA register and the Rx perfect filter. */
++ for (i = 0; i < 6; i++)
++ writeb(dev->dev_addr[i], ioaddr + StationAddr + 5-i);
++ for (i = 0; i < 16; i++) {
++ u16 *eaddrs = (u16 *)dev->dev_addr;
++ long setup_frm = ioaddr + 0x56000 + i*16;
++ writew(cpu_to_be16(eaddrs[2]), setup_frm); setup_frm += 4;
++ writew(cpu_to_be16(eaddrs[1]), setup_frm); setup_frm += 4;
++ writew(cpu_to_be16(eaddrs[0]), setup_frm); setup_frm += 8;
++ }
++
++ /* Initialize other registers. */
++ /* Configure the PCI bus bursts and FIFO thresholds. */
++ np->tx_mode = 0; /* Initialized when TxMode set. */
++ np->tx_threshold = 4;
++ writel(np->tx_threshold, ioaddr + TxThreshold);
++ writel(interrupt_mitigation, ioaddr + IntrTimerCtrl);
++
++ if (dev->if_port == 0)
++ dev->if_port = np->default_port;
++
++ if (np->msg_level & NETIF_MSG_IFUP)
++ printk(KERN_DEBUG "%s: Setting the Rx and Tx modes.\n", dev->name);
++ set_rx_mode(dev);
++
++ np->advertising = mdio_read(dev, np->phys[0], 4);
++ check_duplex(dev);
++ netif_start_tx_queue(dev);
++
++ /* Set the interrupt mask and enable PCI interrupts. */
++ np->intr_enable = IntrRxDone | IntrRxEmpty | IntrRxPCIErr |
++ IntrTxDone | IntrTxEmpty | IntrTxPCIErr |
++ StatsMax | LinkChange | IntrNormalSummary | IntrAbnormalSummary
++ | 0x0010;
++ writel(np->intr_enable, ioaddr + IntrEnable);
++ writel(PCIIntEnb | readl(ioaddr + PCIDeviceConfig),
++ ioaddr + PCIDeviceConfig);
++
++ /* Enable the Rx and Tx units. */
++ writel(TxEnable|RxEnable, ioaddr + GenCtrl);
++
++ if (np->msg_level & NETIF_MSG_IFUP)
++ printk(KERN_DEBUG "%s: Done netdev_open().\n",
++ dev->name);
++
++ /* Set the timer to check for link beat. */
++ init_timer(&np->timer);
++ np->timer.expires = jiffies + 3*HZ;
++ np->timer.data = (unsigned long)dev;
++ np->timer.function = &netdev_timer; /* timer handler */
++ add_timer(&np->timer);
++
++ return 0;
++}
++
++/* The starfire can handle frame sizes up to 64KB, but we arbitrarily
++ * limit the size.
++ */
++static int change_mtu(struct net_device *dev, int new_mtu)
++{
++ if ((new_mtu < 68) || (new_mtu > 17268))
++ return -EINVAL;
++ if (netif_running(dev))
++ return -EBUSY;
++ dev->mtu = new_mtu;
++ return 0;
++}
++
++static void check_duplex(struct net_device *dev)
++{
++ struct netdev_private *np = (struct netdev_private *)dev->priv;
++ long ioaddr = dev->base_addr;
++ int new_tx_mode;
++
++ new_tx_mode = 0x0C04 | (np->tx_flowctrl ? 0x0800:0)
++ | (np->rx_flowctrl ? 0x0400:0);
++ if (np->medialock) {
++ if (np->full_duplex)
++ new_tx_mode |= 2;
++ } else {
++ int mii_reg5 = mdio_read(dev, np->phys[0], 5);
++ int negotiated = mii_reg5 & np->advertising;
++ int duplex = (negotiated & 0x0100) || (negotiated & 0x01C0) == 0x0040;
++ if (duplex)
++ new_tx_mode |= 2;
++ if (np->full_duplex != duplex) {
++ np->full_duplex = duplex;
++ if (np->msg_level & NETIF_MSG_LINK)
++ printk(KERN_INFO "%s: Setting %s-duplex based on MII #%d"
++ " negotiated capability %4.4x.\n", dev->name,
++ duplex ? "full" : "half", np->phys[0], negotiated);
++ }
++ }
++ if (new_tx_mode != np->tx_mode) {
++ np->tx_mode = new_tx_mode;
++ writel(np->tx_mode | 0x8000, ioaddr + TxMode);
++ writel(np->tx_mode, ioaddr + TxMode);
++ }
++}
++
++/* Check for duplex changes, but mostly check for failures. */
++static void netdev_timer(unsigned long data)
++{
++ struct net_device *dev = (struct net_device *)data;
++ struct netdev_private *np = (struct netdev_private *)dev->priv;
++ long ioaddr = dev->base_addr;
++ int status = readl(ioaddr + IntrStatus);
++ static long last_msg = 0;
++
++ /* Normally we check only every few seconds. */
++ np->timer.expires = jiffies + 60*HZ;
++
++ if (np->msg_level & NETIF_MSG_TIMER) {
++ printk(KERN_DEBUG "%s: Media selection timer tick, status %8.8x.\n",
++ dev->name, status);
++ }
++
++ /* Check for a missing chip or failed interrupt line.
++ * The latter may be falsely triggered, so we check twice. */
++ if (status == 0xffffffff) {
++ if (jiffies - last_msg > 10*HZ) {
++ last_msg = jiffies;
++ printk(KERN_ERR "%s: The Starfire chip is missing!\n",
++ dev->name);
++ }
++ } else if (np->polling) {
++ if (status & IntrPCIPin) {
++ intr_handler(dev->irq, dev, 0);
++ if (jiffies - last_msg > 10*HZ) {
++ printk(KERN_ERR "%s: IRQ %d is still blocked!\n",
++ dev->name, dev->irq);
++ last_msg = jiffies;
++ }
++ } else if (jiffies - last_msg > 10*HZ)
++ np->polling = 0;
++ np->timer.expires = jiffies + 2;
++ } else if (status & IntrPCIPin) {
++ int new_status = readl(ioaddr + IntrStatus);
++ /* Bogus hardware IRQ mapping: Fake an interrupt handler call. */
++ if (new_status & IntrPCIPin) {
++ printk(KERN_ERR "%s: IRQ %d is not raising an interrupt! "
++ "Status %8.8x/%8.8x. \n",
++ dev->name, dev->irq, status, new_status);
++ intr_handler(dev->irq, dev, 0);
++ np->timer.expires = jiffies + 2;
++ np->polling = 1;
++ }
++ } else if (netif_queue_paused(dev) &&
++ np->cur_tx - np->dirty_tx > 1 &&
++ (jiffies - dev->trans_start) > TX_TIMEOUT) {
++ /* This will not catch tbusy incorrectly set when the queue is empty,
++ * but that state should never occur. */
++ tx_timeout(dev);
++ }
++
++ check_duplex(dev);
++
++ add_timer(&np->timer);
++}
++
++static void tx_timeout(struct net_device *dev)
++{
++ struct netdev_private *np = (struct netdev_private *)dev->priv;
++ long ioaddr = dev->base_addr;
++
++ printk(KERN_WARNING "%s: Transmit timed out, status %8.8x,"
++ " resetting...\n", dev->name, (int)readl(ioaddr + IntrStatus));
++
++#if defined(__i386__)
++ if (np->msg_level & NETIF_MSG_TX_ERR) {
++ int i;
++ printk("\n" KERN_DEBUG " Tx ring %p: ", np->tx_ring);
++ for (i = 0; i < TX_RING_SIZE; i++)
++ printk(" %4.4x", np->tx_ring[i].status);
++ printk("\n" KERN_DEBUG " Rx ring %p: ", np->rx_ring);
++ for (i = 0; i < RX_RING_SIZE; i++)
++ printk(" %8.8x", (unsigned int)np->rx_ring[i].rxaddr);
++ printk("\n");
++ }
++#endif
++
++ /* If a specific problem is reported, reinitialize the hardware here. */
++ dev->if_port = 0;
++ /* Stop and restart the chip's Tx processes . */
++ writel(0, ioaddr + GenCtrl);
++ /* Enable the Rx and Tx units. */
++ writel(TxEnable|RxEnable, ioaddr + GenCtrl);
++
++ dev->trans_start = jiffies;
++ np->stats.tx_errors++;
++ return;
++}
++
++
++/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
++static void init_ring(struct net_device *dev)
++{
++ struct netdev_private *np = (struct netdev_private *)dev->priv;
++ int i;
++
++ np->tx_full = 0;
++ np->cur_rx = np->cur_tx = 0;
++ np->dirty_rx = np->rx_done = np->dirty_tx = np->tx_done = 0;
++
++ np->rx_buf_sz = (dev->mtu <= 1522 ? PKT_BUF_SZ :
++ (dev->mtu + 14 + 3) & ~3); /* Round to word. */
++
++ /* Fill in the Rx buffers. Handle allocation failure gracefully. */
++ for (i = 0; i < RX_RING_SIZE; i++) {
++ struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz);
++ np->rx_skbuff[i] = skb;
++ if (skb == NULL)
++ break;
++ skb->dev = dev; /* Mark as being used by this device. */
++ /* Grrr, we cannot offset to correctly align the IP header. */
++ np->rx_ring[i].rxaddr =
++ virt_to_le32desc(skb->tail) | cpu_to_le32(RxDescValid);
++ }
++ writew(i - 1, dev->base_addr + RxDescQIdx);
++ np->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
++
++ /* Clear the remainder of the Rx buffer ring. */
++ for ( ; i < RX_RING_SIZE; i++) {
++ np->rx_ring[i].rxaddr = 0;
++ np->rx_skbuff[i] = 0;
++ }
++ /* Mark the last entry as wrapping the ring. */
++ np->rx_ring[i-1].rxaddr |= cpu_to_le32(RxDescEndRing);
++
++ /* Clear the completion rings. */
++ for (i = 0; i < DONE_Q_SIZE; i++) {
++ np->rx_done_q[i].status = 0;
++ np->tx_done_q[i].status = 0;
++ }
++
++ for (i = 0; i < TX_RING_SIZE; i++) {
++ np->tx_skbuff[i] = 0;
++ np->tx_ring[i].status = 0;
++ }
++ return;
++}
++
++static int start_tx(struct sk_buff *skb, struct net_device *dev)
++{
++ struct netdev_private *np = (struct netdev_private *)dev->priv;
++ unsigned entry;
++
++ /* Block a timer-based transmit from overlapping. This happens when
++ packets are presumed lost, and we use this check the Tx status. */
++ if (netif_pause_tx_queue(dev) != 0) {
++ /* This watchdog code is redundant with the media monitor timer. */
++ if (jiffies - dev->trans_start > TX_TIMEOUT)
++ tx_timeout(dev);
++ return 1;
++ }
++
++ /* Caution: the write order is important here, set the field
++ with the "ownership" bits last. */
++
++ /* Calculate the next Tx descriptor entry. */
++ entry = np->cur_tx % TX_RING_SIZE;
++
++ np->tx_skbuff[entry] = skb;
++
++ np->tx_ring[entry].addr = virt_to_le32desc(skb->data);
++ /* Add "| TxDescIntr" to generate Tx-done interrupts. */
++ np->tx_ring[entry].status = cpu_to_le32(skb->len | TxDescID);
++#if 1
++ if (entry >= TX_RING_SIZE-1) { /* Wrap ring */
++ np->tx_ring[entry].status |= cpu_to_le32(TxRingWrap | TxDescIntr);
++ entry = -1;
++ }
++#endif
++
++ /* On some architectures better performance results by explicitly
++ flushing cache lines: pci_flush_virt(skb->data, skb->len); */
++
++ np->cur_tx++;
++ /* Update the producer index. */
++ writel(++entry, dev->base_addr + TxProducerIdx);
++
++ /* cf. using TX_QUEUE_LEN instead of TX_RING_SIZE here. */
++ if (np->cur_tx - np->dirty_tx >= TX_RING_SIZE - 1) {
++ np->tx_full = 1;
++ /* Check for the rare case of a just-cleared queue. */
++ if (np->cur_tx - (volatile unsigned int)np->dirty_tx
++ < TX_RING_SIZE - 2) {
++ np->tx_full = 0;
++ netif_unpause_tx_queue(dev);
++ } else
++ netif_stop_tx_queue(dev);
++ } else
++ netif_unpause_tx_queue(dev); /* Typical path */
++
++ dev->trans_start = jiffies;
++
++ if (np->msg_level & NETIF_MSG_TX_QUEUED) {
++ printk(KERN_DEBUG "%s: Tx frame #%d slot %d %8.8x %8.8x.\n",
++ dev->name, np->cur_tx, entry,
++ np->tx_ring[entry].status, np->tx_ring[entry].addr);
++ }
++ return 0;
++}
++
++/* The interrupt handler does all of the Rx thread work and cleans up
++ after the Tx thread. */
++static void intr_handler(int irq, void *dev_instance, struct pt_regs *rgs)
++{
++ struct net_device *dev = (struct net_device *)dev_instance;
++ struct netdev_private *np;
++ long ioaddr;
++ int boguscnt;
++
++#ifndef final_version /* Can never occur. */
++ if (dev == NULL) {
++ printk (KERN_ERR "Netdev interrupt handler(): IRQ %d for unknown "
++ "device.\n", irq);
++ return;
++ }
++#endif
++
++ ioaddr = dev->base_addr;
++ np = (struct netdev_private *)dev->priv;
++ boguscnt = np->max_interrupt_work;
++
++ do {
++ u32 intr_status = readl(ioaddr + IntrClear);
++
++ if (np->msg_level & NETIF_MSG_INTR)
++ printk(KERN_DEBUG "%s: Interrupt status %4.4x.\n",
++ dev->name, intr_status);
++
++ if (intr_status == 0 || intr_status == 0xffffffff)
++ break;
++
++ if (intr_status & IntrRxDone)
++ netdev_rx(dev);
++
++ /* Scavenge the skbuff list based on the Tx-done queue.
++ There are redundant checks here that may be cleaned up
++ after the driver has proven to be reliable. */
++ {
++ int consumer = readl(ioaddr + TxConsumerIdx);
++ int tx_status;
++ if (np->msg_level & NETIF_MSG_INTR)
++ printk(KERN_DEBUG "%s: Tx Consumer index is %d.\n",
++ dev->name, consumer);
++#if 0
++ if (np->tx_done >= 250 || np->tx_done == 0)
++ printk(KERN_DEBUG "%s: Tx completion entry %d is %8.8x, "
++ "%d is %8.8x.\n", dev->name,
++ np->tx_done, np->tx_done_q[np->tx_done].status,
++ (np->tx_done+1) & (DONE_Q_SIZE-1),
++ np->tx_done_q[(np->tx_done+1)&(DONE_Q_SIZE-1)].status);
++#endif
++ while ((tx_status = cpu_to_le32(np->tx_done_q[np->tx_done].status))
++ != 0) {
++ if (np->msg_level & NETIF_MSG_TX_DONE)
++ printk(KERN_DEBUG "%s: Tx completion entry %d is %8.8x.\n",
++ dev->name, np->tx_done, tx_status);
++ if ((tx_status & 0xe0000000) == 0xa0000000) {
++ np->stats.tx_packets++;
++ } else if ((tx_status & 0xe0000000) == 0x80000000) {
++ u16 entry = tx_status; /* Implicit truncate */
++ entry >>= 3;
++ /* Scavenge the descriptor. */
++ if (np->tx_skbuff[entry]) {
++ dev_free_skb_irq(np->tx_skbuff[entry]);
++ } else
++ printk(KERN_WARNING "%s: Null skbuff at entry %d!!!\n",
++ dev->name, entry);
++ np->tx_skbuff[entry] = 0;
++ np->dirty_tx++;
++ }
++ np->tx_done_q[np->tx_done].status = 0;
++ np->tx_done = (np->tx_done+1) & (DONE_Q_SIZE-1);
++ }
++ writew(np->tx_done, ioaddr + CompletionQConsumerIdx + 2);
++ }
++ if (np->tx_full && np->cur_tx - np->dirty_tx < TX_RING_SIZE - 4) {
++ /* The ring is no longer full, allow new TX entries. */
++ np->tx_full = 0;
++ netif_resume_tx_queue(dev);
++ }
++
++ /* Abnormal error summary/uncommon events handlers. */
++ if (intr_status & IntrAbnormalSummary)
++ netdev_error(dev, intr_status);
++
++ if (--boguscnt < 0) {
++ printk(KERN_WARNING "%s: Too much work at interrupt, "
++ "status=0x%4.4x.\n",
++ dev->name, intr_status);
++ writel(0x0021, ioaddr + IntrTimerCtrl);
++ break;
++ }
++ } while (1);
++
++ if (np->msg_level & NETIF_MSG_INTR)
++ printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
++ dev->name, (int)readl(ioaddr + IntrStatus));
++
++ return;
++}
++
++/* This routine is logically part of the interrupt handler, but separated
++ for clarity and better register allocation. */
++static int netdev_rx(struct net_device *dev)
++{
++ struct netdev_private *np = (struct netdev_private *)dev->priv;
++ int boguscnt = np->dirty_rx + RX_RING_SIZE - np->cur_rx;
++ u32 desc_status;
++
++ if (np->rx_done_q == 0) {
++ printk(KERN_ERR "%s: rx_done_q is NULL! rx_done is %d. %p.\n",
++ dev->name, np->rx_done, np->tx_done_q);
++ return 0;
++ }
++
++ /* If EOP is set on the next entry, it's a new packet. Send it up. */
++ while ((desc_status = le32_to_cpu(np->rx_done_q[np->rx_done].status)) != 0) {
++ if (np->msg_level & NETIF_MSG_RX_STATUS)
++ printk(KERN_DEBUG " netdev_rx() status of %d was %8.8x.\n",
++ np->rx_done, desc_status);
++ if (--boguscnt < 0)
++ break;
++ if ( ! (desc_status & RxOK)) {
++			/* There was an error. */
++ if (np->msg_level & NETIF_MSG_RX_ERR)
++ printk(KERN_DEBUG " netdev_rx() Rx error was %8.8x.\n",
++ desc_status);
++ np->stats.rx_errors++;
++ if (desc_status & RxFIFOErr)
++ np->stats.rx_fifo_errors++;
++ } else {
++ struct sk_buff *skb;
++ u16 pkt_len = desc_status; /* Implicitly Truncate */
++ int entry = (desc_status >> 16) & 0x7ff;
++
++#ifndef final_version
++ if (np->msg_level & NETIF_MSG_RX_STATUS)
++ printk(KERN_DEBUG " netdev_rx() normal Rx pkt length %d"
++ ", bogus_cnt %d.\n",
++ pkt_len, boguscnt);
++#endif
++ /* Check if the packet is long enough to accept without copying
++ to a minimally-sized skbuff. */
++ if (pkt_len < rx_copybreak
++ && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
++ skb->dev = dev;
++ skb_reserve(skb, 2); /* 16 byte align the IP header */
++#if HAS_IP_COPYSUM /* Call copy + cksum if available. */
++ eth_copy_and_sum(skb, np->rx_skbuff[entry]->tail, pkt_len, 0);
++ skb_put(skb, pkt_len);
++#else
++ memcpy(skb_put(skb, pkt_len), np->rx_skbuff[entry]->tail,
++ pkt_len);
++#endif
++ } else {
++ char *temp = skb_put(skb = np->rx_skbuff[entry], pkt_len);
++ np->rx_skbuff[entry] = NULL;
++#ifndef final_version /* Remove after testing. */
++ if (le32desc_to_virt(np->rx_ring[entry].rxaddr & ~3) != temp)
++ printk(KERN_ERR "%s: Internal fault: The skbuff addresses "
++ "do not match in netdev_rx: %p vs. %p / %p.\n",
++ dev->name,
++ le32desc_to_virt(np->rx_ring[entry].rxaddr),
++ skb->head, temp);
++#endif
++ }
++ skb->protocol = eth_type_trans(skb, dev);
++#ifdef full_rx_status
++ if (np->rx_done_q[np->rx_done].status2 & cpu_to_le32(0x01000000))
++ skb->ip_summed = CHECKSUM_UNNECESSARY;
++#endif
++ netif_rx(skb);
++ dev->last_rx = jiffies;
++ np->stats.rx_packets++;
++ }
++ np->cur_rx++;
++ np->rx_done_q[np->rx_done].status = 0;
++ np->rx_done = (np->rx_done + 1) & (DONE_Q_SIZE-1);
++ }
++ writew(np->rx_done, dev->base_addr + CompletionQConsumerIdx);
++
++ /* Refill the Rx ring buffers. */
++ for (; np->cur_rx - np->dirty_rx > 0; np->dirty_rx++) {
++ struct sk_buff *skb;
++ int entry = np->dirty_rx % RX_RING_SIZE;
++ if (np->rx_skbuff[entry] == NULL) {
++ skb = dev_alloc_skb(np->rx_buf_sz);
++ np->rx_skbuff[entry] = skb;
++ if (skb == NULL)
++ break; /* Better luck next round. */
++ skb->dev = dev; /* Mark as being used by this device. */
++ np->rx_ring[entry].rxaddr =
++ virt_to_le32desc(skb->tail) | cpu_to_le32(RxDescValid);
++ }
++ if (entry == RX_RING_SIZE - 1)
++ np->rx_ring[entry].rxaddr |= cpu_to_le32(RxDescEndRing);
++ /* We could defer this until later... */
++ writew(entry, dev->base_addr + RxDescQIdx);
++ }
++
++ if ((np->msg_level & NETIF_MSG_RX_STATUS)
++ || memcmp(np->pad0, np->pad0 + 1, sizeof(np->pad0) -1))
++ printk(KERN_DEBUG " exiting netdev_rx() status of %d was %8.8x %d.\n",
++ np->rx_done, desc_status,
++ memcmp(np->pad0, np->pad0 + 1, sizeof(np->pad0) -1));
++
++ return 0;
++}
++
++static void netdev_error(struct net_device *dev, int intr_status)
++{
++ struct netdev_private *np = (struct netdev_private *)dev->priv;
++
++ if (intr_status & LinkChange) {
++ int phy_num = np->phys[0];
++ if (np->msg_level & NETIF_MSG_LINK)
++ printk(KERN_NOTICE "%s: Link changed: Autonegotiation advertising"
++ " %4.4x partner %4.4x.\n", dev->name,
++ mdio_read(dev, phy_num, 4),
++ mdio_read(dev, phy_num, 5));
++ /* Clear sticky bit. */
++ mdio_read(dev, phy_num, 1);
++ /* If link beat has returned... */
++ if (mdio_read(dev, phy_num, 1) & 0x0004)
++ netif_link_up(dev);
++ else
++ netif_link_down(dev);
++ check_duplex(dev);
++ }
++ if (intr_status & StatsMax) {
++ get_stats(dev);
++ }
++ /* Came close to underrunning the Tx FIFO, increase threshold. */
++ if (intr_status & IntrTxDataLow)
++ writel(++np->tx_threshold, dev->base_addr + TxThreshold);
++	/* Ignore expected normal events, and handle abnormal events. */
++ if ((intr_status &
++ ~(IntrAbnormalSummary|LinkChange|StatsMax|IntrTxDataLow| 0xFF01))
++ && (np->msg_level & NETIF_MSG_DRV))
++ printk(KERN_ERR "%s: Something Wicked happened! %4.4x.\n",
++ dev->name, intr_status);
++ /* Hmmmmm, it's not clear how to recover from PCI faults. */
++ if (intr_status & IntrTxPCIErr)
++ np->stats.tx_fifo_errors++;
++ if (intr_status & IntrRxPCIErr)
++ np->stats.rx_fifo_errors++;
++}
++
++static struct net_device_stats *get_stats(struct net_device *dev)
++{
++ long ioaddr = dev->base_addr;
++ struct netdev_private *np = (struct netdev_private *)dev->priv;
++
++ /* This adapter architecture needs no SMP locks. */
++#if LINUX_VERSION_CODE > 0x20119
++ np->stats.tx_bytes = readl(ioaddr + 0x57010);
++ np->stats.rx_bytes = readl(ioaddr + 0x57044);
++#endif
++ np->stats.tx_packets = readl(ioaddr + 0x57000);
++ np->stats.tx_aborted_errors =
++ readl(ioaddr + 0x57024) + readl(ioaddr + 0x57028);
++ np->stats.tx_window_errors = readl(ioaddr + 0x57018);
++ np->stats.collisions = readl(ioaddr + 0x57004) + readl(ioaddr + 0x57008);
++
++	/* The chip only needs to report frames that were silently dropped. */
++ np->stats.rx_dropped += readw(ioaddr + RxDMAStatus);
++ writew(0, ioaddr + RxDMAStatus);
++ np->stats.rx_crc_errors = readl(ioaddr + 0x5703C);
++ np->stats.rx_frame_errors = readl(ioaddr + 0x57040);
++ np->stats.rx_length_errors = readl(ioaddr + 0x57058);
++ np->stats.rx_missed_errors = readl(ioaddr + 0x5707C);
++
++ return &np->stats;
++}
++
++/* The little-endian AUTODIN II ethernet CRC calculations.
++ A big-endian version is also available.
++ This is slow but compact code. Do not use this routine for bulk data,
++ use a table-based routine instead.
++ This is common code and should be moved to net/core/crc.c.
++ Chips may use the upper or lower CRC bits, and may reverse and/or invert
++ them. Select the endian-ness that results in minimal calculations.
++*/
++static unsigned const ethernet_polynomial_le = 0xedb88320U;
++static inline unsigned ether_crc_le(int length, unsigned char *data)
++{
++ unsigned int crc = ~0; /* Initial value. */
++ while(--length >= 0) {
++ unsigned char current_octet = *data++;
++ int bit;
++ for (bit = 8; --bit >= 0; current_octet >>= 1) {
++ if ((crc ^ current_octet) & 1) {
++ crc >>= 1;
++ crc ^= ethernet_polynomial_le;
++ } else
++ crc >>= 1;
++ }
++ }
++ return crc;
++}
++
++static void set_rx_mode(struct net_device *dev)
++{
++ struct netdev_private *np = (struct netdev_private *)dev->priv;
++ long ioaddr = dev->base_addr;
++ u32 rx_mode;
++ struct dev_mc_list *mclist;
++ int i;
++
++ if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
++ /* Unconditionally log net taps. */
++ printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n", dev->name);
++ rx_mode = AcceptBroadcast|AcceptAllMulticast|AcceptAll|AcceptMyPhys;
++ } else if ((dev->mc_count > np->multicast_filter_limit)
++ || (dev->flags & IFF_ALLMULTI)) {
++ /* Too many to match, or accept all multicasts. */
++ rx_mode = AcceptBroadcast|AcceptAllMulticast|AcceptMyPhys;
++ } else if (dev->mc_count <= 15) {
++ /* Use the 16 element perfect filter. */
++ long filter_addr = ioaddr + 0x56000 + 1*16;
++ for (i = 1, mclist = dev->mc_list; mclist && i <= dev->mc_count;
++ i++, mclist = mclist->next) {
++ u16 *eaddrs = (u16 *)mclist->dmi_addr;
++ writew(cpu_to_be16(eaddrs[2]), filter_addr); filter_addr += 4;
++ writew(cpu_to_be16(eaddrs[1]), filter_addr); filter_addr += 4;
++ writew(cpu_to_be16(eaddrs[0]), filter_addr); filter_addr += 8;
++ }
++ while (i++ < 16) {
++ writew(0xffff, filter_addr); filter_addr += 4;
++ writew(0xffff, filter_addr); filter_addr += 4;
++ writew(0xffff, filter_addr); filter_addr += 8;
++ }
++ rx_mode = AcceptBroadcast | AcceptMyPhys;
++ } else {
++ /* Must use a multicast hash table. */
++ long filter_addr;
++ u16 mc_filter[32]; /* Multicast hash filter */
++
++ memset(mc_filter, 0, sizeof(mc_filter));
++ for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
++ i++, mclist = mclist->next) {
++ set_bit(ether_crc_le(ETH_ALEN, mclist->dmi_addr) >> 23, mc_filter);
++ }
++ /* Clear the perfect filter list. */
++ filter_addr = ioaddr + 0x56000 + 1*16;
++ for (i = 1; i < 16; i++) {
++ writew(0xffff, filter_addr); filter_addr += 4;
++ writew(0xffff, filter_addr); filter_addr += 4;
++ writew(0xffff, filter_addr); filter_addr += 8;
++ }
++ for (filter_addr=ioaddr + 0x56100, i=0; i < 32; filter_addr+= 16, i++){
++ np->mc_filter[i] = mc_filter[i];
++ writew(mc_filter[i], filter_addr);
++ }
++ rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
++ }
++ writel(rx_mode, ioaddr + RxFilterMode);
++}
++
++/*
++ Handle user-level ioctl() calls.
++ We must use two numeric constants as the key because some clueless person
++ changed the value for the symbolic name.
++*/
++static int mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
++{
++ struct netdev_private *np = (struct netdev_private *)dev->priv;
++ u16 *data = (u16 *)&rq->ifr_data;
++ u32 *data32 = (void *)&rq->ifr_data;
++
++ switch(cmd) {
++ case 0x8947: case 0x89F0:
++ /* SIOCGMIIPHY: Get the address of the PHY in use. */
++ data[0] = np->phys[0] & 0x1f;
++ /* Fall Through */
++ case 0x8948: case 0x89F1:
++ /* SIOCGMIIREG: Read the specified MII register. */
++ data[3] = mdio_read(dev, data[0] & 0x1f, data[1] & 0x1f);
++ return 0;
++ case 0x8949: case 0x89F2:
++ /* SIOCSMIIREG: Write the specified MII register */
++ if (!capable(CAP_NET_ADMIN))
++ return -EPERM;
++ if (data[0] == np->phys[0]) {
++ u16 value = data[2];
++ switch (data[1]) {
++ case 0:
++ /* Check for autonegotiation on or reset. */
++ np->medialock = (value & 0x9000) ? 0 : 1;
++ if (np->medialock)
++ np->full_duplex = (value & 0x0100) ? 1 : 0;
++ break;
++ case 4: np->advertising = value; break;
++ }
++ check_duplex(dev);
++ }
++ mdio_write(dev, data[0] & 0x1f, data[1] & 0x1f, data[2]);
++ return 0;
++ case SIOCGPARAMS:
++ data32[0] = np->msg_level;
++ data32[1] = np->multicast_filter_limit;
++ data32[2] = np->max_interrupt_work;
++ data32[3] = np->rx_copybreak;
++ return 0;
++ case SIOCSPARAMS:
++ if (!capable(CAP_NET_ADMIN))
++ return -EPERM;
++ np->msg_level = data32[0];
++ np->multicast_filter_limit = data32[1];
++ np->max_interrupt_work = data32[2];
++ np->rx_copybreak = data32[3];
++ return 0;
++ default:
++ return -EOPNOTSUPP;
++ }
++}
++
++static int netdev_close(struct net_device *dev)
++{
++ long ioaddr = dev->base_addr;
++ struct netdev_private *np = (struct netdev_private *)dev->priv;
++ int i;
++
++ netif_stop_tx_queue(dev);
++
++ if (np->msg_level & NETIF_MSG_IFDOWN) {
++ printk(KERN_DEBUG "%s: Shutting down ethercard, Intr status %4.4x.\n",
++ dev->name, (int)readl(ioaddr + IntrStatus));
++ printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d, Rx %d / %d.\n",
++ dev->name, np->cur_tx, np->dirty_tx, np->cur_rx, np->dirty_rx);
++ }
++
++ /* Disable interrupts by clearing the interrupt mask. */
++ writel(0, ioaddr + IntrEnable);
++
++ /* Stop the chip's Tx and Rx processes. */
++ writel(0, ioaddr + GenCtrl);
++
++ del_timer(&np->timer);
++
++#ifdef __i386__
++ if (np->msg_level & NETIF_MSG_IFDOWN) {
++ printk("\n"KERN_DEBUG" Tx ring at %8.8x:\n",
++ (int)virt_to_bus(np->tx_ring));
++ for (i = 0; i < 8 /* TX_RING_SIZE is huge! */; i++)
++ printk(KERN_DEBUG " #%d desc. %8.8x %8.8x -> %8.8x.\n",
++ i, np->tx_ring[i].status, np->tx_ring[i].addr,
++ np->tx_done_q[i].status);
++ printk(KERN_DEBUG " Rx ring at %8.8x -> %p:\n",
++ (int)virt_to_bus(np->rx_ring), np->rx_done_q);
++ if (np->rx_done_q)
++ for (i = 0; i < 8 /* RX_RING_SIZE */; i++) {
++ printk(KERN_DEBUG " #%d desc. %8.8x -> %8.8x\n",
++ i, np->rx_ring[i].rxaddr, np->rx_done_q[i].status);
++ }
++ }
++#endif /* __i386__ debugging only */
++
++ free_irq(dev->irq, dev);
++
++ /* Free all the skbuffs in the Rx queue. */
++ for (i = 0; i < RX_RING_SIZE; i++) {
++ np->rx_ring[i].rxaddr = 0xBADF00D0; /* An invalid address. */
++ if (np->rx_skbuff[i]) {
++#if LINUX_VERSION_CODE < 0x20100
++ np->rx_skbuff[i]->free = 1;
++#endif
++ dev_free_skb(np->rx_skbuff[i]);
++ }
++ np->rx_skbuff[i] = 0;
++ }
++ for (i = 0; i < TX_RING_SIZE; i++) {
++ if (np->tx_skbuff[i])
++ dev_free_skb(np->tx_skbuff[i]);
++ np->tx_skbuff[i] = 0;
++ }
++
++ MOD_DEC_USE_COUNT;
++
++ return 0;
++}
++
++
++static int starfire_pwr_event(void *dev_instance, int event)
++{
++ struct net_device *dev = dev_instance;
++ struct netdev_private *np = (struct netdev_private *)dev->priv;
++ long ioaddr = dev->base_addr;
++
++ if (np->msg_level & NETIF_MSG_LINK)
++ printk(KERN_DEBUG "%s: Handling power event %d.\n", dev->name, event);
++ switch(event) {
++ case DRV_ATTACH:
++ MOD_INC_USE_COUNT;
++ break;
++ case DRV_SUSPEND:
++ /* Disable interrupts, stop Tx and Rx. */
++ writel(0x0000, ioaddr + IntrEnable);
++ writel(0, ioaddr + GenCtrl);
++ break;
++ case DRV_RESUME:
++ /* This is incomplete: we must factor start_chip() out of open(). */
++ writel(np->tx_threshold, ioaddr + TxThreshold);
++ writel(interrupt_mitigation, ioaddr + IntrTimerCtrl);
++ set_rx_mode(dev);
++ writel(np->intr_enable, ioaddr + IntrEnable);
++ writel(TxEnable|RxEnable, ioaddr + GenCtrl);
++ break;
++ case DRV_DETACH: {
++ struct net_device **devp, **next;
++ if (dev->flags & IFF_UP) {
++ /* Some, but not all, kernel versions close automatically. */
++ dev_close(dev);
++ dev->flags &= ~(IFF_UP|IFF_RUNNING);
++ }
++ unregister_netdev(dev);
++ release_region(dev->base_addr, pci_id_tbl[np->chip_id].io_size);
++#ifndef USE_IO_OPS
++ iounmap((char *)dev->base_addr);
++#endif
++ for (devp = &root_net_dev; *devp; devp = next) {
++ next = &((struct netdev_private *)(*devp)->priv)->next_module;
++ if (*devp == dev) {
++ *devp = *next;
++ break;
++ }
++ }
++ if (np->priv_addr)
++ kfree(np->priv_addr);
++ kfree(dev);
++ MOD_DEC_USE_COUNT;
++ break;
++ }
++ }
++
++ return 0;
++}
++
++
++#ifdef MODULE
++int init_module(void)
++{
++ if (debug >= NETIF_MSG_DRV) /* Emit version even if no cards detected. */
++ printk(KERN_INFO "%s" KERN_INFO "%s", version1, version2);
++ if (pci_drv_register(&starfire_drv_id, NULL)) {
++ printk(KERN_INFO " No Starfire adapters detected, driver not loaded.\n");
++ return -ENODEV;
++ }
++ return 0;
++}
++
++void cleanup_module(void)
++{
++ struct net_device *next_dev;
++
++ pci_drv_unregister(&starfire_drv_id);
++
++ /* No need to check MOD_IN_USE, as sys_delete_module() checks. */
++ while (root_net_dev) {
++ struct netdev_private *np = (void *)(root_net_dev->priv);
++ unregister_netdev(root_net_dev);
++ iounmap((char *)(root_net_dev->base_addr));
++ next_dev = np->next_module;
++ if (np->tx_done_q) free_page((long)np->tx_done_q);
++ if (np->rx_done_q) free_page((long)np->rx_done_q);
++ if (np->priv_addr) kfree(np->priv_addr);
++ kfree(root_net_dev);
++ root_net_dev = next_dev;
++ }
++}
++
++#endif /* MODULE */
++
++/*
++ * Local variables:
++ * compile-command: "make KERNVER=`uname -r` starfire.o"
++ * compile-cmd: "gcc -DMODULE -Wall -Wstrict-prototypes -O6 -c starfire.c"
++ * simple-compile-command: "gcc -DMODULE -O6 -c starfire.c"
++ * c-indent-level: 4
++ * c-basic-offset: 4
++ * tab-width: 4
++ * End:
++ */
+Index: linux/src/drivers/net/sundance.c
+===================================================================
+RCS file: linux/src/drivers/net/sundance.c
+diff -N linux/src/drivers/net/sundance.c
+--- /dev/null 1 Jan 1970 00:00:00 -0000
++++ linux/src/drivers/net/sundance.c 20 Aug 2004 10:32:54 -0000
+@@ -0,0 +1,1556 @@
++/* sundance.c: A Linux device driver for the Sundance ST201 "Alta". */
++/*
++ Written 1999-2003 by Donald Becker.
++
++ This software may be used and distributed according to the terms of
++ the GNU General Public License (GPL), incorporated herein by reference.
++ Drivers based on or derived from this code fall under the GPL and must
++ retain the authorship, copyright and license notice. This file is not
++ a complete program and may only be used when the entire operating
++ system is licensed under the GPL.
++
++ The author may be reached as becker@scyld.com, or C/O
++ Scyld Computing Corporation
++ 410 Severn Ave., Suite 210
++ Annapolis MD 21403
++
++ Support information and updates available at
++ http://www.scyld.com/network/sundance.html
++*/
++
++/* These identify the driver base version and may not be removed. */
++static const char version1[] =
++"sundance.c:v1.11 2/4/2003 Written by Donald Becker <becker@scyld.com>\n";
++static const char version2[] =
++" http://www.scyld.com/network/sundance.html\n";
++/* Updated to recommendations in pci-skeleton v2.12. */
++
++/* Automatically extracted configuration info:
++probe-func: sundance_probe
++config-in: tristate 'Sundance ST201 "Alta" PCI Ethernet support' CONFIG_SUNDANCE
++c-help-name: Sundance ST201 "Alta" PCI Ethernet support
++c-help-symbol: CONFIG_SUNDANCE
++c-help: This driver is for the Sundance ST201 "Alta" and Kendin KS8723, as
++c-help: used on the D-Link DFE-550 and DFE-580.
++c-help: Design information, usage details and updates are available from
++c-help: http://www.scyld.com/network/sundance.html
++*/
++
++/* The user-configurable values.
++ These may be modified when a driver module is loaded.*/
++
++/* Message enable level: 0..31 = no..all messages. See NETIF_MSG docs. */
++static int debug = 2;
++
++/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
++static int max_interrupt_work = 20;
++
++/* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
++ The sundance uses a 64 element hash table based on the Ethernet CRC. */
++static int multicast_filter_limit = 32;
++
++/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
++ Setting to > 1518 effectively disables this feature.
++ This chip can receive into any byte alignment buffers, so word-oriented
++ archs do not need a copy-align of the IP header. */
++static int rx_copybreak = 0;
++
++/* Used to pass the media type, etc.
++ Both 'options[]' and 'full_duplex[]' should exist for driver
++ interoperability.
++ The media type is usually passed in 'options[]'.
++ The default is autonegotation for speed and duplex.
++ This should rarely be overridden.
++ Use option values 0x10/0x20 for 10Mbps, 0x100,0x200 for 100Mbps.
++ Use option values 0x10 and 0x100 for forcing half duplex fixed speed.
++ Use option values 0x20 and 0x200 for forcing full duplex operation.
++*/
++#define MAX_UNITS 8 /* More are supported, limit only on options */
++static int options[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
++static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
++
++/* Operational parameters that are set at compile time. */
++
++/* Ring sizes are a power of two only for compile efficiency.
++ The compiler will convert <unsigned>'%'<2^N> into a bit mask.
++ There must be at least five Tx entries for the tx_full hysteresis, and
++ more than 31 requires modifying the Tx status handling error recovery.
++ Leave a inactive gap in the Tx ring for better cache behavior.
++ Making the Tx ring too large decreases the effectiveness of channel
++ bonding and packet priority.
++ Large receive rings waste memory and impact buffer accounting.
++ The driver need to protect against interrupt latency and the kernel
++ not reserving enough available memory.
++*/
++#define TX_RING_SIZE 16
++#define TX_QUEUE_LEN 10 /* Limit ring entries actually used. */
++#define RX_RING_SIZE 32
++
++/* Operational parameters that usually are not changed. */
++/* Time in jiffies before concluding the transmitter is hung. */
++#define TX_TIMEOUT (6*HZ)
++
++/* Allocation size of Rx buffers with normal sized Ethernet frames.
++ Do not change this value without good reason. This is not a limit,
++ but a way to keep a consistent allocation size among drivers.
++ */
++#define PKT_BUF_SZ 1536
++
++/* Set iff a MII transceiver on any interface requires mdio preamble.
++ This only set with older tranceivers, so the extra
++ code size of a per-interface flag is not worthwhile. */
++static char mii_preamble_required = 0;
++
++#ifndef __KERNEL__
++#define __KERNEL__
++#endif
++#if !defined(__OPTIMIZE__)
++#warning You must compile this file with the correct options!
++#warning See the last lines of the source file.
++#error You must compile this driver with "-O".
++#endif
++
++/* Include files, designed to support most kernel versions 2.0.0 and later. */
++#include <linux/config.h>
++#if defined(CONFIG_SMP) && ! defined(__SMP__)
++#define __SMP__
++#endif
++#if defined(MODULE) && defined(CONFIG_MODVERSIONS) && ! defined(MODVERSIONS)
++#define MODVERSIONS
++#endif
++
++#include <linux/version.h>
++#if defined(MODVERSIONS)
++#include <linux/modversions.h>
++#endif
++#include <linux/module.h>
++
++#include <linux/kernel.h>
++#include <linux/string.h>
++#include <linux/timer.h>
++#include <linux/errno.h>
++#include <linux/ioport.h>
++#if LINUX_VERSION_CODE >= 0x20400
++#include <linux/slab.h>
++#else
++#include <linux/malloc.h>
++#endif
++#include <linux/interrupt.h>
++#include <linux/pci.h>
++#include <linux/netdevice.h>
++#include <linux/etherdevice.h>
++#include <linux/skbuff.h>
++#include <asm/bitops.h>
++#include <asm/io.h>
++
++#if LINUX_VERSION_CODE >= 0x20300
++#include <linux/spinlock.h>
++#elif LINUX_VERSION_CODE >= 0x20200
++#include <asm/spinlock.h>
++#endif
++
++#ifdef INLINE_PCISCAN
++#include "k_compat.h"
++#else
++#include "pci-scan.h"
++#include "kern_compat.h"
++#endif
++
++/* Condensed operations for readability. */
++#define virt_to_le32desc(addr) cpu_to_le32(virt_to_bus(addr))
++#define le32desc_to_virt(addr) bus_to_virt(le32_to_cpu(addr))
++
++#if (LINUX_VERSION_CODE >= 0x20100) && defined(MODULE)
++char kernel_version[] = UTS_RELEASE;
++#endif
++
++MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
++MODULE_DESCRIPTION("Sundance Alta Ethernet driver");
++MODULE_LICENSE("GPL");
++MODULE_PARM(max_interrupt_work, "i");
++MODULE_PARM(debug, "i");
++MODULE_PARM(rx_copybreak, "i");
++MODULE_PARM(options, "1-" __MODULE_STRING(MAX_UNITS) "i");
++MODULE_PARM(full_duplex, "1-" __MODULE_STRING(MAX_UNITS) "i");
++MODULE_PARM(multicast_filter_limit, "i");
++
++MODULE_PARM_DESC(debug, "Driver message level (0-31)");
++MODULE_PARM_DESC(options, "Force transceiver type or fixed speed+duplex");
++MODULE_PARM_DESC(max_interrupt_work,
++ "Driver maximum events handled per interrupt");
++MODULE_PARM_DESC(full_duplex,
++ "Non-zero to set forced full duplex (deprecated).");
++MODULE_PARM_DESC(rx_copybreak,
++ "Breakpoint in bytes for copy-only-tiny-frames");
++MODULE_PARM_DESC(multicast_filter_limit,
++ "Multicast addresses before switching to Rx-all-multicast");
++
++/*
++ Theory of Operation
++
++I. Board Compatibility
++
++This driver is designed for the Sundance Technologies "Alta" ST201 chip.
++The Kendin KS8723 is the same design with an integrated transceiver and
++new quirks.
++
++II. Board-specific settings
++
++This is an all-in-one chip, so there are no board-specific settings.
++
++III. Driver operation
++
++IIIa. Ring buffers
++
++This driver uses two statically allocated fixed-size descriptor lists
++formed into rings by a branch from the final descriptor to the beginning of
++the list. The ring sizes are set at compile time by RX/TX_RING_SIZE.
++Some chips explicitly use only 2^N sized rings, while others use a
++'next descriptor' pointer that the driver forms into rings.
++
++IIIb/c. Transmit/Receive Structure
++
++This driver uses a zero-copy receive and transmit scheme.
++The driver allocates full frame size skbuffs for the Rx ring buffers at
++open() time and passes the skb->data field to the chip as receive data
++buffers. When an incoming frame is less than RX_COPYBREAK bytes long,
++a fresh skbuff is allocated and the frame is copied to the new skbuff.
++When the incoming frame is larger, the skbuff is passed directly up the
++protocol stack. Buffers consumed this way are replaced by newly allocated
++skbuffs in a later phase of receives.
++
++The RX_COPYBREAK value is chosen to trade-off the memory wasted by
++using a full-sized skbuff for small frames vs. the copying costs of larger
++frames. New boards are typically used in generously configured machines
++and the underfilled buffers have negligible impact compared to the benefit of
++a single allocation size, so the default value of zero results in never
++copying packets. When copying is done, the cost is usually mitigated by using
++a combined copy/checksum routine. Copying also preloads the cache, which is
++most useful with small frames.
++
++A subtle aspect of the operation is that the IP header at offset 14 in an
++ethernet frame isn't longword aligned for further processing.
++Unaligned buffers are permitted by the Sundance hardware, so
++frames are received into the skbuff at an offset of "+2", 16-byte aligning
++the IP header.
++
++IIId. Synchronization
++
++The driver runs as two independent, single-threaded flows of control. One
++is the send-packet routine, which enforces single-threaded use by the
++dev->tbusy flag. The other thread is the interrupt handler, which is single
++threaded by the hardware and interrupt handling software.
++
++The send packet thread has partial control over the Tx ring and 'dev->tbusy'
++flag. It sets the tbusy flag whenever it's queuing a Tx packet. If the next
++queue slot is empty, it clears the tbusy flag when finished otherwise it sets
++the 'lp->tx_full' flag.
++
++The interrupt handler has exclusive control over the Rx ring and records stats
++from the Tx ring. After reaping the stats, it marks the Tx queue entry as
++empty by incrementing the dirty_tx mark. Iff the 'lp->tx_full' flag is set, it
++clears both the tx_full and tbusy flags.
++
++IV. Notes
++
++IVb. References
++
++The Sundance ST201 datasheet, preliminary version.
++The Kendin KS8723 datasheet, preliminary version.
++http://www.scyld.com/expert/100mbps.html
++http://www.scyld.com/expert/NWay.html
++
++IVc. Errata
++
++*/
++
++
++
++/* Work-around for Kendin chip bugs. This will be reversed after tracking
++ down all of the chip access quirks in memory mode. */
++#ifndef USE_MEM_OPS
++#define USE_IO_OPS 1
++#endif
++
++static void *sundance_probe1(struct pci_dev *pdev, void *init_dev,
++ long ioaddr, int irq, int chip_idx, int find_cnt);
++static int sundance_pwr_event(void *dev_instance, int event);
++
++enum chip_capability_flags {CanHaveMII=1, KendinPktDropBug=2, };
++#ifdef USE_IO_OPS
++#define PCI_IOTYPE (PCI_USES_MASTER | PCI_USES_IO | PCI_ADDR0)
++#else
++#define PCI_IOTYPE (PCI_USES_MASTER | PCI_USES_MEM | PCI_ADDR1)
++#endif
++
++static struct pci_id_info pci_id_tbl[] = {
++ {"D-Link DFE-580TX (Kendin/Sundance ST201 Alta)",
++ {0x10021186, 0xffffffff, 0x10121186, 0xffffffff, 0x14, 0xff},
++ PCI_IOTYPE, 128, CanHaveMII|KendinPktDropBug},
++ {"D-Link DFE-580TX (Sundance ST201)",
++ {0x10021186, 0xffffffff, 0x10121186, 0xffffffff, },
++ PCI_IOTYPE, 128, CanHaveMII|KendinPktDropBug},
++ {"D-Link DFE-550FX 100baseFx (Sundance ST201)",
++ {0x10031186, 0xffffffff, },
++ PCI_IOTYPE, 128, CanHaveMII|KendinPktDropBug},
++ {"OEM Sundance Technology ST201", {0x10021186, 0xffffffff, },
++ PCI_IOTYPE, 128, CanHaveMII},
++ {"Sundance Technology Alta", {0x020113F0, 0xffffffff, },
++ PCI_IOTYPE, 128, CanHaveMII},
++ {0,}, /* 0 terminated list. */
++};
++
++struct drv_id_info sundance_drv_id = {
++ "sundance", PCI_HOTSWAP, PCI_CLASS_NETWORK_ETHERNET<<8, pci_id_tbl,
++ sundance_probe1, sundance_pwr_event };
++
++/* This driver was written to use PCI memory space, however x86-oriented
++ hardware often uses I/O space accesses. */
++#ifdef USE_IO_OPS
++#undef readb
++#undef readw
++#undef readl
++#undef writeb
++#undef writew
++#undef writel
++#define readb inb
++#define readw inw
++#define readl inl
++#define writeb outb
++#define writew outw
++#define writel outl
++#endif
++
++/* Offsets to the device registers.
++ Unlike software-only systems, device drivers interact with complex hardware.
++ It's not useful to define symbolic names for every register bit in the
++ device. The name can only partially document the semantics and make
++ the driver longer and more difficult to read.
++ In general, only the important configuration values or bits changed
++ multiple times should be defined symbolically.
++*/
++enum alta_offsets {
++ DMACtrl=0x00, TxListPtr=0x04, TxDMACtrl=0x08, TxDescPoll=0x0a,
++ RxDMAStatus=0x0c, RxListPtr=0x10, RxDMACtrl=0x14, RxDescPoll=0x16,
++ LEDCtrl=0x1a, ASICCtrl=0x30,
++ EEData=0x34, EECtrl=0x36, TxThreshold=0x3c,
++ FlashAddr=0x40, FlashData=0x44, WakeEvent=0x45, TxStatus=0x46,
++ DownCounter=0x48, IntrClear=0x4a, IntrEnable=0x4c, IntrStatus=0x4e,
++ MACCtrl0=0x50, MACCtrl1=0x52, StationAddr=0x54,
++ MaxFrameSize=0x5A, RxMode=0x5c, MIICtrl=0x5e,
++ MulticastFilter0=0x60, MulticastFilter1=0x64,
++ RxOctetsLow=0x68, RxOctetsHigh=0x6a, TxOctetsLow=0x6c, TxOctetsHigh=0x6e,
++ TxFramesOK=0x70, RxFramesOK=0x72, StatsCarrierError=0x74,
++ StatsLateColl=0x75, StatsMultiColl=0x76, StatsOneColl=0x77,
++ StatsTxDefer=0x78, RxMissed=0x79, StatsTxXSDefer=0x7a, StatsTxAbort=0x7b,
++ StatsBcastTx=0x7c, StatsBcastRx=0x7d, StatsMcastTx=0x7e, StatsMcastRx=0x7f,
++ /* Aliased and bogus values! */
++ RxStatus=0x0c,
++};
++
++/* Bits in the interrupt status/mask registers. */
++enum intr_status_bits {
++ IntrSummary=0x0001, IntrPCIErr=0x0002, IntrMACCtrl=0x0008,
++ IntrTxDone=0x0004, IntrRxDone=0x0010, IntrRxStart=0x0020,
++ IntrDrvRqst=0x0040,
++ StatsMax=0x0080, LinkChange=0x0100,
++ IntrTxDMADone=0x0200, IntrRxDMADone=0x0400,
++};
++
++/* Bits in the RxMode register. */
++enum rx_mode_bits {
++ AcceptAllIPMulti=0x20, AcceptMultiHash=0x10, AcceptAll=0x08,
++ AcceptBroadcast=0x04, AcceptMulticast=0x02, AcceptMyPhys=0x01,
++};
++/* Bits in MACCtrl. */
++enum mac_ctrl0_bits {
++ EnbFullDuplex=0x20, EnbRcvLargeFrame=0x40,
++ EnbFlowCtrl=0x100, EnbPassRxCRC=0x200,
++};
++enum mac_ctrl1_bits {
++ StatsEnable=0x0020, StatsDisable=0x0040, StatsEnabled=0x0080,
++ TxEnable=0x0100, TxDisable=0x0200, TxEnabled=0x0400,
++ RxEnable=0x0800, RxDisable=0x1000, RxEnabled=0x2000,
++};
++
++/* The Rx and Tx buffer descriptors.
++ Using only 32 bit fields simplifies software endian correction.
++ This structure must be aligned, and should avoid spanning cache lines.
++*/
++struct netdev_desc {
++ u32 next_desc;
++ u32 status;
++ struct desc_frag { u32 addr, length; } frag[1];
++};
++
++/* Bits in netdev_desc.status */
++enum desc_status_bits {
++ DescOwn=0x8000, DescEndPacket=0x4000, DescEndRing=0x2000,
++ DescTxDMADone=0x10000,
++ LastFrag=0x80000000, DescIntrOnTx=0x8000, DescIntrOnDMADone=0x80000000,
++};
++
++#define PRIV_ALIGN 15 /* Required alignment mask */
++/* Use __attribute__((aligned (L1_CACHE_BYTES))) to maintain alignment
++ within the structure. */
++struct netdev_private {
++ /* Descriptor rings first for alignment. */
++ struct netdev_desc rx_ring[RX_RING_SIZE];
++ struct netdev_desc tx_ring[TX_RING_SIZE];
++ struct net_device *next_module; /* Link for devices of this type. */
++ void *priv_addr; /* Unaligned address for kfree */
++ const char *product_name;
++ /* The addresses of receive-in-place skbuffs. */
++ struct sk_buff* rx_skbuff[RX_RING_SIZE];
++ /* The saved address of a sent-in-place packet/buffer, for later free(). */
++ struct sk_buff* tx_skbuff[TX_RING_SIZE];
++ struct net_device_stats stats;
++ struct timer_list timer; /* Media monitoring timer. */
++ /* Frequently used values: keep some adjacent for cache effect. */
++ int msg_level;
++ int chip_id, drv_flags;
++ struct pci_dev *pci_dev;
++ int max_interrupt_work;
++
++ /* Note: Group variables for cache line effect. */
++ struct netdev_desc *rx_head_desc;
++ unsigned int cur_rx, dirty_rx; /* Producer/consumer ring indices */
++ unsigned int rx_buf_sz; /* Based on MTU+slack. */
++ int rx_copybreak;
++
++ spinlock_t txlock; /* Group with Tx control cache line. */
++ struct netdev_desc *last_tx; /* Last Tx descriptor used. */
++ unsigned int cur_tx, dirty_tx;
++ unsigned int tx_full:1; /* The Tx queue is full. */
++
++ /* These values keep track of the transceiver/media in use. */
++ unsigned int full_duplex:1; /* Full-duplex operation requested. */
++ unsigned int duplex_lock:1;
++ unsigned int medialock:1; /* Do not sense media. */
++ unsigned int default_port; /* Last dev->if_port value. */
++ /* Multicast and receive mode. */
++ spinlock_t mcastlock; /* SMP lock multicast updates. */
++ u16 mcast_filter[4];
++ int multicast_filter_limit;
++
++ /* MII transceiver section. */
++ int mii_cnt; /* MII device addresses. */
++ int link_status;
++ u16 advertising; /* NWay media advertisement */
++ unsigned char phys[2]; /* MII device addresses. */
++};
++
++/* The station address location in the EEPROM. */
++#define EEPROM_SA_OFFSET 0x10
++
++static int eeprom_read(long ioaddr, int location);
++static int mdio_read(struct net_device *dev, int phy_id,
++ unsigned int location);
++static void mdio_write(struct net_device *dev, int phy_id,
++ unsigned int location, int value);
++static int netdev_open(struct net_device *dev);
++static void sundance_start(struct net_device *dev);
++static int change_mtu(struct net_device *dev, int new_mtu);
++static void check_duplex(struct net_device *dev);
++static void netdev_timer(unsigned long data);
++static void tx_timeout(struct net_device *dev);
++static void init_ring(struct net_device *dev);
++static int start_tx(struct sk_buff *skb, struct net_device *dev);
++static void intr_handler(int irq, void *dev_instance, struct pt_regs *regs);
++static void netdev_error(struct net_device *dev, int intr_status);
++static int netdev_rx(struct net_device *dev);
++static void netdev_error(struct net_device *dev, int intr_status);
++static void set_rx_mode(struct net_device *dev);
++static struct net_device_stats *get_stats(struct net_device *dev);
++static int mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
++static int netdev_close(struct net_device *dev);
++
++
++
++/* A list of our installed devices, for removing the driver module. */
++static struct net_device *root_net_dev = NULL;
++
++#ifndef MODULE
++int sundance_probe(struct net_device *dev)
++{
++ if (pci_drv_register(&sundance_drv_id, dev) < 0)
++ return -ENODEV;
++ if (debug >= NETIF_MSG_DRV) /* Emit version even if no cards detected. */
++ printk(KERN_INFO "%s" KERN_INFO "%s", version1, version2);
++ return 0;
++}
++#endif
++
++static void *sundance_probe1(struct pci_dev *pdev, void *init_dev,
++ long ioaddr, int irq, int chip_idx, int card_idx)
++{
++ struct net_device *dev;
++ struct netdev_private *np;
++ void *priv_mem;
++ int i, option = card_idx < MAX_UNITS ? options[card_idx] : 0;
++
++ dev = init_etherdev(init_dev, 0);
++ if (!dev)
++ return NULL;
++
++ /* Perhaps NETIF_MSG_PROBE */
++ printk(KERN_INFO "%s: %s at 0x%lx, ",
++ dev->name, pci_id_tbl[chip_idx].name, ioaddr);
++
++ for (i = 0; i < 3; i++)
++ ((u16 *)dev->dev_addr)[i] =
++ le16_to_cpu(eeprom_read(ioaddr, i + EEPROM_SA_OFFSET));
++ for (i = 0; i < 5; i++)
++ printk("%2.2x:", dev->dev_addr[i]);
++ printk("%2.2x, IRQ %d.\n", dev->dev_addr[i], irq);
++
++ /* Make certain elements e.g. descriptor lists are aligned. */
++ priv_mem = kmalloc(sizeof(*np) + PRIV_ALIGN, GFP_KERNEL);
++ /* Check for the very unlikely case of no memory. */
++ if (priv_mem == NULL)
++ return NULL;
++
++ /* All failure checks before this point.
++ We do a request_region() only to register /proc/ioports info. */
++#ifdef USE_IO_OPS
++ request_region(ioaddr, pci_id_tbl[chip_idx].io_size, dev->name);
++#endif
++
++ dev->base_addr = ioaddr;
++ dev->irq = irq;
++
++ dev->priv = np = (void *)(((long)priv_mem + PRIV_ALIGN) & ~PRIV_ALIGN);
++ memset(np, 0, sizeof(*np));
++ np->priv_addr = priv_mem;
++
++ np->next_module = root_net_dev;
++ root_net_dev = dev;
++
++ np->pci_dev = pdev;
++ np->chip_id = chip_idx;
++ np->drv_flags = pci_id_tbl[chip_idx].drv_flags;
++ np->msg_level = (1 << debug) - 1;
++ np->rx_copybreak = rx_copybreak;
++ np->max_interrupt_work = max_interrupt_work;
++ np->multicast_filter_limit = multicast_filter_limit;
++
++ if (dev->mem_start)
++ option = dev->mem_start;
++
++ if (card_idx < MAX_UNITS && full_duplex[card_idx] > 0)
++ np->full_duplex = 1;
++
++ if (np->full_duplex)
++ np->medialock = 1;
++
++ /* The chip-specific entries in the device structure. */
++ dev->open = &netdev_open;
++ dev->hard_start_xmit = &start_tx;
++ dev->stop = &netdev_close;
++ dev->get_stats = &get_stats;
++ dev->set_multicast_list = &set_rx_mode;
++ dev->do_ioctl = &mii_ioctl;
++ dev->change_mtu = &change_mtu;
++
++ if (1) {
++ int phy, phy_idx = 0;
++ np->phys[0] = 1; /* Default setting */
++ mii_preamble_required++;
++ for (phy = 1; phy < 32 && phy_idx < 4; phy++) {
++ int mii_status = mdio_read(dev, phy, 1);
++ if (mii_status != 0xffff && mii_status != 0x0000) {
++ np->phys[phy_idx++] = phy;
++ np->advertising = mdio_read(dev, phy, 4);
++ if ((mii_status & 0x0040) == 0)
++ mii_preamble_required++;
++ if (np->msg_level & NETIF_MSG_PROBE)
++ printk(KERN_INFO "%s: MII PHY found at address %d, status "
++ "0x%4.4x advertising %4.4x.\n",
++ dev->name, phy, mii_status, np->advertising);
++ }
++ }
++ mii_preamble_required--;
++ np->mii_cnt = phy_idx;
++ if (phy_idx == 0)
++ printk(KERN_INFO "%s: No MII transceiver found!, ASIC status %x\n",
++ dev->name, (int)readl(ioaddr + ASICCtrl));
++ }
++
++ /* Allow forcing the media type. */
++ if (option > 0) {
++ if (option & 0x220)
++ np->full_duplex = 1;
++ np->default_port = option & 0x3ff;
++ if (np->default_port & 0x330) {
++ np->medialock = 1;
++ if (np->msg_level & NETIF_MSG_PROBE)
++ printk(KERN_INFO " Forcing %dMbs %s-duplex operation.\n",
++ (option & 0x300 ? 100 : 10),
++ (np->full_duplex ? "full" : "half"));
++ if (np->mii_cnt)
++ mdio_write(dev, np->phys[0], 0,
++ ((option & 0x300) ? 0x2000 : 0) | /* 100mbps? */
++ (np->full_duplex ? 0x0100 : 0)); /* Full duplex? */
++ }
++ }
++
++ /* Reset the chip to erase previous misconfiguration. */
++ if (np->msg_level & NETIF_MSG_MISC)
++ printk("ASIC Control is %x.\n", (int)readl(ioaddr + ASICCtrl));
++ writel(0x007f0000 | readl(ioaddr + ASICCtrl), ioaddr + ASICCtrl);
++ if (np->msg_level & NETIF_MSG_MISC)
++ printk("ASIC Control is now %x.\n", (int)readl(ioaddr + ASICCtrl));
++
++ return dev;
++}
++
++
++
++static int change_mtu(struct net_device *dev, int new_mtu)
++{
++ if ((new_mtu < 68) || (new_mtu > 8191)) /* Limited by RxDMAFrameLen */
++ return -EINVAL;
++ if (netif_running(dev))
++ return -EBUSY;
++ dev->mtu = new_mtu;
++ return 0;
++}
++
++/* Read the EEPROM and MII Management Data I/O (MDIO) interfaces. */
++static int eeprom_read(long ioaddr, int location)
++{
++ int boguscnt = 2000; /* Typical 190 ticks. */
++ writew(0x0200 | (location & 0xff), ioaddr + EECtrl);
++ do {
++ if (! (readw(ioaddr + EECtrl) & 0x8000)) {
++ return readw(ioaddr + EEData);
++ }
++ } while (--boguscnt > 0);
++ return 0;
++}
++
++/* MII transceiver control section.
++ Read and write the MII registers using software-generated serial
++ MDIO protocol. See the MII specifications or DP83840A data sheet
++ for details.
++
++ The maximum data clock rate is 2.5 Mhz.
++ The timing is decoupled from the processor clock by flushing the write
++ from the CPU write buffer with a following read, and using PCI
++ transaction time. */
++#define mdio_in(mdio_addr) readb(mdio_addr)
++#define mdio_out(value, mdio_addr) writeb(value, mdio_addr)
++#define mdio_delay(mdio_addr) readb(mdio_addr)
++
++enum mii_reg_bits {
++ MDIO_ShiftClk=0x0001, MDIO_Data=0x0002, MDIO_EnbOutput=0x0004,
++};
++#define MDIO_EnbIn (0)
++#define MDIO_WRITE0 (MDIO_EnbOutput)
++#define MDIO_WRITE1 (MDIO_Data | MDIO_EnbOutput)
++
++/* Generate the preamble required for initial synchronization and
++ a few older transceivers. */
++static void mdio_sync(long mdio_addr)
++{
++ int bits = 32;
++
++ /* Establish sync by sending at least 32 logic ones. */
++ while (--bits >= 0) {
++ mdio_out(MDIO_WRITE1, mdio_addr);
++ mdio_delay(mdio_addr);
++ mdio_out(MDIO_WRITE1 | MDIO_ShiftClk, mdio_addr);
++ mdio_delay(mdio_addr);
++ }
++}
++
++static int mdio_read(struct net_device *dev, int phy_id, unsigned int location)
++{
++ long mdio_addr = dev->base_addr + MIICtrl;
++ int mii_cmd = (0xf6 << 10) | (phy_id << 5) | location;
++ int i, retval = 0;
++
++ if (mii_preamble_required)
++ mdio_sync(mdio_addr);
++
++ /* Shift the read command bits out. */
++ for (i = 15; i >= 0; i--) {
++ int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;
++
++ mdio_out(dataval, mdio_addr);
++ mdio_delay(mdio_addr);
++ mdio_out(dataval | MDIO_ShiftClk, mdio_addr);
++ mdio_delay(mdio_addr);
++ }
++ /* Read the two transition, 16 data, and wire-idle bits. */
++ for (i = 19; i > 0; i--) {
++ mdio_out(MDIO_EnbIn, mdio_addr);
++ mdio_delay(mdio_addr);
++ retval = (retval << 1) | ((mdio_in(mdio_addr) & MDIO_Data) ? 1 : 0);
++ mdio_out(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
++ mdio_delay(mdio_addr);
++ }
++ return (retval>>1) & 0xffff;
++}
++
++static void mdio_write(struct net_device *dev, int phy_id,
++ unsigned int location, int value)
++{
++ long mdio_addr = dev->base_addr + MIICtrl;
++ int mii_cmd = (0x5002 << 16) | (phy_id << 23) | (location<<18) | value;
++ int i;
++
++ if (mii_preamble_required)
++ mdio_sync(mdio_addr);
++
++ /* Shift the command bits out. */
++ for (i = 31; i >= 0; i--) {
++ int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;
++
++ mdio_out(dataval, mdio_addr);
++ mdio_delay(mdio_addr);
++ mdio_out(dataval | MDIO_ShiftClk, mdio_addr);
++ mdio_delay(mdio_addr);
++ }
++ /* Clear out extra bits. */
++ for (i = 2; i > 0; i--) {
++ mdio_out(MDIO_EnbIn, mdio_addr);
++ mdio_delay(mdio_addr);
++ mdio_out(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
++ mdio_delay(mdio_addr);
++ }
++ return;
++}
++
++
++static int netdev_open(struct net_device *dev)
++{
++ struct netdev_private *np = (struct netdev_private *)dev->priv;
++ long ioaddr = dev->base_addr;
++
++ MOD_INC_USE_COUNT;
++
++ if (request_irq(dev->irq, &intr_handler, SA_SHIRQ, dev->name, dev)) {
++ MOD_DEC_USE_COUNT;
++ return -EAGAIN;
++ }
++
++ if (np->msg_level & NETIF_MSG_IFUP)
++ printk(KERN_DEBUG "%s: netdev_open() irq %d.\n",
++ dev->name, dev->irq);
++
++ init_ring(dev);
++
++ if (dev->if_port == 0)
++ dev->if_port = np->default_port;
++
++ np->full_duplex = np->duplex_lock;
++ np->mcastlock = (spinlock_t) SPIN_LOCK_UNLOCKED;
++
++ sundance_start(dev);
++ netif_start_tx_queue(dev);
++
++ if (np->msg_level & NETIF_MSG_IFUP)
++ printk(KERN_DEBUG "%s: Done netdev_open(), status: Rx %x Tx %x "
++ "MAC Control %x, %4.4x %4.4x.\n",
++ dev->name, (int)readl(ioaddr + RxStatus),
++ (int)readw(ioaddr + TxStatus), (int)readl(ioaddr + MACCtrl0),
++ (int)readw(ioaddr + MACCtrl1), (int)readw(ioaddr + MACCtrl0));
++
++ /* Set the timer to check for link beat. */
++ init_timer(&np->timer);
++ np->timer.expires = jiffies + 3*HZ;
++ np->timer.data = (unsigned long)dev;
++ np->timer.function = &netdev_timer; /* timer handler */
++ add_timer(&np->timer);
++
++ return 0;
++}
++
++static void sundance_start(struct net_device *dev)
++{
++ struct netdev_private *np = (struct netdev_private *)dev->priv;
++ long ioaddr = dev->base_addr;
++ int i;
++
++ /* No reports have indicated that we need to reset the chip. */
++
++ writel(virt_to_bus(&np->rx_ring[np->cur_rx % RX_RING_SIZE]),
++ ioaddr + RxListPtr);
++ /* The Tx list pointer is written as packets are queued. */
++
++ /* Station address must be written as 16 bit words with the Kendin chip. */
++ for (i = 0; i < 6; i += 2)
++ writew((dev->dev_addr[i + 1] << 8) + dev->dev_addr[i],
++ ioaddr + StationAddr + i);
++
++ np->link_status = readb(ioaddr + MIICtrl) & 0xE0;
++ writew((np->full_duplex || (np->link_status & 0x20)) ? 0x120 : 0,
++ ioaddr + MACCtrl0);
++ writew(dev->mtu + 14, ioaddr + MaxFrameSize);
++ if (dev->mtu > 2047)
++ writel(readl(ioaddr + ASICCtrl) | 0x0C, ioaddr + ASICCtrl);
++
++ set_rx_mode(dev);
++ writew(0, ioaddr + DownCounter);
++ /* Set the chip to poll every N*320nsec. */
++ writeb(100, ioaddr + RxDescPoll);
++ writeb(127, ioaddr + TxDescPoll);
++#if 0
++ if (np->drv_flags & KendinPktDropBug)
++ writeb(0x01, ioaddr + DebugCtrl1);
++#endif
++
++ /* Enable interrupts by setting the interrupt mask. */
++ writew(IntrRxDMADone | IntrPCIErr | IntrDrvRqst | IntrTxDone
++ | StatsMax | LinkChange, ioaddr + IntrEnable);
++ writew(StatsEnable | RxEnable | TxEnable, ioaddr + MACCtrl1);
++}
++
++static void check_duplex(struct net_device *dev)
++{
++ struct netdev_private *np = (struct netdev_private *)dev->priv;
++ long ioaddr = dev->base_addr;
++ int mii_reg5 = mdio_read(dev, np->phys[0], 5);
++ int negotiated = mii_reg5 & np->advertising;
++ int duplex;
++
++ if (np->duplex_lock || mii_reg5 == 0xffff)
++ return;
++ duplex = (negotiated & 0x0100) || (negotiated & 0x01C0) == 0x0040;
++ if (np->full_duplex != duplex) {
++ np->full_duplex = duplex;
++ if (np->msg_level & NETIF_MSG_LINK)
++ printk(KERN_INFO "%s: Setting %s-duplex based on MII #%d "
++ "negotiated capability %4.4x.\n", dev->name,
++ duplex ? "full" : "half", np->phys[0], negotiated);
++ writew(duplex ? 0x20 : 0, ioaddr + MACCtrl0);
++ }
++}
++
++static void netdev_timer(unsigned long data)
++{
++ struct net_device *dev = (struct net_device *)data;
++ struct netdev_private *np = (struct netdev_private *)dev->priv;
++ long ioaddr = dev->base_addr;
++ int next_tick = 10*HZ;
++
++ if (np->msg_level & NETIF_MSG_TIMER) {
++ printk(KERN_DEBUG "%s: Media selection timer tick, intr status %4.4x, "
++ "Tx %x Rx %x.\n",
++ dev->name, (int)readw(ioaddr + IntrEnable),
++ (int)readw(ioaddr + TxStatus), (int)readl(ioaddr + RxStatus));
++ }
++ /* Note: This does not catch a 0 or 1 element stuck queue. */
++ if (netif_queue_paused(dev) &&
++ np->cur_tx - np->dirty_tx > 1 &&
++ (jiffies - dev->trans_start) > TX_TIMEOUT) {
++ tx_timeout(dev);
++ }
++ check_duplex(dev);
++ np->timer.expires = jiffies + next_tick;
++ add_timer(&np->timer);
++}
++
++static void tx_timeout(struct net_device *dev)
++{
++ struct netdev_private *np = (struct netdev_private *)dev->priv;
++ long ioaddr = dev->base_addr;
++
++ printk(KERN_WARNING "%s: Transmit timed out, status %4.4x,"
++ " resetting...\n", dev->name, (int)readw(ioaddr + TxStatus));
++
++#ifdef __i386__
++ if (np->msg_level & NETIF_MSG_TX_ERR) {
++ int i;
++ printk(KERN_DEBUG " Rx ring %8.8x: ", (int)np->rx_ring);
++ for (i = 0; i < RX_RING_SIZE; i++)
++ printk(" %8.8x", (unsigned int)np->rx_ring[i].status);
++ printk("\n"KERN_DEBUG" Tx ring %8.8x: ", (int)np->tx_ring);
++ for (i = 0; i < TX_RING_SIZE; i++)
++ printk(" %8.8x", np->tx_ring[i].status);
++ printk("\n");
++ }
++#endif
++
++ /* Perhaps we should reinitialize the hardware here. */
++ dev->if_port = 0;
++ /* Stop and restart the chip's Tx processes . */
++
++ /* Trigger an immediate transmit demand. */
++ writew(IntrRxDMADone | IntrPCIErr | IntrDrvRqst | IntrTxDone
++ | StatsMax | LinkChange, ioaddr + IntrEnable);
++
++ dev->trans_start = jiffies;
++ np->stats.tx_errors++;
++ return;
++}
++
++
++/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
++static void init_ring(struct net_device *dev)
++{
++ struct netdev_private *np = (struct netdev_private *)dev->priv;
++ int i;
++
++ np->tx_full = 0;
++ np->cur_rx = np->cur_tx = 0;
++ np->dirty_rx = np->dirty_tx = 0;
++
++ np->rx_buf_sz = dev->mtu + 20;
++ if (np->rx_buf_sz < PKT_BUF_SZ)
++ np->rx_buf_sz = PKT_BUF_SZ;
++ np->rx_head_desc = &np->rx_ring[0];
++
++ /* Initialize all Rx descriptors. */
++ for (i = 0; i < RX_RING_SIZE; i++) {
++ np->rx_ring[i].next_desc = virt_to_le32desc(&np->rx_ring[i+1]);
++ np->rx_ring[i].status = 0;
++ np->rx_ring[i].frag[0].length = 0;
++ np->rx_skbuff[i] = 0;
++ }
++ /* Wrap the ring. */
++ np->rx_ring[i-1].next_desc = virt_to_le32desc(&np->rx_ring[0]);
++
++ /* Fill in the Rx buffers. Handle allocation failure gracefully. */
++ for (i = 0; i < RX_RING_SIZE; i++) {
++ struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz);
++ np->rx_skbuff[i] = skb;
++ if (skb == NULL)
++ break;
++ skb->dev = dev; /* Mark as being used by this device. */
++ skb_reserve(skb, 2); /* 16 byte align the IP header. */
++ np->rx_ring[i].frag[0].addr = virt_to_le32desc(skb->tail);
++ np->rx_ring[i].frag[0].length = cpu_to_le32(np->rx_buf_sz | LastFrag);
++ }
++ np->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
++
++ for (i = 0; i < TX_RING_SIZE; i++) {
++ np->tx_skbuff[i] = 0;
++ np->tx_ring[i].status = 0;
++ }
++ return;
++}
++
++static int start_tx(struct sk_buff *skb, struct net_device *dev)
++{
++ struct netdev_private *np = (struct netdev_private *)dev->priv;
++ struct netdev_desc *txdesc;
++ unsigned entry;
++
++ /* Block a timer-based transmit from overlapping. */
++ if (netif_pause_tx_queue(dev) != 0) {
++ /* This watchdog code is redundant with the media monitor timer. */
++ if (jiffies - dev->trans_start > TX_TIMEOUT)
++ tx_timeout(dev);
++ return 1;
++ }
++
++ /* Note: Ordering is important here, set the field with the
++ "ownership" bit last, and only then increment cur_tx. */
++
++ /* Calculate the next Tx descriptor entry. */
++ entry = np->cur_tx % TX_RING_SIZE;
++ np->tx_skbuff[entry] = skb;
++ txdesc = &np->tx_ring[entry];
++
++ txdesc->next_desc = 0;
++ /* Note: disable the interrupt generation here before releasing. */
++ txdesc->status =
++ cpu_to_le32((entry<<2) | DescIntrOnDMADone | DescIntrOnTx | 1);
++ txdesc->frag[0].addr = virt_to_le32desc(skb->data);
++ txdesc->frag[0].length = cpu_to_le32(skb->len | LastFrag);
++ if (np->last_tx)
++ np->last_tx->next_desc = virt_to_le32desc(txdesc);
++ np->last_tx = txdesc;
++ np->cur_tx++;
++
++ /* On some architectures: explicitly flush cache lines here. */
++
++ if (np->cur_tx - np->dirty_tx >= TX_QUEUE_LEN - 1) {
++ np->tx_full = 1;
++ /* Check for a just-cleared queue. */
++ if (np->cur_tx - (volatile unsigned int)np->dirty_tx
++ < TX_QUEUE_LEN - 2) {
++ np->tx_full = 0;
++ netif_unpause_tx_queue(dev);
++ } else
++ netif_stop_tx_queue(dev);
++ } else
++ netif_unpause_tx_queue(dev); /* Typical path */
++
++ /* Side effect: The read wakes the potentially-idle transmit channel. */
++ if (readl(dev->base_addr + TxListPtr) == 0)
++ writel(virt_to_bus(&np->tx_ring[entry]), dev->base_addr + TxListPtr);
++
++ dev->trans_start = jiffies;
++
++ if (np->msg_level & NETIF_MSG_TX_QUEUED) {
++ printk(KERN_DEBUG "%s: Transmit frame #%d len %d queued in slot %d.\n",
++ dev->name, np->cur_tx, skb->len, entry);
++ }
++ return 0;
++}
++
++/* The interrupt handler does all of the Rx thread work and cleans up
++ after the Tx thread. */
++static void intr_handler(int irq, void *dev_instance, struct pt_regs *rgs)
++{
++ struct net_device *dev = (struct net_device *)dev_instance;
++ struct netdev_private *np;
++ long ioaddr;
++ int boguscnt;
++
++ ioaddr = dev->base_addr;
++ np = (struct netdev_private *)dev->priv;
++ boguscnt = np->max_interrupt_work;
++
++ do {
++ int intr_status = readw(ioaddr + IntrStatus);
++ if ((intr_status & ~IntrRxDone) == 0 || intr_status == 0xffff)
++ break;
++
++ writew(intr_status & (IntrRxDMADone | IntrPCIErr |
++ IntrDrvRqst |IntrTxDone|IntrTxDMADone |
++ StatsMax | LinkChange),
++ ioaddr + IntrStatus);
++
++ if (np->msg_level & NETIF_MSG_INTR)
++ printk(KERN_DEBUG "%s: Interrupt, status %4.4x.\n",
++ dev->name, intr_status);
++
++ if (intr_status & IntrRxDMADone)
++ netdev_rx(dev);
++
++ if (intr_status & IntrTxDone) {
++ int txboguscnt = 32;
++ int tx_status = readw(ioaddr + TxStatus);
++ while (tx_status & 0x80) {
++ if (np->msg_level & NETIF_MSG_TX_DONE)
++ printk("%s: Transmit status is %4.4x.\n",
++ dev->name, tx_status);
++ if (tx_status & 0x1e) {
++ if (np->msg_level & NETIF_MSG_TX_ERR)
++ printk("%s: Transmit error status %4.4x.\n",
++ dev->name, tx_status);
++ np->stats.tx_errors++;
++ if (tx_status & 0x10) np->stats.tx_fifo_errors++;
++#ifdef ETHER_STATS
++ if (tx_status & 0x08) np->stats.collisions16++;
++#else
++ if (tx_status & 0x08) np->stats.collisions++;
++#endif
++ if (tx_status & 0x04) np->stats.tx_fifo_errors++;
++ if (tx_status & 0x02) np->stats.tx_window_errors++;
++ /* This reset has not been verified!. */
++ if (tx_status & 0x10) { /* Reset the Tx. */
++ writel(0x001c0000 | readl(ioaddr + ASICCtrl),
++ ioaddr + ASICCtrl);
++#if 0 /* Do we need to reset the Tx pointer here? */
++ writel(virt_to_bus(&np->tx_ring[np->dirty_tx]),
++ dev->base_addr + TxListPtr);
++#endif
++ }
++ if (tx_status & 0x1e) /* Restart the Tx. */
++ writew(TxEnable, ioaddr + MACCtrl1);
++ }
++ /* Yup, this is a documentation bug. It cost me *hours*. */
++ writew(0, ioaddr + TxStatus);
++ if (--txboguscnt < 0)
++ break;
++ tx_status = readw(ioaddr + TxStatus);
++ }
++ }
++ for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
++ int entry = np->dirty_tx % TX_RING_SIZE;
++ if ( ! (np->tx_ring[entry].status & cpu_to_le32(DescTxDMADone)))
++ break;
++ /* Free the original skb. */
++ dev_free_skb_irq(np->tx_skbuff[entry]);
++ np->tx_skbuff[entry] = 0;
++ }
++ if (np->tx_full && np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
++ /* The ring is no longer full, allow new TX entries. */
++ np->tx_full = 0;
++ netif_resume_tx_queue(dev);
++ }
++
++ /* Abnormal error summary/uncommon events handlers. */
++ if (intr_status & (IntrDrvRqst | IntrPCIErr | LinkChange | StatsMax))
++ netdev_error(dev, intr_status);
++
++ if (--boguscnt < 0) {
++ int intr_clear = readw(ioaddr + IntrClear);
++ get_stats(dev);
++ printk(KERN_WARNING "%s: Too much work at interrupt, "
++ "status=0x%4.4x / 0x%4.4x .. 0x%4.4x.\n",
++ dev->name, intr_status, intr_clear,
++ (int)readw(ioaddr + IntrClear));
++ /* Re-enable us in 3.2msec. */
++ writew(1000, ioaddr + DownCounter);
++ writew(IntrDrvRqst, ioaddr + IntrEnable);
++ break;
++ }
++ } while (1);
++
++ if (np->msg_level & NETIF_MSG_INTR)
++ printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
++ dev->name, (int)readw(ioaddr + IntrStatus));
++
++ return;
++}
++
++/* This routine is logically part of the interrupt handler, but separated
++ for clarity and better register allocation. */
++static int netdev_rx(struct net_device *dev)
++{
++ struct netdev_private *np = (struct netdev_private *)dev->priv;
++ int entry = np->cur_rx % RX_RING_SIZE;
++ int boguscnt = np->dirty_rx + RX_RING_SIZE - np->cur_rx;
++
++ if (np->msg_level & NETIF_MSG_RX_STATUS) {
++ printk(KERN_DEBUG " In netdev_rx(), entry %d status %4.4x.\n",
++ entry, np->rx_ring[entry].status);
++ }
++
++ /* If EOP is set on the next entry, it's a new packet. Send it up. */
++ while (np->rx_head_desc->status & cpu_to_le32(DescOwn)) {
++ struct netdev_desc *desc = np->rx_head_desc;
++ u32 frame_status = le32_to_cpu(desc->status);
++ int pkt_len = frame_status & 0x1fff; /* Chip omits the CRC. */
++
++ if (np->msg_level & NETIF_MSG_RX_STATUS)
++ printk(KERN_DEBUG " netdev_rx() status was %8.8x.\n",
++ frame_status);
++ if (--boguscnt < 0)
++ break;
++ if (frame_status & 0x001f4000) {
++ /* There was a error. */
++ if (np->msg_level & NETIF_MSG_RX_ERR)
++ printk(KERN_DEBUG " netdev_rx() Rx error was %8.8x.\n",
++ frame_status);
++ np->stats.rx_errors++;
++ if (frame_status & 0x00100000) np->stats.rx_length_errors++;
++ if (frame_status & 0x00010000) np->stats.rx_fifo_errors++;
++ if (frame_status & 0x00060000) np->stats.rx_frame_errors++;
++ if (frame_status & 0x00080000) np->stats.rx_crc_errors++;
++ if (frame_status & 0x00100000) {
++ printk(KERN_WARNING "%s: Oversized Ethernet frame,"
++ " status %8.8x.\n",
++ dev->name, frame_status);
++ }
++ } else {
++ struct sk_buff *skb;
++
++#ifndef final_version
++ if (np->msg_level & NETIF_MSG_RX_STATUS)
++ printk(KERN_DEBUG " netdev_rx() normal Rx pkt length %d"
++ ", bogus_cnt %d.\n",
++ pkt_len, boguscnt);
++#endif
++ /* Check if the packet is long enough to accept without copying
++ to a minimally-sized skbuff. */
++ if (pkt_len < np->rx_copybreak
++ && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
++ skb->dev = dev;
++ skb_reserve(skb, 2); /* 16 byte align the IP header */
++ eth_copy_and_sum(skb, np->rx_skbuff[entry]->tail, pkt_len, 0);
++ skb_put(skb, pkt_len);
++ } else {
++ skb_put(skb = np->rx_skbuff[entry], pkt_len);
++ np->rx_skbuff[entry] = NULL;
++ }
++ skb->protocol = eth_type_trans(skb, dev);
++ /* Note: checksum -> skb->ip_summed = CHECKSUM_UNNECESSARY; */
++ netif_rx(skb);
++ dev->last_rx = jiffies;
++ }
++ entry = (++np->cur_rx) % RX_RING_SIZE;
++ np->rx_head_desc = &np->rx_ring[entry];
++ }
++
++ /* Refill the Rx ring buffers. */
++ for (; np->cur_rx - np->dirty_rx > 0; np->dirty_rx++) {
++ struct sk_buff *skb;
++ entry = np->dirty_rx % RX_RING_SIZE;
++ if (np->rx_skbuff[entry] == NULL) {
++ skb = dev_alloc_skb(np->rx_buf_sz);
++ np->rx_skbuff[entry] = skb;
++ if (skb == NULL)
++ break; /* Better luck next round. */
++ skb->dev = dev; /* Mark as being used by this device. */
++ skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
++ np->rx_ring[entry].frag[0].addr = virt_to_le32desc(skb->tail);
++ }
++ /* Perhaps we need not reset this field. */
++ np->rx_ring[entry].frag[0].length =
++ cpu_to_le32(np->rx_buf_sz | LastFrag);
++ np->rx_ring[entry].status = 0;
++ }
++
++ /* No need to restart Rx engine, it will poll. */
++ return 0;
++}
++
++static void netdev_error(struct net_device *dev, int intr_status)
++{
++ long ioaddr = dev->base_addr;
++ struct netdev_private *np = (struct netdev_private *)dev->priv;
++
++ if (intr_status & IntrDrvRqst) {
++ /* Stop the down counter and turn interrupts back on. */
++ printk(KERN_WARNING "%s: Turning interrupts back on.\n", dev->name);
++ writew(0, ioaddr + DownCounter);
++ writew(IntrRxDMADone | IntrPCIErr | IntrDrvRqst |
++ IntrTxDone | StatsMax | LinkChange, ioaddr + IntrEnable);
++ }
++ if (intr_status & LinkChange) {
++ int new_status = readb(ioaddr + MIICtrl) & 0xE0;
++ if (np->msg_level & NETIF_MSG_LINK)
++ printk(KERN_NOTICE "%s: Link changed: Autonegotiation advertising"
++ " %4.4x partner %4.4x.\n", dev->name,
++ mdio_read(dev, np->phys[0], 4),
++ mdio_read(dev, np->phys[0], 5));
++ if ((np->link_status ^ new_status) & 0x80) {
++ if (new_status & 0x80)
++ netif_link_up(dev);
++ else
++ netif_link_down(dev);
++ }
++ np->link_status = new_status;
++ check_duplex(dev);
++ }
++ if (intr_status & StatsMax) {
++ get_stats(dev);
++ }
++ if (intr_status & IntrPCIErr) {
++ printk(KERN_ERR "%s: Something Wicked happened! %4.4x.\n",
++ dev->name, intr_status);
++ /* We must do a global reset of DMA to continue. */
++ }
++}
++
++static struct net_device_stats *get_stats(struct net_device *dev)
++{
++ long ioaddr = dev->base_addr;
++ struct netdev_private *np = (struct netdev_private *)dev->priv;
++ int i;
++
++ if (readw(ioaddr + StationAddr) == 0xffff)
++ return &np->stats;
++
++ /* We do not spinlock statistics.
++ A window only exists if we have non-atomic adds, the error counts
++ are typically zero, and statistics are non-critical. */
++ np->stats.rx_missed_errors += readb(ioaddr + RxMissed);
++ np->stats.tx_packets += readw(ioaddr + TxFramesOK);
++ np->stats.rx_packets += readw(ioaddr + RxFramesOK);
++ np->stats.collisions += readb(ioaddr + StatsLateColl);
++ np->stats.collisions += readb(ioaddr + StatsMultiColl);
++ np->stats.collisions += readb(ioaddr + StatsOneColl);
++ readb(ioaddr + StatsCarrierError);
++ readb(ioaddr + StatsTxDefer);
++ for (i = StatsTxXSDefer; i <= StatsMcastRx; i++)
++ readb(ioaddr + i);
++#if LINUX_VERSION_CODE > 0x20127
++ np->stats.tx_bytes += readw(ioaddr + TxOctetsLow);
++ np->stats.tx_bytes += readw(ioaddr + TxOctetsHigh) << 16;
++ np->stats.rx_bytes += readw(ioaddr + RxOctetsLow);
++ np->stats.rx_bytes += readw(ioaddr + RxOctetsHigh) << 16;
++#else
++ readw(ioaddr + TxOctetsLow);
++ readw(ioaddr + TxOctetsHigh);
++ readw(ioaddr + RxOctetsLow);
++ readw(ioaddr + RxOctetsHigh);
++#endif
++
++ return &np->stats;
++}
++
++/* The little-endian AUTODIN II ethernet CRC calculations.
++ A big-endian version is also available.
++ This is slow but compact code. Do not use this routine for bulk data,
++ use a table-based routine instead.
++ This is common code and should be moved to net/core/crc.c.
++ Chips may use the upper or lower CRC bits, and may reverse and/or invert
++ them. Select the endian-ness that results in minimal calculations.
++*/
++static unsigned const ethernet_polynomial_le = 0xedb88320U;
++static inline unsigned ether_crc_le(int length, unsigned char *data)
++{
++ unsigned int crc = ~0; /* Initial value. */
++ while(--length >= 0) {
++ unsigned char current_octet = *data++;
++ int bit;
++ for (bit = 8; --bit >= 0; current_octet >>= 1) {
++ if ((crc ^ current_octet) & 1) {
++ crc >>= 1;
++ crc ^= ethernet_polynomial_le;
++ } else
++ crc >>= 1;
++ }
++ }
++ return crc;
++}
++
++static void set_rx_mode(struct net_device *dev)
++{
++ struct netdev_private *np = (struct netdev_private *)dev->priv;
++ long ioaddr = dev->base_addr;
++ u16 mc_filter[4]; /* Multicast hash filter */
++ u32 rx_mode;
++ int i;
++
++ if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
++ /* Unconditionally log net taps. */
++ printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n", dev->name);
++ memset(mc_filter, ~0, sizeof(mc_filter));
++ rx_mode = AcceptBroadcast | AcceptMulticast | AcceptAll | AcceptMyPhys;
++ } else if ((dev->mc_count > np->multicast_filter_limit)
++ || (dev->flags & IFF_ALLMULTI)) {
++ /* Too many to match, or accept all multicasts. */
++ memset(mc_filter, 0xff, sizeof(mc_filter));
++ rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
++ } else if (dev->mc_count) {
++ struct dev_mc_list *mclist;
++ memset(mc_filter, 0, sizeof(mc_filter));
++ for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
++ i++, mclist = mclist->next) {
++ set_bit(ether_crc_le(ETH_ALEN, mclist->dmi_addr) & 0x3f,
++ mc_filter);
++ }
++ rx_mode = AcceptBroadcast | AcceptMultiHash | AcceptMyPhys;
++ } else {
++ writeb(AcceptBroadcast | AcceptMyPhys, ioaddr + RxMode);
++ return;
++ }
++ for (i = 0; i < 4; i++)
++ writew(mc_filter[i], ioaddr + MulticastFilter0 + i*2);
++ writeb(rx_mode, ioaddr + RxMode);
++}
++
++static int mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
++{
++ struct netdev_private *np = (struct netdev_private *)dev->priv;
++ u16 *data = (u16 *)&rq->ifr_data;
++ u32 *data32 = (void *)&rq->ifr_data;
++
++ switch(cmd) {
++ case 0x8947: case 0x89F0:
++ /* SIOCGMIIPHY: Get the address of the PHY in use. */
++ data[0] = np->phys[0] & 0x1f;
++ /* Fall Through */
++ case 0x8948: case 0x89F1:
++ /* SIOCGMIIREG: Read the specified MII register. */
++ data[3] = mdio_read(dev, data[0] & 0x1f, data[1] & 0x1f);
++ return 0;
++ case 0x8949: case 0x89F2:
++ /* SIOCSMIIREG: Write the specified MII register */
++ if (!capable(CAP_NET_ADMIN))
++ return -EPERM;
++ if (data[0] == np->phys[0]) {
++ u16 value = data[2];
++ switch (data[1]) {
++ case 0:
++ /* Check for autonegotiation on or reset. */
++ np->medialock = (value & 0x9000) ? 0 : 1;
++ if (np->medialock)
++ np->full_duplex = (value & 0x0100) ? 1 : 0;
++ break;
++ case 4: np->advertising = value; break;
++ }
++ /* Perhaps check_duplex(dev), depending on chip semantics. */
++ }
++ mdio_write(dev, data[0] & 0x1f, data[1] & 0x1f, data[2]);
++ return 0;
++ case SIOCGPARAMS:
++ data32[0] = np->msg_level;
++ data32[1] = np->multicast_filter_limit;
++ data32[2] = np->max_interrupt_work;
++ data32[3] = np->rx_copybreak;
++ return 0;
++ case SIOCSPARAMS:
++ if (!capable(CAP_NET_ADMIN))
++ return -EPERM;
++ np->msg_level = data32[0];
++ np->multicast_filter_limit = data32[1];
++ np->max_interrupt_work = data32[2];
++ np->rx_copybreak = data32[3];
++ return 0;
++ default:
++ return -EOPNOTSUPP;
++ }
++}
++
++static int sundance_pwr_event(void *dev_instance, int event)
++{
++ struct net_device *dev = dev_instance;
++ struct netdev_private *np = (struct netdev_private *)dev->priv;
++ long ioaddr = dev->base_addr;
++
++ if (np->msg_level & NETIF_MSG_LINK)
++ printk(KERN_DEBUG "%s: Handling power event %d.\n", dev->name, event);
++ switch(event) {
++ case DRV_ATTACH:
++ MOD_INC_USE_COUNT;
++ break;
++ case DRV_SUSPEND:
++ /* Disable interrupts, stop Tx and Rx. */
++ writew(0x0000, ioaddr + IntrEnable);
++ writew(TxDisable | RxDisable | StatsDisable, ioaddr + MACCtrl1);
++ break;
++ case DRV_RESUME:
++ sundance_start(dev);
++ break;
++ case DRV_DETACH: {
++ struct net_device **devp, **next;
++ if (dev->flags & IFF_UP) {
++ /* Some, but not all, kernel versions close automatically. */
++ dev_close(dev);
++ dev->flags &= ~(IFF_UP|IFF_RUNNING);
++ }
++ unregister_netdev(dev);
++ release_region(dev->base_addr, pci_id_tbl[np->chip_id].io_size);
++#ifndef USE_IO_OPS
++ iounmap((char *)dev->base_addr);
++#endif
++ for (devp = &root_net_dev; *devp; devp = next) {
++ next = &((struct netdev_private *)(*devp)->priv)->next_module;
++ if (*devp == dev) {
++ *devp = *next;
++ break;
++ }
++ }
++ if (np->priv_addr)
++ kfree(np->priv_addr);
++ kfree(dev);
++ MOD_DEC_USE_COUNT;
++ break;
++ }
++ case DRV_PWR_WakeOn:
++ writeb(readb(ioaddr + WakeEvent) | 2, ioaddr + WakeEvent);
++ /* Fall through. */
++ case DRV_PWR_DOWN:
++ case DRV_PWR_UP:
++ acpi_set_pwr_state(np->pci_dev, event==DRV_PWR_UP ? ACPI_D0:ACPI_D3);
++ break;
++ default:
++ return -1;
++ }
++
++ return 0;
++}
++
++static int netdev_close(struct net_device *dev)
++{
++ long ioaddr = dev->base_addr;
++ struct netdev_private *np = (struct netdev_private *)dev->priv;
++ int i;
++
++ netif_stop_tx_queue(dev);
++
++ if (np->msg_level & NETIF_MSG_IFDOWN) {
++ printk(KERN_DEBUG "%s: Shutting down ethercard, status was Tx %2.2x "
++ "Rx %4.4x Int %2.2x.\n",
++ dev->name, (int)readw(ioaddr + TxStatus),
++ (int)readl(ioaddr + RxStatus), (int)readw(ioaddr + IntrStatus));
++ printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d, Rx %d / %d.\n",
++ dev->name, np->cur_tx, np->dirty_tx, np->cur_rx, np->dirty_rx);
++ }
++
++ /* Disable interrupts by clearing the interrupt mask. */
++ writew(0x0000, ioaddr + IntrEnable);
++
++ /* Stop the chip's Tx and Rx processes. */
++ writew(TxDisable | RxDisable | StatsDisable, ioaddr + MACCtrl1);
++
++ del_timer(&np->timer);
++
++#ifdef __i386__
++ if (np->msg_level & NETIF_MSG_IFDOWN) {
++ printk("\n"KERN_DEBUG" Tx ring at %8.8x:\n",
++ (int)virt_to_bus(np->tx_ring));
++ for (i = 0; i < TX_RING_SIZE; i++)
++ printk(" #%d desc. %4.4x %8.8x %8.8x.\n",
++ i, np->tx_ring[i].status, np->tx_ring[i].frag[0].addr,
++ np->tx_ring[i].frag[0].length);
++ printk("\n"KERN_DEBUG " Rx ring %8.8x:\n",
++ (int)virt_to_bus(np->rx_ring));
++ for (i = 0; i < /*RX_RING_SIZE*/4 ; i++) {
++ printk(KERN_DEBUG " #%d desc. %4.4x %4.4x %8.8x\n",
++ i, np->rx_ring[i].status, np->rx_ring[i].frag[0].addr,
++ np->rx_ring[i].frag[0].length);
++ }
++ }
++#endif /* __i386__ debugging only */
++
++ free_irq(dev->irq, dev);
++
++ /* Free all the skbuffs in the Rx queue. */
++ for (i = 0; i < RX_RING_SIZE; i++) {
++ np->rx_ring[i].status = 0;
++ np->rx_ring[i].frag[0].addr = 0xBADF00D0; /* An invalid address. */
++ if (np->rx_skbuff[i]) {
++#if LINUX_VERSION_CODE < 0x20100
++ np->rx_skbuff[i]->free = 1;
++#endif
++ dev_free_skb(np->rx_skbuff[i]);
++ }
++ np->rx_skbuff[i] = 0;
++ }
++ for (i = 0; i < TX_RING_SIZE; i++) {
++ if (np->tx_skbuff[i])
++ dev_free_skb(np->tx_skbuff[i]);
++ np->tx_skbuff[i] = 0;
++ }
++
++ MOD_DEC_USE_COUNT;
++
++ return 0;
++}
++
++
++#ifdef MODULE
++int init_module(void)
++{
++ if (debug >= NETIF_MSG_DRV) /* Emit version even if no cards detected. */
++ printk(KERN_INFO "%s" KERN_INFO "%s", version1, version2);
++ return pci_drv_register(&sundance_drv_id, NULL);
++}
++
++void cleanup_module(void)
++{
++ struct net_device *next_dev;
++
++ pci_drv_unregister(&sundance_drv_id);
++
++ /* No need to check MOD_IN_USE, as sys_delete_module() checks. */
++ while (root_net_dev) {
++ struct netdev_private *np = (void *)(root_net_dev->priv);
++ unregister_netdev(root_net_dev);
++#ifdef USE_IO_OPS
++ release_region(root_net_dev->base_addr,
++ pci_id_tbl[np->chip_id].io_size);
++#else
++ iounmap((char *)root_net_dev->base_addr);
++#endif
++ next_dev = np->next_module;
++ if (np->priv_addr)
++ kfree(np->priv_addr);
++ kfree(root_net_dev);
++ root_net_dev = next_dev;
++ }
++}
++
++#endif /* MODULE */
++
++/*
++ * Local variables:
++ * compile-command: "make KERNVER=`uname -r` sundance.o"
++ * compile-cmd1: "gcc -DMODULE -Wall -Wstrict-prototypes -O6 -c sundance.c"
++ * simple-compile-command: "gcc -DMODULE -O6 -c sundance.c"
++ * c-indent-level: 4
++ * c-basic-offset: 4
++ * tab-width: 4
++ * End:
++ */
+Index: linux/src/drivers/net/tulip.c
+===================================================================
+RCS file: /cvsroot/hurd/gnumach/linux/src/drivers/net/Attic/tulip.c,v
+retrieving revision 1.2
+diff -u -r1.2 tulip.c
+--- linux/src/drivers/net/tulip.c 26 Nov 2000 03:21:57 -0000 1.2
++++ linux/src/drivers/net/tulip.c 20 Aug 2004 10:32:54 -0000
+@@ -1,30 +1,39 @@
+-/* tulip.c: A DEC 21040-family ethernet driver for Linux. */
++/* tulip.c: A DEC 21040 family ethernet driver for Linux. */
+ /*
+- Written/copyright 1994-1999 by Donald Becker.
++ Written/copyright 1994-2003 by Donald Becker.
+
+- This software may be used and distributed according to the terms
+- of the GNU Public License, incorporated herein by reference.
++ This software may be used and distributed according to the terms of
++ the GNU General Public License (GPL), incorporated herein by reference.
++ Drivers based on or derived from this code fall under the GPL and must
++ retain the authorship, copyright and license notice. This file is not
++ a complete program and may only be used when the entire operating
++ system is licensed under the GPL.
+
+ This driver is for the Digital "Tulip" Ethernet adapter interface.
+ It should work with most DEC 21*4*-based chips/ethercards, as well as
+ with work-alike chips from Lite-On (PNIC) and Macronix (MXIC) and ASIX.
+
+- The author may be reached as becker@CESDIS.gsfc.nasa.gov, or C/O
+- Center of Excellence in Space Data and Information Sciences
+- Code 930.5, Goddard Space Flight Center, Greenbelt MD 20771
++ The author may be reached as becker@scyld.com, or C/O
++ Scyld Computing Corporation
++ 914 Bay Ridge Road, Suite 220
++ Annapolis MD 21403
+
+ Support and updates available at
+- http://cesdis.gsfc.nasa.gov/linux/drivers/tulip.html
+-
+- This driver also contains updates by Wolfgang Walter and others.
+- For this specific driver variant please use linux-kernel for
+- bug reports.
++ http://www.scyld.com/network/tulip.html
+ */
+
++/* These identify the driver base version and may not be removed. */
++static const char version1[] =
++"tulip.c:v0.97 7/22/2003 Written by Donald Becker <becker@scyld.com>\n";
++static const char version2[] =
++" http://www.scyld.com/network/tulip.html\n";
++
+ #define SMP_CHECK
+-static const char version[] = "tulip.c:v0.91g-ppc 7/16/99 becker@cesdis.gsfc.nasa.gov\n";
+
+-/* A few user-configurable values. */
++/* The user-configurable values.
++ These may be modified when a driver module is loaded.*/
++
++static int debug = 2; /* Message enable: 0..31 = no..all messages. */
+
+ /* Maximum events (Rx packets, etc.) to handle at each interrupt. */
+ static int max_interrupt_work = 25;
+@@ -36,11 +45,14 @@
+ static int mtu[MAX_UNITS] = {0, }; /* Jumbo MTU for interfaces. */
+
+ /* The possible media types that can be set in options[] are: */
+-static const char * const medianame[] = {
++#define MEDIA_MASK 31
++static const char * const medianame[32] = {
+ "10baseT", "10base2", "AUI", "100baseTx",
+- "10baseT-FD", "100baseTx-FD", "100baseT4", "100baseFx",
+- "100baseFx-FD", "MII 10baseT", "MII 10baseT-FD", "MII",
+- "10baseT(forced)", "MII 100baseTx", "MII 100baseTx-FD", "MII 100baseT4",
++ "10baseT-FDX", "100baseTx-FDX", "100baseT4", "100baseFx",
++ "100baseFx-FDX", "MII 10baseT", "MII 10baseT-FDX", "MII",
++ "10baseT(forced)", "MII 100baseTx", "MII 100baseTx-FDX", "MII 100baseT4",
++ "MII 100baseFx-HDX", "MII 100baseFx-FDX", "Home-PNA 1Mbps", "Invalid-19",
++ "","","","", "","","","", "","","","Transceiver reset",
+ };
+
+ /* Set if the PCI BIOS detects the chips on a multiport board backwards. */
+@@ -50,15 +62,8 @@
+ static int reverse_probe = 0;
+ #endif
+
+-/* Keep the ring sizes a power of two for efficiency.
+- Making the Tx ring too large decreases the effectiveness of channel
+- bonding and packet priority.
+- There are no ill effects from too-large receive rings. */
+-#define TX_RING_SIZE 16
+-#define RX_RING_SIZE 32
+-
+ /* Set the copy breakpoint for the copy-only-tiny-buffer Rx structure. */
+-#ifdef __alpha__
++#ifdef __alpha__ /* Always copy to aligned IP headers. */
+ static int rx_copybreak = 1518;
+ #else
+ static int rx_copybreak = 100;
+@@ -77,50 +82,85 @@
+ ToDo: Non-Intel setting could be better.
+ */
+
+-#if defined(__alpha__)
++#if defined(__alpha__) || defined(__x86_64) || defined(__ia64)
+ static int csr0 = 0x01A00000 | 0xE000;
+ #elif defined(__i386__) || defined(__powerpc__) || defined(__sparc__)
++/* Do *not* rely on hardware endian correction for big-endian machines! */
+ static int csr0 = 0x01A00000 | 0x8000;
+ #else
+ #warning Processor architecture undefined!
+ static int csr0 = 0x00A00000 | 0x4800;
+ #endif
+
++/* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
++ Typical is a 64 element hash table based on the Ethernet CRC.
++ This value does not apply to the 512 bit table chips.
++*/
++static int multicast_filter_limit = 32;
++
++/* Operational parameters that are set at compile time. */
++
++/* Keep the descriptor ring sizes a power of two for efficiency.
++ The Tx queue length limits transmit packets to a portion of the available
++ ring entries. It should be at least one element less to allow multicast
++ filter setup frames to be queued. It must be at least four for hysteresis.
++ Making the Tx queue too long decreases the effectiveness of channel
++ bonding and packet priority.
++ Large receive rings waste memory and confound network buffer limits.
++ These values have been carefully studied: changing these might mask a
++ problem, it won't fix it.
++*/
++#define TX_RING_SIZE 16
++#define TX_QUEUE_LEN 10
++#define RX_RING_SIZE 32
++
+ /* Operational parameters that usually are not changed. */
+ /* Time in jiffies before concluding the transmitter is hung. */
+-#define TX_TIMEOUT (4*HZ)
+-#define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/
++#define TX_TIMEOUT (6*HZ)
++/* Preferred skbuff allocation size. */
++#define PKT_BUF_SZ 1536
+ /* This is a mysterious value that can be written to CSR11 in the 21040 (only)
+ to support a pre-NWay full-duplex signaling mechanism using short frames.
+ No one knows what it should be, but if left at its default value some
+ 10base2(!) packets trigger a full-duplex-request interrupt. */
+ #define FULL_DUPLEX_MAGIC 0x6969
+
+-#if !defined(__OPTIMIZE__) || !defined(__KERNEL__)
++/* The include file section. We start by doing checks and fix-ups for
++ missing compile flags. */
++#ifndef __KERNEL__
++#define __KERNEL__
++#endif
++#if !defined(__OPTIMIZE__)
+ #warning You must compile this file with the correct options!
+ #warning See the last lines of the source file.
+ #error You must compile this driver with "-O".
+ #endif
+
+ #include <linux/config.h>
++#if defined(CONFIG_SMP) && ! defined(__SMP__)
++#define __SMP__
++#endif
++#if defined(CONFIG_MODVERSIONS) && defined(MODULE) && ! defined(MODVERSIONS)
++#define MODVERSIONS
++#endif
++
+ #include <linux/version.h>
+-#ifdef MODULE
+-#ifdef MODVERSIONS
++#if defined(MODVERSIONS)
+ #include <linux/modversions.h>
+ #endif
+ #include <linux/module.h>
+-#else
+-#define MOD_INC_USE_COUNT
+-#define MOD_DEC_USE_COUNT
+-#endif
++
+
+ #include <linux/kernel.h>
+-#include <linux/sched.h>
+ #include <linux/string.h>
+ #include <linux/timer.h>
+ #include <linux/errno.h>
+ #include <linux/ioport.h>
++#if LINUX_VERSION_CODE >= 0x20400
++#include <linux/slab.h>
++#else
+ #include <linux/malloc.h>
++#endif
+ #include <linux/interrupt.h>
+ #include <linux/pci.h>
+ #include <linux/netdevice.h>
+@@ -131,12 +171,23 @@
+ #include <asm/io.h>
+ #include <asm/unaligned.h>
+
+-/* Kernel compatibility defines, some common to David Hind's PCMCIA package.
+- This is only in the support-all-kernels source code. */
++#ifdef INLINE_PCISCAN
++#include "k_compat.h"
++#else
++#include "pci-scan.h"
++#include "kern_compat.h"
++#endif
++
++/* Condensed operations for readability. */
++#define virt_to_le32desc(addr) cpu_to_le32(virt_to_bus(addr))
++
++#if (LINUX_VERSION_CODE >= 0x20100) && defined(MODULE)
++char kernel_version[] = UTS_RELEASE;
++#endif
+
+-#if defined(MODULE) && LINUX_VERSION_CODE > 0x20115
+-MODULE_AUTHOR("Donald Becker <becker@cesdis.gsfc.nasa.gov>");
++MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
+ MODULE_DESCRIPTION("Digital 21*4* Tulip ethernet driver");
++MODULE_LICENSE("GPL");
+ MODULE_PARM(debug, "i");
+ MODULE_PARM(max_interrupt_work, "i");
+ MODULE_PARM(reverse_probe, "i");
+@@ -144,54 +195,42 @@
+ MODULE_PARM(csr0, "i");
+ MODULE_PARM(options, "1-" __MODULE_STRING(MAX_UNITS) "i");
+ MODULE_PARM(full_duplex, "1-" __MODULE_STRING(MAX_UNITS) "i");
+-#endif
+-
+-#define RUN_AT(x) (jiffies + (x))
+-
+-#if (LINUX_VERSION_CODE >= 0x20100)
+-static char kernel_version[] = UTS_RELEASE;
+-#endif
+-
+-#if LINUX_VERSION_CODE < 0x20123
+-#define hard_smp_processor_id() smp_processor_id()
+-#define test_and_set_bit(val, addr) set_bit(val, addr)
+-#define le16_to_cpu(val) (val)
+-#define le32_to_cpu(val) (val)
+-#define cpu_to_le32(val) (val)
+-#endif
+-#if LINUX_VERSION_CODE <= 0x20139
+-#define net_device_stats enet_statistics
+-#else
+-#define NETSTATS_VER2
+-#endif
+-#if LINUX_VERSION_CODE < 0x20155
+-/* Grrrr, the PCI code changed, but did not consider CardBus... */
+-#include <linux/bios32.h>
+-#define PCI_SUPPORT_VER1
+-#else
+-#define PCI_SUPPORT_VER2
+-#endif
+-#if LINUX_VERSION_CODE < 0x20159
+-#define dev_free_skb(skb) dev_kfree_skb(skb, FREE_WRITE);
+-#else
+-#define dev_free_skb(skb) dev_kfree_skb(skb);
+-#endif
+-#if ! defined(CAP_NET_ADMIN)
+-#define capable(CAP_XXX) (suser())
+-#endif
+-#if ! defined(HAS_NETIF_QUEUE)
+-#define netif_wake_queue(dev) mark_bh(NET_BH);
+-#endif
+-
+-/* Condensed operations for readability. */
+-#define virt_to_le32desc(addr) cpu_to_le32(virt_to_bus(addr))
+-#define le32desc_to_virt(addr) bus_to_virt(le32_to_cpu(addr))
+-
+-#define tulip_debug debug
+-#ifdef TULIP_DEBUG
+-static int tulip_debug = TULIP_DEBUG;
+-#else
+-static int tulip_debug = 1;
++MODULE_PARM(multicast_filter_limit, "i");
++#ifdef MODULE_PARM_DESC
++MODULE_PARM_DESC(debug, "Tulip driver message level (0-31)");
++MODULE_PARM_DESC(options,
++ "Tulip: force transceiver type or fixed speed+duplex");
++MODULE_PARM_DESC(max_interrupt_work,
++ "Tulip driver maximum events handled per interrupt");
++MODULE_PARM_DESC(full_duplex, "Tulip: non-zero to set forced full duplex.");
++MODULE_PARM_DESC(rx_copybreak,
++ "Tulip breakpoint in bytes for copy-only-tiny-frames");
++MODULE_PARM_DESC(multicast_filter_limit,
++ "Tulip breakpoint for switching to Rx-all-multicast");
++MODULE_PARM_DESC(reverse_probe, "Search PCI devices in reverse order to work "
++ "around misordered multiport NICS.");
++MODULE_PARM_DESC(csr0, "Special setting for the CSR0 PCI bus parameter "
++ "register.");
++#endif
++
++/* This driver was originally written to use I/O space access, but now
++ uses memory space by default. Override this with -DUSE_IO_OPS. */
++#if (LINUX_VERSION_CODE < 0x20100) || ! defined(MODULE)
++#define USE_IO_OPS
++#endif
++#ifndef USE_IO_OPS
++#undef inb
++#undef inw
++#undef inl
++#undef outb
++#undef outw
++#undef outl
++#define inb readb
++#define inw readw
++#define inl readl
++#define outb writeb
++#define outw writew
++#define outl writel
+ #endif
+
+ /*
+@@ -203,7 +242,7 @@
+ single-chip ethernet controllers for PCI. Supported members of the family
+ are the 21040, 21041, 21140, 21140A, 21142, and 21143. Similar work-alike
+ chips from Lite-On, Macronics, ASIX, Compex and other listed below are also
+-supported.
++supported.
+
+ These chips are used on at least 140 unique PCI board designs. The great
+ number of chips and board designs supported is the reason for the
+@@ -221,7 +260,7 @@
+ is usually "autoselect". This should only be overridden when using
+ transceiver connections without link beat e.g. 10base2 or AUI, or (rarely!)
+ for forcing full-duplex when used with old link partners that do not do
+-autonegotiation.
++autonegotiation.
+
+ III. Driver operation
+
+@@ -244,7 +283,7 @@
+ information). For large frames the copying cost is non-trivial, and the
+ larger copy might flush the cache of useful data. A subtle aspect of this
+ choice is that the Tulip only receives into longword aligned buffers, thus
+-the IP header at offset 14 isn't longword aligned for further processing.
++the IP header at offset 14 is not longword aligned for further processing.
+ Copied frames are put into the new skbuff at an offset of "+2", thus copying
+ has the beneficial effect of aligning the IP header and preloading the
+ cache.
+@@ -256,13 +295,13 @@
+ threaded by the hardware and other software.
+
+ The send packet thread has partial control over the Tx ring and 'dev->tbusy'
+-flag. It sets the tbusy flag whenever it's queuing a Tx packet. If the next
++flag. It sets the tbusy flag whenever it is queuing a Tx packet. If the next
+ queue slot is empty, it clears the tbusy flag when finished otherwise it sets
+ the 'tp->tx_full' flag.
+
+ The interrupt handler has exclusive control over the Rx ring and records stats
+-from the Tx ring. (The Tx-done interrupt can't be selectively turned off, so
+-we can't avoid the interrupt overhead by having the Tx routine reap the Tx
++from the Tx ring. (The Tx-done interrupt can not be selectively turned off, so
++we cannot avoid the interrupt overhead by having the Tx routine reap the Tx
+ stats.) After reaping the stats, it marks the queue entry as empty by setting
+ the 'base' to zero. Iff the 'tp->tx_full' flag is set, it clears both the
+ tx_full and tbusy flags.
+@@ -275,7 +314,7 @@
+
+ IVb. References
+
+-http://cesdis.gsfc.nasa.gov/linux/misc/NWay.html
++http://scyld.com/expert/NWay.html
+ http://www.digital.com (search for current 21*4* datasheets and "21X4 SROM")
+ http://www.national.com/pf/DP/DP83840A.html
+ http://www.asix.com.tw/pmac.htm
+@@ -290,73 +329,138 @@
+ The DEC SROM format is very badly designed not precisely defined, leading to
+ part of the media selection junkheap below. Some boards do not have EEPROM
+ media tables and need to be patched up. Worse, other boards use the DEC
+-design kit media table when it isn't correct for their board.
++design kit media table when it is not correct for their design.
+
+ We cannot use MII interrupts because there is no defined GPIO pin to attach
+ them. The MII transceiver status is polled using an kernel timer.
+
+ */
+
+-static struct device *
+-tulip_probe1(int pci_bus, int pci_devfn, struct device *dev, long ioaddr,
+- int irq, int chip_idx, int board_idx);
+-
+-/* This table drives the PCI probe routines. It's mostly boilerplate in all
+- of the drivers, and will likely be provided by some future kernel.
+- Note the matching code -- the first table entry matchs all 56** cards but
+- second only the 1234 card.
+-*/
+-enum pci_flags_bit {
+- PCI_USES_IO=1, PCI_USES_MEM=2, PCI_USES_MASTER=4,
+- PCI_ADDR0=0x10<<0, PCI_ADDR1=0x10<<1, PCI_ADDR2=0x10<<2, PCI_ADDR3=0x10<<3,
+-};
+-#define PCI_ADDR0_IO (PCI_USES_IO|PCI_ADDR0)
++static void *tulip_probe1(struct pci_dev *pdev, void *init_dev,
++ long ioaddr, int irq, int chip_idx, int find_cnt);
++static int tulip_pwr_event(void *dev_instance, int event);
++
++#ifdef USE_IO_OPS
++#define TULIP_IOTYPE PCI_USES_MASTER | PCI_USES_IO | PCI_ADDR0
++#define TULIP_SIZE 0x80
++#define TULIP_SIZE1 0x100
++#else
++#define TULIP_IOTYPE PCI_USES_MASTER | PCI_USES_MEM | PCI_ADDR1
++#define TULIP_SIZE 0x400 /* New PCI v2.1 recommends 4K min mem size. */
++#define TULIP_SIZE1 0x400 /* New PCI v2.1 recommends 4K min mem size. */
++#endif
+
+-struct pci_id_info {
+- const char *name;
+- u16 vendor_id, device_id, device_id_mask, flags;
+- int io_size, min_latency;
+- struct device *(*probe1)(int pci_bus, int pci_devfn, struct device *dev,
+- long ioaddr, int irq, int chip_idx, int fnd_cnt);
++/* This must match tulip_tbl[]! Note 21142 == 21143. */
++enum tulip_chips {
++ DC21040=0, DC21041=1, DC21140=2, DC21142=3, DC21143=3,
++ LC82C168, MX98713, MX98715, MX98725, AX88141, AX88140, PNIC2, COMET,
++ COMPEX9881, I21145, XIRCOM, CONEXANT,
++ /* These flags may be added to the chip type. */
++ HAS_VLAN=0x100,
+ };
+-#ifndef CARDBUS
+-static struct pci_id_info pci_tbl[] = {
+- { "Digital DC21040 Tulip",
+- 0x1011, 0x0002, 0xffff, PCI_ADDR0_IO, 128, 32, tulip_probe1 },
+- { "Digital DC21041 Tulip",
+- 0x1011, 0x0014, 0xffff, PCI_ADDR0_IO, 128, 32, tulip_probe1 },
+- { "Digital DS21140 Tulip",
+- 0x1011, 0x0009, 0xffff, PCI_ADDR0_IO, 128, 32, tulip_probe1 },
+- { "Digital DS21143 Tulip",
+- 0x1011, 0x0019, 0xffff, PCI_ADDR0_IO, 128, 32, tulip_probe1 },
+- { "Lite-On 82c168 PNIC",
+- 0x11AD, 0x0002, 0xffff, PCI_ADDR0_IO, 256, 32, tulip_probe1 },
+- { "Macronix 98713 PMAC",
+- 0x10d9, 0x0512, 0xffff, PCI_ADDR0_IO, 256, 32, tulip_probe1 },
+- { "Macronix 98715 PMAC",
+- 0x10d9, 0x0531, 0xffff, PCI_ADDR0_IO, 256, 32, tulip_probe1 },
+- { "Macronix 98725 PMAC",
+- 0x10d9, 0x0531, 0xffff, PCI_ADDR0_IO, 256, 32, tulip_probe1 },
+- { "ASIX AX88140",
+- 0x125B, 0x1400, 0xffff, PCI_ADDR0_IO, 128, 32, tulip_probe1 },
+- { "Lite-On LC82C115 PNIC-II",
+- 0x11AD, 0xc115, 0xffff, PCI_ADDR0_IO, 256, 32, tulip_probe1 },
+- { "ADMtek AN981 Comet",
+- 0x1317, 0x0981, 0xffff, PCI_ADDR0_IO, 256, 32, tulip_probe1 },
+- { "Compex RL100-TX",
+- 0x11F6, 0x9881, 0xffff, PCI_ADDR0_IO, 128, 32, tulip_probe1 },
+- { "Intel 21145 Tulip",
+- 0x8086, 0x0039, 0xffff, PCI_ADDR0_IO, 128, 32, tulip_probe1 },
+- { "Xircom Tulip clone",
+- 0x115d, 0x0003, 0xffff, PCI_ADDR0_IO, 128, 32, tulip_probe1 },
+- {0},
++
++static struct pci_id_info pci_id_tbl[] = {
++ { "Digital DC21040 Tulip", { 0x00021011, 0xffffffff },
++ TULIP_IOTYPE, 0x80, DC21040 },
++ { "Digital DC21041 Tulip", { 0x00141011, 0xffffffff },
++ TULIP_IOTYPE, 0x80, DC21041 },
++ { "Digital DS21140A Tulip", { 0x00091011, 0xffffffff, 0,0, 0x20,0xf0 },
++ TULIP_IOTYPE, 0x80, DC21140 },
++ { "Digital DS21140 Tulip", { 0x00091011, 0xffffffff },
++ TULIP_IOTYPE, 0x80, DC21140 },
++ { "Digital DS21143-xD Tulip", { 0x00191011, 0xffffffff, 0,0, 0x40,0xf0 },
++ TULIP_IOTYPE, TULIP_SIZE, DC21142 | HAS_VLAN },
++ { "Digital DS21143-xC Tulip", { 0x00191011, 0xffffffff, 0,0, 0x30,0xf0 },
++ TULIP_IOTYPE, TULIP_SIZE, DC21142 },
++ { "Digital DS21142 Tulip", { 0x00191011, 0xffffffff },
++ TULIP_IOTYPE, TULIP_SIZE, DC21142 },
++ { "Kingston KNE110tx (PNIC)",
++ { 0x000211AD, 0xffffffff, 0xf0022646, 0xffffffff },
++ TULIP_IOTYPE, 256, LC82C168 },
++ { "Linksys LNE100TX (82c168 PNIC)", /* w/SYM */
++ { 0x000211AD, 0xffffffff, 0xffff11ad, 0xffffffff, 17,0xff },
++ TULIP_IOTYPE, 256, LC82C168 },
++ { "Linksys LNE100TX (82c169 PNIC)", /* w/ MII */
++ { 0x000211AD, 0xffffffff, 0xf00311ad, 0xffffffff, 32,0xff },
++ TULIP_IOTYPE, 256, LC82C168 },
++ { "Lite-On 82c168 PNIC", { 0x000211AD, 0xffffffff },
++ TULIP_IOTYPE, 256, LC82C168 },
++ { "Macronix 98713 PMAC", { 0x051210d9, 0xffffffff },
++ TULIP_IOTYPE, 256, MX98713 },
++ { "Macronix 98715 PMAC", { 0x053110d9, 0xffffffff },
++ TULIP_IOTYPE, 256, MX98715 },
++ { "Macronix 98725 PMAC", { 0x053110d9, 0xffffffff },
++ TULIP_IOTYPE, 256, MX98725 },
++ { "ASIX AX88141", { 0x1400125B, 0xffffffff, 0,0, 0x10, 0xf0 },
++ TULIP_IOTYPE, 128, AX88141 },
++ { "ASIX AX88140", { 0x1400125B, 0xffffffff },
++ TULIP_IOTYPE, 128, AX88140 },
++ { "Lite-On LC82C115 PNIC-II", { 0xc11511AD, 0xffffffff },
++ TULIP_IOTYPE, 256, PNIC2 },
++ { "ADMtek AN981 Comet", { 0x09811317, 0xffffffff },
++ TULIP_IOTYPE, TULIP_SIZE1, COMET },
++ { "ADMtek Centaur-P", { 0x09851317, 0xffffffff },
++ TULIP_IOTYPE, TULIP_SIZE1, COMET },
++ { "ADMtek Centaur-C", { 0x19851317, 0xffffffff },
++ TULIP_IOTYPE, TULIP_SIZE1, COMET },
++ { "D-Link DFE-680TXD v1.0 (ADMtek Centaur-C)", { 0x15411186, 0xffffffff },
++ TULIP_IOTYPE, TULIP_SIZE1, COMET },
++ { "ADMtek Centaur-C (Linksys v2)", { 0xab0213d1, 0xffffffff },
++ TULIP_IOTYPE, TULIP_SIZE1, COMET },
++ { "ADMtek Centaur-C (Linksys)", { 0xab0313d1, 0xffffffff },
++ TULIP_IOTYPE, TULIP_SIZE1, COMET },
++ { "ADMtek Centaur-C (Linksys)", { 0xab0813d1, 0xffffffff },
++ TULIP_IOTYPE, TULIP_SIZE1, COMET },
++ { "ADMtek Centaur-C (Linksys PCM200 v3)", { 0xab081737, 0xffffffff },
++ TULIP_IOTYPE, TULIP_SIZE1, COMET },
++ { "ADMtek Centaur-C (Linksys PCM200 v3)", { 0xab091737, 0xffffffff },
++ TULIP_IOTYPE, TULIP_SIZE1, COMET },
++ { "STMicro STE10/100 Comet", { 0x0981104a, 0xffffffff },
++ TULIP_IOTYPE, TULIP_SIZE1, COMET },
++ { "STMicro STE10/100A Comet", { 0x2774104a, 0xffffffff },
++ TULIP_IOTYPE, TULIP_SIZE1, COMET },
++ { "ADMtek Comet-II", { 0x95111317, 0xffffffff },
++ TULIP_IOTYPE, TULIP_SIZE1, COMET },
++ { "ADMtek Comet-II (9513)", { 0x95131317, 0xffffffff },
++ TULIP_IOTYPE, TULIP_SIZE1, COMET },
++ { "SMC1255TX (ADMtek Comet)",
++ { 0x12161113, 0xffffffff, 0x125510b8, 0xffffffff },
++ TULIP_IOTYPE, TULIP_SIZE1, COMET },
++ { "Accton EN1217/EN2242 (ADMtek Comet)", { 0x12161113, 0xffffffff },
++ TULIP_IOTYPE, TULIP_SIZE1, COMET },
++ { "SMC1255TX (ADMtek Comet-II)", { 0x125510b8, 0xffffffff },
++ TULIP_IOTYPE, TULIP_SIZE1, COMET },
++ { "ADMtek Comet-II (model 1020)", { 0x1020111a, 0xffffffff },
++ TULIP_IOTYPE, TULIP_SIZE1, COMET },
++ { "Allied Telesyn A120 (ADMtek Comet)", { 0xa1201259, 0xffffffff },
++ TULIP_IOTYPE, TULIP_SIZE1, COMET },
++ { "Compex RL100-TX", { 0x988111F6, 0xffffffff },
++ TULIP_IOTYPE, 128, COMPEX9881 },
++ { "Intel 21145 Tulip", { 0x00398086, 0xffffffff },
++ TULIP_IOTYPE, 128, I21145 },
++ { "Xircom Tulip clone", { 0x0003115d, 0xffffffff },
++ TULIP_IOTYPE, 128, XIRCOM },
++ { "Davicom DM9102", { 0x91021282, 0xffffffff },
++ TULIP_IOTYPE, 0x80, DC21140 },
++ { "Davicom DM9100", { 0x91001282, 0xffffffff },
++ TULIP_IOTYPE, 0x80, DC21140 },
++ { "Macronix mxic-98715 (EN1217)", { 0x12171113, 0xffffffff },
++ TULIP_IOTYPE, 256, MX98715 },
++ { "Conexant LANfinity", { 0x180314f1, 0xffffffff },
++ TULIP_IOTYPE, TULIP_SIZE1, CONEXANT },
++ { "3Com 3cSOHO100B-TX (ADMtek Centaur)", { 0x930010b7, 0xffffffff },
++ TULIP_IOTYPE, TULIP_SIZE1, COMET },
++ { 0},
+ };
+-#endif /* !CARD_BUS */
+
+-/* This table use during operation for capabilities and media timer. */
++struct drv_id_info tulip_drv_id = {
++ "tulip", PCI_HOTSWAP, PCI_CLASS_NETWORK_ETHERNET<<8, pci_id_tbl,
++ tulip_probe1, tulip_pwr_event };
++
++/* This table is used during operation for capabilities and media timer. */
+
+ static void tulip_timer(unsigned long data);
+-static void t21142_timer(unsigned long data);
++static void nway_timer(unsigned long data);
+ static void mxic_timer(unsigned long data);
+ static void pnic_timer(unsigned long data);
+ static void comet_timer(unsigned long data);
+@@ -364,23 +468,27 @@
+ enum tbl_flag {
+ HAS_MII=1, HAS_MEDIA_TABLE=2, CSR12_IN_SROM=4, ALWAYS_CHECK_MII=8,
+ HAS_PWRDWN=0x10, MC_HASH_ONLY=0x20, /* Hash-only multicast filter. */
+- HAS_PNICNWAY=0x80, HAS_NWAY143=0x40, /* Uses internal NWay xcvr. */
+- HAS_8023X=0x100,
++ HAS_PNICNWAY=0x80, HAS_NWAY=0x40, /* Uses internal NWay xcvr. */
++ HAS_INTR_MITIGATION=0x100, IS_ASIX=0x200, HAS_8023X=0x400,
++ COMET_MAC_ADDR=0x0800,
+ };
++
++/* Note: this table must match enum tulip_chips above. */
+ static struct tulip_chip_table {
+ char *chip_name;
+- int io_size;
++ int io_size; /* Unused */
+ int valid_intrs; /* CSR7 interrupt enable settings */
+ int flags;
+ void (*media_timer)(unsigned long data);
+ } tulip_tbl[] = {
+ { "Digital DC21040 Tulip", 128, 0x0001ebef, 0, tulip_timer },
+- { "Digital DC21041 Tulip", 128, 0x0001ebff, HAS_MEDIA_TABLE, tulip_timer },
++ { "Digital DC21041 Tulip", 128, 0x0001ebff,
++ HAS_MEDIA_TABLE | HAS_NWAY, tulip_timer },
+ { "Digital DS21140 Tulip", 128, 0x0001ebef,
+ HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM, tulip_timer },
+ { "Digital DS21143 Tulip", 128, 0x0801fbff,
+- HAS_MII | HAS_MEDIA_TABLE | ALWAYS_CHECK_MII | HAS_PWRDWN | HAS_NWAY143,
+- t21142_timer },
++ HAS_MII | HAS_MEDIA_TABLE | ALWAYS_CHECK_MII | HAS_PWRDWN | HAS_NWAY
++ | HAS_INTR_MITIGATION, nway_timer },
+ { "Lite-On 82c168 PNIC", 256, 0x0001ebef,
+ HAS_MII | HAS_PNICNWAY, pnic_timer },
+ { "Macronix 98713 PMAC", 128, 0x0001ebef,
+@@ -390,38 +498,37 @@
+ { "Macronix 98725 PMAC", 256, 0x0001ebef,
+ HAS_MEDIA_TABLE, mxic_timer },
+ { "ASIX AX88140", 128, 0x0001fbff,
+- HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM | MC_HASH_ONLY, tulip_timer },
++ HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM | MC_HASH_ONLY | IS_ASIX, tulip_timer },
++ { "ASIX AX88141", 128, 0x0001fbff,
++ HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM | MC_HASH_ONLY | IS_ASIX, tulip_timer },
+ { "Lite-On PNIC-II", 256, 0x0801fbff,
+- HAS_MII | HAS_NWAY143 | HAS_8023X, t21142_timer },
++ HAS_MII | HAS_NWAY | HAS_8023X, nway_timer },
+ { "ADMtek Comet", 256, 0x0001abef,
+- MC_HASH_ONLY, comet_timer },
++ HAS_MII | MC_HASH_ONLY | COMET_MAC_ADDR, comet_timer },
+ { "Compex 9881 PMAC", 128, 0x0001ebef,
+ HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM, mxic_timer },
+ { "Intel DS21145 Tulip", 128, 0x0801fbff,
+- HAS_MII | HAS_MEDIA_TABLE | ALWAYS_CHECK_MII | HAS_PWRDWN | HAS_NWAY143,
+- t21142_timer },
++ HAS_MII | HAS_MEDIA_TABLE | ALWAYS_CHECK_MII | HAS_PWRDWN | HAS_NWAY,
++ nway_timer },
+ { "Xircom tulip work-alike", 128, 0x0801fbff,
+- HAS_MII | HAS_MEDIA_TABLE | ALWAYS_CHECK_MII | HAS_PWRDWN | HAS_NWAY143,
+- t21142_timer },
++ HAS_MII | HAS_MEDIA_TABLE | ALWAYS_CHECK_MII | HAS_PWRDWN | HAS_NWAY,
++ nway_timer },
++ { "Conexant LANfinity", 256, 0x0001ebef,
++ HAS_MII | HAS_PWRDWN, tulip_timer },
+ {0},
+ };
+-/* This matches the table above. Note 21142 == 21143. */
+-enum chips {
+- DC21040=0, DC21041=1, DC21140=2, DC21142=3, DC21143=3,
+- LC82C168, MX98713, MX98715, MX98725, AX88140, PNIC2, COMET, COMPEX9881,
+- I21145,
+-};
+
+ /* A full-duplex map for media types. */
+ enum MediaIs {
+ MediaIsFD = 1, MediaAlwaysFD=2, MediaIsMII=4, MediaIsFx=8,
+ MediaIs100=16};
+-static const char media_cap[] =
+-{0,0,0,16, 3,19,16,24, 27,4,7,5, 0,20,23,20 };
++static const char media_cap[32] =
++{0,0,0,16, 3,19,16,24, 27,4,7,5, 0,20,23,20, 28,31,0,0, };
+ static u8 t21040_csr13[] = {2,0x0C,8,4, 4,0,0,0, 0,0,0,0, 4,0,0,0};
++
+ /* 21041 transceiver register settings: 10-T, 10-2, AUI, 10-T, 10T-FD*/
+-static u16 t21041_csr13[] = { 0xEF05, 0xEF0D, 0xEF0D, 0xEF05, 0xEF05, };
+-static u16 t21041_csr14[] = { 0xFFFF, 0xF7FD, 0xF7FD, 0x7F3F, 0x7F3D, };
++static u16 t21041_csr13[] = { 0xEF01, 0xEF09, 0xEF09, 0xEF01, 0xEF09, };
++static u16 t21041_csr14[] = { 0xFFFF, 0xF7FD, 0xF7FD, 0x6F3F, 0x6F3D, };
+ static u16 t21041_csr15[] = { 0x0008, 0x0006, 0x000E, 0x0008, 0x0008, };
+
+ static u16 t21142_csr13[] = { 0x0001, 0x0009, 0x0009, 0x0000, 0x0001, };
+@@ -437,12 +544,20 @@
+
+ /* The bits in the CSR5 status registers, mostly interrupt sources. */
+ enum status_bits {
+- TimerInt=0x800, SytemError=0x2000, TPLnkFail=0x1000, TPLnkPass=0x10,
+- NormalIntr=0x10000, AbnormalIntr=0x8000,
+- RxJabber=0x200, RxDied=0x100, RxNoBuf=0x80, RxIntr=0x40,
++ TimerInt=0x800, TPLnkFail=0x1000, TPLnkPass=0x10,
++ NormalIntr=0x10000, AbnormalIntr=0x8000, PCIBusError=0x2000,
++ RxJabber=0x200, RxStopped=0x100, RxNoBuf=0x80, RxIntr=0x40,
+ TxFIFOUnderflow=0x20, TxJabber=0x08, TxNoBuf=0x04, TxDied=0x02, TxIntr=0x01,
+ };
+
++/* The configuration bits in CSR6. */
++enum csr6_mode_bits {
++ TxOn=0x2000, RxOn=0x0002, FullDuplex=0x0200,
++ AcceptBroadcast=0x0100, AcceptAllMulticast=0x0080,
++ AcceptAllPhys=0x0040, AcceptRunt=0x0008,
++};
++
++
+ /* The Tulip Rx and Tx buffer descriptors. */
+ struct tulip_rx_desc {
+ s32 status;
+@@ -470,7 +585,7 @@
+ */
+ #define DESC_RING_WRAP 0x02000000
+
+-#define EEPROM_SIZE 128 /* 2 << EEPROM_ADDRLEN */
++#define EEPROM_SIZE 512 /* support 256*16 EEPROMs */
+
+ struct medialeaf {
+ u8 type;
+@@ -493,239 +608,147 @@
+ unsigned char *info;
+ };
+
++#define PRIV_ALIGN 15 /* Required alignment mask */
+ struct tulip_private {
+- char devname[8]; /* Used only for kernel debugging. */
+- const char *product_name;
+- struct device *next_module;
+ struct tulip_rx_desc rx_ring[RX_RING_SIZE];
+ struct tulip_tx_desc tx_ring[TX_RING_SIZE];
+- /* The saved address of a sent-in-place packet/buffer, for skfree(). */
++ /* The saved addresses of Rx/Tx-in-place packet buffers. */
+ struct sk_buff* tx_skbuff[TX_RING_SIZE];
+- /* The addresses of receive-in-place skbuffs. */
+ struct sk_buff* rx_skbuff[RX_RING_SIZE];
+- char *rx_buffs; /* Address of temporary Rx buffers. */
++ struct net_device *next_module;
++ void *priv_addr; /* Unaligned address of dev->priv for kfree */
++ /* Multicast filter control. */
+ u16 setup_frame[96]; /* Pseudo-Tx frame to init address table. */
+- int chip_id;
+- int revision;
++ u32 mc_filter[2]; /* Multicast hash filter */
++ int multicast_filter_limit;
++ struct pci_dev *pci_dev;
++ int chip_id, revision;
+ int flags;
++ int max_interrupt_work;
++ int msg_level;
++ unsigned int csr0, csr6; /* Current CSR0, CSR6 settings. */
++ /* Note: cache line pairing and isolation of Rx vs. Tx indicies. */
++ unsigned int cur_rx, dirty_rx; /* Producer/consumer ring indices */
++ unsigned int rx_buf_sz; /* Based on MTU+slack. */
++ int rx_copybreak;
++ unsigned int rx_dead:1; /* We have no Rx buffers. */
++
+ struct net_device_stats stats;
+- struct timer_list timer; /* Media selection timer. */
+- int interrupt; /* In-interrupt flag. */
+- unsigned int cur_rx, cur_tx; /* The next free ring entry */
+- unsigned int dirty_rx, dirty_tx; /* The ring entries to be free()ed. */
++ unsigned int cur_tx, dirty_tx;
+ unsigned int tx_full:1; /* The Tx queue is full. */
++
++ /* Media selection state. */
+ unsigned int full_duplex:1; /* Full-duplex operation requested. */
+ unsigned int full_duplex_lock:1;
+ unsigned int fake_addr:1; /* Multiport board faked address. */
+- unsigned int default_port:4; /* Last dev->if_port value. */
+ unsigned int media2:4; /* Secondary monitored media port. */
+- unsigned int medialock:1; /* Don't sense media type. */
++ unsigned int medialock:1; /* Do not sense media type. */
+ unsigned int mediasense:1; /* Media sensing in progress. */
+ unsigned int nway:1, nwayset:1; /* 21143 internal NWay. */
+- unsigned int csr0; /* CSR0 setting. */
+- unsigned int csr6; /* Current CSR6 control settings. */
++ unsigned int default_port; /* Last dev->if_port value. */
+ unsigned char eeprom[EEPROM_SIZE]; /* Serial EEPROM contents. */
+- void (*link_change)(struct device *dev, int csr5);
+- u16 to_advertise; /* NWay capabilities advertised. */
++ struct timer_list timer; /* Media selection timer. */
++ void (*link_change)(struct net_device *dev, int csr5);
+ u16 lpar; /* 21143 Link partner ability. */
+- u16 advertising[4];
++ u16 sym_advertise, mii_advertise; /* NWay to-advertise. */
++ u16 advertising[4]; /* MII advertise, from SROM table. */
+ signed char phys[4], mii_cnt; /* MII device addresses. */
++ spinlock_t mii_lock;
+ struct mediatable *mtable;
+ int cur_index; /* Current media index. */
+ int saved_if_port;
+- unsigned char pci_bus, pci_devfn;
+- int ttimer;
+- int susp_rx;
+- unsigned long nir;
+- int pad0, pad1; /* Used for 8-byte alignment */
+ };
+
+-static void parse_eeprom(struct device *dev);
++static void start_link(struct net_device *dev);
++static void parse_eeprom(struct net_device *dev);
+ static int read_eeprom(long ioaddr, int location, int addr_len);
+-static int mdio_read(struct device *dev, int phy_id, int location);
+-static void mdio_write(struct device *dev, int phy_id, int location, int value);
+-static void select_media(struct device *dev, int startup);
+-static int tulip_open(struct device *dev);
++static int mdio_read(struct net_device *dev, int phy_id, int location);
++static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
++static int tulip_open(struct net_device *dev);
+ /* Chip-specific media selection (timer functions prototyped above). */
+-static void t21142_lnk_change(struct device *dev, int csr5);
+-static void t21142_start_nway(struct device *dev);
+-static void pnic_lnk_change(struct device *dev, int csr5);
+-static void pnic_do_nway(struct device *dev);
+-
+-static void tulip_tx_timeout(struct device *dev);
+-static void tulip_init_ring(struct device *dev);
+-static int tulip_start_xmit(struct sk_buff *skb, struct device *dev);
+-static int tulip_refill_rx(struct device *dev);
+-static int tulip_rx(struct device *dev);
++static int check_duplex(struct net_device *dev);
++static void select_media(struct net_device *dev, int startup);
++static void init_media(struct net_device *dev);
++static void nway_lnk_change(struct net_device *dev, int csr5);
++static void nway_start(struct net_device *dev);
++static void pnic_lnk_change(struct net_device *dev, int csr5);
++static void pnic_do_nway(struct net_device *dev);
++
++static void tulip_tx_timeout(struct net_device *dev);
++static void tulip_init_ring(struct net_device *dev);
++static int tulip_start_xmit(struct sk_buff *skb, struct net_device *dev);
++static int tulip_rx(struct net_device *dev);
+ static void tulip_interrupt(int irq, void *dev_instance, struct pt_regs *regs);
+-static int tulip_close(struct device *dev);
+-static struct net_device_stats *tulip_get_stats(struct device *dev);
++static int tulip_close(struct net_device *dev);
++static struct net_device_stats *tulip_get_stats(struct net_device *dev);
+ #ifdef HAVE_PRIVATE_IOCTL
+-static int private_ioctl(struct device *dev, struct ifreq *rq, int cmd);
++static int private_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+ #endif
+-static void set_rx_mode(struct device *dev);
++static void set_rx_mode(struct net_device *dev);
+
+
+
+ /* A list of all installed Tulip devices. */
+-static struct device *root_tulip_dev = NULL;
+-
+-#ifndef CARDBUS
+-int tulip_probe(struct device *dev)
+-{
+- int cards_found = 0;
+- int pci_index = 0;
+- unsigned char pci_bus, pci_device_fn;
+-
+- if ( ! pcibios_present())
+- return -ENODEV;
+-
+- for (;pci_index < 0xff; pci_index++) {
+- u16 vendor, device, pci_command, new_command, subvendor;
+- int chip_idx;
+- int irq;
+- long ioaddr;
+-
+- if (pcibios_find_class
+- (PCI_CLASS_NETWORK_ETHERNET << 8,
+- reverse_probe ? 0xfe - pci_index : pci_index,
+- &pci_bus, &pci_device_fn) != PCIBIOS_SUCCESSFUL) {
+- if (reverse_probe)
+- continue;
+- else
+- break;
+- }
+- pcibios_read_config_word(pci_bus, pci_device_fn,
+- PCI_VENDOR_ID, &vendor);
+- pcibios_read_config_word(pci_bus, pci_device_fn,
+- PCI_DEVICE_ID, &device);
+- pcibios_read_config_word(pci_bus, pci_device_fn,
+- PCI_SUBSYSTEM_VENDOR_ID, &subvendor);
+-
+- if( subvendor == 0x1376 ){
+- printk("tulip: skipping LMC card.\n");
+- continue;
+- }
+-
+- for (chip_idx = 0; pci_tbl[chip_idx].vendor_id; chip_idx++)
+- if (vendor == pci_tbl[chip_idx].vendor_id
+- && (device & pci_tbl[chip_idx].device_id_mask) ==
+- pci_tbl[chip_idx].device_id)
+- break;
+- if (pci_tbl[chip_idx].vendor_id == 0)
+- continue;
+-
+- {
+-#if defined(PCI_SUPPORT_VER2)
+- struct pci_dev *pdev = pci_find_slot(pci_bus, pci_device_fn);
+- ioaddr = pdev->base_address[0] & ~3;
+- irq = pdev->irq;
+-#else
+- u32 pci_ioaddr;
+- u8 pci_irq_line;
+- pcibios_read_config_dword(pci_bus, pci_device_fn,
+- PCI_BASE_ADDRESS_0, &pci_ioaddr);
+- pcibios_read_config_byte(pci_bus, pci_device_fn,
+- PCI_INTERRUPT_LINE, &pci_irq_line);
+- ioaddr = pci_ioaddr & ~3;
+- irq = pci_irq_line;
+-#endif
+- }
+-
+- if (debug > 2)
+- printk(KERN_INFO "Found %s at PCI I/O address %#lx.\n",
+- pci_tbl[chip_idx].name, ioaddr);
+-
+- if (check_region(ioaddr, pci_tbl[chip_idx].io_size))
+- continue;
+-
+- pcibios_read_config_word(pci_bus, pci_device_fn,
+- PCI_COMMAND, &pci_command);
+- new_command = pci_command | PCI_COMMAND_MASTER|PCI_COMMAND_IO;
+- if (pci_command != new_command) {
+- printk(KERN_INFO " The PCI BIOS has not enabled the"
+- " device at %d/%d! Updating PCI command %4.4x->%4.4x.\n",
+- pci_bus, pci_device_fn, pci_command, new_command);
+- pcibios_write_config_word(pci_bus, pci_device_fn,
+- PCI_COMMAND, new_command);
+- }
+-
+- dev = pci_tbl[chip_idx].probe1(pci_bus, pci_device_fn, dev, ioaddr,
+- irq, chip_idx, cards_found);
+-
+- /* Get and check the bus-master and latency values. */
+- if (dev) {
+- u8 pci_latency;
+- pcibios_read_config_byte(pci_bus, pci_device_fn,
+- PCI_LATENCY_TIMER, &pci_latency);
+- if (pci_latency < 10) {
+- printk(KERN_INFO " PCI latency timer (CFLT) is "
+- "unreasonably low at %d. Setting to 64 clocks.\n",
+- pci_latency);
+- pcibios_write_config_byte(pci_bus, pci_device_fn,
+- PCI_LATENCY_TIMER, 64);
+- }
+- }
+- dev = 0;
+- cards_found++;
+- }
+-
+- return cards_found ? 0 : -ENODEV;
+-}
+-#endif /* not CARDBUS */
++static struct net_device *root_tulip_dev = NULL;
+
+-static struct device *tulip_probe1(int pci_bus, int pci_devfn,
+- struct device *dev, long ioaddr, int irq,
+- int chip_idx, int board_idx)
++static void *tulip_probe1(struct pci_dev *pdev, void *init_dev,
++ long ioaddr, int irq, int pci_tbl_idx, int find_cnt)
+ {
+- static int did_version = 0; /* Already printed version info. */
++ struct net_device *dev;
+ struct tulip_private *tp;
++ void *priv_mem;
+ /* See note below on the multiport cards. */
+- static unsigned char last_phys_addr[6] = {0x00, 'L', 'i', 'n', 'u', 'x'};
++ static unsigned char last_phys_addr[6] = {0x02, 'L', 'i', 'n', 'u', 'x'};
+ static int last_irq = 0;
+ static int multiport_cnt = 0; /* For four-port boards w/one EEPROM */
+ u8 chip_rev;
+- int i;
++ int i, chip_idx = pci_id_tbl[pci_tbl_idx].drv_flags & 0xff;
+ unsigned short sum;
+ u8 ee_data[EEPROM_SIZE];
+
+- if (tulip_debug > 0 && did_version++ == 0)
+- printk(KERN_INFO "%s", version);
++ /* Bring the 21041/21143 out of sleep mode.
++ Caution: Snooze mode does not work with some boards! */
++ if (tulip_tbl[chip_idx].flags & HAS_PWRDWN)
++ pci_write_config_dword(pdev, 0x40, 0x00000000);
++
++ if (inl(ioaddr + CSR5) == 0xffffffff) {
++ printk(KERN_ERR "The Tulip chip at %#lx is not functioning.\n", ioaddr);
++ return 0;
++ }
+
+- dev = init_etherdev(dev, 0);
++ dev = init_etherdev(init_dev, 0);
++ if (!dev)
++ return NULL;
+
+ /* Make certain the data structures are quadword aligned. */
+- tp = (void *)(((long)kmalloc(sizeof(*tp), GFP_KERNEL | GFP_DMA) + 7) & ~7);
++ priv_mem = kmalloc(sizeof(*tp) + PRIV_ALIGN, GFP_KERNEL);
++ /* Check for the very unlikely case of no memory. */
++ if (priv_mem == NULL)
++ return NULL;
++ dev->priv = tp = (void *)(((long)priv_mem + PRIV_ALIGN) & ~PRIV_ALIGN);
+ memset(tp, 0, sizeof(*tp));
+- dev->priv = tp;
++ tp->mii_lock = (spinlock_t) SPIN_LOCK_UNLOCKED;
++ tp->priv_addr = priv_mem;
+
+ tp->next_module = root_tulip_dev;
+ root_tulip_dev = dev;
+
+- pcibios_read_config_byte(pci_bus, pci_devfn, PCI_REVISION_ID, &chip_rev);
+-
+- /* Bring the 21041/21143 out of sleep mode.
+- Caution: Snooze mode does not work with some boards! */
+- if (tulip_tbl[chip_idx].flags & HAS_PWRDWN)
+- pcibios_write_config_dword(pci_bus, pci_devfn, 0x40, 0x00000000);
++ pci_read_config_byte(pdev, PCI_REVISION_ID, &chip_rev);
+
+ printk(KERN_INFO "%s: %s rev %d at %#3lx,",
+- dev->name, tulip_tbl[chip_idx].chip_name, chip_rev, ioaddr);
++ dev->name, pci_id_tbl[pci_tbl_idx].name, chip_rev, ioaddr);
+
+- /* Stop the chip's Tx and Rx processes. */
+- outl(inl(ioaddr + CSR6) & ~0x2002, ioaddr + CSR6);
++ /* Stop the Tx and Rx processes. */
++ outl(inl(ioaddr + CSR6) & ~TxOn & ~RxOn, ioaddr + CSR6);
+ /* Clear the missed-packet counter. */
+- (volatile int)inl(ioaddr + CSR8);
++ inl(ioaddr + CSR8);
+
+ if (chip_idx == DC21041 && inl(ioaddr + CSR9) & 0x8000) {
+ printk(" 21040 compatible mode,");
+ chip_idx = DC21040;
+ }
+
+- /* The station address ROM is read byte serially. The register must
+- be polled, waiting for the value to be read bit serially from the
+- EEPROM.
+- */
++ /* The SROM/EEPROM interface varies dramatically. */
+ sum = 0;
+ if (chip_idx == DC21040) {
+ outl(0, ioaddr + CSR9); /* Reset the pointer with a dummy write. */
+@@ -749,16 +772,18 @@
+ }
+ } else if (chip_idx == COMET) {
+ /* No need to read the EEPROM. */
+- put_unaligned(inl(ioaddr + 0xA4), (u32 *)dev->dev_addr);
+- put_unaligned(inl(ioaddr + 0xA8), (u16 *)(dev->dev_addr + 4));
++ put_unaligned(le32_to_cpu(inl(ioaddr + 0xA4)), (u32 *)dev->dev_addr);
++ put_unaligned(le16_to_cpu(inl(ioaddr + 0xA8)),
++ (u16 *)(dev->dev_addr + 4));
+ for (i = 0; i < 6; i ++)
+ sum += dev->dev_addr[i];
+ } else {
+ /* A serial EEPROM interface, we read now and sort it out later. */
+ int sa_offset = 0;
+ int ee_addr_size = read_eeprom(ioaddr, 0xff, 8) & 0x40000 ? 8 : 6;
++ int eeprom_word_cnt = 1 << ee_addr_size;
+
+- for (i = 0; i < sizeof(ee_data)/2; i++)
++ for (i = 0; i < eeprom_word_cnt; i++)
+ ((u16 *)ee_data)[i] =
+ le16_to_cpu(read_eeprom(ioaddr, i, ee_addr_size));
+
+@@ -768,7 +793,12 @@
+ for (i = 0; i < 8; i ++)
+ if (ee_data[i] != ee_data[16+i])
+ sa_offset = 20;
+- if (ee_data[0] == 0xff && ee_data[1] == 0xff && ee_data[2] == 0) {
++ if (chip_idx == CONEXANT) {
++ /* Check that the tuple type and length is correct. */
++ if (ee_data[0x198] == 0x04 && ee_data[0x199] == 6)
++ sa_offset = 0x19A;
++ } else if (ee_data[0] == 0xff && ee_data[1] == 0xff &&
++ ee_data[2] == 0) {
+ sa_offset = 2; /* Grrr, damn Matrox boards. */
+ multiport_cnt = 4;
+ }
+@@ -806,27 +836,33 @@
+ printk(", IRQ %d.\n", irq);
+ last_irq = irq;
+
+- /* We do a request_region() only to register /proc/ioports info. */
+- /* Note that proper size is tulip_tbl[chip_idx].chip_name, but... */
+- request_region(ioaddr, tulip_tbl[chip_idx].io_size, dev->name);
++#ifdef USE_IO_OPS
++ /* We do a request_region() to register /proc/ioports info. */
++ request_region(ioaddr, pci_id_tbl[chip_idx].io_size, dev->name);
++#endif
+
+ dev->base_addr = ioaddr;
+ dev->irq = irq;
+
+- tp->pci_bus = pci_bus;
+- tp->pci_devfn = pci_devfn;
++ tp->pci_dev = pdev;
++ tp->msg_level = (1 << debug) - 1;
+ tp->chip_id = chip_idx;
+ tp->revision = chip_rev;
+- tp->flags = tulip_tbl[chip_idx].flags;
++ tp->flags = tulip_tbl[chip_idx].flags
++ | (pci_id_tbl[pci_tbl_idx].drv_flags & 0xffffff00);
++ tp->rx_copybreak = rx_copybreak;
++ tp->max_interrupt_work = max_interrupt_work;
++ tp->multicast_filter_limit = multicast_filter_limit;
+ tp->csr0 = csr0;
+
+ /* BugFixes: The 21143-TD hangs with PCI Write-and-Invalidate cycles.
+ And the ASIX must have a burst limit or horrible things happen. */
+ if (chip_idx == DC21143 && chip_rev == 65)
+ tp->csr0 &= ~0x01000000;
+- else if (chip_idx == AX88140)
++ else if (tp->flags & IS_ASIX)
+ tp->csr0 |= 0x2000;
+
++ /* We support a zillion ways to set the media type. */
+ #ifdef TULIP_FULL_DUPLEX
+ tp->full_duplex = 1;
+ tp->full_duplex_lock = 1;
+@@ -839,16 +875,19 @@
+ #endif
+
+ /* The lower four bits are the media type. */
+- if (board_idx >= 0 && board_idx < MAX_UNITS) {
+- tp->default_port = options[board_idx] & 15;
+- if ((options[board_idx] & 0x90) || full_duplex[board_idx] > 0)
++ if (find_cnt >= 0 && find_cnt < MAX_UNITS) {
++ if (options[find_cnt] & 0x1f)
++ tp->default_port = options[find_cnt] & 0x1f;
++ if ((options[find_cnt] & 0x200) || full_duplex[find_cnt] > 0)
+ tp->full_duplex = 1;
+- if (mtu[board_idx] > 0)
+- dev->mtu = mtu[board_idx];
++ if (mtu[find_cnt] > 0)
++ dev->mtu = mtu[find_cnt];
+ }
+ if (dev->mem_start)
+- tp->default_port = dev->mem_start;
++ tp->default_port = dev->mem_start & 0x1f;
+ if (tp->default_port) {
++ printk(KERN_INFO "%s: Transceiver selection forced to %s.\n",
++ dev->name, medianame[tp->default_port & MEDIA_MASK]);
+ tp->medialock = 1;
+ if (media_cap[tp->default_port] & MediaAlwaysFD)
+ tp->full_duplex = 1;
+@@ -858,11 +897,9 @@
+
+ if (media_cap[tp->default_port] & MediaIsMII) {
+ u16 media2advert[] = { 0x20, 0x40, 0x03e0, 0x60, 0x80, 0x100, 0x200 };
+- tp->to_advertise = media2advert[tp->default_port - 9];
+- } else if (tp->flags & HAS_8023X)
+- tp->to_advertise = 0x05e1;
+- else
+- tp->to_advertise = 0x01e1;
++ tp->mii_advertise = media2advert[tp->default_port - 9];
++ tp->mii_advertise |= (tp->flags & HAS_8023X); /* Matching bits! */
++ }
+
+ /* This is logically part of probe1(), but too complex to write inline. */
+ if (tp->flags & HAS_MEDIA_TABLE) {
+@@ -870,16 +907,49 @@
+ parse_eeprom(dev);
+ }
+
++ /* The Tulip-specific entries in the device structure. */
++ dev->open = &tulip_open;
++ dev->hard_start_xmit = &tulip_start_xmit;
++ dev->stop = &tulip_close;
++ dev->get_stats = &tulip_get_stats;
++#ifdef HAVE_PRIVATE_IOCTL
++ dev->do_ioctl = &private_ioctl;
++#endif
++#ifdef HAVE_MULTICAST
++ dev->set_multicast_list = &set_rx_mode;
++#endif
++
++ if (tp->flags & HAS_NWAY)
++ tp->link_change = nway_lnk_change;
++ else if (tp->flags & HAS_PNICNWAY)
++ tp->link_change = pnic_lnk_change;
++ start_link(dev);
++ if (chip_idx == COMET) {
++ /* Set the Comet LED configuration. */
++ outl(0xf0000000, ioaddr + CSR9);
++ }
++
++ return dev;
++}
++
++/* Start the link, typically called at probe1() time but sometimes later with
++ multiport cards. */
++static void start_link(struct net_device *dev)
++{
++ struct tulip_private *tp = (struct tulip_private *)dev->priv;
++ long ioaddr = dev->base_addr;
++ int i;
++
+ if ((tp->flags & ALWAYS_CHECK_MII) ||
+ (tp->mtable && tp->mtable->has_mii) ||
+ ( ! tp->mtable && (tp->flags & HAS_MII))) {
+- int phy, phy_idx;
++ int phyn, phy_idx = 0;
+ if (tp->mtable && tp->mtable->has_mii) {
+ for (i = 0; i < tp->mtable->leafcount; i++)
+ if (tp->mtable->mleaf[i].media == 11) {
+ tp->cur_index = i;
+ tp->saved_if_port = dev->if_port;
+- select_media(dev, 1);
++ select_media(dev, 2);
+ dev->if_port = tp->saved_if_port;
+ break;
+ }
+@@ -887,33 +957,38 @@
+ /* Find the connected MII xcvrs.
+ Doing this in open() would allow detecting external xcvrs later,
+ but takes much time. */
+- for (phy = 0, phy_idx = 0; phy < 32 && phy_idx < sizeof(tp->phys);
+- phy++) {
++ for (phyn = 1; phyn <= 32 && phy_idx < sizeof(tp->phys); phyn++) {
++ int phy = phyn & 0x1f;
+ int mii_status = mdio_read(dev, phy, 1);
+ if ((mii_status & 0x8301) == 0x8001 ||
+ ((mii_status & 0x8000) == 0 && (mii_status & 0x7800) != 0)) {
+ int mii_reg0 = mdio_read(dev, phy, 0);
+ int mii_advert = mdio_read(dev, phy, 4);
+- int reg4 = ((mii_status>>6) & tp->to_advertise) | 1;
+- tp->phys[phy_idx] = phy;
+- tp->advertising[phy_idx++] = reg4;
++ int to_advert;
++
++ if (tp->mii_advertise)
++ to_advert = tp->mii_advertise;
++ else if (tp->advertising[phy_idx])
++ to_advert = tp->advertising[phy_idx];
++ else /* Leave unchanged. */
++ tp->mii_advertise = to_advert = mii_advert;
++
++ tp->phys[phy_idx++] = phy;
+ printk(KERN_INFO "%s: MII transceiver #%d "
+ "config %4.4x status %4.4x advertising %4.4x.\n",
+ dev->name, phy, mii_reg0, mii_status, mii_advert);
+ /* Fixup for DLink with miswired PHY. */
+- if (mii_advert != reg4) {
++ if (mii_advert != to_advert) {
+ printk(KERN_DEBUG "%s: Advertising %4.4x on PHY %d,"
+ " previously advertising %4.4x.\n",
+- dev->name, reg4, phy, mii_advert);
+- printk(KERN_DEBUG "%s: Advertising %4.4x (to advertise"
+- " is %4.4x).\n",
+- dev->name, reg4, tp->to_advertise);
+- mdio_write(dev, phy, 4, reg4);
++ dev->name, to_advert, phy, mii_advert);
++ mdio_write(dev, phy, 4, to_advert);
+ }
+ /* Enable autonegotiation: some boards default to off. */
+- mdio_write(dev, phy, 0, mii_reg0 |
+- (tp->full_duplex ? 0x1100 : 0x1000) |
+- (media_cap[tp->default_port]&MediaIs100 ? 0x2000:0));
++ mdio_write(dev, phy, 0, (mii_reg0 & ~0x3000) |
++ (tp->full_duplex ? 0x0100 : 0x0000) |
++ ((media_cap[tp->default_port] & MediaIs100) ?
++ 0x2000 : 0x1000));
+ }
+ }
+ tp->mii_cnt = phy_idx;
+@@ -924,36 +999,21 @@
+ }
+ }
+
+- /* The Tulip-specific entries in the device structure. */
+- dev->open = &tulip_open;
+- dev->hard_start_xmit = &tulip_start_xmit;
+- dev->stop = &tulip_close;
+- dev->get_stats = &tulip_get_stats;
+-#ifdef HAVE_PRIVATE_IOCTL
+- dev->do_ioctl = &private_ioctl;
+-#endif
+-#ifdef HAVE_MULTICAST
+- dev->set_multicast_list = &set_rx_mode;
+-#endif
+-
+- if ((tp->flags & HAS_NWAY143) || tp->chip_id == DC21041)
+- tp->link_change = t21142_lnk_change;
+- else if (tp->flags & HAS_PNICNWAY)
+- tp->link_change = pnic_lnk_change;
+-
+ /* Reset the xcvr interface and turn on heartbeat. */
+- switch (chip_idx) {
++ switch (tp->chip_id) {
++ case DC21040:
++ outl(0x00000000, ioaddr + CSR13);
++ outl(0x00000004, ioaddr + CSR13);
++ break;
+ case DC21041:
+- tp->to_advertise = 0x0061;
++ /* This is nway_start(). */
++ if (tp->sym_advertise == 0)
++ tp->sym_advertise = 0x0061;
+ outl(0x00000000, ioaddr + CSR13);
+ outl(0xFFFFFFFF, ioaddr + CSR14);
+ outl(0x00000008, ioaddr + CSR15); /* Listen on AUI also. */
+- outl(inl(ioaddr + CSR6) | 0x0200, ioaddr + CSR6);
+- outl(0x0000EF05, ioaddr + CSR13);
+- break;
+- case DC21040:
+- outl(0x00000000, ioaddr + CSR13);
+- outl(0x00000004, ioaddr + CSR13);
++ outl(inl(ioaddr + CSR6) | FullDuplex, ioaddr + CSR6);
++ outl(0x0000EF01, ioaddr + CSR13);
+ break;
+ case DC21140: default:
+ if (tp->mtable)
+@@ -967,7 +1027,7 @@
+ outl(0x0000, ioaddr + CSR14);
+ outl(0x820E0000, ioaddr + CSR6);
+ } else
+- t21142_start_nway(dev);
++ nway_start(dev);
+ break;
+ case LC82C168:
+ if ( ! tp->mii_cnt) {
+@@ -979,35 +1039,36 @@
+ outl(0x0201F078, ioaddr + 0xB8); /* Turn on autonegotiation. */
+ }
+ break;
+- case MX98713: case COMPEX9881:
++ case COMPEX9881:
+ outl(0x00000000, ioaddr + CSR6);
+ outl(0x000711C0, ioaddr + CSR14); /* Turn on NWay. */
+ outl(0x00000001, ioaddr + CSR13);
+ break;
+- case MX98715: case MX98725:
++ case MX98713: case MX98715: case MX98725:
+ outl(0x01a80000, ioaddr + CSR6);
+ outl(0xFFFFFFFF, ioaddr + CSR14);
+ outl(0x00001000, ioaddr + CSR12);
+ break;
+ case COMET:
+- /* No initialization necessary. */
+ break;
+ }
+
+- if (tulip_tbl[chip_idx].flags & HAS_PWRDWN)
+- pcibios_write_config_dword(pci_bus, pci_devfn, 0x40, 0x40000000);
+-
+- return dev;
++ if (tp->flags & HAS_PWRDWN)
++ pci_write_config_dword(tp->pci_dev, 0x40, 0x40000000);
+ }
++
+
+ /* Serial EEPROM section. */
+ /* The main routine to parse the very complicated SROM structure.
+ Search www.digital.com for "21X4 SROM" to get details.
+ This code is very complex, and will require changes to support
+- additional cards, so I'll be verbose about what is going on.
++ additional cards, so I will be verbose about what is going on.
+ */
+
+-/* Known cards that have old-style EEPROMs. */
++/* Known cards that have old-style EEPROMs.
++ Writing this table is described at
++ http://www.scyld.com/network/tulip-media.html
++*/
+ static struct fixups {
+ char *name;
+ unsigned char addr0, addr1, addr2;
+@@ -1051,14 +1112,15 @@
+ #define get_u16(ptr) (((u8*)(ptr))[0] + (((u8*)(ptr))[1]<<8))
+ #endif
+
+-static void parse_eeprom(struct device *dev)
++static void parse_eeprom(struct net_device *dev)
+ {
+ /* The last media info list parsed, for multiport boards. */
+ static struct mediatable *last_mediatable = NULL;
+ static unsigned char *last_ee_data = NULL;
+ static int controller_index = 0;
+ struct tulip_private *tp = (struct tulip_private *)dev->priv;
+- unsigned char *ee_data = tp->eeprom;
++ unsigned char *p, *ee_data = tp->eeprom;
++ int new_advertise = 0;
+ int i;
+
+ tp->mtable = 0;
+@@ -1079,59 +1141,76 @@
+ } else
+ printk(KERN_INFO "%s: Missing EEPROM, this interface may "
+ "not work correctly!\n",
+- dev->name);
++ dev->name);
+ return;
+ }
+- /* Do a fix-up based on the vendor half of the station address prefix. */
+- for (i = 0; eeprom_fixups[i].name; i++) {
+- if (dev->dev_addr[0] == eeprom_fixups[i].addr0
+- && dev->dev_addr[1] == eeprom_fixups[i].addr1
+- && dev->dev_addr[2] == eeprom_fixups[i].addr2) {
+- if (dev->dev_addr[2] == 0xE8 && ee_data[0x1a] == 0x55)
+- i++; /* An Accton EN1207, not an outlaw Maxtech. */
+- memcpy(ee_data + 26, eeprom_fixups[i].newtable,
+- sizeof(eeprom_fixups[i].newtable));
+- printk(KERN_INFO "%s: Old format EEPROM on '%s' board. Using"
+- " substitute media control info.\n",
+- dev->name, eeprom_fixups[i].name);
+- break;
++ /* Do a fix-up based on the vendor half of the station address. */
++ for (i = 0; eeprom_fixups[i].name; i++) {
++ if (dev->dev_addr[0] == eeprom_fixups[i].addr0
++ && dev->dev_addr[1] == eeprom_fixups[i].addr1
++ && dev->dev_addr[2] == eeprom_fixups[i].addr2) {
++ if (dev->dev_addr[2] == 0xE8 && ee_data[0x1a] == 0x55)
++ i++; /* An Accton EN1207, not an outlaw Maxtech. */
++ memcpy(ee_data + 26, eeprom_fixups[i].newtable,
++ sizeof(eeprom_fixups[i].newtable));
++ printk(KERN_INFO "%s: Old format EEPROM on '%s' board.\n"
++ KERN_INFO "%s: Using substitute media control info.\n",
++ dev->name, eeprom_fixups[i].name, dev->name);
++ break;
++ }
++ }
++ if (eeprom_fixups[i].name == NULL) { /* No fixup found. */
++ printk(KERN_INFO "%s: Old style EEPROM with no media selection "
++ "information.\n",
++ dev->name);
++ return;
+ }
+- }
+- if (eeprom_fixups[i].name == NULL) { /* No fixup found. */
+- printk(KERN_INFO "%s: Old style EEPROM with no media selection "
+- "information.\n",
+- dev->name);
+- return;
+- }
+ }
+
+ controller_index = 0;
+- if (ee_data[19] > 1) { /* Multiport board. */
++ if (ee_data[19] > 1) {
++ struct net_device *prev_dev;
++ struct tulip_private *otp;
++ /* This is a multiport board. The probe order may be "backwards", so
++ we patch up already found devices. */
+ last_ee_data = ee_data;
++ for (prev_dev = tp->next_module; prev_dev; prev_dev = otp->next_module) {
++ otp = (struct tulip_private *)prev_dev->priv;
++ if (otp->eeprom[0] == 0xff && otp->mtable == 0) {
++ parse_eeprom(prev_dev);
++ start_link(prev_dev);
++ } else
++ break;
++ }
++ controller_index = 0;
+ }
+ subsequent_board:
+
++ p = (void *)ee_data + ee_data[27 + controller_index*3];
+ if (ee_data[27] == 0) { /* No valid media table. */
+ } else if (tp->chip_id == DC21041) {
+- unsigned char *p = (void *)ee_data + ee_data[27 + controller_index*3];
+ int media = get_u16(p);
+ int count = p[2];
+ p += 3;
+
+ printk(KERN_INFO "%s: 21041 Media table, default media %4.4x (%s).\n",
+ dev->name, media,
+- media & 0x0800 ? "Autosense" : medianame[media & 15]);
++ media & 0x0800 ? "Autosense" : medianame[media & MEDIA_MASK]);
+ for (i = 0; i < count; i++) {
+- unsigned char media_code = *p++;
+- if (media_code & 0x40)
++ unsigned char media_block = *p++;
++ int media_code = media_block & MEDIA_MASK;
++ if (media_block & 0x40)
+ p += 6;
++ switch(media_code) {
++ case 0: new_advertise |= 0x0020; break;
++ case 4: new_advertise |= 0x0040; break;
++ }
+ printk(KERN_INFO "%s: 21041 media #%d, %s.\n",
+- dev->name, media_code & 15, medianame[media_code & 15]);
++ dev->name, media_code, medianame[media_code]);
+ }
+ } else {
+- unsigned char *p = (void *)ee_data + ee_data[27];
+ unsigned char csr12dir = 0;
+- int count, new_advertise = 0;
++ int count;
+ struct mediatable *mtable;
+ u16 media = get_u16(p);
+
+@@ -1152,7 +1231,7 @@
+ mtable->csr15dir = mtable->csr15val = 0;
+
+ printk(KERN_INFO "%s: EEPROM default media type %s.\n", dev->name,
+- media & 0x0800 ? "Autosense" : medianame[media & 15]);
++ media & 0x0800 ? "Autosense" : medianame[media & MEDIA_MASK]);
+ for (i = 0; i < count; i++) {
+ struct medialeaf *leaf = &mtable->mleaf[i];
+
+@@ -1164,16 +1243,28 @@
+ mtable->has_mii = 1;
+ p += 4;
+ } else {
+- leaf->type = p[1];
+- if (p[1] == 0x05) {
+- mtable->has_reset = i;
+- leaf->media = p[2] & 0x0f;
+- } else if (p[1] & 1) {
++ switch(leaf->type = p[1]) {
++ case 5:
++ mtable->has_reset = i + 1; /* Assure non-zero */
++ /* Fall through */
++ case 6:
++ leaf->media = 31;
++ break;
++ case 1: case 3:
+ mtable->has_mii = 1;
+ leaf->media = 11;
+- } else {
++ break;
++ case 2:
++ if ((p[2] & 0x3f) == 0) {
++ u32 base15 = (p[2] & 0x40) ? get_u16(p + 7) : 0x0008;
++ u16 *p1 = (u16 *)(p + (p[2] & 0x40 ? 9 : 3));
++ mtable->csr15dir = (get_unaligned(p1 + 0)<<16) + base15;
++ mtable->csr15val = (get_unaligned(p1 + 1)<<16) + base15;
++ }
++ /* Fall through. */
++ case 0: case 4:
+ mtable->has_nonmii = 1;
+- leaf->media = p[2] & 0x0f;
++ leaf->media = p[2] & MEDIA_MASK;
+ switch (leaf->media) {
+ case 0: new_advertise |= 0x0020; break;
+ case 4: new_advertise |= 0x0040; break;
+@@ -1181,36 +1272,30 @@
+ case 5: new_advertise |= 0x0100; break;
+ case 6: new_advertise |= 0x0200; break;
+ }
+- if (p[1] == 2 && leaf->media == 0) {
+- if (p[2] & 0x40) {
+- u32 base15 = get_unaligned((u16*)&p[7]);
+- mtable->csr15dir =
+- (get_unaligned((u16*)&p[9])<<16) + base15;
+- mtable->csr15val =
+- (get_unaligned((u16*)&p[11])<<16) + base15;
+- } else {
+- mtable->csr15dir = get_unaligned((u16*)&p[3])<<16;
+- mtable->csr15val = get_unaligned((u16*)&p[5])<<16;
+- }
+- }
++ break;
++ default:
++ leaf->media = 19;
+ }
+ leaf->leafdata = p + 2;
+ p += (p[0] & 0x3f) + 1;
+ }
+- if (tulip_debug > 1 && leaf->media == 11) {
++ if ((tp->msg_level & NETIF_MSG_LINK) &&
++ leaf->media == 11) {
+ unsigned char *bp = leaf->leafdata;
+ printk(KERN_INFO "%s: MII interface PHY %d, setup/reset "
+ "sequences %d/%d long, capabilities %2.2x %2.2x.\n",
+ dev->name, bp[0], bp[1], bp[2 + bp[1]*2],
+ bp[5 + bp[2 + bp[1]*2]*2], bp[4 + bp[2 + bp[1]*2]*2]);
+ }
+- printk(KERN_INFO "%s: Index #%d - Media %s (#%d) described "
+- "by a %s (%d) block.\n",
+- dev->name, i, medianame[leaf->media], leaf->media,
+- block_name[leaf->type], leaf->type);
++ if (tp->msg_level & NETIF_MSG_PROBE)
++ printk(KERN_INFO "%s: Index #%d - Media %s (#%d) described "
++ "by a %s (%d) block.\n",
++ dev->name, i, medianame[leaf->media], leaf->media,
++ leaf->type < 6 ? block_name[leaf->type] : "UNKNOWN",
++ leaf->type);
+ }
+ if (new_advertise)
+- tp->to_advertise = new_advertise;
++ tp->sym_advertise = new_advertise;
+ }
+ }
+ /* Reading a serial EEPROM is a "bit" grungy, but we work our way through:->.*/
+@@ -1225,7 +1310,7 @@
+ #define EE_ENB (0x4800 | EE_CS)
+
+ /* Delay between EEPROM clock transitions.
+- Even at 33Mhz current PCI implementations don't overrun the EEPROM clock.
++ Even at 33Mhz current PCI implementations do not overrun the EEPROM clock.
+ We add a bus turn-around to insure that this remains true. */
+ #define eeprom_delay() inl(ee_addr)
+
+@@ -1253,6 +1338,7 @@
+ retval = (retval << 1) | ((inl(ee_addr) & EE_DATA_READ) ? 1 : 0);
+ }
+ outl(EE_ENB, ee_addr);
++ eeprom_delay();
+
+ for (i = 16; i > 0; i--) {
+ outl(EE_ENB | EE_SHIFT_CLK, ee_addr);
+@@ -1287,36 +1373,42 @@
+ #define MDIO_ENB_IN 0x40000
+ #define MDIO_DATA_READ 0x80000
+
+-static int mdio_read(struct device *dev, int phy_id, int location)
++static const unsigned char comet_miireg2offset[32] = {
++ 0xB4, 0xB8, 0xBC, 0xC0, 0xC4, 0xC8, 0xCC, 0, 0,0,0,0, 0,0,0,0,
++ 0,0xD0,0,0, 0,0,0,0, 0,0,0,0, 0, 0xD4, 0xD8, 0xDC, };
++
++static int mdio_read(struct net_device *dev, int phy_id, int location)
+ {
+ struct tulip_private *tp = (struct tulip_private *)dev->priv;
+ int i;
+- int read_cmd = (0xf6 << 10) | (phy_id << 5) | location;
++ int read_cmd = (0xf6 << 10) | ((phy_id & 0x1f) << 5) | location;
+ int retval = 0;
+ long ioaddr = dev->base_addr;
+ long mdio_addr = ioaddr + CSR9;
++ unsigned long flags;
++
++ if (location & ~0x1f)
++ return 0xffff;
++
++ if (tp->chip_id == COMET && phy_id == 30) {
++ if (comet_miireg2offset[location])
++ return inl(ioaddr + comet_miireg2offset[location]);
++ return 0xffff;
++ }
+
++ spin_lock_irqsave(&tp->mii_lock, flags);
+ if (tp->chip_id == LC82C168) {
+ int i = 1000;
+ outl(0x60020000 + (phy_id<<23) + (location<<18), ioaddr + 0xA0);
+ inl(ioaddr + 0xA0);
+ inl(ioaddr + 0xA0);
++ inl(ioaddr + 0xA0);
++ inl(ioaddr + 0xA0);
+ while (--i > 0)
+ if ( ! ((retval = inl(ioaddr + 0xA0)) & 0x80000000))
+- return retval & 0xffff;
+- return 0xffff;
+- }
+-
+- if (tp->chip_id == COMET) {
+- if (phy_id == 1) {
+- if (location < 7)
+- return inl(ioaddr + 0xB4 + (location<<2));
+- else if (location == 17)
+- return inl(ioaddr + 0xD0);
+- else if (location >= 29 && location <= 31)
+- return inl(ioaddr + 0xD4 + ((location-29)<<2));
+- }
+- return 0xffff;
++ break;
++ spin_unlock_irqrestore(&tp->mii_lock, flags);
++ return retval & 0xffff;
+ }
+
+ /* Establish sync by sending at least 32 logic ones. */
+@@ -1343,17 +1435,29 @@
+ outl(MDIO_ENB_IN | MDIO_SHIFT_CLK, mdio_addr);
+ mdio_delay();
+ }
++ spin_unlock_irqrestore(&tp->mii_lock, flags);
+ return (retval>>1) & 0xffff;
+ }
+
+-static void mdio_write(struct device *dev, int phy_id, int location, int value)
++static void mdio_write(struct net_device *dev, int phy_id, int location, int val)
+ {
+ struct tulip_private *tp = (struct tulip_private *)dev->priv;
+ int i;
+- int cmd = (0x5002 << 16) | (phy_id << 23) | (location<<18) | value;
++ int cmd = (0x5002 << 16) | (phy_id << 23) | (location<<18) | (val & 0xffff);
+ long ioaddr = dev->base_addr;
+ long mdio_addr = ioaddr + CSR9;
++ unsigned long flags;
++
++ if (location & ~0x1f)
++ return;
++
++ if (tp->chip_id == COMET && phy_id == 30) {
++ if (comet_miireg2offset[location])
++ outl(val, ioaddr + comet_miireg2offset[location]);
++ return;
++ }
+
++ spin_lock_irqsave(&tp->mii_lock, flags);
+ if (tp->chip_id == LC82C168) {
+ int i = 1000;
+ outl(cmd, ioaddr + 0xA0);
+@@ -1361,18 +1465,7 @@
+ if ( ! (inl(ioaddr + 0xA0) & 0x80000000))
+ break;
+ while (--i > 0);
+- return;
+- }
+-
+- if (tp->chip_id == COMET) {
+- if (phy_id != 1)
+- return;
+- if (location < 7)
+- outl(value, ioaddr + 0xB4 + (location<<2));
+- else if (location == 17)
+- outl(value, ioaddr + 0xD0);
+- else if (location >= 29 && location <= 31)
+- outl(value, ioaddr + 0xD4 + ((location-29)<<2));
++ spin_unlock_irqrestore(&tp->mii_lock, flags);
+ return;
+ }
+
+@@ -1398,21 +1491,21 @@
+ outl(MDIO_ENB_IN | MDIO_SHIFT_CLK, mdio_addr);
+ mdio_delay();
+ }
++ spin_unlock_irqrestore(&tp->mii_lock, flags);
+ return;
+ }
+
+
+ static int
+-tulip_open(struct device *dev)
++tulip_open(struct net_device *dev)
+ {
+ struct tulip_private *tp = (struct tulip_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+ int next_tick = 3*HZ;
+- int i;
+
+ /* Wake the chip from sleep/snooze mode. */
+ if (tp->flags & HAS_PWRDWN)
+- pcibios_write_config_dword(tp->pci_bus, tp->pci_devfn, 0x40, 0);
++ pci_write_config_dword(tp->pci_dev, 0x40, 0);
+
+ /* On some chip revs we must set the MII/SYM port before the reset!? */
+ if (tp->mii_cnt || (tp->mtable && tp->mtable->has_mii))
+@@ -1421,67 +1514,93 @@
+ /* Reset the chip, holding bit 0 set at least 50 PCI cycles. */
+ outl(0x00000001, ioaddr + CSR0);
+
+- if (request_irq(dev->irq, &tulip_interrupt, SA_SHIRQ, dev->name, dev))
+- return -EAGAIN;
+ MOD_INC_USE_COUNT;
+
++ /* This would be done after interrupts are initialized, but we do not want
++ to frob the transceiver only to fail later. */
++ if (request_irq(dev->irq, &tulip_interrupt, SA_SHIRQ, dev->name, dev)) {
++ MOD_DEC_USE_COUNT;
++ return -EAGAIN;
++ }
++
+ /* Deassert reset.
+ Wait the specified 50 PCI cycles after a reset by initializing
+ Tx and Rx queues and the address filter list. */
+ outl(tp->csr0, ioaddr + CSR0);
+
+- if (tulip_debug > 1)
++ if (tp->msg_level & NETIF_MSG_IFUP)
+ printk(KERN_DEBUG "%s: tulip_open() irq %d.\n", dev->name, dev->irq);
+
+ tulip_init_ring(dev);
+
+-#if 0
+ if (tp->chip_id == PNIC2) {
+- u32 addr_low = cpu_to_le32(get_unaligned((u32 *)dev->dev_addr));
+- u32 addr_high = cpu_to_le16(get_unaligned((u16 *)(dev->dev_addr+4)));
+- addr_high = (dev->dev_addr[4]<<8) + (dev->dev_addr[5]<<0);
+- outl((dev->dev_addr[0]<<8) + dev->dev_addr[1] +
+- (dev->dev_addr[2]<<24) + (dev->dev_addr[3]<<16),
++ u32 addr_high = (dev->dev_addr[1]<<8) + (dev->dev_addr[0]<<0);
++ /* This address setting does not appear to impact chip operation?? */
++ outl((dev->dev_addr[5]<<8) + dev->dev_addr[4] +
++ (dev->dev_addr[3]<<24) + (dev->dev_addr[2]<<16),
+ ioaddr + 0xB0);
+ outl(addr_high + (addr_high<<16), ioaddr + 0xB8);
+ }
+-#endif
+ if (tp->flags & MC_HASH_ONLY) {
+ u32 addr_low = cpu_to_le32(get_unaligned((u32 *)dev->dev_addr));
+- u32 addr_high = cpu_to_le32(get_unaligned((u16 *)(dev->dev_addr+4)));
+- if (tp->chip_id == AX88140) {
++ u32 addr_high = cpu_to_le16(get_unaligned((u16 *)(dev->dev_addr+4)));
++ if (tp->flags & IS_ASIX) {
+ outl(0, ioaddr + CSR13);
+ outl(addr_low, ioaddr + CSR14);
+ outl(1, ioaddr + CSR13);
+ outl(addr_high, ioaddr + CSR14);
+- } else if (tp->chip_id == COMET) {
++ } else if (tp->flags & COMET_MAC_ADDR) {
+ outl(addr_low, ioaddr + 0xA4);
+ outl(addr_high, ioaddr + 0xA8);
+ outl(0, ioaddr + 0xAC);
+ outl(0, ioaddr + 0xB0);
+ }
+- } else {
+- /* This is set_rx_mode(), but without starting the transmitter. */
+- u16 *eaddrs = (u16 *)dev->dev_addr;
+- u16 *setup_frm = &tp->setup_frame[15*6];
+-
+- /* 21140 bug: you must add the broadcast address. */
+- memset(tp->setup_frame, 0xff, sizeof(tp->setup_frame));
+- /* Fill the final entry of the table with our physical address. */
+- *setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0];
+- *setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1];
+- *setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2];
+- /* Put the setup frame on the Tx list. */
+- tp->tx_ring[0].length = cpu_to_le32(0x08000000 | 192);
+- tp->tx_ring[0].buffer1 = virt_to_le32desc(tp->setup_frame);
+- tp->tx_ring[0].status = cpu_to_le32(DescOwned);
+-
+- tp->cur_tx++;
+ }
+
+ outl(virt_to_bus(tp->rx_ring), ioaddr + CSR3);
+ outl(virt_to_bus(tp->tx_ring), ioaddr + CSR4);
+
++ if ( ! tp->full_duplex_lock)
++ tp->full_duplex = 0;
++ init_media(dev);
++ if (media_cap[dev->if_port] & MediaIsMII)
++ check_duplex(dev);
++ set_rx_mode(dev);
++
++ /* Start the Tx to process setup frame. */
++ outl(tp->csr6, ioaddr + CSR6);
++ outl(tp->csr6 | TxOn, ioaddr + CSR6);
++
++ netif_start_tx_queue(dev);
++
++ /* Enable interrupts by setting the interrupt mask. */
++ outl(tulip_tbl[tp->chip_id].valid_intrs, ioaddr + CSR5);
++ outl(tulip_tbl[tp->chip_id].valid_intrs, ioaddr + CSR7);
++ outl(tp->csr6 | TxOn | RxOn, ioaddr + CSR6);
++ outl(0, ioaddr + CSR2); /* Rx poll demand */
++
++ if (tp->msg_level & NETIF_MSG_IFUP)
++ printk(KERN_DEBUG "%s: Done tulip_open(), CSR0 %8.8x, CSR5 %8.8x CSR6 "
++ "%8.8x.\n", dev->name, (int)inl(ioaddr + CSR0),
++ (int)inl(ioaddr + CSR5), (int)inl(ioaddr + CSR6));
++
++ /* Set the timer to switch to check for link beat and perhaps switch
++ to an alternate media type. */
++ init_timer(&tp->timer);
++ tp->timer.expires = jiffies + next_tick;
++ tp->timer.data = (unsigned long)dev;
++ tp->timer.function = tulip_tbl[tp->chip_id].media_timer;
++ add_timer(&tp->timer);
++
++ return 0;
++}
++
++static void init_media(struct net_device *dev)
++{
++ struct tulip_private *tp = (struct tulip_private *)dev->priv;
++ long ioaddr = dev->base_addr;
++ int i;
++
+ tp->saved_if_port = dev->if_port;
+ if (dev->if_port == 0)
+ dev->if_port = tp->default_port;
+@@ -1501,7 +1620,7 @@
+ }
+ }
+ if ((tp->mtable->defaultmedia & 0x0800) == 0) {
+- int looking_for = tp->mtable->defaultmedia & 15;
++ int looking_for = tp->mtable->defaultmedia & MEDIA_MASK;
+ for (i = 0; i < tp->mtable->leafcount; i++)
+ if (tp->mtable->mleaf[i].media == looking_for) {
+ printk(KERN_INFO "%s: Using EEPROM-set media %s.\n",
+@@ -1518,13 +1637,27 @@
+ tp->csr6 = 0;
+ tp->cur_index = i;
+ tp->nwayset = 0;
+- if (dev->if_port == 0 && tp->chip_id == DC21041) {
+- tp->nway = 1;
++
++ if (dev->if_port) {
++ if (tp->chip_id == DC21143 &&
++ (media_cap[dev->if_port] & MediaIsMII)) {
++ /* We must reset the media CSRs when we force-select MII mode. */
++ outl(0x0000, ioaddr + CSR13);
++ outl(0x0000, ioaddr + CSR14);
++ outl(0x0008, ioaddr + CSR15);
++ }
++ select_media(dev, 1);
++ return;
+ }
+- if (dev->if_port == 0 && tp->chip_id == DC21142) {
++ switch(tp->chip_id) {
++ case DC21041:
++ /* tp->nway = 1;*/
++ nway_start(dev);
++ break;
++ case DC21142:
+ if (tp->mii_cnt) {
+ select_media(dev, 1);
+- if (tulip_debug > 1)
++ if (tp->msg_level & NETIF_MSG_LINK)
+ printk(KERN_INFO "%s: Using MII transceiver %d, status "
+ "%4.4x.\n",
+ dev->name, tp->phys[0], mdio_read(dev, tp->phys[0], 1));
+@@ -1534,13 +1667,15 @@
+ outl(0x0000, ioaddr + CSR13);
+ outl(0x0000, ioaddr + CSR14);
+ } else
+- t21142_start_nway(dev);
+- } else if (tp->chip_id == PNIC2) {
+- t21142_start_nway(dev);
+- } else if (tp->chip_id == LC82C168 && ! tp->medialock) {
++ nway_start(dev);
++ break;
++ case PNIC2:
++ nway_start(dev);
++ break;
++ case LC82C168:
+ if (tp->mii_cnt) {
+ dev->if_port = 11;
+- tp->csr6 = 0x814C0000 | (tp->full_duplex ? 0x0200 : 0);
++ tp->csr6 = 0x814C0000 | (tp->full_duplex ? FullDuplex : 0);
+ outl(0x0001, ioaddr + CSR15);
+ } else if (inl(ioaddr + CSR5) & TPLnkPass)
+ pnic_do_nway(dev);
+@@ -1550,65 +1685,39 @@
+ tp->csr6 = 0x00420000;
+ outl(0x0001B078, ioaddr + 0xB8);
+ outl(0x0201B078, ioaddr + 0xB8);
+- next_tick = 1*HZ;
+ }
+- } else if ((tp->chip_id == MX98713 || tp->chip_id == COMPEX9881)
+- && ! tp->medialock) {
++ break;
++ case MX98713: case COMPEX9881:
+ dev->if_port = 0;
+- tp->csr6 = 0x01880000 | (tp->full_duplex ? 0x0200 : 0);
++ tp->csr6 = 0x01880000 | (tp->full_duplex ? FullDuplex : 0);
+ outl(0x0f370000 | inw(ioaddr + 0x80), ioaddr + 0x80);
+- } else if (tp->chip_id == MX98715 || tp->chip_id == MX98725) {
++ break;
++ case MX98715: case MX98725:
+ /* Provided by BOLO, Macronix - 12/10/1998. */
+ dev->if_port = 0;
+- tp->csr6 = 0x01a80200;
++ tp->csr6 = 0x01a80000 | FullDuplex;
+ outl(0x0f370000 | inw(ioaddr + 0x80), ioaddr + 0x80);
+ outl(0x11000 | inw(ioaddr + 0xa0), ioaddr + 0xa0);
+- } else if (tp->chip_id == DC21143 &&
+- media_cap[dev->if_port] & MediaIsMII) {
+- /* We must reset the media CSRs when we force-select MII mode. */
+- outl(0x0000, ioaddr + CSR13);
+- outl(0x0000, ioaddr + CSR14);
+- outl(0x0008, ioaddr + CSR15);
+- } else if (tp->chip_id == COMET) {
+- dev->if_port = 0;
++ break;
++ case COMET: case CONEXANT:
++ /* Enable automatic Tx underrun recovery. */
++ outl(inl(ioaddr + 0x88) | 1, ioaddr + 0x88);
++ dev->if_port = tp->mii_cnt ? 11 : 0;
+ tp->csr6 = 0x00040000;
+- } else if (tp->chip_id == AX88140) {
++ break;
++ case AX88140: case AX88141:
+ tp->csr6 = tp->mii_cnt ? 0x00040100 : 0x00000100;
+- } else
++ break;
++ default:
+ select_media(dev, 1);
+-
+- /* Start the chip's Tx to process setup frame. */
+- outl(tp->csr6, ioaddr + CSR6);
+- outl(tp->csr6 | 0x2000, ioaddr + CSR6);
+-
+- dev->tbusy = 0;
+- tp->interrupt = 0;
+- dev->start = 1;
+-
+- /* Enable interrupts by setting the interrupt mask. */
+- outl(tulip_tbl[tp->chip_id].valid_intrs, ioaddr + CSR5);
+- outl(tulip_tbl[tp->chip_id].valid_intrs, ioaddr + CSR7);
+- outl(tp->csr6 | 0x2002, ioaddr + CSR6);
+- outl(0, ioaddr + CSR2); /* Rx poll demand */
+-
+- if (tulip_debug > 2) {
+- printk(KERN_DEBUG "%s: Done tulip_open(), CSR0 %8.8x, CSR5 %8.8x CSR6 %8.8x.\n",
+- dev->name, inl(ioaddr + CSR0), inl(ioaddr + CSR5),
+- inl(ioaddr + CSR6));
+ }
+- /* Set the timer to switch to check for link beat and perhaps switch
+- to an alternate media type. */
+- init_timer(&tp->timer);
+- tp->timer.expires = RUN_AT(next_tick);
+- tp->timer.data = (unsigned long)dev;
+- tp->timer.function = tulip_tbl[tp->chip_id].media_timer;
+- add_timer(&tp->timer);
+-
+- return 0;
+ }
+
+-/* Set up the transceiver control registers for the selected media type. */
+-static void select_media(struct device *dev, int startup)
++/* Set up the transceiver control registers for the selected media type.
++ STARTUP indicates to reset the transceiver. It is set to '2' for
++ the initial card detection, and '1' during resume or open().
++*/
++static void select_media(struct net_device *dev, int startup)
+ {
+ long ioaddr = dev->base_addr;
+ struct tulip_private *tp = (struct tulip_private *)dev->priv;
+@@ -1619,9 +1728,12 @@
+ if (mtable) {
+ struct medialeaf *mleaf = &mtable->mleaf[tp->cur_index];
+ unsigned char *p = mleaf->leafdata;
++ if (tp->msg_level & NETIF_MSG_LINK)
++ printk(KERN_DEBUG "%s: Media table type %d.\n",
++ dev->name, mleaf->type);
+ switch (mleaf->type) {
+ case 0: /* 21140 non-MII xcvr. */
+- if (tulip_debug > 1)
++ if (tp->msg_level & NETIF_MSG_LINK)
+ printk(KERN_DEBUG "%s: Using a 21140 non-MII transceiver"
+ " with control setting %2.2x.\n",
+ dev->name, p[1]);
+@@ -1637,20 +1749,20 @@
+ for (i = 0; i < 5; i++)
+ setup[i] = get_u16(&p[i*2 + 1]);
+
+- dev->if_port = p[0] & 15;
++ dev->if_port = p[0] & MEDIA_MASK;
+ if (media_cap[dev->if_port] & MediaAlwaysFD)
+ tp->full_duplex = 1;
+
+ if (startup && mtable->has_reset) {
+- struct medialeaf *rleaf = &mtable->mleaf[mtable->has_reset];
++ struct medialeaf *rleaf = &mtable->mleaf[mtable->has_reset-1];
+ unsigned char *rst = rleaf->leafdata;
+- if (tulip_debug > 1)
++ if (tp->msg_level & NETIF_MSG_LINK)
+ printk(KERN_DEBUG "%s: Resetting the transceiver.\n",
+ dev->name);
+ for (i = 0; i < rst[0]; i++)
+ outl(get_u16(rst + 1 + (i<<1)) << 16, ioaddr + CSR15);
+ }
+- if (tulip_debug > 1)
++ if (tp->msg_level & NETIF_MSG_LINK)
+ printk(KERN_DEBUG "%s: 21143 non-MII %s transceiver control "
+ "%4.4x/%4.4x.\n",
+ dev->name, medianame[dev->if_port], setup[0], setup[1]);
+@@ -1666,7 +1778,7 @@
+ outl(csr13val, ioaddr + CSR13);
+ } else {
+ csr13val = 1;
+- csr14val = 0x0003FF7F;
++ csr14val = 0x0003FFFF;
+ csr15dir = (setup[0]<<16) | 0x0008;
+ csr15val = (setup[1]<<16) | 0x0008;
+ if (dev->if_port <= 4)
+@@ -1679,11 +1791,11 @@
+ outl(csr15val, ioaddr + CSR15); /* Data */
+ if (startup) outl(csr13val, ioaddr + CSR13);
+ }
+- if (tulip_debug > 1)
++ if (tp->msg_level & NETIF_MSG_LINK)
+ printk(KERN_DEBUG "%s: Setting CSR15 to %8.8x/%8.8x.\n",
+ dev->name, csr15dir, csr15val);
+ if (mleaf->type == 4)
+- new_csr6 = 0x82020000 | ((setup[2] & 0x71) << 18);
++ new_csr6 = 0x820A0000 | ((setup[2] & 0x71) << 18);
+ else
+ new_csr6 = 0x82420000;
+ break;
+@@ -1692,7 +1804,6 @@
+ int phy_num = p[0];
+ int init_length = p[1];
+ u16 *misc_info;
+- u16 to_advertise;
+
+ dev->if_port = 11;
+ new_csr6 = 0x020E0000;
+@@ -1719,13 +1830,15 @@
+ for (i = 0; i < init_length; i++)
+ outl(init_sequence[i], ioaddr + CSR12);
+ }
+- to_advertise = (get_u16(&misc_info[1]) & tp->to_advertise) | 1;
+- tp->advertising[phy_num] = to_advertise;
+- if (tulip_debug > 1)
+- printk(KERN_DEBUG "%s: Advertising %4.4x on PHY %d (%d).\n",
+- dev->name, to_advertise, phy_num, tp->phys[phy_num]);
+- /* Bogus: put in by a committee? */
+- mdio_write(dev, tp->phys[phy_num], 4, to_advertise);
++ tp->advertising[phy_num] = get_u16(&misc_info[1]) | 1;
++ if (startup < 2) {
++ if (tp->mii_advertise == 0)
++ tp->mii_advertise = tp->advertising[phy_num];
++ if (tp->msg_level & NETIF_MSG_LINK)
++ printk(KERN_DEBUG "%s: Advertising %4.4x on MII %d.\n",
++ dev->name, tp->mii_advertise, tp->phys[phy_num]);
++ mdio_write(dev, tp->phys[phy_num], 4, tp->mii_advertise);
++ }
+ break;
+ }
+ default:
+@@ -1733,16 +1846,16 @@
+ dev->name, mleaf->type);
+ new_csr6 = 0x020E0000;
+ }
+- if (tulip_debug > 1)
++ if (tp->msg_level & NETIF_MSG_LINK)
+ printk(KERN_DEBUG "%s: Using media type %s, CSR12 is %2.2x.\n",
+ dev->name, medianame[dev->if_port],
+- inl(ioaddr + CSR12) & 0xff);
++ (int)inl(ioaddr + CSR12) & 0xff);
+ } else if (tp->chip_id == DC21041) {
+ int port = dev->if_port <= 4 ? dev->if_port : 0;
+- if (tulip_debug > 1)
++ if (tp->msg_level & NETIF_MSG_LINK)
+ printk(KERN_DEBUG "%s: 21041 using media %s, CSR12 is %4.4x.\n",
+ dev->name, medianame[port == 3 ? 12: port],
+- inl(ioaddr + CSR12));
++ (int)inl(ioaddr + CSR12));
+ outl(0x00000000, ioaddr + CSR13); /* Reset the serial interface */
+ outl(t21041_csr14[port], ioaddr + CSR14);
+ outl(t21041_csr15[port], ioaddr + CSR15);
+@@ -1751,9 +1864,10 @@
+ } else if (tp->chip_id == LC82C168) {
+ if (startup && ! tp->medialock)
+ dev->if_port = tp->mii_cnt ? 11 : 0;
+- if (tulip_debug > 1)
++ if (tp->msg_level & NETIF_MSG_LINK)
+ printk(KERN_DEBUG "%s: PNIC PHY status is %3.3x, media %s.\n",
+- dev->name, inl(ioaddr + 0xB8), medianame[dev->if_port]);
++ dev->name, (int)inl(ioaddr + 0xB8),
++ medianame[dev->if_port]);
+ if (tp->mii_cnt) {
+ new_csr6 = 0x810C0000;
+ outl(0x0001, ioaddr + CSR15);
+@@ -1777,7 +1891,7 @@
+ } else if (tp->chip_id == DC21040) { /* 21040 */
+ /* Turn on the xcvr interface. */
+ int csr12 = inl(ioaddr + CSR12);
+- if (tulip_debug > 1)
++ if (tp->msg_level & NETIF_MSG_LINK)
+ printk(KERN_DEBUG "%s: 21040 media type is %s, CSR12 is %2.2x.\n",
+ dev->name, medianame[dev->if_port], csr12);
+ if (media_cap[dev->if_port] & MediaAlwaysFD)
+@@ -1800,17 +1914,18 @@
+ if (media_cap[dev->if_port] & MediaIsMII) {
+ new_csr6 = 0x020E0000;
+ } else if (media_cap[dev->if_port] & MediaIsFx) {
+- new_csr6 = 0x028600000;
++ new_csr6 = 0x02860000;
+ } else
+- new_csr6 = 0x038600000;
+- if (tulip_debug > 1)
++ new_csr6 = 0x038E0000;
++ if (tp->msg_level & NETIF_MSG_LINK)
+ printk(KERN_DEBUG "%s: No media description table, assuming "
+ "%s transceiver, CSR12 %2.2x.\n",
+ dev->name, medianame[dev->if_port],
+- inl(ioaddr + CSR12));
++ (int)inl(ioaddr + CSR12));
+ }
+
+- tp->csr6 = new_csr6 | (tp->csr6 & 0xfdff) | (tp->full_duplex ? 0x0200 : 0);
++ tp->csr6 = new_csr6 | (tp->csr6 & 0xfdff) |
++ (tp->full_duplex ? FullDuplex : 0);
+ return;
+ }
+
+@@ -1820,7 +1935,7 @@
+ Return 0 if everything is OK.
+ Return < 0 if the transceiver is missing or has no link beat.
+ */
+-static int check_duplex(struct device *dev)
++static int check_duplex(struct net_device *dev)
+ {
+ long ioaddr = dev->base_addr;
+ struct tulip_private *tp = (struct tulip_private *)dev->priv;
+@@ -1828,36 +1943,37 @@
+
+ if (tp->full_duplex_lock)
+ return 0;
+- mii_reg1 = mdio_read(dev, tp->phys[0], 1);
+ mii_reg5 = mdio_read(dev, tp->phys[0], 5);
+- if (tulip_debug > 1)
+- printk(KERN_INFO "%s: MII status %4.4x, Link partner report "
+- "%4.4x.\n", dev->name, mii_reg1, mii_reg5);
+- if (mii_reg1 == 0xffff)
++ negotiated = mii_reg5 & tp->mii_advertise;
++
++ if (tp->msg_level & NETIF_MSG_TIMER)
++ printk(KERN_INFO "%s: MII link partner %4.4x, negotiated %4.4x.\n",
++ dev->name, mii_reg5, negotiated);
++ if (mii_reg5 == 0xffff)
+ return -2;
+- if ((mii_reg1 & 0x0004) == 0) {
++ if ((mii_reg5 & 0x4000) == 0 && /* No negotiation. */
++ ((mii_reg1 = mdio_read(dev, tp->phys[0], 1)) & 0x0004) == 0) {
+ int new_reg1 = mdio_read(dev, tp->phys[0], 1);
+ if ((new_reg1 & 0x0004) == 0) {
+- if (tulip_debug > 1)
++ if (tp->msg_level & NETIF_MSG_TIMER)
+ printk(KERN_INFO "%s: No link beat on the MII interface,"
+ " status %4.4x.\n", dev->name, new_reg1);
+ return -1;
+ }
+ }
+- negotiated = mii_reg5 & tp->advertising[0];
+ duplex = ((negotiated & 0x0300) == 0x0100
+ || (negotiated & 0x00C0) == 0x0040);
+ /* 100baseTx-FD or 10T-FD, but not 100-HD */
+ if (tp->full_duplex != duplex) {
+ tp->full_duplex = duplex;
+- if (negotiated & 0x038) /* 100mbps. */
++ if (negotiated & 0x0380) /* 100mbps. */
+ tp->csr6 &= ~0x00400000;
+- if (tp->full_duplex) tp->csr6 |= 0x0200;
+- else tp->csr6 &= ~0x0200;
+- outl(tp->csr6 | 0x0002, ioaddr + CSR6);
+- outl(tp->csr6 | 0x2002, ioaddr + CSR6);
+- if (tulip_debug > 0)
+- printk(KERN_INFO "%s: Setting %s-duplex based on MII"
++ if (tp->full_duplex) tp->csr6 |= FullDuplex;
++ else tp->csr6 &= ~FullDuplex;
++ outl(tp->csr6 | RxOn, ioaddr + CSR6);
++ outl(tp->csr6 | TxOn | RxOn, ioaddr + CSR6);
++ if (tp->msg_level & NETIF_MSG_LINK)
++ printk(KERN_INFO "%s: Setting %s-duplex based on MII "
+ "#%d link partner capability of %4.4x.\n",
+ dev->name, tp->full_duplex ? "full" : "half",
+ tp->phys[0], mii_reg5);
+@@ -1868,31 +1984,32 @@
+
+ static void tulip_timer(unsigned long data)
+ {
+- struct device *dev = (struct device *)data;
++ struct net_device *dev = (struct net_device *)data;
+ struct tulip_private *tp = (struct tulip_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+ u32 csr12 = inl(ioaddr + CSR12);
+ int next_tick = 2*HZ;
+
+- if (tulip_debug > 2) {
++ if (tp->msg_level & NETIF_MSG_TIMER)
+ printk(KERN_DEBUG "%s: Media selection tick, %s, status %8.8x mode"
+ " %8.8x SIA %8.8x %8.8x %8.8x %8.8x.\n",
+- dev->name, medianame[dev->if_port], inl(ioaddr + CSR5),
+- inl(ioaddr + CSR6), csr12, inl(ioaddr + CSR13),
+- inl(ioaddr + CSR14), inl(ioaddr + CSR15));
+- }
++ dev->name, medianame[dev->if_port], (int)inl(ioaddr + CSR5),
++ (int)inl(ioaddr + CSR6), csr12, (int)inl(ioaddr + CSR13),
++ (int)inl(ioaddr + CSR14), (int)inl(ioaddr + CSR15));
++
+ switch (tp->chip_id) {
+ case DC21040:
+- if (!tp->medialock && csr12 & 0x0002) { /* Network error */
+- printk(KERN_INFO "%s: No link beat found.\n",
+- dev->name);
++ if (!tp->medialock && (csr12 & 0x0002)) { /* Network error */
++ if (tp->msg_level & NETIF_MSG_TIMER)
++ printk(KERN_INFO "%s: No link beat found.\n",
++ dev->name);
+ dev->if_port = (dev->if_port == 2 ? 0 : 2);
+ select_media(dev, 0);
+ dev->trans_start = jiffies;
+ }
+ break;
+ case DC21041:
+- if (tulip_debug > 2)
++ if (tp->msg_level & NETIF_MSG_TIMER)
+ printk(KERN_DEBUG "%s: 21041 media tick CSR12 %8.8x.\n",
+ dev->name, csr12);
+ if (tp->medialock) break;
+@@ -1905,8 +2022,10 @@
+ dev->if_port = 2;
+ else
+ dev->if_port = 1;
+- printk(KERN_INFO "%s: No 21041 10baseT link beat, Media switched to %s.\n",
+- dev->name, medianame[dev->if_port]);
++ if (tp->msg_level & NETIF_MSG_LINK)
++ printk(KERN_INFO "%s: No 21041 10baseT link beat, Media "
++ "switched to %s.\n",
++ dev->name, medianame[dev->if_port]);
+ outl(0, ioaddr + CSR13); /* Reset */
+ outl(t21041_csr14[dev->if_port], ioaddr + CSR14);
+ outl(t21041_csr15[dev->if_port], ioaddr + CSR15);
+@@ -1921,8 +2040,9 @@
+ next_tick = (30*HZ); /* 30 sec. */
+ tp->mediasense = 0;
+ } else if ((csr12 & 0x0004) == 0) {
+- printk(KERN_INFO "%s: 21041 media switched to 10baseT.\n",
+- dev->name);
++ if (tp->msg_level & NETIF_MSG_LINK)
++ printk(KERN_INFO "%s: 21041 media switched to 10baseT.\n",
++ dev->name);
+ dev->if_port = 0;
+ select_media(dev, 0);
+ next_tick = (24*HZ)/10; /* 2.4 sec. */
+@@ -1943,10 +2063,10 @@
+ /* Not much that can be done.
+ Assume this a generic MII or SYM transceiver. */
+ next_tick = 60*HZ;
+- if (tulip_debug > 2)
++ if (tp->msg_level & NETIF_MSG_TIMER)
+ printk(KERN_DEBUG "%s: network media monitor CSR6 %8.8x "
+ "CSR12 0x%2.2x.\n",
+- dev->name, inl(ioaddr + CSR6), csr12 & 0xff);
++ dev->name, (int)inl(ioaddr + CSR6), csr12 & 0xff);
+ break;
+ }
+ mleaf = &tp->mtable->mleaf[tp->cur_index];
+@@ -1957,7 +2077,7 @@
+ int offset = mleaf->type == 4 ? 5 : 2;
+ s8 bitnum = p[offset];
+ if (p[offset+1] & 0x80) {
+- if (tulip_debug > 1)
++ if (tp->msg_level & NETIF_MSG_TIMER)
+ printk(KERN_DEBUG"%s: Transceiver monitor tick "
+ "CSR12=%#2.2x, no media sense.\n",
+ dev->name, csr12);
+@@ -1967,7 +2087,7 @@
+ }
+ break;
+ }
+- if (tulip_debug > 2)
++ if (tp->msg_level & NETIF_MSG_TIMER)
+ printk(KERN_DEBUG "%s: Transceiver monitor tick: CSR12=%#2.2x"
+ " bit %d is %d, expecting %d.\n",
+ dev->name, csr12, (bitnum >> 1) & 7,
+@@ -1976,9 +2096,9 @@
+ /* Check that the specified bit has the proper value. */
+ if ((bitnum < 0) !=
+ ((csr12 & (1 << ((bitnum >> 1) & 7))) != 0)) {
+- if (tulip_debug > 1)
+- printk(KERN_DEBUG "%s: Link beat detected for %s.\n", dev->name,
+- medianame[mleaf->media]);
++ if (tp->msg_level & NETIF_MSG_LINK)
++ printk(KERN_DEBUG "%s: Link beat detected for %s.\n",
++ dev->name, medianame[mleaf->media & MEDIA_MASK]);
+ if ((p[2] & 0x61) == 0x01) /* Bogus Znyx board. */
+ goto actually_mii;
+ break;
+@@ -1993,15 +2113,15 @@
+ dev->if_port = tp->mtable->mleaf[tp->cur_index].media;
+ if (media_cap[dev->if_port] & MediaIsFD)
+ goto select_next_media; /* Skip FD entries. */
+- if (tulip_debug > 1)
++ if (tp->msg_level & NETIF_MSG_LINK)
+ printk(KERN_DEBUG "%s: No link beat on media %s,"
+ " trying transceiver type %s.\n",
+- dev->name, medianame[mleaf->media & 15],
++ dev->name, medianame[mleaf->media & MEDIA_MASK],
+ medianame[tp->mtable->mleaf[tp->cur_index].media]);
+ select_media(dev, 0);
+ /* Restart the transmit process. */
+- outl(tp->csr6 | 0x0002, ioaddr + CSR6);
+- outl(tp->csr6 | 0x2002, ioaddr + CSR6);
++ outl(tp->csr6 | RxOn, ioaddr + CSR6);
++ outl(tp->csr6 | TxOn | RxOn, ioaddr + CSR6);
+ next_tick = (24*HZ)/10;
+ break;
+ }
+@@ -2017,45 +2137,45 @@
+ }
+ break;
+ }
+- tp->timer.expires = RUN_AT(next_tick);
++ tp->timer.expires = jiffies + next_tick;
+ add_timer(&tp->timer);
+ }
+
+-/* Handle the 21143 uniquely: do autoselect with NWay, not the EEPROM list
+- of available transceivers. */
+-static void t21142_timer(unsigned long data)
++/* Handle internal NWay transceivers uniquely.
++ These exist on the 21041, 21143 (in SYM mode) and the PNIC2.
++ */
++static void nway_timer(unsigned long data)
+ {
+- struct device *dev = (struct device *)data;
++ struct net_device *dev = (struct net_device *)data;
+ struct tulip_private *tp = (struct tulip_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+ int csr12 = inl(ioaddr + CSR12);
+ int next_tick = 60*HZ;
+ int new_csr6 = 0;
+
+- if (tulip_debug > 2)
+- printk(KERN_INFO"%s: 21143 negotiation status %8.8x, %s.\n",
++ if (tp->msg_level & NETIF_MSG_TIMER)
++ printk(KERN_INFO"%s: N-Way autonegotiation status %8.8x, %s.\n",
+ dev->name, csr12, medianame[dev->if_port]);
+ if (media_cap[dev->if_port] & MediaIsMII) {
+ check_duplex(dev);
+- next_tick = 60*HZ;
+ } else if (tp->nwayset) {
+- /* Don't screw up a negotiated session! */
+- if (tulip_debug > 1)
++ /* Do not screw up a negotiated session! */
++ if (tp->msg_level & NETIF_MSG_TIMER)
+ printk(KERN_INFO"%s: Using NWay-set %s media, csr12 %8.8x.\n",
+ dev->name, medianame[dev->if_port], csr12);
+ } else if (tp->medialock) {
+ ;
+ } else if (dev->if_port == 3) {
+ if (csr12 & 2) { /* No 100mbps link beat, revert to 10mbps. */
+- if (tulip_debug > 1)
++ if (tp->msg_level & NETIF_MSG_LINK)
+ printk(KERN_INFO"%s: No 21143 100baseTx link beat, %8.8x, "
+ "trying NWay.\n", dev->name, csr12);
+- t21142_start_nway(dev);
++ nway_start(dev);
+ next_tick = 3*HZ;
+ }
+ } else if ((csr12 & 0x7000) != 0x5000) {
+ /* Negotiation failed. Search media types. */
+- if (tulip_debug > 1)
++ if (tp->msg_level & NETIF_MSG_LINK)
+ printk(KERN_INFO"%s: 21143 negotiation failed, status %8.8x.\n",
+ dev->name, csr12);
+ if (!(csr12 & 4)) { /* 10mbps link beat good. */
+@@ -2074,15 +2194,15 @@
+ outw(8, ioaddr + CSR15);
+ outl(1, ioaddr + CSR13);
+ }
+- if (tulip_debug > 1)
++ if (tp->msg_level & NETIF_MSG_LINK)
+ printk(KERN_INFO"%s: Testing new 21143 media %s.\n",
+ dev->name, medianame[dev->if_port]);
+- if (new_csr6 != (tp->csr6 & ~0x00D5)) {
+- tp->csr6 &= 0x00D5;
++ if (new_csr6 != (tp->csr6 & ~0x20D7)) {
++ tp->csr6 &= 0x20D7;
+ tp->csr6 |= new_csr6;
+ outl(0x0301, ioaddr + CSR12);
+- outl(tp->csr6 | 0x0002, ioaddr + CSR6);
+- outl(tp->csr6 | 0x2002, ioaddr + CSR6);
++ outl(tp->csr6 | RxOn, ioaddr + CSR6);
++ outl(tp->csr6 | TxOn | RxOn, ioaddr + CSR6);
+ }
+ next_tick = 3*HZ;
+ }
+@@ -2093,49 +2213,69 @@
+ tulip_tx_timeout(dev);
+ }
+
+- tp->timer.expires = RUN_AT(next_tick);
++ tp->timer.expires = jiffies + next_tick;
+ add_timer(&tp->timer);
+ }
+
+-static void t21142_start_nway(struct device *dev)
++static void nway_start(struct net_device *dev)
+ {
+ struct tulip_private *tp = (struct tulip_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+- int csr14 = ((tp->to_advertise & 0x0780) << 9) |
+- ((tp->to_advertise&0x0020)<<1) | 0xffbf;
++ int csr14 = ((tp->sym_advertise & 0x0780) << 9) |
++ ((tp->sym_advertise&0x0020)<<1) | 0xffbf;
+
+ dev->if_port = 0;
+ tp->nway = tp->mediasense = 1;
+ tp->nwayset = tp->lpar = 0;
+- if (debug > 1)
+- printk(KERN_DEBUG "%s: Restarting 21143 autonegotiation, %8.8x.\n",
+- dev->name, csr14);
++ if (tp->chip_id == PNIC2) {
++ tp->csr6 = 0x01000000 | (tp->sym_advertise & 0x0040 ? FullDuplex : 0);
++ return;
++ }
++ if (tp->msg_level & NETIF_MSG_LINK)
++ printk(KERN_DEBUG "%s: Restarting internal NWay autonegotiation, "
++ "%8.8x.\n", dev->name, csr14);
+ outl(0x0001, ioaddr + CSR13);
+ outl(csr14, ioaddr + CSR14);
+- tp->csr6 = 0x82420000 | (tp->to_advertise & 0x0040 ? 0x0200 : 0);
++ tp->csr6 = 0x82420000 | (tp->sym_advertise & 0x0040 ? FullDuplex : 0)
++ | (tp->csr6 & 0x20ff);
+ outl(tp->csr6, ioaddr + CSR6);
+ if (tp->mtable && tp->mtable->csr15dir) {
+ outl(tp->mtable->csr15dir, ioaddr + CSR15);
+ outl(tp->mtable->csr15val, ioaddr + CSR15);
+- } else
++ } else if (tp->chip_id != PNIC2)
+ outw(0x0008, ioaddr + CSR15);
+- outl(0x1301, ioaddr + CSR12); /* Trigger NWAY. */
++ if (tp->chip_id == DC21041) /* Trigger NWAY. */
++ outl(0xEF01, ioaddr + CSR12);
++ else
++ outl(0x1301, ioaddr + CSR12);
+ }
+
+-static void t21142_lnk_change(struct device *dev, int csr5)
++static void nway_lnk_change(struct net_device *dev, int csr5)
+ {
+ struct tulip_private *tp = (struct tulip_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+ int csr12 = inl(ioaddr + CSR12);
+
+- if (tulip_debug > 1)
++ if (tp->chip_id == PNIC2) {
++ if (tp->msg_level & NETIF_MSG_LINK)
++ printk(KERN_INFO"%s: PNIC-2 link status changed, CSR5/12/14 %8.8x"
++ " %8.8x, %8.8x.\n",
++ dev->name, csr12, csr5, (int)inl(ioaddr + CSR14));
++ dev->if_port = 5;
++ tp->lpar = csr12 >> 16;
++ tp->nwayset = 1;
++ tp->csr6 = 0x01000000 | (tp->csr6 & 0xffff);
++ outl(tp->csr6, ioaddr + CSR6);
++ return;
++ }
++ if (tp->msg_level & NETIF_MSG_LINK)
+ printk(KERN_INFO"%s: 21143 link status interrupt %8.8x, CSR5 %x, "
+- "%8.8x.\n", dev->name, csr12, csr5, inl(ioaddr + CSR14));
++ "%8.8x.\n", dev->name, csr12, csr5, (int)inl(ioaddr + CSR14));
+
+ /* If NWay finished and we have a negotiated partner capability. */
+ if (tp->nway && !tp->nwayset && (csr12 & 0x7000) == 0x5000) {
+ int setup_done = 0;
+- int negotiated = tp->to_advertise & (csr12 >> 16);
++ int negotiated = tp->sym_advertise & (csr12 >> 16);
+ tp->lpar = csr12 >> 16;
+ tp->nwayset = 1;
+ if (negotiated & 0x0100) dev->if_port = 5;
+@@ -2144,16 +2284,16 @@
+ else if (negotiated & 0x0020) dev->if_port = 0;
+ else {
+ tp->nwayset = 0;
+- if ((csr12 & 2) == 0 && (tp->to_advertise & 0x0180))
++ if ((csr12 & 2) == 0 && (tp->sym_advertise & 0x0180))
+ dev->if_port = 3;
+ }
+ tp->full_duplex = (media_cap[dev->if_port] & MediaAlwaysFD) ? 1:0;
+
+- if (tulip_debug > 1) {
++ if (tp->msg_level & NETIF_MSG_LINK) {
+ if (tp->nwayset)
+ printk(KERN_INFO "%s: Switching to %s based on link "
+ "negotiation %4.4x & %4.4x = %4.4x.\n",
+- dev->name, medianame[dev->if_port], tp->to_advertise,
++ dev->name, medianame[dev->if_port], tp->sym_advertise,
+ tp->lpar, negotiated);
+ else
+ printk(KERN_INFO "%s: Autonegotiation failed, using %s,"
+@@ -2172,86 +2312,86 @@
+ }
+ }
+ if ( ! setup_done) {
+- tp->csr6 = dev->if_port & 1 ? 0x83860000 : 0x82420000;
++ tp->csr6 = (dev->if_port & 1 ? 0x838E0000 : 0x82420000)
++ | (tp->csr6 & 0x20ff);
+ if (tp->full_duplex)
+- tp->csr6 |= 0x0200;
++ tp->csr6 |= FullDuplex;
+ outl(1, ioaddr + CSR13);
+ }
+-#if 0 /* Restart shouldn't be needed. */
++#if 0 /* Restart should not be needed. */
+ outl(tp->csr6 | 0x0000, ioaddr + CSR6);
+- if (debug > 2)
++ if (tp->msg_level & NETIF_MSG_LINK)
+ printk(KERN_DEBUG "%s: Restarting Tx and Rx, CSR5 is %8.8x.\n",
+ dev->name, inl(ioaddr + CSR5));
+ #endif
+- outl(tp->csr6 | 0x2002, ioaddr + CSR6);
+- if (debug > 2)
++ outl(tp->csr6 | TxOn | RxOn, ioaddr + CSR6);
++ if (tp->msg_level & NETIF_MSG_LINK)
+ printk(KERN_DEBUG "%s: Setting CSR6 %8.8x/%x CSR12 %8.8x.\n",
+- dev->name, tp->csr6, inl(ioaddr + CSR6),
+- inl(ioaddr + CSR12));
++ dev->name, tp->csr6, (int)inl(ioaddr + CSR6),
++ (int)inl(ioaddr + CSR12));
+ } else if ((tp->nwayset && (csr5 & 0x08000000)
+ && (dev->if_port == 3 || dev->if_port == 5)
+ && (csr12 & 2) == 2) ||
+ (tp->nway && (csr5 & (TPLnkFail)))) {
+ /* Link blew? Maybe restart NWay. */
+ del_timer(&tp->timer);
+- t21142_start_nway(dev);
+- tp->timer.expires = RUN_AT(3*HZ);
++ nway_start(dev);
++ tp->timer.expires = jiffies + 3*HZ;
+ add_timer(&tp->timer);
+ } else if (dev->if_port == 3 || dev->if_port == 5) {
+- if (tulip_debug > 1)
++ if (tp->msg_level & NETIF_MSG_LINK) /* TIMER? */
+ printk(KERN_INFO"%s: 21143 %s link beat %s.\n",
+ dev->name, medianame[dev->if_port],
+ (csr12 & 2) ? "failed" : "good");
+ if ((csr12 & 2) && ! tp->medialock) {
+ del_timer(&tp->timer);
+- t21142_start_nway(dev);
+- tp->timer.expires = RUN_AT(3*HZ);
++ nway_start(dev);
++ tp->timer.expires = jiffies + 3*HZ;
+ add_timer(&tp->timer);
+- }
++ } else if (dev->if_port == 5)
++ outl(inl(ioaddr + CSR14) & ~0x080, ioaddr + CSR14);
+ } else if (dev->if_port == 0 || dev->if_port == 4) {
+ if ((csr12 & 4) == 0)
+ printk(KERN_INFO"%s: 21143 10baseT link beat good.\n",
+ dev->name);
+ } else if (!(csr12 & 4)) { /* 10mbps link beat good. */
+- if (tulip_debug)
++ if (tp->msg_level & NETIF_MSG_LINK)
+ printk(KERN_INFO"%s: 21143 10mbps sensed media.\n",
+ dev->name);
+ dev->if_port = 0;
+ } else if (tp->nwayset) {
+- if (tulip_debug)
++ if (tp->msg_level & NETIF_MSG_LINK)
+ printk(KERN_INFO"%s: 21143 using NWay-set %s, csr6 %8.8x.\n",
+ dev->name, medianame[dev->if_port], tp->csr6);
+ } else { /* 100mbps link beat good. */
+- if (tulip_debug)
++ if (tp->msg_level & NETIF_MSG_LINK)
+ printk(KERN_INFO"%s: 21143 100baseTx sensed media.\n",
+ dev->name);
+ dev->if_port = 3;
+- tp->csr6 = 0x83860000;
++ tp->csr6 = 0x838E0000 | (tp->csr6 & 0x20ff);
+ outl(0x0003FF7F, ioaddr + CSR14);
+ outl(0x0301, ioaddr + CSR12);
+- outl(tp->csr6 | 0x0002, ioaddr + CSR6);
+- outl(tp->csr6 | 0x2002, ioaddr + CSR6);
++ outl(tp->csr6 | RxOn, ioaddr + CSR6);
++ outl(tp->csr6 | RxOn | TxOn, ioaddr + CSR6);
+ }
+ }
+
+ static void mxic_timer(unsigned long data)
+ {
+- struct device *dev = (struct device *)data;
++ struct net_device *dev = (struct net_device *)data;
+ struct tulip_private *tp = (struct tulip_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+ int next_tick = 60*HZ;
+
+- if (tulip_debug > 3) {
++ if (tp->msg_level & NETIF_MSG_TIMER) {
+ printk(KERN_INFO"%s: MXIC negotiation status %8.8x.\n", dev->name,
+- inl(ioaddr + CSR12));
+- }
+- if (next_tick) {
+- tp->timer.expires = RUN_AT(next_tick);
+- add_timer(&tp->timer);
++ (int)inl(ioaddr + CSR12));
+ }
++ tp->timer.expires = jiffies + next_tick;
++ add_timer(&tp->timer);
+ }
+
+-static void pnic_do_nway(struct device *dev)
++static void pnic_do_nway(struct net_device *dev)
+ {
+ struct tulip_private *tp = (struct tulip_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+@@ -2270,26 +2410,27 @@
+ outl(0x1F868, ioaddr + 0xB8);
+ if (phy_reg & 0x30000000) {
+ tp->full_duplex = 1;
+- new_csr6 |= 0x00000200;
++ new_csr6 |= FullDuplex;
+ }
+- if (tulip_debug > 1)
++ if (tp->msg_level & NETIF_MSG_LINK)
+ printk(KERN_DEBUG "%s: PNIC autonegotiated status %8.8x, %s.\n",
+ dev->name, phy_reg, medianame[dev->if_port]);
+ if (tp->csr6 != new_csr6) {
+ tp->csr6 = new_csr6;
+- outl(tp->csr6 | 0x0002, ioaddr + CSR6); /* Restart Tx */
+- outl(tp->csr6 | 0x2002, ioaddr + CSR6);
++ outl(tp->csr6 | RxOn, ioaddr + CSR6); /* Restart Tx */
++ outl(tp->csr6 | TxOn | RxOn, ioaddr + CSR6);
+ dev->trans_start = jiffies;
+ }
+ }
+ }
+-static void pnic_lnk_change(struct device *dev, int csr5)
++
++static void pnic_lnk_change(struct net_device *dev, int csr5)
+ {
+ struct tulip_private *tp = (struct tulip_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+ int phy_reg = inl(ioaddr + 0xB8);
+
+- if (tulip_debug > 1)
++ if (tp->msg_level & NETIF_MSG_LINK)
+ printk(KERN_DEBUG "%s: PNIC link changed state %8.8x, CSR5 %8.8x.\n",
+ dev->name, phy_reg, csr5);
+ if (inl(ioaddr + CSR5) & TPLnkFail) {
+@@ -2308,7 +2449,7 @@
+ }
+ static void pnic_timer(unsigned long data)
+ {
+- struct device *dev = (struct device *)data;
++ struct net_device *dev = (struct net_device *)data;
+ struct tulip_private *tp = (struct tulip_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+ int next_tick = 60*HZ;
+@@ -2322,7 +2463,7 @@
+ int phy_reg = inl(ioaddr + 0xB8);
+ int csr5 = inl(ioaddr + CSR5);
+
+- if (tulip_debug > 1)
++ if (tp->msg_level & NETIF_MSG_TIMER)
+ printk(KERN_DEBUG "%s: PNIC timer PHY status %8.8x, %s "
+ "CSR5 %8.8x.\n",
+ dev->name, phy_reg, medianame[dev->if_port], csr5);
+@@ -2334,11 +2475,11 @@
+ pnic_do_nway(dev);
+ next_tick = 60*HZ;
+ } else if (csr5 & TPLnkFail) { /* 100baseTx link beat */
+- if (tulip_debug > 1)
++ if (tp->msg_level & NETIF_MSG_LINK)
+ printk(KERN_DEBUG "%s: %s link beat failed, CSR12 %4.4x, "
+ "CSR5 %8.8x, PHY %3.3x.\n",
+ dev->name, medianame[dev->if_port], csr12,
+- inl(ioaddr + CSR5), inl(ioaddr + 0xB8));
++ (int)inl(ioaddr + CSR5), (int)inl(ioaddr + 0xB8));
+ next_tick = 3*HZ;
+ if (tp->medialock) {
+ } else if (tp->nwayset && (dev->if_port & 1)) {
+@@ -2356,10 +2497,10 @@
+ }
+ if (tp->csr6 != new_csr6) {
+ tp->csr6 = new_csr6;
+- outl(tp->csr6 | 0x0002, ioaddr + CSR6); /* Restart Tx */
+- outl(tp->csr6 | 0x2002, ioaddr + CSR6);
++ outl(tp->csr6 | RxOn, ioaddr + CSR6); /* Restart Tx */
++ outl(tp->csr6 | RxOn | TxOn, ioaddr + CSR6);
+ dev->trans_start = jiffies;
+- if (tulip_debug > 1)
++ if (tp->msg_level & NETIF_MSG_LINK)
+ printk(KERN_INFO "%s: Changing PNIC configuration to %s "
+ "%s-duplex, CSR6 %8.8x.\n",
+ dev->name, medianame[dev->if_port],
+@@ -2367,36 +2508,45 @@
+ }
+ }
+ }
+- tp->timer.expires = RUN_AT(next_tick);
++ tp->timer.expires = jiffies + next_tick;
+ add_timer(&tp->timer);
+ }
+
+ static void comet_timer(unsigned long data)
+ {
+- struct device *dev = (struct device *)data;
++ struct net_device *dev = (struct net_device *)data;
+ struct tulip_private *tp = (struct tulip_private *)dev->priv;
+- long ioaddr = dev->base_addr;
+ int next_tick = 60*HZ;
+
+- if (tulip_debug > 1)
++ if (tp->msg_level & NETIF_MSG_TIMER)
+ printk(KERN_DEBUG "%s: Comet link status %4.4x partner capability "
+ "%4.4x.\n",
+- dev->name, inl(ioaddr + 0xB8), inl(ioaddr + 0xC8));
+- tp->timer.expires = RUN_AT(next_tick);
++ dev->name, mdio_read(dev, tp->phys[0], 1),
++ mdio_read(dev, tp->phys[0], 5));
++ check_duplex(dev);
++ tp->timer.expires = jiffies + next_tick;
+ add_timer(&tp->timer);
+ }
+
+-static void tulip_tx_timeout(struct device *dev)
++static void tulip_tx_timeout(struct net_device *dev)
+ {
+ struct tulip_private *tp = (struct tulip_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+
+ if (media_cap[dev->if_port] & MediaIsMII) {
+ /* Do nothing -- the media monitor should handle this. */
+- if (tulip_debug > 1)
+- printk(KERN_WARNING "%s: Transmit timeout using MII device.\n",
+- dev->name);
+- } else if (tp->chip_id == DC21040) {
++ int mii_bmsr = mdio_read(dev, tp->phys[0], 1);
++ if (tp->msg_level & NETIF_MSG_LINK)
++ printk(KERN_WARNING "%s: Transmit timeout using MII device,"
++ " status %4.4x.\n",
++ dev->name, mii_bmsr);
++ if ( ! (mii_bmsr & 0x0004)) { /* No link beat present */
++ dev->trans_start = jiffies;
++ netif_link_down(dev);
++ return;
++ }
++ } else switch (tp->chip_id) {
++ case DC21040:
+ if ( !tp->medialock && inl(ioaddr + CSR12) & 0x0002) {
+ dev->if_port = (dev->if_port == 2 ? 0 : 2);
+ printk(KERN_INFO "%s: transmit timed out, switching to "
+@@ -2405,38 +2555,49 @@
+ select_media(dev, 0);
+ }
+ dev->trans_start = jiffies;
+- return;
+- } else if (tp->chip_id == DC21041) {
++ return; /* Note: not break! */
++ case DC21041: {
+ int csr12 = inl(ioaddr + CSR12);
+
+ printk(KERN_WARNING "%s: 21041 transmit timed out, status %8.8x, "
+ "CSR12 %8.8x, CSR13 %8.8x, CSR14 %8.8x, resetting...\n",
+- dev->name, inl(ioaddr + CSR5), csr12,
+- inl(ioaddr + CSR13), inl(ioaddr + CSR14));
++ dev->name, (int)inl(ioaddr + CSR5), csr12,
++ (int)inl(ioaddr + CSR13), (int)inl(ioaddr + CSR14));
+ tp->mediasense = 1;
+ if ( ! tp->medialock) {
+ if (dev->if_port == 1 || dev->if_port == 2)
+- if (csr12 & 0x0004) {
+- dev->if_port = 2 - dev->if_port;
+- } else
+- dev->if_port = 0;
++ dev->if_port = (csr12 & 0x0004) ? 2 - dev->if_port : 0;
+ else
+ dev->if_port = 1;
+ select_media(dev, 0);
+ }
+- } else if (tp->chip_id == DC21140 || tp->chip_id == DC21142
+- || tp->chip_id == MX98713 || tp->chip_id == COMPEX9881) {
+- printk(KERN_WARNING "%s: 21140 transmit timed out, status %8.8x, "
++ break;
++ }
++ case DC21142:
++ if (tp->nwayset) {
++ printk(KERN_WARNING "%s: Transmit timed out, status %8.8x, "
++ "SIA %8.8x %8.8x %8.8x %8.8x, restarting NWay .\n",
++ dev->name, (int)inl(ioaddr + CSR5),
++ (int)inl(ioaddr + CSR12), (int)inl(ioaddr + CSR13),
++ (int)inl(ioaddr + CSR14), (int)inl(ioaddr + CSR15));
++ nway_start(dev);
++ break;
++ }
++ /* Fall through. */
++ case DC21140: case MX98713: case COMPEX9881:
++ printk(KERN_WARNING "%s: %s transmit timed out, status %8.8x, "
+ "SIA %8.8x %8.8x %8.8x %8.8x, resetting...\n",
+- dev->name, inl(ioaddr + CSR5), inl(ioaddr + CSR12),
+- inl(ioaddr + CSR13), inl(ioaddr + CSR14), inl(ioaddr + CSR15));
++ dev->name, tulip_tbl[tp->chip_id].chip_name,
++ (int)inl(ioaddr + CSR5), (int)inl(ioaddr + CSR12),
++ (int)inl(ioaddr + CSR13), (int)inl(ioaddr + CSR14),
++ (int)inl(ioaddr + CSR15));
+ if ( ! tp->medialock && tp->mtable) {
+ do
+ --tp->cur_index;
+ while (tp->cur_index >= 0
+ && (media_cap[tp->mtable->mleaf[tp->cur_index].media]
+ & MediaIsFD));
+- if (--tp->cur_index < 0) {
++ if (tp->cur_index < 0) {
+ /* We start again, but should instead look for default. */
+ tp->cur_index = tp->mtable->leafcount - 1;
+ }
+@@ -2444,15 +2605,21 @@
+ printk(KERN_WARNING "%s: transmit timed out, switching to %s "
+ "media.\n", dev->name, medianame[dev->if_port]);
+ }
+- } else {
++ break;
++ case PNIC2:
++ printk(KERN_WARNING "%s: PNIC2 transmit timed out, status %8.8x, "
++ "CSR6/7 %8.8x / %8.8x CSR12 %8.8x, resetting...\n",
++ dev->name, (int)inl(ioaddr + CSR5), (int)inl(ioaddr + CSR6),
++ (int)inl(ioaddr + CSR7), (int)inl(ioaddr + CSR12));
++ break;
++ default:
+ printk(KERN_WARNING "%s: Transmit timed out, status %8.8x, CSR12 "
+ "%8.8x, resetting...\n",
+- dev->name, inl(ioaddr + CSR5), inl(ioaddr + CSR12));
+- dev->if_port = 0;
++ dev->name, (int)inl(ioaddr + CSR5), (int)inl(ioaddr + CSR12));
+ }
+
+-#if defined(way_too_many_messages)
+- if (tulip_debug > 3) {
++#if defined(way_too_many_messages) && defined(__i386__)
++ if (tp->msg_level & NETIF_MSG_TXERR) {
+ int i;
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ u8 *buf = (u8 *)(tp->rx_ring[i].buffer1);
+@@ -2478,11 +2645,13 @@
+ }
+ #endif
+
+- /* Stop and restart the chip's Tx processes . */
+- outl(tp->csr6 | 0x0002, ioaddr + CSR6);
+- outl(tp->csr6 | 0x2002, ioaddr + CSR6);
++ /* Stop and restart the Tx process.
++ The pwr_event approach of empty/init_rings() may be better... */
++ outl(tp->csr6 | RxOn, ioaddr + CSR6);
++ outl(tp->csr6 | RxOn | TxOn, ioaddr + CSR6);
+ /* Trigger an immediate transmit demand. */
+ outl(0, ioaddr + CSR1);
++ outl(tulip_tbl[tp->chip_id].valid_intrs, ioaddr + CSR7);
+
+ dev->trans_start = jiffies;
+ tp->stats.tx_errors++;
+@@ -2491,38 +2660,39 @@
+
+
+ /* Initialize the Rx and Tx rings, along with various 'dev' bits. */
+-static void tulip_init_ring(struct device *dev)
++static void tulip_init_ring(struct net_device *dev)
+ {
+ struct tulip_private *tp = (struct tulip_private *)dev->priv;
+ int i;
+
+- tp->tx_full = 0;
++ tp->rx_dead = tp->tx_full = 0;
+ tp->cur_rx = tp->cur_tx = 0;
+ tp->dirty_rx = tp->dirty_tx = 0;
+- tp->susp_rx = 0;
+- tp->ttimer = 0;
+- tp->nir = 0;
++
++ tp->rx_buf_sz = dev->mtu + 18;
++ if (tp->rx_buf_sz < PKT_BUF_SZ)
++ tp->rx_buf_sz = PKT_BUF_SZ;
+
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ tp->rx_ring[i].status = 0x00000000;
+- tp->rx_ring[i].length = cpu_to_le32(PKT_BUF_SZ);
++ tp->rx_ring[i].length = cpu_to_le32(tp->rx_buf_sz);
+ tp->rx_ring[i].buffer2 = virt_to_le32desc(&tp->rx_ring[i+1]);
+ tp->rx_skbuff[i] = NULL;
+ }
+ /* Mark the last entry as wrapping the ring. */
+- tp->rx_ring[i-1].length = cpu_to_le32(PKT_BUF_SZ | DESC_RING_WRAP);
++ tp->rx_ring[i-1].length |= cpu_to_le32(DESC_RING_WRAP);
+ tp->rx_ring[i-1].buffer2 = virt_to_le32desc(&tp->rx_ring[0]);
+
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ /* Note the receive buffer must be longword aligned.
+ dev_alloc_skb() provides 16 byte alignment. But do *not*
+ use skb_reserve() to align the IP header! */
+- struct sk_buff *skb = dev_alloc_skb(PKT_BUF_SZ);
++ struct sk_buff *skb = dev_alloc_skb(tp->rx_buf_sz);
+ tp->rx_skbuff[i] = skb;
+ if (skb == NULL)
+ break;
+ skb->dev = dev; /* Mark as being used by this device. */
+- tp->rx_ring[i].status = cpu_to_le32(DescOwned); /* Owned by Tulip chip */
++ tp->rx_ring[i].status = cpu_to_le32(DescOwned);
+ tp->rx_ring[i].buffer1 = virt_to_le32desc(skb->tail);
+ }
+ tp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
+@@ -2538,18 +2708,18 @@
+ }
+
+ static int
+-tulip_start_xmit(struct sk_buff *skb, struct device *dev)
++tulip_start_xmit(struct sk_buff *skb, struct net_device *dev)
+ {
+ struct tulip_private *tp = (struct tulip_private *)dev->priv;
+- int entry;
++ int entry, q_used_cnt;
+ u32 flag;
+
+- /* Block a timer-based transmit from overlapping. This could better be
+- done with atomic_swap(1, dev->tbusy), but set_bit() works as well. */
+- if (test_and_set_bit(0, (void*)&dev->tbusy) != 0) {
+- if (jiffies - dev->trans_start < TX_TIMEOUT)
+- return 1;
+- tulip_tx_timeout(dev);
++ /* Block a timer-based transmit from overlapping. This happens when
++ packets are presumed lost, and we use this check the Tx status. */
++ if (netif_pause_tx_queue(dev) != 0) {
++ /* This watchdog code is redundant with the media monitor timer. */
++ if (jiffies - dev->trans_start > TX_TIMEOUT)
++ tulip_tx_timeout(dev);
+ return 1;
+ }
+
+@@ -2558,15 +2728,16 @@
+
+ /* Calculate the next Tx descriptor entry. */
+ entry = tp->cur_tx % TX_RING_SIZE;
++ q_used_cnt = tp->cur_tx - tp->dirty_tx;
+
+ tp->tx_skbuff[entry] = skb;
+ tp->tx_ring[entry].buffer1 = virt_to_le32desc(skb->data);
+
+- if (tp->cur_tx - tp->dirty_tx < TX_RING_SIZE/2) {/* Typical path */
++ if (q_used_cnt < TX_QUEUE_LEN/2) {/* Typical path */
+ flag = 0x60000000; /* No interrupt */
+- } else if (tp->cur_tx - tp->dirty_tx == TX_RING_SIZE/2) {
++ } else if (q_used_cnt == TX_QUEUE_LEN/2) {
+ flag = 0xe0000000; /* Tx-done intr. */
+- } else if (tp->cur_tx - tp->dirty_tx < TX_RING_SIZE - 2) {
++ } else if (q_used_cnt < TX_QUEUE_LEN) {
+ flag = 0x60000000; /* No Tx-done intr. */
+ } else { /* Leave room for set_rx_mode() to fill entries. */
+ tp->tx_full = 1;
+@@ -2579,7 +2750,15 @@
+ tp->tx_ring[entry].status = cpu_to_le32(DescOwned);
+ tp->cur_tx++;
+ if ( ! tp->tx_full)
+- clear_bit(0, (void*)&dev->tbusy);
++ netif_unpause_tx_queue(dev);
++ else {
++ netif_stop_tx_queue(dev);
++ /* Check for a just-cleared queue race.
++ Note that this code path differs from other drivers because we
++ set the tx_full flag early. */
++ if ( ! tp->tx_full)
++ netif_resume_tx_queue(dev);
++ }
+
+ dev->trans_start = jiffies;
+ /* Trigger an immediate transmit demand. */
+@@ -2592,55 +2771,26 @@
+ after the Tx thread. */
+ static void tulip_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
+ {
+- struct device *dev = (struct device *)dev_instance;
++ struct net_device *dev = (struct net_device *)dev_instance;
+ struct tulip_private *tp = (struct tulip_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+- int csr5;
+- int entry;
+- int missed;
+- int rx = 0;
+- int tx = 0;
+- int oi = 0;
+- int maxrx = RX_RING_SIZE;
+- int maxtx = TX_RING_SIZE;
+- int maxoi = TX_RING_SIZE;
+-
+-#if defined(__i386__) && defined(SMP_CHECK)
+- if (test_and_set_bit(0, (void*)&dev->interrupt)) {
+- printk(KERN_ERR "%s: Duplicate entry of the interrupt handler by "
+- "processor %d.\n",
+- dev->name, hard_smp_processor_id());
+- dev->interrupt = 0;
+- return;
+- }
+-#else
+- if (dev->interrupt) {
+- printk(KERN_ERR "%s: Re-entering the interrupt handler.\n", dev->name);
+- return;
+- }
+- dev->interrupt = 1;
+-#endif
+-
+- tp->nir++;
++ int csr5, work_budget = tp->max_interrupt_work;
+
+ do {
+ csr5 = inl(ioaddr + CSR5);
+- /* Acknowledge all of the current interrupt sources ASAP. */
+- outl(csr5 & 0x0001ffff, ioaddr + CSR5);
+-
+- if (tulip_debug > 4)
+- printk(KERN_DEBUG "%s: interrupt csr5=%#8.8x new csr5=%#8.8x.\n",
+- dev->name, csr5, inl(dev->base_addr + CSR5));
+-
+ if ((csr5 & (NormalIntr|AbnormalIntr)) == 0)
+ break;
+
+- if (csr5 & (RxIntr | RxNoBuf)) {
+- rx += tulip_rx(dev);
+- tulip_refill_rx(dev);
+- }
++ if (tp->msg_level & NETIF_MSG_INTR)
++ printk(KERN_DEBUG "%s: interrupt csr5=%#8.8x new csr5=%#8.8x.\n",
++ dev->name, csr5, (int)inl(dev->base_addr + CSR5));
++ /* Acknowledge all of the current interrupt sources ASAP. */
++ outl(csr5 & 0x0001ffff, ioaddr + CSR5);
++
++ if (csr5 & (RxIntr | RxNoBuf))
++ work_budget -= tulip_rx(dev);
+
+- if (csr5 & (TxNoBuf | TxDied | TxIntr | TimerInt)) {
++ if (csr5 & (TxNoBuf | TxDied | TxIntr)) {
+ unsigned int dirty_tx;
+
+ for (dirty_tx = tp->dirty_tx; tp->cur_tx - dirty_tx > 0;
+@@ -2653,14 +2803,12 @@
+ /* Check for Rx filter setup frames. */
+ if (tp->tx_skbuff[entry] == NULL)
+ continue;
+-
++
+ if (status & 0x8000) {
+ /* There was an major error, log it. */
+-#ifndef final_version
+- if (tulip_debug > 1)
++ if (tp->msg_level & NETIF_MSG_TX_ERR)
+ printk(KERN_DEBUG "%s: Transmit error, Tx status %8.8x.\n",
+ dev->name, status);
+-#endif
+ tp->stats.tx_errors++;
+ if (status & 0x4104) tp->stats.tx_aborted_errors++;
+ if (status & 0x0C00) tp->stats.tx_carrier_errors++;
+@@ -2672,6 +2820,9 @@
+ if (status & 0x0100) tp->stats.collisions16++;
+ #endif
+ } else {
++ if (tp->msg_level & NETIF_MSG_TX_DONE)
++ printk(KERN_DEBUG "%s: Transmit complete, status "
++ "%8.8x.\n", dev->name, status);
+ #ifdef ETHER_STATS
+ if (status & 0x0001) tp->stats.tx_deferred++;
+ #endif
+@@ -2683,9 +2834,8 @@
+ }
+
+ /* Free the original skb. */
+- dev_free_skb(tp->tx_skbuff[entry]);
++ dev_free_skb_irq(tp->tx_skbuff[entry]);
+ tp->tx_skbuff[entry] = 0;
+- tx++;
+ }
+
+ #ifndef final_version
+@@ -2696,22 +2846,22 @@
+ }
+ #endif
+
+- if (tp->tx_full && dev->tbusy
+- && tp->cur_tx - dirty_tx < TX_RING_SIZE - 2) {
++ if (tp->tx_full && tp->cur_tx - dirty_tx < TX_QUEUE_LEN - 4) {
+ /* The ring is no longer full, clear tbusy. */
+ tp->tx_full = 0;
+- dev->tbusy = 0;
+- netif_wake_queue(dev);
++ netif_resume_tx_queue(dev);
+ }
+
+ tp->dirty_tx = dirty_tx;
+- if (csr5 & TxDied) {
+- if (tulip_debug > 2)
+- printk(KERN_WARNING "%s: The transmitter stopped."
+- " CSR5 is %x, CSR6 %x, new CSR6 %x.\n",
+- dev->name, csr5, inl(ioaddr + CSR6), tp->csr6);
+- outl(tp->csr6 | 0x0002, ioaddr + CSR6);
+- outl(tp->csr6 | 0x2002, ioaddr + CSR6);
++ }
++
++ if (tp->rx_dead) {
++ tulip_rx(dev);
++ if (tp->cur_rx - tp->dirty_rx < RX_RING_SIZE - 3) {
++ printk(KERN_ERR "%s: Restarted Rx at %d / %d.\n",
++ dev->name, tp->cur_rx, tp->dirty_rx);
++ outl(0, ioaddr + CSR2); /* Rx poll demand */
++ tp->rx_dead = 0;
+ }
+ }
+
+@@ -2720,130 +2870,98 @@
+ if (csr5 == 0xffffffff)
+ break;
+ if (csr5 & TxJabber) tp->stats.tx_errors++;
++ if (csr5 & PCIBusError) {
++ printk(KERN_ERR "%s: PCI Fatal Bus Error, %8.8x.\n",
++ dev->name, csr5);
++ }
+ if (csr5 & TxFIFOUnderflow) {
+ if ((tp->csr6 & 0xC000) != 0xC000)
+ tp->csr6 += 0x4000; /* Bump up the Tx threshold */
+ else
+ tp->csr6 |= 0x00200000; /* Store-n-forward. */
++ if (tp->msg_level & NETIF_MSG_TX_ERR)
++ printk(KERN_WARNING "%s: Tx threshold increased, "
++ "new CSR6 %x.\n", dev->name, tp->csr6);
++ }
++ if (csr5 & TxDied) {
++ /* This is normal when changing Tx modes. */
++ if (tp->msg_level & NETIF_MSG_LINK)
++ printk(KERN_WARNING "%s: The transmitter stopped."
++ " CSR5 is %x, CSR6 %x, new CSR6 %x.\n",
++ dev->name, csr5, (int)inl(ioaddr + CSR6), tp->csr6);
++ }
++ if (csr5 & (TxDied | TxFIFOUnderflow | PCIBusError)) {
+ /* Restart the transmit process. */
+- outl(tp->csr6 | 0x0002, ioaddr + CSR6);
+- outl(tp->csr6 | 0x2002, ioaddr + CSR6);
+- outl(0, ioaddr + CSR1);
++ outl(tp->csr6 | RxOn, ioaddr + CSR6);
++ outl(tp->csr6 | RxOn | TxOn, ioaddr + CSR6);
+ }
+- if (csr5 & RxDied) { /* Missed a Rx frame. */
+- tp->stats.rx_errors++;
++ if (csr5 & (RxStopped | RxNoBuf)) {
++ /* Missed a Rx frame or mode change. */
+ tp->stats.rx_missed_errors += inl(ioaddr + CSR8) & 0xffff;
+- outl(tp->csr6 | 0x2002, ioaddr + CSR6);
++ if (tp->flags & COMET_MAC_ADDR) {
++ outl(tp->mc_filter[0], ioaddr + 0xAC);
++ outl(tp->mc_filter[1], ioaddr + 0xB0);
++ }
++ tulip_rx(dev);
++ if (csr5 & RxNoBuf)
++ tp->rx_dead = 1;
++ outl(tp->csr6 | RxOn | TxOn, ioaddr + CSR6);
++ }
++ if (csr5 & TimerInt) {
++ if (tp->msg_level & NETIF_MSG_INTR)
++ printk(KERN_ERR "%s: Re-enabling interrupts, %8.8x.\n",
++ dev->name, csr5);
++ outl(tulip_tbl[tp->chip_id].valid_intrs, ioaddr + CSR7);
+ }
+ if (csr5 & (TPLnkPass | TPLnkFail | 0x08000000)) {
+ if (tp->link_change)
+ (tp->link_change)(dev, csr5);
+ }
+- if (csr5 & SytemError) {
+- printk(KERN_ERR "%s: (%lu) System Error occured\n", dev->name, tp->nir);
+- }
+ /* Clear all error sources, included undocumented ones! */
+ outl(0x0800f7ba, ioaddr + CSR5);
+- oi++;
+ }
+- if (csr5 & TimerInt) {
+-#if 0
+- if (tulip_debug > 2)
+- printk(KERN_ERR "%s: Re-enabling interrupts, %8.8x.\n",
+- dev->name, csr5);
+- outl(tulip_tbl[tp->chip_id].valid_intrs, ioaddr + CSR7);
+-#endif
+- tp->ttimer = 0;
+- oi++;
+- }
+- if (tx > maxtx || rx > maxrx || oi > maxoi) {
+- if (tulip_debug > 1)
++ if (--work_budget < 0) {
++ if (tp->msg_level & NETIF_MSG_DRV)
+ printk(KERN_WARNING "%s: Too much work during an interrupt, "
+- "csr5=0x%8.8x. (%lu) (%d,%d,%d)\n", dev->name, csr5, tp->nir, tx, rx, oi);
++ "csr5=0x%8.8x.\n", dev->name, csr5);
+ /* Acknowledge all interrupt sources. */
+-#if 0
+- /* Clear all interrupting sources, set timer to re-enable. */
+- outl(((~csr5) & 0x0001ebef) | NormalIntr | AbnormalIntr | TimerInt,
+- ioaddr + CSR7);
+- outl(12, ioaddr + CSR11);
+- tp->ttimer = 1;
+-#endif
++ outl(0x8001ffff, ioaddr + CSR5);
++ if (tp->flags & HAS_INTR_MITIGATION) {
++ /* Josip Loncaric at ICASE did extensive experimentation
++ to develop a good interrupt mitigation setting.*/
++ outl(0x8b240000, ioaddr + CSR11);
++ } else {
++ /* Mask all interrupting sources, set timer to re-enable. */
++ outl(((~csr5) & 0x0001ebef) | AbnormalIntr | TimerInt,
++ ioaddr + CSR7);
++ outl(0x0012, ioaddr + CSR11);
++ }
+ break;
+ }
+ } while (1);
+
+- tulip_refill_rx(dev);
+-
+- /* check if we card is in suspend mode */
+- entry = tp->dirty_rx % RX_RING_SIZE;
+- if (tp->rx_skbuff[entry] == NULL) {
+- if (tulip_debug > 1)
+- printk(KERN_WARNING "%s: in rx suspend mode: (%lu) (tp->cur_rx = %u, ttimer = %d, rx = %d) go/stay in suspend mode\n", dev->name, tp->nir, tp->cur_rx, tp->ttimer, rx);
+- if (tp->ttimer == 0 || (inl(ioaddr + CSR11) & 0xffff) == 0) {
+- if (tulip_debug > 1)
+- printk(KERN_WARNING "%s: in rx suspend mode: (%lu) set timer\n", dev->name, tp->nir);
+- outl(tulip_tbl[tp->chip_id].valid_intrs | TimerInt,
+- ioaddr + CSR7);
+- outl(TimerInt, ioaddr + CSR5);
+- outl(12, ioaddr + CSR11);
+- tp->ttimer = 1;
+- }
+- }
+-
+- if ((missed = inl(ioaddr + CSR8) & 0x1ffff)) {
+- tp->stats.rx_dropped += missed & 0x10000 ? 0x10000 : missed;
+- }
+-
+- if (tulip_debug > 4)
++ if (tp->msg_level & NETIF_MSG_INTR)
+ printk(KERN_DEBUG "%s: exiting interrupt, csr5=%#4.4x.\n",
+- dev->name, inl(ioaddr + CSR5));
++ dev->name, (int)inl(ioaddr + CSR5));
+
+-#if defined(__i386__)
+- clear_bit(0, (void*)&dev->interrupt);
+-#else
+- dev->interrupt = 0;
+-#endif
+ return;
+ }
+
+-static int tulip_refill_rx(struct device *dev)
+-{
+- struct tulip_private *tp = (struct tulip_private *)dev->priv;
+- int entry;
+- int refilled = 0;
+-
+- /* Refill the Rx ring buffers. */
+- for (; tp->cur_rx - tp->dirty_rx > 0; tp->dirty_rx++) {
+- entry = tp->dirty_rx % RX_RING_SIZE;
+- if (tp->rx_skbuff[entry] == NULL) {
+- struct sk_buff *skb;
+- skb = tp->rx_skbuff[entry] = dev_alloc_skb(PKT_BUF_SZ);
+- if (skb == NULL)
+- break;
+- skb->dev = dev; /* Mark as being used by this device. */
+- tp->rx_ring[entry].buffer1 = virt_to_le32desc(skb->tail);
+- refilled++;
+- }
+- tp->rx_ring[entry].status = cpu_to_le32(DescOwned);
+- }
+- return refilled;
+-}
+-
+-static int tulip_rx(struct device *dev)
++static int tulip_rx(struct net_device *dev)
+ {
+ struct tulip_private *tp = (struct tulip_private *)dev->priv;
+ int entry = tp->cur_rx % RX_RING_SIZE;
+ int rx_work_limit = tp->dirty_rx + RX_RING_SIZE - tp->cur_rx;
+- int received = 0;
++ int work_done = 0;
+
+- if (tulip_debug > 4)
++ if (tp->msg_level & NETIF_MSG_RX_STATUS)
+ printk(KERN_DEBUG " In tulip_rx(), entry %d %8.8x.\n", entry,
+ tp->rx_ring[entry].status);
+ /* If we own the next entry, it is a new packet. Send it up. */
+ while ( ! (tp->rx_ring[entry].status & cpu_to_le32(DescOwned))) {
+ s32 status = le32_to_cpu(tp->rx_ring[entry].status);
+
+- if (tulip_debug > 5)
++ if (tp->msg_level & NETIF_MSG_RX_STATUS)
+ printk(KERN_DEBUG "%s: In tulip_rx(), entry %d %8.8x.\n",
+ dev->name, entry, status);
+ if (--rx_work_limit < 0)
+@@ -2852,7 +2970,7 @@
+ if ((status & 0x38000300) != 0x0300) {
+ /* Ingore earlier buffers. */
+ if ((status & 0xffff) != 0x7fff) {
+- if (tulip_debug > 1)
++ if (tp->msg_level & NETIF_MSG_RX_ERR)
+ printk(KERN_WARNING "%s: Oversized Ethernet frame "
+ "spanned multiple buffers, status %8.8x!\n",
+ dev->name, status);
+@@ -2860,7 +2978,7 @@
+ }
+ } else if (status & RxDescFatalErr) {
+ /* There was a fatal error. */
+- if (tulip_debug > 2)
++ if (tp->msg_level & NETIF_MSG_RX_ERR)
+ printk(KERN_DEBUG "%s: Receive error, Rx status %8.8x.\n",
+ dev->name, status);
+ tp->stats.rx_errors++; /* end of a packet.*/
+@@ -2884,28 +3002,21 @@
+ #endif
+ /* Check if the packet is long enough to accept without copying
+ to a minimally-sized skbuff. */
+- if (pkt_len < rx_copybreak
++ if (pkt_len < tp->rx_copybreak
+ && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
+ skb->dev = dev;
+ skb_reserve(skb, 2); /* 16 byte align the IP header */
+-#if ! defined(__alpha__)
++#if (LINUX_VERSION_CODE >= 0x20100)
+ eth_copy_and_sum(skb, tp->rx_skbuff[entry]->tail, pkt_len, 0);
+ skb_put(skb, pkt_len);
+ #else
+ memcpy(skb_put(skb, pkt_len), tp->rx_skbuff[entry]->tail,
+ pkt_len);
+ #endif
+- } else { /* Pass up the skb already on the Rx ring. */
+- char *temp = skb_put(skb = tp->rx_skbuff[entry], pkt_len);
++ work_done++;
++ } else { /* Pass up the skb already on the Rx ring. */
++ skb_put(skb = tp->rx_skbuff[entry], pkt_len);
+ tp->rx_skbuff[entry] = NULL;
+-#ifndef final_version
+- if (le32desc_to_virt(tp->rx_ring[entry].buffer1) != temp)
+- printk(KERN_ERR "%s: Internal fault: The skbuff addresses "
+- "do not match in tulip_rx: %p vs. %p / %p.\n",
+- dev->name,
+- le32desc_to_virt(tp->rx_ring[entry].buffer1),
+- skb->head, temp);
+-#endif
+ }
+ skb->protocol = eth_type_trans(skb, dev);
+ netif_rx(skb);
+@@ -2915,43 +3026,36 @@
+ tp->stats.rx_bytes += pkt_len;
+ #endif
+ }
+- received++;
+ entry = (++tp->cur_rx) % RX_RING_SIZE;
+ }
+
+- return received;
++ /* Refill the Rx ring buffers. */
++ for (; tp->cur_rx - tp->dirty_rx > 0; tp->dirty_rx++) {
++ entry = tp->dirty_rx % RX_RING_SIZE;
++ if (tp->rx_skbuff[entry] == NULL) {
++ struct sk_buff *skb;
++ skb = tp->rx_skbuff[entry] = dev_alloc_skb(tp->rx_buf_sz);
++ if (skb == NULL) {
++ if (tp->cur_rx - tp->dirty_rx == RX_RING_SIZE)
++ printk(KERN_ERR "%s: No kernel memory to allocate "
++ "receive buffers.\n", dev->name);
++ break;
++ }
++ skb->dev = dev; /* Mark as being used by this device. */
++ tp->rx_ring[entry].buffer1 = virt_to_le32desc(skb->tail);
++ work_done++;
++ }
++ tp->rx_ring[entry].status = cpu_to_le32(DescOwned);
++ }
++
++ return work_done;
+ }
+
+-static int tulip_close(struct device *dev)
++static void empty_rings(struct net_device *dev)
+ {
+- long ioaddr = dev->base_addr;
+ struct tulip_private *tp = (struct tulip_private *)dev->priv;
+ int i;
+
+- dev->start = 0;
+- dev->tbusy = 1;
+-
+- if (tulip_debug > 1)
+- printk(KERN_DEBUG "%s: Shutting down ethercard, status was %2.2x.\n",
+- dev->name, inl(ioaddr + CSR5));
+-
+- /* Disable interrupts by clearing the interrupt mask. */
+- outl(0x00000000, ioaddr + CSR7);
+- /* Stop the Tx and Rx processes. */
+- outl(inl(ioaddr + CSR6) & ~0x2002, ioaddr + CSR6);
+- /* 21040 -- Leave the card in 10baseT state. */
+- if (tp->chip_id == DC21040)
+- outl(0x00000004, ioaddr + CSR13);
+-
+- if (inl(ioaddr + CSR6) != 0xffffffff)
+- tp->stats.rx_missed_errors += inl(ioaddr + CSR8) & 0xffff;
+-
+- del_timer(&tp->timer);
+-
+- free_irq(dev->irq, dev);
+-
+- dev->if_port = tp->saved_if_port;
+-
+ /* Free all the skbuffs in the Rx queue. */
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ struct sk_buff *skb = tp->rx_skbuff[i];
+@@ -2971,86 +3075,165 @@
+ dev_free_skb(tp->tx_skbuff[i]);
+ tp->tx_skbuff[i] = 0;
+ }
++}
++
++static int tulip_close(struct net_device *dev)
++{
++ long ioaddr = dev->base_addr;
++ struct tulip_private *tp = (struct tulip_private *)dev->priv;
++
++ netif_stop_tx_queue(dev);
++
++ if (tp->msg_level & NETIF_MSG_IFDOWN)
++ printk(KERN_DEBUG "%s: Shutting down ethercard, status was %2.2x.\n",
++ dev->name, (int)inl(ioaddr + CSR5));
+
++ /* Disable interrupts by clearing the interrupt mask. */
++ outl(0x00000000, ioaddr + CSR7);
++ /* Stop the Tx and Rx processes. */
++ outl(inl(ioaddr + CSR6) & ~TxOn & ~RxOn, ioaddr + CSR6);
++ /* 21040 -- Leave the card in 10baseT state. */
++ if (tp->chip_id == DC21040)
++ outl(0x00000004, ioaddr + CSR13);
++
++ if (inl(ioaddr + CSR6) != 0xffffffff)
++ tp->stats.rx_missed_errors += inl(ioaddr + CSR8) & 0xffff;
++
++ del_timer(&tp->timer);
++
++ free_irq(dev->irq, dev);
++
++ dev->if_port = tp->saved_if_port;
++
++ empty_rings(dev);
+ /* Leave the driver in snooze, not sleep, mode. */
+ if (tp->flags & HAS_PWRDWN)
+- pcibios_write_config_dword(tp->pci_bus, tp->pci_devfn, 0x40,
+- 0x40000000);
++ pci_write_config_dword(tp->pci_dev, 0x40, 0x40000000);
+
+ MOD_DEC_USE_COUNT;
+
+ return 0;
+ }
+
+-static struct net_device_stats *tulip_get_stats(struct device *dev)
++static struct net_device_stats *tulip_get_stats(struct net_device *dev)
+ {
+ struct tulip_private *tp = (struct tulip_private *)dev->priv;
+ long ioaddr = dev->base_addr;
++ int csr8 = inl(ioaddr + CSR8);
+
+- if (dev->start)
+- tp->stats.rx_missed_errors += inl(ioaddr + CSR8) & 0xffff;
++ if (netif_running(dev) && csr8 != 0xffffffff)
++ tp->stats.rx_missed_errors += (u16)csr8;
+
+ return &tp->stats;
+ }
+
+ #ifdef HAVE_PRIVATE_IOCTL
+-/* Provide ioctl() calls to examine the MII xcvr state. */
+-static int private_ioctl(struct device *dev, struct ifreq *rq, int cmd)
++/* Provide ioctl() calls to examine the MII xcvr state.
++ We emulate a MII management registers for chips without MII.
++ The two numeric constants are because some clueless person
++ changed value for the symbolic name.
++ */
++static int private_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+ {
+ struct tulip_private *tp = (struct tulip_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+ u16 *data = (u16 *)&rq->ifr_data;
+- int phy = tp->phys[0] & 0x1f;
+- long flags;
++ u32 *data32 = (void *)&rq->ifr_data;
++ unsigned int phy = tp->phys[0];
++ unsigned int regnum = data[1];
+
+ switch(cmd) {
+- case SIOCDEVPRIVATE: /* Get the address of the PHY in use. */
++ case 0x8947: case 0x89F0:
++ /* SIOCGMIIPHY: Get the address of the PHY in use. */
+ if (tp->mii_cnt)
+ data[0] = phy;
+- else if (tp->flags & HAS_NWAY143)
++ else if (tp->flags & HAS_NWAY)
+ data[0] = 32;
+ else if (tp->chip_id == COMET)
+ data[0] = 1;
+ else
+ return -ENODEV;
+- case SIOCDEVPRIVATE+1: /* Read the specified MII register. */
+- if (data[0] == 32 && (tp->flags & HAS_NWAY143)) {
++ case 0x8948: case 0x89F1:
++ /* SIOCGMIIREG: Read the specified MII register. */
++ if (data[0] == 32 && (tp->flags & HAS_NWAY)) {
+ int csr12 = inl(ioaddr + CSR12);
+ int csr14 = inl(ioaddr + CSR14);
+- switch (data[1]) {
+- case 0: {
+- data[3] = (csr14<<5) & 0x1000;
+- break; }
++ switch (regnum) {
++ case 0:
++ if (((csr14<<5) & 0x1000) ||
++ (dev->if_port == 5 && tp->nwayset))
++ data[3] = 0x1000;
++ else
++ data[3] = (media_cap[dev->if_port]&MediaIs100 ? 0x2000 : 0)
++ | (media_cap[dev->if_port]&MediaIsFD ? 0x0100 : 0);
++ break;
+ case 1:
+- data[3] = 0x7848 + ((csr12&0x7000) == 0x5000 ? 0x20 : 0)
+- + (csr12&0x06 ? 0x04 : 0);
++ data[3] = 0x1848 + ((csr12&0x7000) == 0x5000 ? 0x20 : 0)
++ + ((csr12&0x06) == 6 ? 0 : 4);
++ if (tp->chip_id != DC21041)
++ data[3] |= 0x6048;
+ break;
+ case 4: {
+- data[3] = ((csr14>>9)&0x07C0) +
+- ((inl(ioaddr + CSR6)>>3)&0x0040) + ((csr14>>1)&0x20) + 1;
++ /* Advertised value, bogus 10baseTx-FD value from CSR6. */
++ data[3] = ((inl(ioaddr + CSR6)>>3)&0x0040)+((csr14>>1)&0x20)+1;
++ if (tp->chip_id != DC21041)
++ data[3] |= ((csr14>>9)&0x03C0);
+ break;
+ }
+- case 5: data[3] = csr12 >> 16; break;
++ case 5: data[3] = tp->lpar; break;
+ default: data[3] = 0; break;
+ }
+ } else {
+- save_flags(flags);
+- cli();
+- data[3] = mdio_read(dev, data[0] & 0x1f, data[1] & 0x1f);
+- restore_flags(flags);
++ data[3] = mdio_read(dev, data[0] & 0x1f, regnum);
+ }
+ return 0;
+- case SIOCDEVPRIVATE+2: /* Write the specified MII register */
++ case 0x8949: case 0x89F2:
++ /* SIOCSMIIREG: Write the specified MII register */
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+- if (data[0] == 32 && (tp->flags & HAS_NWAY143)) {
+- if (data[1] == 5)
+- tp->to_advertise = data[2];
++ if (regnum & ~0x1f)
++ return -EINVAL;
++ if (data[0] == phy) {
++ u16 value = data[2];
++ switch (regnum) {
++ case 0: /* Check for autonegotiation on or reset. */
++ tp->full_duplex_lock = (value & 0x9000) ? 0 : 1;
++ if (tp->full_duplex_lock)
++ tp->full_duplex = (value & 0x0100) ? 1 : 0;
++ break;
++ case 4: tp->mii_advertise = data[2]; break;
++ }
++ }
++ if (data[0] == 32 && (tp->flags & HAS_NWAY)) {
++ u16 value = data[2];
++ if (regnum == 0) {
++ if ((value & 0x1200) == 0x1200)
++ nway_start(dev);
++ } else if (regnum == 4)
++ tp->sym_advertise = value;
+ } else {
+- save_flags(flags);
+- cli();
+- mdio_write(dev, data[0] & 0x1f, data[1] & 0x1f, data[2]);
+- restore_flags(flags);
++ mdio_write(dev, data[0] & 0x1f, regnum, data[2]);
++ }
++ return 0;
++ case SIOCGPARAMS:
++ data32[0] = tp->msg_level;
++ data32[1] = tp->multicast_filter_limit;
++ data32[2] = tp->max_interrupt_work;
++ data32[3] = tp->rx_copybreak;
++ data32[4] = inl(ioaddr + CSR11);
++ return 0;
++ case SIOCSPARAMS:
++ if (!capable(CAP_NET_ADMIN))
++ return -EPERM;
++ tp->msg_level = data32[0];
++ tp->multicast_filter_limit = data32[1];
++ tp->max_interrupt_work = data32[2];
++ tp->rx_copybreak = data32[3];
++ if (tp->flags & HAS_INTR_MITIGATION) {
++ u32 *d = (u32 *)&rq->ifr_data;
++ outl(data32[4], ioaddr + CSR11);
++ printk(KERN_NOTICE "%s: Set interrupt mitigate paramters %8.8x.\n",
++ dev->name, d[0]);
+ }
+ return 0;
+ default:
+@@ -3089,19 +3272,19 @@
+ static unsigned const ethernet_polynomial = 0x04c11db7U;
+ static inline u32 ether_crc(int length, unsigned char *data)
+ {
+- int crc = -1;
++ int crc = -1;
+
+- while(--length >= 0) {
++ while(--length >= 0) {
+ unsigned char current_octet = *data++;
+ int bit;
+ for (bit = 0; bit < 8; bit++, current_octet >>= 1)
+ crc = (crc << 1) ^
+ ((crc < 0) ^ (current_octet & 1) ? ethernet_polynomial : 0);
+- }
+- return crc;
++ }
++ return crc;
+ }
+
+-static void set_rx_mode(struct device *dev)
++static void set_rx_mode(struct net_device *dev)
+ {
+ struct tulip_private *tp = (struct tulip_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+@@ -3109,37 +3292,56 @@
+
+ tp->csr6 &= ~0x00D5;
+ if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
+- tp->csr6 |= 0x00C0;
+- csr6 |= 0x00C0;
++ tp->csr6 |= AcceptAllMulticast | AcceptAllPhys;
++ csr6 |= AcceptAllMulticast | AcceptAllPhys;
+ /* Unconditionally log net taps. */
+ printk(KERN_INFO "%s: Promiscuous mode enabled.\n", dev->name);
+- } else if ((dev->mc_count > 1000) || (dev->flags & IFF_ALLMULTI)) {
++ } else if ((dev->mc_count > tp->multicast_filter_limit) ||
++ (dev->flags & IFF_ALLMULTI)) {
+ /* Too many to filter well -- accept all multicasts. */
+- tp->csr6 |= 0x0080;
+- csr6 |= 0x0080;
++ tp->csr6 |= AcceptAllMulticast;
++ csr6 |= AcceptAllMulticast;
+ } else if (tp->flags & MC_HASH_ONLY) {
+ /* Some work-alikes have only a 64-entry hash filter table. */
+ /* Should verify correctness on big-endian/__powerpc__ */
+ struct dev_mc_list *mclist;
+ int i;
+- u32 mc_filter[2]; /* Multicast hash filter */
+- if (dev->mc_count > 64) { /* Arbitrary non-effective limit. */
+- tp->csr6 |= 0x0080;
+- csr6 |= 0x0080;
++ if (dev->mc_count > tp->multicast_filter_limit) {
++ tp->csr6 |= AcceptAllMulticast;
++ csr6 |= AcceptAllMulticast;
+ } else {
+- mc_filter[1] = mc_filter[0] = 0;
++ u32 mc_filter[2] = {0, 0}; /* Multicast hash filter */
++ int filterbit;
+ for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
+- i++, mclist = mclist->next)
+- set_bit(ether_crc(ETH_ALEN, mclist->dmi_addr)>>26, mc_filter);
+- if (tp->chip_id == AX88140) {
++ i++, mclist = mclist->next) {
++ if (tp->flags & COMET_MAC_ADDR)
++ filterbit = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
++ else
++ filterbit = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;
++ filterbit &= 0x3f;
++ set_bit(filterbit, mc_filter);
++ if (tp->msg_level & NETIF_MSG_RXFILTER)
++ printk(KERN_INFO "%s: Added filter for %2.2x:%2.2x:%2.2x:"
++ "%2.2x:%2.2x:%2.2x %8.8x bit %d.\n", dev->name,
++ mclist->dmi_addr[0], mclist->dmi_addr[1],
++ mclist->dmi_addr[2], mclist->dmi_addr[3],
++ mclist->dmi_addr[4], mclist->dmi_addr[5],
++ ether_crc(ETH_ALEN, mclist->dmi_addr), filterbit);
++ }
++ if (mc_filter[0] == tp->mc_filter[0] &&
++ mc_filter[1] == tp->mc_filter[1])
++ ; /* No change. */
++ else if (tp->flags & IS_ASIX) {
+ outl(2, ioaddr + CSR13);
+ outl(mc_filter[0], ioaddr + CSR14);
+ outl(3, ioaddr + CSR13);
+ outl(mc_filter[1], ioaddr + CSR14);
+- } else if (tp->chip_id == COMET) { /* Has a simple hash filter. */
++ } else if (tp->flags & COMET_MAC_ADDR) {
+ outl(mc_filter[0], ioaddr + 0xAC);
+ outl(mc_filter[1], ioaddr + 0xB0);
+ }
++ tp->mc_filter[0] = mc_filter[0];
++ tp->mc_filter[1] = mc_filter[1];
+ }
+ } else {
+ u16 *eaddrs, *setup_frm = tp->setup_frame;
+@@ -3153,14 +3355,16 @@
+ u16 hash_table[32];
+ tx_flags = 0x08400000 | 192; /* Use hash filter. */
+ memset(hash_table, 0, sizeof(hash_table));
+- set_bit(255, hash_table); /* Broadcast entry */
++ set_bit(255, hash_table); /* Broadcast entry */
+ /* This should work on big-endian machines as well. */
+ for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
+ i++, mclist = mclist->next)
+ set_bit(ether_crc_le(ETH_ALEN, mclist->dmi_addr) & 0x1ff,
+ hash_table);
+- for (i = 0; i < 32; i++)
+- *setup_frm++ = *setup_frm++ = hash_table[i];
++ for (i = 0; i < 32; i++) {
++ *setup_frm++ = hash_table[i];
++ *setup_frm++ = hash_table[i];
++ }
+ setup_frm = &tp->setup_frame[13*6];
+ } else {
+ /* We have <= 14 addresses so we can use the wonderful
+@@ -3168,9 +3372,9 @@
+ for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
+ i++, mclist = mclist->next) {
+ eaddrs = (u16 *)mclist->dmi_addr;
+- *setup_frm++ = *setup_frm++ = *eaddrs++;
+- *setup_frm++ = *setup_frm++ = *eaddrs++;
+- *setup_frm++ = *setup_frm++ = *eaddrs++;
++ *setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
++ *setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
++ *setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
+ }
+ /* Fill the unused entries with the broadcast address. */
+ memset(setup_frm, 0xff, (15-i)*12);
+@@ -3178,9 +3382,9 @@
+ }
+ /* Fill the final entry with our physical address. */
+ eaddrs = (u16 *)dev->dev_addr;
+- *setup_frm++ = *setup_frm++ = eaddrs[0];
+- *setup_frm++ = *setup_frm++ = eaddrs[1];
+- *setup_frm++ = *setup_frm++ = eaddrs[2];
++ *setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0];
++ *setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1];
++ *setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2];
+ /* Now add this frame to the Tx list. */
+ if (tp->cur_tx - tp->dirty_tx > TX_RING_SIZE - 2) {
+ /* Same setup recently queued, we need not add it. */
+@@ -3188,14 +3392,14 @@
+ unsigned long flags;
+ unsigned int entry;
+
+- save_flags(flags); cli();
++ spin_lock_irqsave(&tp->mii_lock, flags);
+ entry = tp->cur_tx++ % TX_RING_SIZE;
+
+ if (entry != 0) {
+ /* Avoid a chip errata by prefixing a dummy entry. */
+ tp->tx_skbuff[entry] = 0;
+ tp->tx_ring[entry].length =
+- (entry == TX_RING_SIZE-1) ? cpu_to_le32(DESC_RING_WRAP) : 0;
++ (entry == TX_RING_SIZE-1) ? cpu_to_le32(DESC_RING_WRAP):0;
+ tp->tx_ring[entry].buffer1 = 0;
+ tp->tx_ring[entry].status = cpu_to_le32(DescOwned);
+ entry = tp->cur_tx++ % TX_RING_SIZE;
+@@ -3209,35 +3413,145 @@
+ tp->tx_ring[entry].buffer1 = virt_to_le32desc(tp->setup_frame);
+ tp->tx_ring[entry].status = cpu_to_le32(DescOwned);
+ if (tp->cur_tx - tp->dirty_tx >= TX_RING_SIZE - 2) {
+- set_bit(0, (void*)&dev->tbusy);
++ netif_stop_tx_queue(dev);
+ tp->tx_full = 1;
+ }
+- restore_flags(flags);
++ spin_unlock_irqrestore(&tp->mii_lock, flags);
+ /* Trigger an immediate transmit demand. */
+ outl(0, ioaddr + CSR1);
+ }
+ }
+- outl(csr6 | 0x0000, ioaddr + CSR6);
++ outl(csr6, ioaddr + CSR6);
+ }
+
++
++static int tulip_pwr_event(void *dev_instance, int event)
++{
++ struct net_device *dev = dev_instance;
++ struct tulip_private *tp = (struct tulip_private *)dev->priv;
++ long ioaddr = dev->base_addr;
++ if (tp->msg_level & NETIF_MSG_LINK)
++ printk("%s: Handling power event %d.\n", dev->name, event);
++ switch(event) {
++ case DRV_ATTACH:
++ MOD_INC_USE_COUNT;
++ break;
++ case DRV_SUSPEND: {
++ int csr6 = inl(ioaddr + CSR6);
++ /* Disable interrupts, stop the chip, gather stats. */
++ if (csr6 != 0xffffffff) {
++ int csr8 = inl(ioaddr + CSR8);
++ outl(0x00000000, ioaddr + CSR7);
++ outl(csr6 & ~TxOn & ~RxOn, ioaddr + CSR6);
++ tp->stats.rx_missed_errors += (unsigned short)csr8;
++ }
++ empty_rings(dev);
++ /* Put the 21143 into sleep mode. */
++ if (tp->flags & HAS_PWRDWN)
++ pci_write_config_dword(tp->pci_dev, 0x40,0x80000000);
++ break;
++ }
++ case DRV_RESUME:
++ if (tp->flags & HAS_PWRDWN)
++ pci_write_config_dword(tp->pci_dev, 0x40, 0x0000);
++ outl(tp->csr0, ioaddr + CSR0);
++ tulip_init_ring(dev);
++ outl(virt_to_bus(tp->rx_ring), ioaddr + CSR3);
++ outl(virt_to_bus(tp->tx_ring), ioaddr + CSR4);
++ if (tp->mii_cnt) {
++ dev->if_port = 11;
++ if (tp->mtable && tp->mtable->has_mii)
++ select_media(dev, 1);
++ tp->csr6 = 0x820E0000;
++ dev->if_port = 11;
++ outl(0x0000, ioaddr + CSR13);
++ outl(0x0000, ioaddr + CSR14);
++ } else if (! tp->medialock)
++ nway_start(dev);
++ else
++ select_media(dev, 1);
++ outl(tulip_tbl[tp->chip_id].valid_intrs, ioaddr + CSR7);
++ outl(tp->csr6 | TxOn | RxOn, ioaddr + CSR6);
++ outl(0, ioaddr + CSR2); /* Rx poll demand */
++ set_rx_mode(dev);
++ break;
++ case DRV_DETACH: {
++ struct net_device **devp, **next;
++ if (dev->flags & IFF_UP) {
++ printk(KERN_ERR "%s: Tulip CardBus interface was detached while "
++ "still active.\n", dev->name);
++ dev_close(dev);
++ dev->flags &= ~(IFF_UP|IFF_RUNNING);
++ }
++ if (tp->msg_level & NETIF_MSG_DRV)
++ printk(KERN_DEBUG "%s: Unregistering device.\n", dev->name);
++ unregister_netdev(dev);
++#ifdef USE_IO_OPS
++ release_region(dev->base_addr, pci_id_tbl[tp->chip_id].io_size);
++#else
++ iounmap((char *)dev->base_addr);
++#endif
++ for (devp = &root_tulip_dev; *devp; devp = next) {
++ next = &((struct tulip_private *)(*devp)->priv)->next_module;
++ if (*devp == dev) {
++ *devp = *next;
++ break;
++ }
++ }
++ if (tp->priv_addr)
++ kfree(tp->priv_addr);
++ kfree(dev);
++ MOD_DEC_USE_COUNT;
++ break;
++ }
++ default:
++ break;
++ }
++
++ return 0;
++}
++
+ #ifdef CARDBUS
+
+ #include <pcmcia/driver_ops.h>
+
+ static dev_node_t *tulip_attach(dev_locator_t *loc)
+ {
+- struct device *dev;
+- u16 dev_id;
+- u32 io;
++ struct net_device *dev;
++ long ioaddr;
++ struct pci_dev *pdev;
+ u8 bus, devfn, irq;
++ u32 dev_id;
++ u32 pciaddr;
++ int i, chip_id = 4; /* DC21143 */
+
+ if (loc->bus != LOC_PCI) return NULL;
+ bus = loc->b.pci.bus; devfn = loc->b.pci.devfn;
+ printk(KERN_INFO "tulip_attach(bus %d, function %d)\n", bus, devfn);
+- pcibios_read_config_dword(bus, devfn, PCI_BASE_ADDRESS_0, &io);
+- pcibios_read_config_word(bus, devfn, PCI_DEVICE_ID, &dev_id);
+- pcibios_read_config_byte(bus, devfn, PCI_INTERRUPT_LINE, &irq);
+- dev = tulip_probe1(bus, devfn, NULL, io & ~3, irq, DC21142, 0);
++ pdev = pci_find_slot(bus, devfn);
++#ifdef USE_IO_OPS
++ pci_read_config_dword(pdev, PCI_BASE_ADDRESS_0, &pciaddr);
++ ioaddr = pciaddr & PCI_BASE_ADDRESS_IO_MASK;
++#else
++ pci_read_config_dword(pdev, PCI_BASE_ADDRESS_1, &pciaddr);
++ ioaddr = (long)ioremap(pciaddr & PCI_BASE_ADDRESS_MEM_MASK,
++ pci_id_tbl[DC21142].io_size);
++#endif
++ pci_read_config_dword(pdev, 0, &dev_id);
++ pci_read_config_byte(pdev, PCI_INTERRUPT_LINE, &irq);
++ if (ioaddr == 0 || irq == 0) {
++ printk(KERN_ERR "The Tulip CardBus Ethernet interface at %d/%d was "
++ "not assigned an %s.\n"
++ KERN_ERR " It will not be activated.\n",
++ bus, devfn, ioaddr == 0 ? "address" : "IRQ");
++ return NULL;
++ }
++ for (i = 0; pci_id_tbl[i].id.pci; i++) {
++ if (pci_id_tbl[i].id.pci == (dev_id & pci_id_tbl[i].id.pci_mask)) {
++ chip_id = i; break;
++ }
++ }
++ dev = tulip_probe1(pdev, NULL, ioaddr, irq, chip_id, 0);
+ if (dev) {
+ dev_node_t *node = kmalloc(sizeof(dev_node_t), GFP_KERNEL);
+ strcpy(node->dev_name, dev->name);
+@@ -3251,54 +3565,49 @@
+
+ static void tulip_suspend(dev_node_t *node)
+ {
+- struct device **devp, **next;
++ struct net_device **devp, **next;
+ printk(KERN_INFO "tulip_suspend(%s)\n", node->dev_name);
+ for (devp = &root_tulip_dev; *devp; devp = next) {
+ next = &((struct tulip_private *)(*devp)->priv)->next_module;
+- if (strcmp((*devp)->name, node->dev_name) == 0) break;
+- }
+- if (*devp) {
+- long ioaddr = (*devp)->base_addr;
+- struct tulip_private *tp = (struct tulip_private *)(*devp)->priv;
+- int csr6 = inl(ioaddr + CSR6);
+- /* Disable interrupts, stop the chip, gather stats. */
+- if (csr6 != 0xffffffff) {
+- outl(0x00000000, ioaddr + CSR7);
+- outl(csr6 & ~0x2002, ioaddr + CSR6);
+- tp->stats.rx_missed_errors += inl(ioaddr + CSR8) & 0xffff;
++ if (strcmp((*devp)->name, node->dev_name) == 0) {
++ tulip_pwr_event(*devp, DRV_SUSPEND);
++ break;
+ }
+- tulip_close(*devp);
+- /* Put the 21143 into sleep mode. */
+- pcibios_write_config_dword(tp->pci_bus,tp->pci_devfn, 0x40,0x80000000);
+ }
+ }
+
+ static void tulip_resume(dev_node_t *node)
+ {
+- struct device **devp, **next;
++ struct net_device **devp, **next;
+ printk(KERN_INFO "tulip_resume(%s)\n", node->dev_name);
+ for (devp = &root_tulip_dev; *devp; devp = next) {
+ next = &((struct tulip_private *)(*devp)->priv)->next_module;
+- if (strcmp((*devp)->name, node->dev_name) == 0) break;
+- }
+- if (*devp) {
+- struct tulip_private *tp = (struct tulip_private *)(*devp)->priv;
+- pcibios_write_config_dword(tp->pci_bus, tp->pci_devfn, 0x40, 0x0000);
+- tulip_open(*devp);
++ if (strcmp((*devp)->name, node->dev_name) == 0) {
++ tulip_pwr_event(*devp, DRV_RESUME);
++ break;
++ }
+ }
+ }
+
+ static void tulip_detach(dev_node_t *node)
+ {
+- struct device **devp, **next;
++ struct net_device **devp, **next;
+ printk(KERN_INFO "tulip_detach(%s)\n", node->dev_name);
+ for (devp = &root_tulip_dev; *devp; devp = next) {
+ next = &((struct tulip_private *)(*devp)->priv)->next_module;
+ if (strcmp((*devp)->name, node->dev_name) == 0) break;
+ }
+ if (*devp) {
++ struct tulip_private *tp = (struct tulip_private *)(*devp)->priv;
+ unregister_netdev(*devp);
++#ifdef USE_IO_OPS
++ release_region((*devp)->base_addr, pci_id_tbl[DC21142].io_size);
++#else
++ iounmap((char *)(*devp)->base_addr);
++#endif
+ kfree(*devp);
++ if (tp->priv_addr)
++ kfree(tp->priv_addr);
+ *devp = *next;
+ kfree(node);
+ MOD_DEC_USE_COUNT;
+@@ -3315,42 +3624,60 @@
+ #ifdef MODULE
+ int init_module(void)
+ {
++ if (debug >= NETIF_MSG_DRV) /* Emit version even if no cards detected. */
++ printk(KERN_INFO "%s" KERN_INFO "%s", version1, version2);
+ #ifdef CARDBUS
+- reverse_probe = 0; /* Not used. */
+ register_driver(&tulip_ops);
+ return 0;
+ #else
+- return tulip_probe(NULL);
++ return pci_drv_register(&tulip_drv_id, NULL);
+ #endif
++ reverse_probe = 0; /* Not used. */
+ }
+
+ void cleanup_module(void)
+ {
+- struct device *next_dev;
++ struct net_device *next_dev;
+
+ #ifdef CARDBUS
+ unregister_driver(&tulip_ops);
++#else
++ pci_drv_unregister(&tulip_drv_id);
+ #endif
+
+ /* No need to check MOD_IN_USE, as sys_delete_module() checks. */
+ while (root_tulip_dev) {
+- struct tulip_private *tp = (struct tulip_private *)root_tulip_dev->priv;
+- next_dev = tp->next_module;
++ struct tulip_private *tp = (struct tulip_private*)root_tulip_dev->priv;
+ unregister_netdev(root_tulip_dev);
++#ifdef USE_IO_OPS
+ release_region(root_tulip_dev->base_addr,
+- tulip_tbl[tp->chip_id].io_size);
++ pci_id_tbl[tp->chip_id].io_size);
++#else
++ iounmap((char *)root_tulip_dev->base_addr);
++#endif
++ next_dev = tp->next_module;
++ if (tp->priv_addr)
++ kfree(tp->priv_addr);
+ kfree(root_tulip_dev);
+ root_tulip_dev = next_dev;
+ }
+ }
+-
++#else
++int tulip_probe(struct net_device *dev)
++{
++ if (pci_drv_register(&tulip_drv_id, dev) < 0)
++ return -ENODEV;
++ printk(KERN_INFO "%s" KERN_INFO "%s", version1, version2);
++ return 0;
++ reverse_probe = 0; /* Not used. */
++}
+ #endif /* MODULE */
+
+ /*
+ * Local variables:
+- * SMP-compile-command: "gcc -D__SMP__ -DMODULE -D__KERNEL__ -Wall -Wstrict-prototypes -O6 -c tulip.c `[ -f /usr/include/linux/modversions.h ] && echo -DMODVERSIONS`"
+- * compile-command: "gcc -DMODULE -D__KERNEL__ -Wall -Wstrict-prototypes -O6 -c tulip.c `[ -f /usr/include/linux/modversions.h ] && echo -DMODVERSIONS`"
+- * cardbus-compile-command: "gcc -DCARDBUS -DMODULE -D__KERNEL__ -Wall -Wstrict-prototypes -O6 -c tulip.c -o tulip_cb.o -I/usr/src/pcmcia-cs-3.0.9/include/"
++ * compile-command: "make KERNVER=`uname -r` tulip.o"
++ * compile-cmd: "gcc -DMODULE -Wall -Wstrict-prototypes -O6 -c tulip.c"
++ * cardbus-compile-command: "gcc -DCARDBUS -DMODULE -Wall -Wstrict-prototypes -O6 -c tulip.c -o tulip_cb.o -I/usr/src/pcmcia/include/"
+ * c-indent-level: 4
+ * c-basic-offset: 4
+ * tab-width: 4
+Index: linux/src/drivers/net/via-rhine.c
+===================================================================
+RCS file: /cvsroot/hurd/gnumach/linux/src/drivers/net/Attic/via-rhine.c,v
+retrieving revision 1.1
+diff -u -r1.1 via-rhine.c
+--- linux/src/drivers/net/via-rhine.c 26 Apr 1999 05:52:45 -0000 1.1
++++ linux/src/drivers/net/via-rhine.c 20 Aug 2004 10:32:54 -0000
+@@ -1,35 +1,54 @@
+ /* via-rhine.c: A Linux Ethernet device driver for VIA Rhine family chips. */
+ /*
+- Written 1998 by Donald Becker.
++ Written 1998-2003 by Donald Becker.
+
+- This software may be used and distributed according to the terms
+- of the GNU Public License (GPL), incorporated herein by reference.
+- Drivers derived from this code also fall under the GPL and must retain
+- this authorship and copyright notice.
++ This software may be used and distributed according to the terms of
++ the GNU General Public License (GPL), incorporated herein by reference.
++ Drivers based on or derived from this code fall under the GPL and must
++ retain the authorship, copyright and license notice. This file is not
++ a complete program and may only be used when the entire operating
++ system is licensed under the GPL.
+
+ This driver is designed for the VIA VT86c100A Rhine-II PCI Fast Ethernet
+ controller. It also works with the older 3043 Rhine-I chip.
+
+- The author may be reached as becker@cesdis.edu, or
+- Donald Becker
+- 312 Severn Ave. #W302
++ The author may be reached as becker@scyld.com, or C/O
++ Scyld Computing Corporation
++ 914 Bay Ridge Road, Suite 220
+ Annapolis MD 21403
+
+- Support and updates available at
+- http://cesdis.gsfc.nasa.gov/linux/drivers/via-rhine.html
++ Support information and updates available at
++ http://www.scyld.com/network/via-rhine.html
++ The information and support mailing lists are based at
++ http://www.scyld.com/mailman/listinfo/
+ */
+
+-static const char *versionA =
+-"via-rhine.c:v1.00 9/5/98 Written by Donald Becker\n";
+-static const char *versionB =
+-" http://cesdis.gsfc.nasa.gov/linux/drivers/via-rhine.html\n";
++/* These identify the driver base version and may not be removed. */
++static const char version1[] =
++"via-rhine.c:v1.16 7/22/2003 Written by Donald Becker <becker@scyld.com>\n";
++static const char version2[] =
++" http://www.scyld.com/network/via-rhine.html\n";
++
++/* Automatically extracted configuration info:
++probe-func: via_rhine_probe
++config-in: tristate 'VIA "Rhine" vt86c100, vt3043, and vt3065 series PCI Ethernet support' CONFIG_VIA_RHINE
++
++c-help-name: VIA Rhine series PCI Ethernet support
++c-help-symbol: CONFIG_VIA_RHINE
++c-help: This driver is for the VIA Rhine (v3043) and Rhine-II
++c-help: (vt3065 AKA vt86c100) network adapter chip series.
++c-help: More specific information and updates are available from
++c-help: http://www.scyld.com/network/via-rhine.html
++*/
++
++/* The user-configurable values.
++ These may be modified when a driver module is loaded.*/
+
+-/* A few user-configurable values. These may be modified when a driver
+- module is loaded.*/
++/* Message enable level: 0..31 = no..all messages. See NETIF_MSG docs. */
++static int debug = 2;
+
+-static int debug = 1; /* 1 normal messages, 0 quiet .. 7 verbose. */
++/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
+ static int max_interrupt_work = 20;
+-static int min_pci_latency = 64;
+
+ /* Set the copy breakpoint for the copy-only-tiny-frames scheme.
+ Setting to > 1518 effectively disables this feature. */
+@@ -39,6 +58,11 @@
+ Both 'options[]' and 'full_duplex[]' should exist for driver
+ interoperability.
+ The media type is usually passed in 'options[]'.
++ The default is autonegotation for speed and duplex.
++ This should rarely be overridden.
++ Use option values 0x10/0x20 for 10Mbps, 0x100,0x200 for 100Mbps.
++ Use option values 0x10 and 0x100 for forcing half duplex fixed speed.
++ Use option values 0x20 and 0x200 for forcing full duplex operation.
+ */
+ #define MAX_UNITS 8 /* More are supported, limit only on options */
+ static int options[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
+@@ -50,39 +74,57 @@
+
+ /* Operational parameters that are set at compile time. */
+
+-/* Keep the ring sizes a power of two for compile efficiency.
+- The compiler will convert <unsigned>'%'<2^N> into a bit mask.
+- Making the Tx ring too large decreases the effectiveness of channel
++/* Making the Tx ring too large decreases the effectiveness of channel
+ bonding and packet priority.
+ There are no ill effects from too-large receive rings. */
+-#define TX_RING_SIZE 8
+-#define RX_RING_SIZE 16
++#define TX_RING_SIZE 16
++#define TX_QUEUE_LEN 10 /* Limit ring entries actually used. */
++#define RX_RING_SIZE 32
+
+ /* Operational parameters that usually are not changed. */
+ /* Time in jiffies before concluding the transmitter is hung. */
+-#define TX_TIMEOUT (2*HZ)
++#define TX_TIMEOUT (6*HZ)
+
+-#define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/
++/* Allocation size of Rx buffers with normal sized Ethernet frames.
++ Do not change this value without good reason. This is not a limit,
++ but a way to keep a consistent allocation size among drivers.
++ */
++#define PKT_BUF_SZ 1536
++
++#ifndef __KERNEL__
++#define __KERNEL__
++#endif
++#if !defined(__OPTIMIZE__)
++#warning You must compile this file with the correct options!
++#warning See the last lines of the source file.
++#error You must compile this driver with "-O".
++#endif
+
+ /* Include files, designed to support most kernel versions 2.0.0 and later. */
+ #include <linux/config.h>
++#if defined(CONFIG_SMP) && ! defined(__SMP__)
++#define __SMP__
++#endif
++#if defined(MODULE) && defined(CONFIG_MODVERSIONS) && ! defined(MODVERSIONS)
++#define MODVERSIONS
++#endif
++
+ #include <linux/version.h>
+-#ifdef MODULE
+-#ifdef MODVERSIONS
++#if defined(MODVERSIONS)
+ #include <linux/modversions.h>
+ #endif
+ #include <linux/module.h>
+-#else
+-#define MOD_INC_USE_COUNT
+-#define MOD_DEC_USE_COUNT
+-#endif
+
+ #include <linux/kernel.h>
+ #include <linux/string.h>
+ #include <linux/timer.h>
+ #include <linux/errno.h>
+ #include <linux/ioport.h>
++#if LINUX_VERSION_CODE >= 0x20400
++#include <linux/slab.h>
++#else
+ #include <linux/malloc.h>
++#endif
+ #include <linux/interrupt.h>
+ #include <linux/pci.h>
+ #include <linux/netdevice.h>
+@@ -92,10 +134,24 @@
+ #include <asm/bitops.h>
+ #include <asm/io.h>
+
+-/* This driver was written to use PCI memory space, however some boards
+- only work with I/O space accesses. */
+-#define VIA_USE_IO
+-#ifdef VIA_USE_IO
++#ifdef INLINE_PCISCAN
++#include "k_compat.h"
++#else
++#include "pci-scan.h"
++#include "kern_compat.h"
++#endif
++
++/* Condensed bus+endian portability operations. */
++#define virt_to_le32desc(addr) cpu_to_le32(virt_to_bus(addr))
++#define le32desc_to_virt(addr) bus_to_virt(le32_to_cpu(addr))
++
++/* This driver was written to use PCI memory space, however most versions
++ of the Rhine only work correctly with I/O space accesses. */
++#if defined(VIA_USE_MEMORY)
++#warning Many adapters using the VIA Rhine chip are not configured to work
++#warning with PCI memory space accesses.
++#else
++#define USE_IO_OPS
+ #undef readb
+ #undef readw
+ #undef readl
+@@ -110,50 +166,29 @@
+ #define writel outl
+ #endif
+
+-/* Kernel compatibility defines, some common to David Hind's PCMCIA package.
+- This is only in the support-all-kernels source code. */
+-
+-#define RUN_AT(x) (jiffies + (x))
+-
+-#if (LINUX_VERSION_CODE >= 0x20100)
++#if (LINUX_VERSION_CODE >= 0x20100) && defined(MODULE)
+ char kernel_version[] = UTS_RELEASE;
+-#else
+-#ifndef __alpha__
+-#define ioremap vremap
+-#define iounmap vfree
+-#endif
+ #endif
+-#if defined(MODULE) && LINUX_VERSION_CODE > 0x20115
+-MODULE_AUTHOR("Donald Becker <becker@cesdis.gsfc.nasa.gov>");
++
++MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
+ MODULE_DESCRIPTION("VIA Rhine PCI Fast Ethernet driver");
++MODULE_LICENSE("GPL");
+ MODULE_PARM(max_interrupt_work, "i");
+-MODULE_PARM(min_pci_latency, "i");
+ MODULE_PARM(debug, "i");
+ MODULE_PARM(rx_copybreak, "i");
+ MODULE_PARM(options, "1-" __MODULE_STRING(MAX_UNITS) "i");
+ MODULE_PARM(full_duplex, "1-" __MODULE_STRING(MAX_UNITS) "i");
+-#endif
+-#if LINUX_VERSION_CODE < 0x20123
+-#define test_and_set_bit(val, addr) set_bit(val, addr)
+-#endif
+-#if LINUX_VERSION_CODE <= 0x20139
+-#define net_device_stats enet_statistics
+-#else
+-#define NETSTATS_VER2
+-#endif
+-#if LINUX_VERSION_CODE < 0x20155 || defined(CARDBUS)
+-/* Grrrr, the PCI code changed, but did not consider CardBus... */
+-#include <linux/bios32.h>
+-#define PCI_SUPPORT_VER1
+-#else
+-#define PCI_SUPPORT_VER2
+-#endif
+-#if LINUX_VERSION_CODE < 0x20159
+-#define dev_free_skb(skb) dev_kfree_skb(skb, FREE_WRITE);
+-#else
+-#define dev_free_skb(skb) dev_kfree_skb(skb);
+-#endif
+-
++MODULE_PARM(multicast_filter_limit, "i");
++MODULE_PARM_DESC(debug, "Driver message level (0-31)");
++MODULE_PARM_DESC(options, "Force transceiver type or fixed speed+duplex");
++MODULE_PARM_DESC(max_interrupt_work,
++ "Driver maximum events handled per interrupt");
++MODULE_PARM_DESC(full_duplex, "Non-zero to set forced full duplex "
++ "(deprecated, use options[] instead).");
++MODULE_PARM_DESC(rx_copybreak,
++ "Breakpoint in bytes for copy-only-tiny-frames");
++MODULE_PARM_DESC(multicast_filter_limit,
++ "Multicast addresses before switching to Rx-all-multicast");
+
+ /*
+ Theory of Operation
+@@ -230,63 +265,72 @@
+
+ IVb. References
+
+-Preliminary VT86C100A manual from http://www.via.com.tw/
+-http://cesdis.gsfc.nasa.gov/linux/misc/100mbps.html
+-http://cesdis.gsfc.nasa.gov/linux/misc/NWay.html
++This driver was originally written using a preliminary VT86C100A manual
++from
++ http://www.via.com.tw/
++The usual background material was used:
++ http://www.scyld.com/expert/100mbps.html
++ http://scyld.com/expert/NWay.html
++
++Additional information is now available, especially for the newer chips.
++ http://www.via.com.tw/en/Networking/DS6105LOM100.pdf
+
+ IVc. Errata
+
+ The VT86C100A manual is not reliable information.
+-The chip does not handle unaligned transmit or receive buffers, resulting
+-in significant performance degradation for bounce buffer copies on transmit
+-and unaligned IP headers on receive.
++The 3043 chip does not handle unaligned transmit or receive buffers,
++resulting in significant performance degradation for bounce buffer
++copies on transmit and unaligned IP headers on receive.
+ The chip does not pad to minimum transmit length.
+
++There is a bug with the transmit descriptor pointer handling when the
++chip encounters a transmit error.
++
+ */
+
+
+
+-/* This table drives the PCI probe routines. It's mostly boilerplate in all
+- of the drivers, and will likely be provided by some future kernel.
+- Note the matching code -- the first table entry matchs all 56** cards but
+- second only the 1234 card.
+-*/
+-enum pci_flags_bit {
+- PCI_USES_IO=1, PCI_USES_MEM=2, PCI_USES_MASTER=4,
+- PCI_ADDR0=0x10<<0, PCI_ADDR1=0x10<<1, PCI_ADDR2=0x10<<2, PCI_ADDR3=0x10<<3,
+-};
+-struct pci_id_info {
+- const char *name;
+- u16 vendor_id, device_id, device_id_mask, flags;
+- int io_size;
+- struct device *(*probe1)(int pci_bus, int pci_devfn, struct device *dev,
+- long ioaddr, int irq, int chip_idx, int fnd_cnt);
++static void *via_probe1(struct pci_dev *pdev, void *init_dev,
++ long ioaddr, int irq, int chip_idx, int find_cnt);
++static int via_pwr_event(void *dev_instance, int event);
++enum chip_capability_flags {
++ CanHaveMII=1, HasESIPhy=2, HasDavicomPhy=4, HasV1TxStat=8,
++ ReqTxAlign=0x10, HasWOL=0x20, HasIPChecksum=0x40, HasVLAN=0x80,
++
+ };
+
+-static struct device *via_probe1(int pci_bus, int pci_devfn,
+- struct device *dev, long ioaddr, int irq,
+- int chp_idx, int fnd_cnt);
++#if defined(VIA_USE_MEMORY)
++#define RHINE_IOTYPE (PCI_USES_MEM | PCI_USES_MASTER | PCI_ADDR1)
++#define RHINE_I_IOSIZE 128
++#define RHINEII_IOSIZE 4096
++#else
++#define RHINE_IOTYPE (PCI_USES_IO | PCI_USES_MASTER | PCI_ADDR0)
++#define RHINE_I_IOSIZE 128
++#define RHINEII_IOSIZE 256
++#endif
+
+ static struct pci_id_info pci_tbl[] = {
+- { "VIA VT86C100A Rhine-II", 0x1106, 0x6100, 0xffff,
+- PCI_USES_MEM|PCI_USES_IO|PCI_USES_MEM|PCI_USES_MASTER, 128, via_probe1},
+- { "VIA VT3043 Rhine", 0x1106, 0x3043, 0xffff,
+- PCI_USES_IO|PCI_USES_MEM|PCI_USES_MASTER, 128, via_probe1},
++ { "VIA VT3043 Rhine", { 0x30431106, 0xffffffff,},
++ RHINE_IOTYPE, RHINE_I_IOSIZE, CanHaveMII | ReqTxAlign | HasV1TxStat },
++ { "VIA VT86C100A Rhine", { 0x61001106, 0xffffffff,},
++ RHINE_IOTYPE, RHINE_I_IOSIZE, CanHaveMII | ReqTxAlign | HasV1TxStat },
++ { "VIA VT6102 Rhine-II", { 0x30651106, 0xffffffff,},
++ RHINE_IOTYPE, RHINEII_IOSIZE, CanHaveMII | HasWOL },
++ { "VIA VT6105LOM Rhine-III (3106)", { 0x31061106, 0xffffffff,},
++ RHINE_IOTYPE, RHINEII_IOSIZE, CanHaveMII | HasWOL },
++ /* Duplicate entry, with 'M' features enabled. */
++ { "VIA VT6105M Rhine-III (3106)", { 0x31061106, 0xffffffff,},
++ RHINE_IOTYPE, RHINEII_IOSIZE, CanHaveMII|HasWOL|HasIPChecksum|HasVLAN},
++ { "VIA VT6105M Rhine-III (3053 prototype)", { 0x30531106, 0xffffffff,},
++ RHINE_IOTYPE, RHINEII_IOSIZE, CanHaveMII | HasWOL },
+ {0,}, /* 0 terminated list. */
+ };
+
+-
+-/* A chip capabilities table, matching the entries in pci_tbl[] above. */
+-enum chip_capability_flags {CanHaveMII=1, };
+-struct chip_info {
+- int io_size;
+- int flags;
+-} static cap_tbl[] = {
+- {128, CanHaveMII, },
+- {128, CanHaveMII, },
++struct drv_id_info via_rhine_drv_id = {
++ "via-rhine", PCI_HOTSWAP, PCI_CLASS_NETWORK_ETHERNET<<8, pci_tbl,
++ via_probe1, via_pwr_event
+ };
+
+-
+ /* Offsets to the device registers.
+ */
+ enum register_offsets {
+@@ -294,9 +338,10 @@
+ IntrStatus=0x0C, IntrEnable=0x0E,
+ MulticastFilter0=0x10, MulticastFilter1=0x14,
+ RxRingPtr=0x18, TxRingPtr=0x1C,
+- MIIPhyAddr=0x6C, MIIStatus=0x6D, PCIConfig=0x6E,
+- MIICmd=0x70, MIIRegAddr=0x71, MIIData=0x72,
+- Config=0x78, RxMissed=0x7C, RxCRCErrs=0x7E,
++ MIIPhyAddr=0x6C, MIIStatus=0x6D, PCIBusConfig=0x6E,
++ MIICmd=0x70, MIIRegAddr=0x71, MIIData=0x72, MACRegEEcsr=0x74,
++ Config=0x78, ConfigA=0x7A, RxMissed=0x7C, RxCRCErrs=0x7E,
++ StickyHW=0x83, WOLcrClr=0xA4, WOLcgClr=0xA7, PwrcsrClr=0xAC,
+ };
+
+ /* Bits in the interrupt status/mask registers. */
+@@ -308,21 +353,18 @@
+ IntrRxOverflow=0x0400, IntrRxDropped=0x0800, IntrRxNoBuf=0x1000,
+ IntrTxAborted=0x2000, IntrLinkChange=0x4000,
+ IntrRxWakeUp=0x8000,
+- IntrNormalSummary=0x0003, IntrAbnormalSummary=0x8260,
++ IntrNormalSummary=0x0003, IntrAbnormalSummary=0xC260,
+ };
+
+-
+ /* The Rx and Tx buffer descriptors. */
+ struct rx_desc {
+- u16 rx_status;
+- u16 rx_length;
++ s32 rx_status;
+ u32 desc_length;
+ u32 addr;
+ u32 next_desc;
+ };
+ struct tx_desc {
+- u16 tx_status;
+- u16 tx_own;
++ s32 tx_status;
+ u32 desc_length;
+ u32 addr;
+ u32 next_desc;
+@@ -330,9 +372,19 @@
+
+ /* Bits in *_desc.status */
+ enum rx_status_bits {
+- RxDescOwn=0x80000000, RxOK=0x8000, RxWholePkt=0x0300, RxErr=0x008F};
++ RxOK=0x8000, RxWholePkt=0x0300, RxErr=0x008F};
+ enum desc_status_bits {
+- DescOwn=0x8000, DescEndPacket=0x4000, DescIntr=0x1000,
++ DescOwn=0x80000000, DescEndPacket=0x4000, DescIntr=0x1000,
++};
++
++/* Bits in rx.desc_length for extended status. */
++enum rx_info_bits {
++ RxTypeTag=0x00010000,
++ RxTypeUDP=0x00020000, RxTypeTCP=0x00040000, RxTypeIP=0x00080000,
++ RxTypeUTChksumOK=0x00100000, RxTypeIPChksumOK=0x00200000,
++ /* Summarized. */
++ RxTypeCsumMask=0x003E0000,
++ RxTypeUDPSumOK=0x003A0000, RxTypeTCPSumOK=0x003C0000,
+ };
+
+ /* Bits in ChipCmd. */
+@@ -343,6 +395,9 @@
+ CmdNoTxPoll=0x0800, CmdReset=0x8000,
+ };
+
++#define PRIV_ALIGN 15 /* Required alignment mask */
++/* Use __attribute__((aligned (L1_CACHE_BYTES))) to maintain alignment
++ within the structure. */
+ struct netdev_private {
+ /* Descriptor rings first for alignment. */
+ struct rx_desc rx_ring[RX_RING_SIZE];
+@@ -353,24 +408,34 @@
+ struct sk_buff* tx_skbuff[TX_RING_SIZE];
+ unsigned char *tx_buf[TX_RING_SIZE]; /* Tx bounce buffers */
+ unsigned char *tx_bufs; /* Tx bounce buffer region. */
+- struct device *next_module; /* Link for devices of this type. */
++ struct net_device *next_module; /* Link for devices of this type. */
++ void *priv_addr; /* Unaligned address for kfree */
+ struct net_device_stats stats;
+ struct timer_list timer; /* Media monitoring timer. */
+- unsigned char pci_bus, pci_devfn;
++ int msg_level;
++ int max_interrupt_work;
++ int intr_enable;
++ int chip_id, drv_flags;
++ struct pci_dev *pci_dev;
++
+ /* Frequently used values: keep some adjacent for cache effect. */
+- int chip_id;
+- long in_interrupt; /* Word-long for SMP locks. */
++
+ struct rx_desc *rx_head_desc;
+ unsigned int cur_rx, dirty_rx; /* Producer/consumer ring indices */
+- unsigned int cur_tx, dirty_tx;
+ unsigned int rx_buf_sz; /* Based on MTU+slack. */
++ int rx_copybreak;
++
++ unsigned int cur_tx, dirty_tx;
+ u16 chip_cmd; /* Current setting for ChipCmd */
++ int multicast_filter_limit;
++ u32 mc_filter[2];
++ int rx_mode;
+ unsigned int tx_full:1; /* The Tx queue is full. */
+ /* These values are keep track of the transceiver/media in use. */
+ unsigned int full_duplex:1; /* Full-duplex operation requested. */
+ unsigned int duplex_lock:1;
+ unsigned int medialock:1; /* Do not sense media. */
+- unsigned int default_port:4; /* Last dev->if_port value. */
++ unsigned int default_port; /* Last dev->if_port value. */
+ u8 tx_thresh, rx_thresh;
+ /* MII transceiver section. */
+ int mii_cnt; /* MII device addresses. */
+@@ -378,171 +443,81 @@
+ unsigned char phys[2]; /* MII device addresses. */
+ };
+
+-static int mdio_read(struct device *dev, int phy_id, int location);
+-static void mdio_write(struct device *dev, int phy_id, int location, int value);
+-static int netdev_open(struct device *dev);
+-static void check_duplex(struct device *dev);
++static int mdio_read(struct net_device *dev, int phy_id, int location);
++static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
++static int netdev_open(struct net_device *dev);
++static void check_duplex(struct net_device *dev);
+ static void netdev_timer(unsigned long data);
+-static void tx_timeout(struct device *dev);
+-static void init_ring(struct device *dev);
+-static int start_tx(struct sk_buff *skb, struct device *dev);
++static void tx_timeout(struct net_device *dev);
++static void init_ring(struct net_device *dev);
++static int start_tx(struct sk_buff *skb, struct net_device *dev);
+ static void intr_handler(int irq, void *dev_instance, struct pt_regs *regs);
+-static int netdev_rx(struct device *dev);
+-static void netdev_error(struct device *dev, int intr_status);
+-static void set_rx_mode(struct device *dev);
+-static struct net_device_stats *get_stats(struct device *dev);
+-static int mii_ioctl(struct device *dev, struct ifreq *rq, int cmd);
+-static int netdev_close(struct device *dev);
++static int netdev_rx(struct net_device *dev);
++static void netdev_error(struct net_device *dev, int intr_status);
++static void set_rx_mode(struct net_device *dev);
++static struct net_device_stats *get_stats(struct net_device *dev);
++static int mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
++static int netdev_close(struct net_device *dev);
+
+
+
+ /* A list of our installed devices, for removing the driver module. */
+-static struct device *root_net_dev = NULL;
+-
+-/* Ideally we would detect all network cards in slot order. That would
+- be best done a central PCI probe dispatch, which wouldn't work
+- well when dynamically adding drivers. So instead we detect just the
+- cards we know about in slot order. */
+-
+-static int pci_etherdev_probe(struct device *dev, struct pci_id_info pci_tbl[])
+-{
+- int cards_found = 0;
+- int pci_index = 0;
+- unsigned char pci_bus, pci_device_fn;
+-
+- if ( ! pcibios_present())
+- return -ENODEV;
+-
+- for (;pci_index < 0xff; pci_index++) {
+- u16 vendor, device, pci_command, new_command;
+- int chip_idx, irq;
+- long pciaddr;
+- long ioaddr;
+-
+- if (pcibios_find_class (PCI_CLASS_NETWORK_ETHERNET << 8, pci_index,
+- &pci_bus, &pci_device_fn)
+- != PCIBIOS_SUCCESSFUL)
+- break;
+- pcibios_read_config_word(pci_bus, pci_device_fn,
+- PCI_VENDOR_ID, &vendor);
+- pcibios_read_config_word(pci_bus, pci_device_fn,
+- PCI_DEVICE_ID, &device);
+-
+- for (chip_idx = 0; pci_tbl[chip_idx].vendor_id; chip_idx++)
+- if (vendor == pci_tbl[chip_idx].vendor_id
+- && (device & pci_tbl[chip_idx].device_id_mask) ==
+- pci_tbl[chip_idx].device_id)
+- break;
+- if (pci_tbl[chip_idx].vendor_id == 0) /* Compiled out! */
+- continue;
+-
+- {
+-#if defined(PCI_SUPPORT_VER2)
+- struct pci_dev *pdev = pci_find_slot(pci_bus, pci_device_fn);
+-#ifdef VIA_USE_IO
+- pciaddr = pdev->base_address[0];
+-#else
+- pciaddr = pdev->base_address[1];
+-#endif
+- irq = pdev->irq;
+-#else
+- u32 pci_memaddr;
+- u8 pci_irq_line;
+- pcibios_read_config_byte(pci_bus, pci_device_fn,
+- PCI_INTERRUPT_LINE, &pci_irq_line);
+-#ifdef VIA_USE_IO
+- pcibios_read_config_dword(pci_bus, pci_device_fn,
+- PCI_BASE_ADDRESS_0, &pci_memaddr);
+- pciaddr = pci_memaddr;
+-#else
+- pcibios_read_config_dword(pci_bus, pci_device_fn,
+- PCI_BASE_ADDRESS_1, &pci_memaddr);
+- pciaddr = pci_memaddr;
+-#endif
+- irq = pci_irq_line;
+-#endif
+- }
+-
+- if (debug > 2)
+- printk(KERN_INFO "Found %s at PCI address %#lx, IRQ %d.\n",
+- pci_tbl[chip_idx].name, pciaddr, irq);
+-
+- if (pci_tbl[chip_idx].flags & PCI_USES_IO) {
+- if (check_region(pciaddr, pci_tbl[chip_idx].io_size))
+- continue;
+- ioaddr = pciaddr & ~3;
+- } else if ((ioaddr = (long)ioremap(pciaddr & ~0xf,
+- pci_tbl[chip_idx].io_size)) == 0) {
+- printk(KERN_INFO "Failed to map PCI address %#lx.\n",
+- pciaddr);
+- continue;
+- }
+-
+- pcibios_read_config_word(pci_bus, pci_device_fn,
+- PCI_COMMAND, &pci_command);
+- new_command = pci_command | (pci_tbl[chip_idx].flags & 7);
+- if (pci_command != new_command) {
+- printk(KERN_INFO " The PCI BIOS has not enabled the"
+- " device at %d/%d! Updating PCI command %4.4x->%4.4x.\n",
+- pci_bus, pci_device_fn, pci_command, new_command);
+- pcibios_write_config_word(pci_bus, pci_device_fn,
+- PCI_COMMAND, new_command);
+- }
+-
+- dev = pci_tbl[chip_idx].probe1(pci_bus, pci_device_fn, dev, ioaddr,
+- irq, chip_idx, cards_found);
+-
+- if (dev && (pci_tbl[chip_idx].flags & PCI_COMMAND_MASTER)) {
+- u8 pci_latency;
+- pcibios_read_config_byte(pci_bus, pci_device_fn,
+- PCI_LATENCY_TIMER, &pci_latency);
+- if (pci_latency < min_pci_latency) {
+- printk(KERN_INFO " PCI latency timer (CFLT) is "
+- "unreasonably low at %d. Setting to %d clocks.\n",
+- pci_latency, min_pci_latency);
+- pcibios_write_config_byte(pci_bus, pci_device_fn,
+- PCI_LATENCY_TIMER, min_pci_latency);
+- }
+- }
+- dev = 0;
+- cards_found++;
+- }
+-
+- return cards_found ? 0 : -ENODEV;
+-}
++static struct net_device *root_net_dev = NULL;
+
+ #ifndef MODULE
+-int via_rhine_probe(struct device *dev)
++int via_rhine_probe(struct net_device *dev)
+ {
+- return pci_etherdev_probe(dev, pci_tbl);
++ printk(KERN_INFO "%s" KERN_INFO "%s", version1, version2);
++ return pci_drv_register(&via_rhine_drv_id, dev);
+ }
+ #endif
+
+-static struct device *via_probe1(int pci_bus, int pci_devfn,
+- struct device *dev, long ioaddr, int irq,
+- int chip_id, int card_idx)
++static void *via_probe1(struct pci_dev *pdev, void *init_dev,
++ long ioaddr, int irq, int chip_idx, int card_idx)
+ {
+- static int did_version = 0; /* Already printed version info */
++ struct net_device *dev;
+ struct netdev_private *np;
++ void *priv_mem;
+ int i, option = card_idx < MAX_UNITS ? options[card_idx] : 0;
+
+- if (debug > 0 && did_version++ == 0)
+- printk(KERN_INFO "%s" KERN_INFO "%s", versionA, versionB);
+-
+- dev = init_etherdev(dev, 0);
++ dev = init_etherdev(init_dev, 0);
++ if (!dev)
++ return NULL;
+
+ printk(KERN_INFO "%s: %s at 0x%lx, ",
+- dev->name, pci_tbl[chip_id].name, ioaddr);
++ dev->name, pci_tbl[chip_idx].name, ioaddr);
+
+- /* Ideally we would be read the EEPROM but access may be locked. */
+- for (i = 0; i <6; i++)
++ /* We would prefer to directly read the EEPROM but access may be locked. */
++ for (i = 0; i < 6; i++)
+ dev->dev_addr[i] = readb(ioaddr + StationAddr + i);
++ if (memcmp(dev->dev_addr, "\0\0\0\0\0", 6) == 0) {
++ /* Reload the station address from the EEPROM. */
++ writeb(0x20, ioaddr + MACRegEEcsr);
++ /* Typically 2 cycles to reload. */
++ for (i = 0; i < 150; i++)
++ if (! (readb(ioaddr + MACRegEEcsr) & 0x20))
++ break;
++ for (i = 0; i < 6; i++)
++ dev->dev_addr[i] = readb(ioaddr + StationAddr + i);
++ if (memcmp(dev->dev_addr, "\0\0\0\0\0", 6) == 0) {
++ printk(" (MISSING EEPROM ADDRESS)");
++ /* Fill a temp addr with the "locally administered" bit set. */
++ memcpy(dev->dev_addr, ">Linux", 6);
++ }
++ }
++
+ for (i = 0; i < 5; i++)
+- printk("%2.2x:", dev->dev_addr[i]);
++ printk("%2.2x:", dev->dev_addr[i]);
+ printk("%2.2x, IRQ %d.\n", dev->dev_addr[i], irq);
+
+-#ifdef VIA_USE_IO
+- request_region(ioaddr, pci_tbl[chip_id].io_size, dev->name);
++ /* Make certain the descriptor lists are cache-aligned. */
++ priv_mem = kmalloc(sizeof(*np) + PRIV_ALIGN, GFP_KERNEL);
++ /* Check for the very unlikely case of no memory. */
++ if (priv_mem == NULL)
++ return NULL;
++
++#ifdef USE_IO_OPS
++ request_region(ioaddr, pci_tbl[chip_idx].io_size, dev->name);
+ #endif
+
+ /* Reset the chip to erase previous misconfiguration. */
+@@ -551,24 +526,27 @@
+ dev->base_addr = ioaddr;
+ dev->irq = irq;
+
+- /* Make certain the descriptor lists are cache-aligned. */
+- np = (void *)(((long)kmalloc(sizeof(*np), GFP_KERNEL) + 31) & ~31);
++ dev->priv = np = (void *)(((long)priv_mem + PRIV_ALIGN) & ~PRIV_ALIGN);
+ memset(np, 0, sizeof(*np));
+- dev->priv = np;
++ np->priv_addr = priv_mem;
+
+ np->next_module = root_net_dev;
+ root_net_dev = dev;
+
+- np->pci_bus = pci_bus;
+- np->pci_devfn = pci_devfn;
+- np->chip_id = chip_id;
++ np->pci_dev = pdev;
++ np->chip_id = chip_idx;
++ np->drv_flags = pci_tbl[chip_idx].drv_flags;
++ np->msg_level = (1 << debug) - 1;
++ np->rx_copybreak = rx_copybreak;
++ np->max_interrupt_work = max_interrupt_work;
++ np->multicast_filter_limit = multicast_filter_limit;
+
+ if (dev->mem_start)
+ option = dev->mem_start;
+
+ /* The lower four bits are the media type. */
+ if (option > 0) {
+- if (option & 0x200)
++ if (option & 0x220)
+ np->full_duplex = 1;
+ np->default_port = option & 15;
+ if (np->default_port)
+@@ -577,8 +555,11 @@
+ if (card_idx < MAX_UNITS && full_duplex[card_idx] > 0)
+ np->full_duplex = 1;
+
+- if (np->full_duplex)
++ if (np->full_duplex) {
++ printk(KERN_INFO "%s: Set to forced full duplex, autonegotiation"
++ " disabled.\n", dev->name);
+ np->duplex_lock = 1;
++ }
+
+ /* The chip-specific entries in the device structure. */
+ dev->open = &netdev_open;
+@@ -588,7 +569,7 @@
+ dev->set_multicast_list = &set_rx_mode;
+ dev->do_ioctl = &mii_ioctl;
+
+- if (cap_tbl[np->chip_id].flags & CanHaveMII) {
++ if (np->drv_flags & CanHaveMII) {
+ int phy, phy_idx = 0;
+ np->phys[0] = 1; /* Standard for this chip. */
+ for (phy = 1; phy < 32 && phy_idx < 4; phy++) {
+@@ -605,13 +586,30 @@
+ np->mii_cnt = phy_idx;
+ }
+
++ /* Allow forcing the media type. */
++ if (option > 0) {
++ if (option & 0x220)
++ np->full_duplex = 1;
++ np->default_port = option & 0x3ff;
++ if (np->default_port & 0x330) {
++ np->medialock = 1;
++ printk(KERN_INFO " Forcing %dMbs %s-duplex operation.\n",
++ (option & 0x300 ? 100 : 10),
++ (np->full_duplex ? "full" : "half"));
++ if (np->mii_cnt)
++ mdio_write(dev, np->phys[0], 0,
++ ((option & 0x300) ? 0x2000 : 0) | /* 100mbps? */
++ (np->full_duplex ? 0x0100 : 0)); /* Full duplex? */
++ }
++ }
++
+ return dev;
+ }
+
+
+ /* Read and write over the MII Management Data I/O (MDIO) interface. */
+
+-static int mdio_read(struct device *dev, int phy_id, int regnum)
++static int mdio_read(struct net_device *dev, int phy_id, int regnum)
+ {
+ long ioaddr = dev->base_addr;
+ int boguscnt = 1024;
+@@ -629,11 +627,23 @@
+ return readw(ioaddr + MIIData);
+ }
+
+-static void mdio_write(struct device *dev, int phy_id, int regnum, int value)
++static void mdio_write(struct net_device *dev, int phy_id, int regnum, int value)
+ {
++ struct netdev_private *np = (struct netdev_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+ int boguscnt = 1024;
+
++ if (phy_id == np->phys[0]) {
++ switch (regnum) {
++ case 0: /* Is user forcing speed/duplex? */
++ if (value & 0x9000) /* Autonegotiation. */
++ np->duplex_lock = 0;
++ else
++ np->full_duplex = (value & 0x0100) ? 1 : 0;
++ break;
++ case 4: np->advertising = value; break;
++ }
++ }
+ /* Wait for a previous command to complete. */
+ while ((readb(ioaddr + MIICmd) & 0x60) && --boguscnt > 0)
+ ;
+@@ -646,7 +656,7 @@
+ }
+
+
+-static int netdev_open(struct device *dev)
++static int netdev_open(struct net_device *dev)
+ {
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+@@ -655,15 +665,17 @@
+ /* Reset the chip. */
+ writew(CmdReset, ioaddr + ChipCmd);
+
+- if (request_irq(dev->irq, &intr_handler, SA_SHIRQ, dev->name, dev))
++ MOD_INC_USE_COUNT;
++
++ if (request_irq(dev->irq, &intr_handler, SA_SHIRQ, dev->name, dev)) {
++ MOD_DEC_USE_COUNT;
+ return -EAGAIN;
++ }
+
+- if (debug > 1)
++ if (np->msg_level & NETIF_MSG_IFUP)
+ printk(KERN_DEBUG "%s: netdev_open() irq %d.\n",
+ dev->name, dev->irq);
+
+- MOD_INC_USE_COUNT;
+-
+ init_ring(dev);
+
+ writel(virt_to_bus(np->rx_ring), ioaddr + RxRingPtr);
+@@ -673,7 +685,7 @@
+ writeb(dev->dev_addr[i], ioaddr + StationAddr + i);
+
+ /* Initialize other registers. */
+- writew(0x0006, ioaddr + PCIConfig); /* Tune configuration??? */
++ writew(0x0006, ioaddr + PCIBusConfig); /* Tune configuration??? */
+ /* Configure the FIFO thresholds. */
+ writeb(0x20, ioaddr + TxConfig); /* Initial threshold 32 bytes */
+ np->tx_thresh = 0x20;
+@@ -682,26 +694,29 @@
+ if (dev->if_port == 0)
+ dev->if_port = np->default_port;
+
+- dev->tbusy = 0;
+- dev->interrupt = 0;
+- np->in_interrupt = 0;
+-
+ set_rx_mode(dev);
++ netif_start_tx_queue(dev);
+
+- dev->start = 1;
+-
++ np->intr_enable = IntrRxDone | IntrRxErr | IntrRxEmpty |
++ IntrRxOverflow| IntrRxDropped| IntrTxDone | IntrTxAbort |
++ IntrTxUnderrun | IntrPCIErr | IntrStatsMax | IntrLinkChange |
++ IntrMIIChange;
+ /* Enable interrupts by setting the interrupt mask. */
+- writew(IntrRxDone | IntrRxErr | IntrRxEmpty| IntrRxOverflow| IntrRxDropped|
+- IntrTxDone | IntrTxAbort | IntrTxUnderrun |
+- IntrPCIErr | IntrStatsMax | IntrLinkChange | IntrMIIChange,
+- ioaddr + IntrEnable);
++ writew(np->intr_enable, ioaddr + IntrEnable);
+
+ np->chip_cmd = CmdStart|CmdTxOn|CmdRxOn|CmdNoTxPoll;
++ if (np->duplex_lock)
++ np->chip_cmd |= CmdFDuplex;
+ writew(np->chip_cmd, ioaddr + ChipCmd);
+
+ check_duplex(dev);
++ /* The LED outputs of various MII xcvrs should be configured. */
++ /* For NS or Mison phys, turn on bit 1 in register 0x17 */
++ /* For ESI phys, turn on bit 7 in register 0x17. */
++ mdio_write(dev, np->phys[0], 0x17, mdio_read(dev, np->phys[0], 0x17) |
++ (np->drv_flags & HasESIPhy) ? 0x0080 : 0x0001);
+
+- if (debug > 2)
++ if (np->msg_level & NETIF_MSG_IFUP)
+ printk(KERN_DEBUG "%s: Done netdev_open(), status %4.4x "
+ "MII status: %4.4x.\n",
+ dev->name, readw(ioaddr + ChipCmd),
+@@ -709,7 +724,7 @@
+
+ /* Set the timer to check for link beat. */
+ init_timer(&np->timer);
+- np->timer.expires = RUN_AT(1);
++ np->timer.expires = jiffies + 2;
+ np->timer.data = (unsigned long)dev;
+ np->timer.function = &netdev_timer; /* timer handler */
+ add_timer(&np->timer);
+@@ -717,19 +732,20 @@
+ return 0;
+ }
+
+-static void check_duplex(struct device *dev)
++static void check_duplex(struct net_device *dev)
+ {
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+ int mii_reg5 = mdio_read(dev, np->phys[0], 5);
++ int negotiated = mii_reg5 & np->advertising;
+ int duplex;
+
+ if (np->duplex_lock || mii_reg5 == 0xffff)
+ return;
+- duplex = (mii_reg5 & 0x0100) || (mii_reg5 & 0x01C0) == 0x0040;
++ duplex = (negotiated & 0x0100) || (negotiated & 0x01C0) == 0x0040;
+ if (np->full_duplex != duplex) {
+ np->full_duplex = duplex;
+- if (debug)
++ if (np->msg_level & NETIF_MSG_LINK)
+ printk(KERN_INFO "%s: Setting %s-duplex based on MII #%d link"
+ " partner capability of %4.4x.\n", dev->name,
+ duplex ? "full" : "half", np->phys[0], mii_reg5);
+@@ -743,22 +759,27 @@
+
+ static void netdev_timer(unsigned long data)
+ {
+- struct device *dev = (struct device *)data;
++ struct net_device *dev = (struct net_device *)data;
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+ int next_tick = 10*HZ;
+
+- if (debug > 3) {
++ if (np->msg_level & NETIF_MSG_TIMER) {
+ printk(KERN_DEBUG "%s: VIA Rhine monitor tick, status %4.4x.\n",
+ dev->name, readw(ioaddr + IntrStatus));
+ }
++ if (netif_queue_paused(dev)
++ && np->cur_tx - np->dirty_tx > 1
++ && jiffies - dev->trans_start > TX_TIMEOUT)
++ tx_timeout(dev);
++
+ check_duplex(dev);
+
+- np->timer.expires = RUN_AT(next_tick);
++ np->timer.expires = jiffies + next_tick;
+ add_timer(&np->timer);
+ }
+
+-static void tx_timeout(struct device *dev)
++static void tx_timeout(struct net_device *dev)
+ {
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+@@ -768,20 +789,23 @@
+ dev->name, readw(ioaddr + IntrStatus),
+ mdio_read(dev, np->phys[0], 1));
+
+- /* Perhaps we should reinitialize the hardware here. */
+- dev->if_port = 0;
+- /* Stop and restart the chip's Tx processes . */
+-
+- /* Trigger an immediate transmit demand. */
+-
+- dev->trans_start = jiffies;
+- np->stats.tx_errors++;
+- return;
++ /* Perhaps we should reinitialize the hardware here. */
++ dev->if_port = 0;
++ /* Restart the chip's Tx processes . */
++ writel(virt_to_bus(np->tx_ring + (np->dirty_tx % TX_RING_SIZE)),
++ ioaddr + TxRingPtr);
++ writew(CmdTxDemand | np->chip_cmd, dev->base_addr + ChipCmd);
++
++ /* Trigger an immediate transmit demand. */
++
++ dev->trans_start = jiffies;
++ np->stats.tx_errors++;
++ return;
+ }
+
+
+ /* Initialize the Rx and Tx rings, along with various 'dev' bits. */
+-static void init_ring(struct device *dev)
++static void init_ring(struct net_device *dev)
+ {
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+ int i;
+@@ -790,93 +814,105 @@
+ np->cur_rx = np->cur_tx = 0;
+ np->dirty_rx = np->dirty_tx = 0;
+
+- np->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
++ /* Use 1518/+18 if the CRC is transferred. */
++ np->rx_buf_sz = dev->mtu + 14;
++ if (np->rx_buf_sz < PKT_BUF_SZ)
++ np->rx_buf_sz = PKT_BUF_SZ;
+ np->rx_head_desc = &np->rx_ring[0];
+
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ np->rx_ring[i].rx_status = 0;
+- np->rx_ring[i].rx_length = 0;
+- np->rx_ring[i].desc_length = np->rx_buf_sz;
+- np->rx_ring[i].next_desc = virt_to_bus(&np->rx_ring[i+1]);
++ np->rx_ring[i].desc_length = cpu_to_le32(np->rx_buf_sz);
++ np->rx_ring[i].next_desc = virt_to_le32desc(&np->rx_ring[i+1]);
+ np->rx_skbuff[i] = 0;
+ }
+ /* Mark the last entry as wrapping the ring. */
+- np->rx_ring[i-1].next_desc = virt_to_bus(&np->rx_ring[0]);
++ np->rx_ring[i-1].next_desc = virt_to_le32desc(&np->rx_ring[0]);
+
+- /* Fill in the Rx buffers. */
++ /* Fill in the Rx buffers. Handle allocation failure gracefully. */
+ for (i = 0; i < RX_RING_SIZE; i++) {
+ struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz);
+ np->rx_skbuff[i] = skb;
+ if (skb == NULL)
+ break;
+ skb->dev = dev; /* Mark as being used by this device. */
+- np->rx_ring[i].addr = virt_to_bus(skb->tail);
+- np->rx_ring[i].rx_status = 0;
+- np->rx_ring[i].rx_length = DescOwn;
++ np->rx_ring[i].addr = virt_to_le32desc(skb->tail);
++ np->rx_ring[i].rx_status = cpu_to_le32(DescOwn);
+ }
+ np->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
+
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ np->tx_skbuff[i] = 0;
+- np->tx_ring[i].tx_own = 0;
+- np->tx_ring[i].desc_length = 0x00e08000;
+- np->tx_ring[i].next_desc = virt_to_bus(&np->tx_ring[i+1]);
+- np->tx_buf[i] = kmalloc(PKT_BUF_SZ, GFP_KERNEL);
++ np->tx_ring[i].tx_status = 0;
++ np->tx_ring[i].desc_length = cpu_to_le32(0x00e08000);
++ np->tx_ring[i].next_desc = virt_to_le32desc(&np->tx_ring[i+1]);
++ np->tx_buf[i] = 0; /* Allocated as/if needed. */
+ }
+- np->tx_ring[i-1].next_desc = virt_to_bus(&np->tx_ring[0]);
++ np->tx_ring[i-1].next_desc = virt_to_le32desc(&np->tx_ring[0]);
+
+ return;
+ }
+
+-static int start_tx(struct sk_buff *skb, struct device *dev)
++static int start_tx(struct sk_buff *skb, struct net_device *dev)
+ {
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+ unsigned entry;
+
+- /* Block a timer-based transmit from overlapping. This could better be
+- done with atomic_swap(1, dev->tbusy), but set_bit() works as well. */
+- if (test_and_set_bit(0, (void*)&dev->tbusy) != 0) {
+- if (jiffies - dev->trans_start < TX_TIMEOUT)
+- return 1;
+- tx_timeout(dev);
++ /* Block a timer-based transmit from overlapping. This happens when
++ packets are presumed lost, and we use this check the Tx status. */
++ if (netif_pause_tx_queue(dev) != 0) {
++ /* This watchdog code is redundant with the media monitor timer. */
++ if (jiffies - dev->trans_start > TX_TIMEOUT)
++ tx_timeout(dev);
+ return 1;
+ }
+
+- /* Caution: the write order is important here, set the field
+- with the "ownership" bits last. */
++ /* Caution: the write order is important here, set the descriptor word
++ with the "ownership" bit last. No SMP locking is needed if the
++ cur_tx is incremented after the descriptor is consistent. */
+
+ /* Calculate the next Tx descriptor entry. */
+ entry = np->cur_tx % TX_RING_SIZE;
+
+ np->tx_skbuff[entry] = skb;
+
+- if ((long)skb->data & 3) { /* Must use alignment buffer. */
++ if ((np->drv_flags & ReqTxAlign) && ((long)skb->data & 3)) {
++ /* Must use alignment buffer. */
+ if (np->tx_buf[entry] == NULL &&
+ (np->tx_buf[entry] = kmalloc(PKT_BUF_SZ, GFP_KERNEL)) == NULL)
+ return 1;
+ memcpy(np->tx_buf[entry], skb->data, skb->len);
+- np->tx_ring[entry].addr = virt_to_bus(np->tx_buf[entry]);
++ np->tx_ring[entry].addr = virt_to_le32desc(np->tx_buf[entry]);
+ } else
+- np->tx_ring[entry].addr = virt_to_bus(skb->data);
++ np->tx_ring[entry].addr = virt_to_le32desc(skb->data);
++ /* Explicitly flush packet data cache lines here. */
+
+- np->tx_ring[entry].desc_length = 0x00E08000 |
+- (skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN);
+- np->tx_ring[entry].tx_own = DescOwn;
++ np->tx_ring[entry].desc_length =
++ cpu_to_le32(0x00E08000 | (skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN));
++ np->tx_ring[entry].tx_status = cpu_to_le32(DescOwn);
+
+ np->cur_tx++;
+
+- /* Non-x86 Todo: explicitly flush cache lines here. */
++ /* Explicitly flush descriptor cache lines here. */
+
+ /* Wake the potentially-idle transmit channel. */
+ writew(CmdTxDemand | np->chip_cmd, dev->base_addr + ChipCmd);
+
+- if (np->cur_tx - np->dirty_tx < TX_RING_SIZE - 1)
+- clear_bit(0, (void*)&dev->tbusy); /* Typical path */
+- else
++ if (np->cur_tx - np->dirty_tx >= TX_QUEUE_LEN - 1) {
+ np->tx_full = 1;
++ /* Check for a just-cleared queue. */
++ if (np->cur_tx - (volatile unsigned int)np->dirty_tx
++ < TX_QUEUE_LEN - 2) {
++ np->tx_full = 0;
++ netif_unpause_tx_queue(dev);
++ } else
++ netif_stop_tx_queue(dev);
++ } else
++ netif_unpause_tx_queue(dev); /* Typical path */
++
+ dev->trans_start = jiffies;
+
+- if (debug > 4) {
++ if (np->msg_level & NETIF_MSG_TX_QUEUED) {
+ printk(KERN_DEBUG "%s: Transmit frame #%d queued in slot %d.\n",
+ dev->name, np->cur_tx, entry);
+ }
+@@ -887,27 +923,10 @@
+ after the Tx thread. */
+ static void intr_handler(int irq, void *dev_instance, struct pt_regs *rgs)
+ {
+- struct device *dev = (struct device *)dev_instance;
+- struct netdev_private *np;
+- long ioaddr, boguscnt = max_interrupt_work;
+-
+- ioaddr = dev->base_addr;
+- np = (struct netdev_private *)dev->priv;
+-#if defined(__i386__)
+- /* A lock to prevent simultaneous entry bug on Intel SMP machines. */
+- if (test_and_set_bit(0, (void*)&dev->interrupt)) {
+- printk(KERN_ERR"%s: SMP simultaneous entry of an interrupt handler.\n",
+- dev->name);
+- dev->interrupt = 0; /* Avoid halting machine. */
+- return;
+- }
+-#else
+- if (dev->interrupt) {
+- printk(KERN_ERR "%s: Re-entering the interrupt handler.\n", dev->name);
+- return;
+- }
+- dev->interrupt = 1;
+-#endif
++ struct net_device *dev = (struct net_device *)dev_instance;
++ struct netdev_private *np = (void *)dev->priv;
++ long ioaddr = dev->base_addr;
++ int boguscnt = np->max_interrupt_work;
+
+ do {
+ u32 intr_status = readw(ioaddr + IntrStatus);
+@@ -915,7 +934,7 @@
+ /* Acknowledge all of the current interrupt sources ASAP. */
+ writew(intr_status & 0xffff, ioaddr + IntrStatus);
+
+- if (debug > 4)
++ if (np->msg_level & NETIF_MSG_INTR)
+ printk(KERN_DEBUG "%s: Interrupt, status %4.4x.\n",
+ dev->name, intr_status);
+
+@@ -928,15 +947,14 @@
+
+ for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
+ int entry = np->dirty_tx % TX_RING_SIZE;
+- int txstatus;
+- if (np->tx_ring[entry].tx_own)
++ int txstatus = le32_to_cpu(np->tx_ring[entry].tx_status);
++ if (txstatus & DescOwn)
+ break;
+- txstatus = np->tx_ring[entry].tx_status;
+- if (debug > 6)
+- printk(KERN_DEBUG " Tx scavenge %d status %4.4x.\n",
++ if (np->msg_level & NETIF_MSG_TX_DONE)
++ printk(KERN_DEBUG " Tx scavenge %d status %4.4x.\n",
+ entry, txstatus);
+ if (txstatus & 0x8000) {
+- if (debug > 1)
++ if (np->msg_level & NETIF_MSG_TX_ERR)
+ printk(KERN_DEBUG "%s: Transmit error, Tx status %4.4x.\n",
+ dev->name, txstatus);
+ np->stats.tx_errors++;
+@@ -953,22 +971,24 @@
+ #ifdef ETHER_STATS
+ if (txstatus & 0x0001) np->stats.tx_deferred++;
+ #endif
+- np->stats.collisions += (txstatus >> 3) & 15;
++ if (np->drv_flags & HasV1TxStat)
++ np->stats.collisions += (txstatus >> 3) & 15;
++ else
++ np->stats.collisions += txstatus & 15;
+ #if defined(NETSTATS_VER2)
+- np->stats.tx_bytes += np->tx_ring[entry].desc_length & 0x7ff;
++ np->stats.tx_bytes += np->tx_skbuff[entry]->len;
+ #endif
+ np->stats.tx_packets++;
+ }
+ /* Free the original skb. */
+- dev_free_skb(np->tx_skbuff[entry]);
++ dev_free_skb_irq(np->tx_skbuff[entry]);
+ np->tx_skbuff[entry] = 0;
+ }
+- if (np->tx_full && dev->tbusy
+- && np->cur_tx - np->dirty_tx < TX_RING_SIZE - 4) {
+- /* The ring is no longer full, clear tbusy. */
++ /* Note the 4 slot hysteresis in mark the queue non-full. */
++ if (np->tx_full && np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
++ /* The ring is no longer full, allow new TX entries. */
+ np->tx_full = 0;
+- clear_bit(0, (void*)&dev->tbusy);
+- mark_bh(NET_BH);
++ netif_resume_tx_queue(dev);
+ }
+
+ /* Abnormal error summary/uncommon events handlers. */
+@@ -984,38 +1004,33 @@
+ }
+ } while (1);
+
+- if (debug > 3)
++ if (np->msg_level & NETIF_MSG_INTR)
+ printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
+- dev->name, readw(ioaddr + IntrStatus));
++ dev->name, (int)readw(ioaddr + IntrStatus));
+
+-#if defined(__i386__)
+- clear_bit(0, (void*)&dev->interrupt);
+-#else
+- dev->interrupt = 0;
+-#endif
+ return;
+ }
+
+ /* This routine is logically part of the interrupt handler, but isolated
+ for clarity and better register allocation. */
+-static int netdev_rx(struct device *dev)
++static int netdev_rx(struct net_device *dev)
+ {
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+ int entry = np->cur_rx % RX_RING_SIZE;
+ int boguscnt = np->dirty_rx + RX_RING_SIZE - np->cur_rx;
+
+- if (debug > 4) {
+- printk(KERN_DEBUG " In netdev_rx(), entry %d status %4.4x.\n",
+- entry, np->rx_head_desc->rx_length);
++ if (np->msg_level & NETIF_MSG_RX_STATUS) {
++ printk(KERN_DEBUG " In netdev_rx(), entry %d status %8.8x.\n",
++ entry, np->rx_head_desc->rx_status);
+ }
+
+ /* If EOP is set on the next entry, it's a new packet. Send it up. */
+- while ( ! (np->rx_head_desc->rx_length & DescOwn)) {
++ while ( ! (np->rx_head_desc->rx_status & cpu_to_le32(DescOwn))) {
+ struct rx_desc *desc = np->rx_head_desc;
+- int data_size = desc->rx_length;
+- u16 desc_status = desc->rx_status;
++ u32 desc_status = le32_to_cpu(desc->rx_status);
++ int data_size = desc_status >> 16;
+
+- if (debug > 4)
++ if (np->msg_level & NETIF_MSG_RX_STATUS)
+ printk(KERN_DEBUG " netdev_rx() status is %4.4x.\n",
+ desc_status);
+ if (--boguscnt < 0)
+@@ -1031,7 +1046,7 @@
+ np->stats.rx_length_errors++;
+ } else if (desc_status & RxErr) {
+ /* There was a error. */
+- if (debug > 2)
++ if (np->msg_level & NETIF_MSG_RX_ERR)
+ printk(KERN_DEBUG " netdev_rx() Rx error was %8.8x.\n",
+ desc_status);
+ np->stats.rx_errors++;
+@@ -1043,28 +1058,38 @@
+ } else {
+ struct sk_buff *skb;
+ /* Length should omit the CRC */
+- u16 pkt_len = data_size - 4;
++ int pkt_len = data_size - 4;
+
+ /* Check if the packet is long enough to accept without copying
+ to a minimally-sized skbuff. */
+- if (pkt_len < rx_copybreak
++ if (pkt_len < np->rx_copybreak
+ && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
+ skb->dev = dev;
+ skb_reserve(skb, 2); /* 16 byte align the IP header */
+-#if ! defined(__alpha__) || USE_IP_COPYSUM /* Avoid misaligned on Alpha */
+- eth_copy_and_sum(skb, bus_to_virt(desc->addr),
+- pkt_len, 0);
++#if HAS_IP_COPYSUM /* Call copy + cksum if available. */
++ eth_copy_and_sum(skb, np->rx_skbuff[entry]->tail, pkt_len, 0);
+ skb_put(skb, pkt_len);
+ #else
+- memcpy(skb_put(skb,pkt_len), bus_to_virt(desc->addr), pkt_len);
++ memcpy(skb_put(skb, pkt_len), np->rx_skbuff[entry]->tail,
++ pkt_len);
+ #endif
+ } else {
+ skb_put(skb = np->rx_skbuff[entry], pkt_len);
+ np->rx_skbuff[entry] = NULL;
+ }
+ skb->protocol = eth_type_trans(skb, dev);
++ { /* Use hardware checksum info. */
++ int rxtype = le32_to_cpu(desc->desc_length);
++ int csum_bits = rxtype & RxTypeCsumMask;
++ if (csum_bits == RxTypeUDPSumOK ||
++ csum_bits == RxTypeTCPSumOK)
++ skb->ip_summed = CHECKSUM_UNNECESSARY;
++ }
+ netif_rx(skb);
+ dev->last_rx = jiffies;
++#if defined(NETSTATS_VER2)
++ np->stats.rx_bytes += pkt_len;
++#endif
+ np->stats.rx_packets++;
+ }
+ entry = (++np->cur_rx) % RX_RING_SIZE;
+@@ -1081,10 +1106,9 @@
+ if (skb == NULL)
+ break; /* Better luck next round. */
+ skb->dev = dev; /* Mark as being used by this device. */
+- np->rx_ring[entry].addr = virt_to_bus(skb->tail);
++ np->rx_ring[entry].addr = virt_to_le32desc(skb->tail);
+ }
+- np->rx_ring[entry].rx_status = 0;
+- np->rx_ring[entry].rx_length = DescOwn;
++ np->rx_ring[entry].rx_status = cpu_to_le32(DescOwn);
+ }
+
+ /* Pre-emptively restart Rx engine. */
+@@ -1092,18 +1116,22 @@
+ return 0;
+ }
+
+-static void netdev_error(struct device *dev, int intr_status)
++static void netdev_error(struct net_device *dev, int intr_status)
+ {
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+
+ if (intr_status & (IntrMIIChange | IntrLinkChange)) {
+- if (readb(ioaddr + MIIStatus) & 0x02)
++ if (readb(ioaddr + MIIStatus) & 0x02) {
+ /* Link failed, restart autonegotiation. */
+- mdio_write(dev, np->phys[0], 0, 0x3300);
+- else
++ if (np->drv_flags & HasDavicomPhy)
++ mdio_write(dev, np->phys[0], 0, 0x3300);
++ netif_link_down(dev);
++ } else {
++ netif_link_up(dev);
+ check_duplex(dev);
+- if (debug)
++ }
++ if (np->msg_level & NETIF_MSG_LINK)
+ printk(KERN_ERR "%s: MII status changed: Autonegotiation "
+ "advertising %4.4x partner %4.4x.\n", dev->name,
+ mdio_read(dev, np->phys[0], 4),
+@@ -1112,20 +1140,24 @@
+ if (intr_status & IntrStatsMax) {
+ np->stats.rx_crc_errors += readw(ioaddr + RxCRCErrs);
+ np->stats.rx_missed_errors += readw(ioaddr + RxMissed);
+- writel(0, RxMissed);
++ writel(0, ioaddr + RxMissed);
+ }
+ if (intr_status & IntrTxAbort) {
+ /* Stats counted in Tx-done handler, just restart Tx. */
++ writel(virt_to_bus(&np->tx_ring[np->dirty_tx % TX_RING_SIZE]),
++ ioaddr + TxRingPtr);
+ writew(CmdTxDemand | np->chip_cmd, dev->base_addr + ChipCmd);
+ }
+ if (intr_status & IntrTxUnderrun) {
+ if (np->tx_thresh < 0xE0)
+ writeb(np->tx_thresh += 0x20, ioaddr + TxConfig);
+- if (debug > 1)
++ if (np->msg_level & NETIF_MSG_TX_ERR)
+ printk(KERN_INFO "%s: Transmitter underrun, increasing Tx "
+ "threshold setting to %2.2x.\n", dev->name, np->tx_thresh);
+ }
+- if ((intr_status & ~(IntrLinkChange|IntrStatsMax|IntrTxAbort)) && debug) {
++ if ((intr_status & ~(IntrLinkChange | IntrMIIChange | IntrStatsMax |
++ IntrTxAbort|IntrTxAborted | IntrNormalSummary))
++ && (np->msg_level & NETIF_MSG_DRV)) {
+ printk(KERN_ERR "%s: Something Wicked happened! %4.4x.\n",
+ dev->name, intr_status);
+ /* Recovery for other fault sources not known. */
+@@ -1133,7 +1165,7 @@
+ }
+ }
+
+-static struct enet_statistics *get_stats(struct device *dev)
++static struct net_device_stats *get_stats(struct net_device *dev)
+ {
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+@@ -1143,7 +1175,7 @@
+ non-critical. */
+ np->stats.rx_crc_errors += readw(ioaddr + RxCRCErrs);
+ np->stats.rx_missed_errors += readw(ioaddr + RxMissed);
+- writel(0, RxMissed);
++ writel(0, ioaddr + RxMissed);
+
+ return &np->stats;
+ }
+@@ -1154,20 +1186,20 @@
+ static unsigned const ethernet_polynomial = 0x04c11db7U;
+ static inline u32 ether_crc(int length, unsigned char *data)
+ {
+- int crc = -1;
++ int crc = -1;
+
+- while(--length >= 0) {
++ while(--length >= 0) {
+ unsigned char current_octet = *data++;
+ int bit;
+ for (bit = 0; bit < 8; bit++, current_octet >>= 1) {
+ crc = (crc << 1) ^
+ ((crc < 0) ^ (current_octet & 1) ? ethernet_polynomial : 0);
+ }
+- }
+- return crc;
++ }
++ return crc;
+ }
+
+-static void set_rx_mode(struct device *dev)
++static void set_rx_mode(struct net_device *dev)
+ {
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+ long ioaddr = dev->base_addr;
+@@ -1178,9 +1210,11 @@
+ /* Unconditionally log net taps. */
+ printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n", dev->name);
+ rx_mode = 0x1C;
+- } else if ((dev->mc_count > multicast_filter_limit)
++ } else if ((dev->mc_count > np->multicast_filter_limit)
+ || (dev->flags & IFF_ALLMULTI)) {
+ /* Too many to match, or accept all multicasts. */
++ writel(0xffffffff, ioaddr + MulticastFilter0);
++ writel(0xffffffff, ioaddr + MulticastFilter1);
+ rx_mode = 0x0C;
+ } else {
+ struct dev_mc_list *mclist;
+@@ -1198,44 +1232,67 @@
+ writeb(np->rx_thresh | rx_mode, ioaddr + RxConfig);
+ }
+
+-static int mii_ioctl(struct device *dev, struct ifreq *rq, int cmd)
++static int mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+ {
++ struct netdev_private *np = (struct netdev_private *)dev->priv;
+ u16 *data = (u16 *)&rq->ifr_data;
++ u32 *data32 = (void *)&rq->ifr_data;
+
+ switch(cmd) {
+- case SIOCDEVPRIVATE: /* Get the address of the PHY in use. */
+- data[0] = ((struct netdev_private *)dev->priv)->phys[0] & 0x1f;
++ case 0x8947: case 0x89F0:
++ /* SIOCGMIIPHY: Get the address of the PHY in use. */
++ data[0] = np->phys[0] & 0x1f;
+ /* Fall Through */
+- case SIOCDEVPRIVATE+1: /* Read the specified MII register. */
++ case 0x8948: case 0x89F1:
++ /* SIOCGMIIREG: Read the specified MII register. */
+ data[3] = mdio_read(dev, data[0] & 0x1f, data[1] & 0x1f);
+ return 0;
+- case SIOCDEVPRIVATE+2: /* Write the specified MII register */
+- if (!suser())
++ case 0x8949: case 0x89F2:
++ /* SIOCSMIIREG: Write the specified MII register */
++ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
++ /* Note: forced media tracking is done in mdio_write(). */
+ mdio_write(dev, data[0] & 0x1f, data[1] & 0x1f, data[2]);
+ return 0;
++ case SIOCGPARAMS:
++ data32[0] = np->msg_level;
++ data32[1] = np->multicast_filter_limit;
++ data32[2] = np->max_interrupt_work;
++ data32[3] = np->rx_copybreak;
++ return 0;
++ case SIOCSPARAMS:
++ if (!capable(CAP_NET_ADMIN))
++ return -EPERM;
++ np->msg_level = data32[0];
++ np->multicast_filter_limit = data32[1];
++ np->max_interrupt_work = data32[2];
++ np->rx_copybreak = data32[3];
++ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+ }
+
+-static int netdev_close(struct device *dev)
++static int netdev_close(struct net_device *dev)
+ {
+ long ioaddr = dev->base_addr;
+ struct netdev_private *np = (struct netdev_private *)dev->priv;
+ int i;
+
+- dev->start = 0;
+- dev->tbusy = 1;
++ netif_stop_tx_queue(dev);
+
+- if (debug > 1)
++ if (np->msg_level & NETIF_MSG_IFDOWN)
+ printk(KERN_DEBUG "%s: Shutting down ethercard, status was %4.4x.\n",
+ dev->name, readw(ioaddr + ChipCmd));
+
++ /* Switch to loopback mode to avoid hardware races. */
++ writeb(np->tx_thresh | 0x01, ioaddr + TxConfig);
++
+ /* Disable interrupts by clearing the interrupt mask. */
+ writew(0x0000, ioaddr + IntrEnable);
+
+ /* Stop the chip's Tx and Rx processes. */
++ np->chip_cmd = CmdStop;
+ writew(CmdStop, ioaddr + ChipCmd);
+
+ del_timer(&np->timer);
+@@ -1244,7 +1301,7 @@
+
+ /* Free all the skbuffs in the Rx queue. */
+ for (i = 0; i < RX_RING_SIZE; i++) {
+- np->rx_ring[i].rx_length = 0;
++ np->rx_ring[i].rx_status = 0;
+ np->rx_ring[i].addr = 0xBADF00D0; /* An invalid address. */
+ if (np->rx_skbuff[i]) {
+ #if LINUX_VERSION_CODE < 0x20100
+@@ -1258,6 +1315,10 @@
+ if (np->tx_skbuff[i])
+ dev_free_skb(np->tx_skbuff[i]);
+ np->tx_skbuff[i] = 0;
++ if (np->tx_buf[i]) {
++ kfree(np->tx_buf[i]);
++ np->tx_buf[i] = 0;
++ }
+ }
+
+ MOD_DEC_USE_COUNT;
+@@ -1265,42 +1326,90 @@
+ return 0;
+ }
+
++static int via_pwr_event(void *dev_instance, int event)
++{
++ struct net_device *dev = dev_instance;
++ struct netdev_private *np = (struct netdev_private *)dev->priv;
++ long ioaddr = dev->base_addr;
++
++ if (np->msg_level & NETIF_MSG_LINK)
++ printk(KERN_DEBUG "%s: Handling power event %d.\n", dev->name, event);
++ switch(event) {
++ case DRV_ATTACH:
++ MOD_INC_USE_COUNT;
++ break;
++ case DRV_SUSPEND:
++ /* Disable interrupts, stop Tx and Rx. */
++ writew(0x0000, ioaddr + IntrEnable);
++ /* Stop the chip's Tx and Rx processes. */
++ writew(CmdStop, ioaddr + ChipCmd);
++ break;
++ case DRV_RESUME:
++ /* This is incomplete: the actions are very chip specific. */
++ set_rx_mode(dev);
++ netif_start_tx_queue(dev);
++ writew(np->chip_cmd, ioaddr + ChipCmd);
++ writew(np->intr_enable, ioaddr + IntrEnable);
++ break;
++ case DRV_DETACH: {
++ struct net_device **devp, **next;
++ if (dev->flags & IFF_UP) {
++ /* Some, but not all, kernel versions close automatically. */
++ dev_close(dev);
++ dev->flags &= ~(IFF_UP|IFF_RUNNING);
++ }
++ unregister_netdev(dev);
++ release_region(dev->base_addr, pci_tbl[np->chip_id].io_size);
++#ifndef USE_IO_OPS
++ iounmap((char *)dev->base_addr);
++#endif
++ for (devp = &root_net_dev; *devp; devp = next) {
++ next = &((struct netdev_private *)(*devp)->priv)->next_module;
++ if (*devp == dev) {
++ *devp = *next;
++ break;
++ }
++ }
++ if (np->priv_addr)
++ kfree(np->priv_addr);
++ kfree(dev);
++ MOD_DEC_USE_COUNT;
++ break;
++ }
++ }
++
++ return 0;
++}
++
+
+ #ifdef MODULE
+ int init_module(void)
+ {
+- if (debug) /* Emit version even if no cards detected. */
+- printk(KERN_INFO "%s" KERN_INFO "%s", versionA, versionB);
+-#ifdef CARDBUS
+- register_driver(&etherdev_ops);
+- return 0;
+-#else
+- return pci_etherdev_probe(NULL, pci_tbl);
+-#endif
++ if (debug >= NETIF_MSG_DRV) /* Emit version even if no cards detected. */
++ printk(KERN_INFO "%s" KERN_INFO "%s", version1, version2);
++ return pci_drv_register(&via_rhine_drv_id, NULL);
+ }
+
+ void cleanup_module(void)
+ {
++ struct net_device *next_dev;
+
+-#ifdef CARDBUS
+- unregister_driver(&etherdev_ops);
+-#endif
++ pci_drv_unregister(&via_rhine_drv_id);
+
+ /* No need to check MOD_IN_USE, as sys_delete_module() checks. */
+ while (root_net_dev) {
+- struct netdev_private *np =
+- (struct netdev_private *)(root_net_dev->priv);
++ struct netdev_private *np = (void *)(root_net_dev->priv);
+ unregister_netdev(root_net_dev);
+-#ifdef VIA_USE_IO
++#ifdef USE_IO_OPS
+ release_region(root_net_dev->base_addr, pci_tbl[np->chip_id].io_size);
+ #else
+ iounmap((char *)(root_net_dev->base_addr));
+ #endif
++ next_dev = np->next_module;
++ if (np->priv_addr)
++ kfree(np->priv_addr);
+ kfree(root_net_dev);
+- root_net_dev = np->next_module;
+-#if 0
+- kfree(np); /* Assumption: no struct realignment. */
+-#endif
++ root_net_dev = next_dev;
+ }
+ }
+
+@@ -1308,8 +1417,9 @@
+
+ /*
+ * Local variables:
+- * compile-command: "gcc -DMODULE -D__KERNEL__ -I/usr/src/linux/net/inet -Wall -Wstrict-prototypes -O6 -c via-rhine.c `[ -f /usr/include/linux/modversions.h ] && echo -DMODVERSIONS`"
+- * SMP-compile-command: "gcc -D__SMP__ -DMODULE -D__KERNEL__ -I/usr/src/linux/net/inet -Wall -Wstrict-prototypes -O6 -c via-rhine.c `[ -f /usr/include/linux/modversions.h ] && echo -DMODVERSIONS`"
++ * compile-command: "make KERNVER=`uname -r` via-rhine.o"
++ * compile-cmd: "gcc -DMODULE -Wall -Wstrict-prototypes -O6 -c via-rhine.c"
++ * simple-compile-command: "gcc -DMODULE -O6 -c via-rhine.c"
+ * c-indent-level: 4
+ * c-basic-offset: 4
+ * tab-width: 4
+Index: linux/src/drivers/net/winbond-840.c
+===================================================================
+RCS file: linux/src/drivers/net/winbond-840.c
+diff -N linux/src/drivers/net/winbond-840.c
+--- /dev/null 1 Jan 1970 00:00:00 -0000
++++ linux/src/drivers/net/winbond-840.c 20 Aug 2004 10:32:55 -0000
+@@ -0,0 +1,1558 @@
++/* winbond-840.c: A Linux network device driver for the Winbond W89c840. */
++/*
++ Written 1998-2003 by Donald Becker.
++
++ This software may be used and distributed according to the terms of
++ the GNU General Public License (GPL), incorporated herein by reference.
++ Drivers based on or derived from this code fall under the GPL and must
++ retain the authorship, copyright and license notice. This file is not
++ a complete program and may only be used when the entire operating
++ system is licensed under the GPL.
++
++ The author may be reached as becker@scyld.com, or C/O
++ Scyld Computing Corporation
++ 914 Bay Ridge Road, Suite 220
++ Annapolis MD 21403
++
++ Support information and updates available at
++ http://www.scyld.com/network/drivers.html
++ The information and support mailing lists are based at
++ http://www.scyld.com/mailman/listinfo/
++
++ Do not remove the copyright infomation.
++ Do not change the version information unless an improvement has been made.
++ Merely removing my name, as Compex has done in the past, does not count
++ as an improvement.
++*/
++
++/* These identify the driver base version and may not be removed. */
++static const char version1[] =
++"winbond-840.c:v1.10 7/22/2003 Donald Becker <becker@scyld.com>\n";
++static const char version2[] =
++" http://www.scyld.com/network/drivers.html\n";
++
++/* Automatically extracted configuration info:
++probe-func: winbond840_probe
++config-in: tristate 'Winbond W89c840 Ethernet support' CONFIG_WINBOND_840
++
++c-help-name: Winbond W89c840 PCI Ethernet support
++c-help-symbol: CONFIG_WINBOND_840
++c-help: The winbond-840.c driver is for the Winbond W89c840 chip.
++c-help: This chip is named TX9882 on the Compex RL100-ATX board.
++c-help: More specific information and updates are available from
++c-help: http://www.scyld.com/network/drivers.html
++*/
++
++/* The user-configurable values.
++ These may be modified when a driver module is loaded.*/
++
++/* Message enable level: 0..31 = no..all messages. See NETIF_MSG docs. */
++static int debug = 2;
++
++/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
++static int max_interrupt_work = 20;
++
++/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
++ The '840 uses a 64 element hash table based on the Ethernet CRC. */
++static int multicast_filter_limit = 32;
++
++/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
++ Setting to > 1518 effectively disables this feature. */
++static int rx_copybreak = 0;
++
++/* Used to pass the media type, etc.
++ Both 'options[]' and 'full_duplex[]' should exist for driver
++ interoperability, however setting full_duplex[] is deprecated.
++ The media type is usually passed in 'options[]'.
++ The default is autonegotation for speed and duplex.
++ This should rarely be overridden.
++ Use option values 0x10/0x20 for 10Mbps, 0x100,0x200 for 100Mbps.
++ Use option values 0x10 and 0x100 for forcing half duplex fixed speed.
++ Use option values 0x20 and 0x200 for forcing full duplex operation.
++*/
++#define MAX_UNITS 8 /* More are supported, limit only on options */
++static int options[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
++static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
++
++/* Operational parameters that are set at compile time. */
++
++/* Keep the ring sizes a power of two for compile efficiency.
++ The compiler will convert <unsigned>'%'<2^N> into a bit mask.
++ Making the Tx ring too large decreases the effectiveness of channel
++ bonding and packet priority, confuses the system network buffer limits,
++ and wastes memory.
++ Larger receive rings merely waste memory.
++*/
++#define TX_RING_SIZE 16
++#define TX_QUEUE_LEN 10 /* Limit ring entries actually used, min 4. */
++#define RX_RING_SIZE 32
++
++/* The presumed FIFO size for working around the Tx-FIFO-overflow bug.
++ To avoid overflowing we don't queue again until we have room for a
++ full-size packet.
++ */
++#define TX_FIFO_SIZE (2048)
++#define TX_BUG_FIFO_LIMIT (TX_FIFO_SIZE-1514-16)
++
++/* Operational parameters that usually are not changed. */
++/* Time in jiffies before concluding the transmitter is hung.
++ Re-autonegotiation may take up to 3 seconds.
++ */
++#define TX_TIMEOUT (6*HZ)
++
++/* Allocation size of Rx buffers with normal sized Ethernet frames.
++ Do not change this value without good reason. This is not a limit,
++ but a way to keep a consistent allocation size among drivers.
++ */
++#define PKT_BUF_SZ 1536
++
++#ifndef __KERNEL__
++#define __KERNEL__
++#endif
++#if !defined(__OPTIMIZE__)
++#warning You must compile this file with the correct options!
++#warning See the last lines of the source file.
++#error You must compile this driver with "-O".
++#endif
++
++/* Include files, designed to support most kernel versions 2.0.0 and later. */
++#include <linux/config.h>
++#if defined(CONFIG_SMP) && ! defined(__SMP__)
++#define __SMP__
++#endif
++#if defined(MODULE) && defined(CONFIG_MODVERSIONS) && ! defined(MODVERSIONS)
++#define MODVERSIONS
++#endif
++
++#include <linux/version.h>
++#if defined(MODVERSIONS)
++#include <linux/modversions.h>
++#endif
++#include <linux/module.h>
++
++#include <linux/kernel.h>
++#include <linux/string.h>
++#include <linux/timer.h>
++#include <linux/errno.h>
++#include <linux/ioport.h>
++#if LINUX_VERSION_CODE >= 0x20400
++#include <linux/slab.h>
++#else
++#include <linux/malloc.h>
++#endif
++#include <linux/interrupt.h>
++#include <linux/pci.h>
++#include <linux/netdevice.h>
++#include <linux/etherdevice.h>
++#include <linux/skbuff.h>
++#include <asm/processor.h> /* Processor type for cache alignment. */
++#include <asm/bitops.h>
++#include <asm/io.h>
++
++#ifdef INLINE_PCISCAN
++#include "k_compat.h"
++#else
++#include "pci-scan.h"
++#include "kern_compat.h"
++#endif
++
++/* Configure the PCI bus bursts and FIFO thresholds.
++ 486: Set 8 longword cache alignment, 8 longword burst.
++ 586: Set 16 longword cache alignment, no burst limit.
++ Cache alignment bits 15:14 Burst length 13:8
++ 0000 <not allowed> 0000 align to cache 0800 8 longwords
++ 4000 8 longwords 0100 1 longword 1000 16 longwords
++ 8000 16 longwords 0200 2 longwords 2000 32 longwords
++ C000 32 longwords 0400 4 longwords
++ Wait the specified 50 PCI cycles after a reset by initializing
++ Tx and Rx queues and the address filter list. */
++#define TX_DESC_SIZE 16
++#if defined(__powerpc__) || defined(__sparc__) /* Big endian */
++static int csr0 = 0x00100000 | 0xE000 | TX_DESC_SIZE;
++#elif defined(__alpha__) || defined(__x86_64) || defined(__ia64)
++static int csr0 = 0xE000 | TX_DESC_SIZE;
++#elif defined(__i386__)
++static int csr0 = 0xE000 | TX_DESC_SIZE;
++#else
++static int csr0 = 0xE000 | TX_DESC_SIZE;
++#warning Processor architecture unknown!
++#endif
++
++
++
++#if (LINUX_VERSION_CODE >= 0x20100) && defined(MODULE)
++char kernel_version[] = UTS_RELEASE;
++#endif
++
++MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
++MODULE_DESCRIPTION("Winbond W89c840 Ethernet driver");
++MODULE_LICENSE("GPL");
++MODULE_PARM(max_interrupt_work, "i");
++MODULE_PARM(debug, "i");
++MODULE_PARM(rx_copybreak, "i");
++MODULE_PARM(options, "1-" __MODULE_STRING(MAX_UNITS) "i");
++MODULE_PARM(full_duplex, "1-" __MODULE_STRING(MAX_UNITS) "i");
++MODULE_PARM(multicast_filter_limit, "i");
++MODULE_PARM_DESC(debug, "Driver message level (0-31)");
++MODULE_PARM_DESC(options, "Force transceiver type or fixed speed+duplex");
++MODULE_PARM_DESC(max_interrupt_work,
++ "Driver maximum events handled per interrupt");
++MODULE_PARM_DESC(full_duplex, "Non-zero to set forced full duplex.");
++MODULE_PARM_DESC(rx_copybreak,
++ "Breakpoint in bytes for copy-only-tiny-frames");
++MODULE_PARM_DESC(multicast_filter_limit,
++ "Multicast addresses before switching to Rx-all-multicast");
++
++/*
++ Theory of Operation
++
++I. Board Compatibility
++
++This driver is for the Winbond w89c840 chip.
++
++II. Board-specific settings
++
++None.
++
++III. Driver operation
++
++This chip is very similar to the Digital 21*4* "Tulip" family. The first
++twelve registers and the descriptor format are nearly identical. Read a
++Tulip manual for operational details.
++
++A significant difference is that the multicast filter and station address are
++stored in registers rather than loaded through a pseudo-transmit packet.
++
++Unlike the Tulip, transmit buffers are limited to 1KB. To transmit a
++full-sized packet we must use both data buffers in a descriptor. Thus the
++driver uses ring mode where descriptors are implicitly sequential in memory,
++rather than using the second descriptor address as a chain pointer to
++subsequent descriptors.
++
++IV. Notes
++
++If you are going to almost clone a Tulip, why not go all the way and avoid
++the need for a new driver?
++
++IVb. References
++
++http://www.scyld.com/expert/100mbps.html
++http://www.scyld.com/expert/NWay.html
++http://www.winbond.com.tw/
++
++IVc. Errata
++
++A horrible bug exists in the transmit FIFO. Apparently the chip doesn't
++correctly detect a full FIFO, and queuing more than 2048 bytes may result in
++silent data corruption.
++
++*/
++
++
++
++/*
++ PCI probe table.
++*/
++static void *w840_probe1(struct pci_dev *pdev, void *init_dev,
++ long ioaddr, int irq, int chip_idx, int find_cnt);
++static int winbond_pwr_event(void *dev_instance, int event);
++enum chip_capability_flags {
++ CanHaveMII=1, HasBrokenTx=2, AlwaysFDX=4, FDXOnNoMII=8,};
++#ifdef USE_IO_OPS
++#define W840_FLAGS (PCI_USES_IO | PCI_ADDR0 | PCI_USES_MASTER)
++#else
++#define W840_FLAGS (PCI_USES_MEM | PCI_ADDR1 | PCI_USES_MASTER)
++#endif
++
++static struct pci_id_info pci_id_tbl[] = {
++ {"Winbond W89c840", /* Sometime a Level-One switch card. */
++ { 0x08401050, 0xffffffff, 0x81530000, 0xffff0000 },
++ W840_FLAGS, 128, CanHaveMII | HasBrokenTx | FDXOnNoMII},
++ {"Winbond W89c840", { 0x08401050, 0xffffffff, },
++ W840_FLAGS, 128, CanHaveMII | HasBrokenTx},
++ {"Compex RL100-ATX", { 0x201111F6, 0xffffffff,},
++ W840_FLAGS, 128, CanHaveMII | HasBrokenTx},
++ {0,}, /* 0 terminated list. */
++};
++
++struct drv_id_info winbond840_drv_id = {
++ "winbond-840", PCI_HOTSWAP, PCI_CLASS_NETWORK_ETHERNET<<8, pci_id_tbl,
++ w840_probe1, winbond_pwr_event };
++
++/* This driver was written to use PCI memory space, however some x86 systems
++ work only with I/O space accesses. Pass -DUSE_IO_OPS to use PCI I/O space
++ accesses instead of memory space. */
++
++#ifdef USE_IO_OPS
++#undef readb
++#undef readw
++#undef readl
++#undef writeb
++#undef writew
++#undef writel
++#define readb inb
++#define readw inw
++#define readl inl
++#define writeb outb
++#define writew outw
++#define writel outl
++#endif
++
++/* Offsets to the Command and Status Registers, "CSRs".
++ While similar to the Tulip, these registers are longword aligned.
++ Note: It's not useful to define symbolic names for every register bit in
++ the device. The name can only partially document the semantics and make
++ the driver longer and more difficult to read.
++*/
++enum w840_offsets {
++ PCIBusCfg=0x00, TxStartDemand=0x04, RxStartDemand=0x08,
++ RxRingPtr=0x0C, TxRingPtr=0x10,
++ IntrStatus=0x14, NetworkConfig=0x18, IntrEnable=0x1C,
++ RxMissed=0x20, EECtrl=0x24, MIICtrl=0x24, BootRom=0x28, GPTimer=0x2C,
++ CurRxDescAddr=0x30, CurRxBufAddr=0x34, /* Debug use */
++ MulticastFilter0=0x38, MulticastFilter1=0x3C, StationAddr=0x40,
++ CurTxDescAddr=0x4C, CurTxBufAddr=0x50,
++};
++
++/* Bits in the interrupt status/enable registers. */
++/* The bits in the Intr Status/Enable registers, mostly interrupt sources. */
++enum intr_status_bits {
++ NormalIntr=0x10000, AbnormalIntr=0x8000,
++ IntrPCIErr=0x2000, TimerInt=0x800,
++ IntrRxDied=0x100, RxNoBuf=0x80, IntrRxDone=0x40,
++ TxFIFOUnderflow=0x20, RxErrIntr=0x10,
++ TxIdle=0x04, IntrTxStopped=0x02, IntrTxDone=0x01,
++};
++
++/* Bits in the NetworkConfig register. */
++enum rx_mode_bits {
++ TxOn=0x2000, RxOn=0x0002, FullDuplex=0x0200,
++ AcceptErr=0x80, AcceptRunt=0x40, /* Not used */
++ AcceptBroadcast=0x20, AcceptMulticast=0x10, AcceptAllPhys=0x08,
++};
++
++enum mii_reg_bits {
++ MDIO_ShiftClk=0x10000, MDIO_DataIn=0x80000, MDIO_DataOut=0x20000,
++ MDIO_EnbOutput=0x40000, MDIO_EnbIn = 0x00000,
++};
++
++/* The Tulip-like Rx and Tx buffer descriptors. */
++struct w840_rx_desc {
++ s32 status;
++ s32 length;
++ u32 buffer1;
++ u32 next_desc;
++};
++
++struct w840_tx_desc {
++ s32 status;
++ s32 length;
++ u32 buffer1, buffer2; /* We use only buffer 1. */
++ char pad[TX_DESC_SIZE - 16];
++};
++
++/* Bits in network_desc.status */
++enum desc_status_bits {
++ DescOwn=0x80000000, DescEndRing=0x02000000, DescUseLink=0x01000000,
++ DescWholePkt=0x60000000, DescStartPkt=0x20000000, DescEndPkt=0x40000000,
++ DescIntr=0x80000000,
++};
++
++#define PRIV_ALIGN 15 /* Required alignment mask */
++struct netdev_private {
++ /* Descriptor rings first for alignment. */
++ struct w840_rx_desc rx_ring[RX_RING_SIZE];
++ struct w840_tx_desc tx_ring[TX_RING_SIZE];
++ struct net_device *next_module; /* Link for devices of this type. */
++ void *priv_addr; /* Unaligned address for kfree */
++ const char *product_name;
++ /* The addresses of receive-in-place skbuffs. */
++ struct sk_buff* rx_skbuff[RX_RING_SIZE];
++ /* The saved address of a sent-in-place packet/buffer, for later free(). */
++ struct sk_buff* tx_skbuff[TX_RING_SIZE];
++ struct net_device_stats stats;
++ struct timer_list timer; /* Media monitoring timer. */
++ /* Frequently used values: keep some adjacent for cache effect. */
++ int msg_level;
++ int chip_id, drv_flags;
++ struct pci_dev *pci_dev;
++ int csr0, csr6;
++ unsigned int polling; /* Switched to polling mode. */
++ int max_interrupt_work;
++
++ struct w840_rx_desc *rx_head_desc;
++ unsigned int rx_ring_size;
++ unsigned int cur_rx, dirty_rx; /* Producer/consumer ring indices */
++ unsigned int rx_buf_sz; /* Based on MTU+slack. */
++ int rx_copybreak;
++
++ unsigned int tx_ring_size;
++ unsigned int cur_tx, dirty_tx;
++ unsigned int tx_q_bytes, tx_unq_bytes;
++ unsigned int tx_full:1; /* The Tx queue is full. */
++
++ /* These values track of the transceiver/media in use. */
++ unsigned int full_duplex:1; /* Full-duplex operation requested. */
++ unsigned int duplex_lock:1;
++ unsigned int medialock:1; /* Do not sense media. */
++ unsigned int default_port; /* Last dev->if_port value. */
++ /* Rx filter. */
++ u32 cur_rx_mode;
++ u32 rx_filter[2];
++ int multicast_filter_limit;
++
++ /* MII transceiver section. */
++ int mii_cnt; /* MII device addresses. */
++ u16 advertising; /* NWay media advertisement */
++ unsigned char phys[2]; /* MII device addresses. */
++};
++
++static int eeprom_read(long ioaddr, int location);
++static int mdio_read(struct net_device *dev, int phy_id, int location);
++static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
++static int netdev_open(struct net_device *dev);
++static void check_duplex(struct net_device *dev);
++static void netdev_timer(unsigned long data);
++static void tx_timeout(struct net_device *dev);
++static void init_ring(struct net_device *dev);
++static int start_tx(struct sk_buff *skb, struct net_device *dev);
++static void intr_handler(int irq, void *dev_instance, struct pt_regs *regs);
++static void netdev_error(struct net_device *dev, int intr_status);
++static int netdev_rx(struct net_device *dev);
++static void netdev_error(struct net_device *dev, int intr_status);
++static inline unsigned ether_crc(int length, unsigned char *data);
++static void set_rx_mode(struct net_device *dev);
++static struct net_device_stats *get_stats(struct net_device *dev);
++static int mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
++static int netdev_close(struct net_device *dev);
++
++
++
++/* A list of our installed devices, for removing the driver module. */
++static struct net_device *root_net_dev = NULL;
++
++static void *w840_probe1(struct pci_dev *pdev, void *init_dev,
++ long ioaddr, int irq, int chip_idx, int card_idx)
++{
++ struct net_device *dev;
++ struct netdev_private *np;
++ void *priv_mem;
++ int i, option = card_idx < MAX_UNITS ? options[card_idx] : 0;
++
++ dev = init_etherdev(init_dev, 0);
++ if (!dev)
++ return NULL;
++
++#if LINUX_VERSION_CODE < 0x20155
++ printk(KERN_INFO "%s: %s at 0x%lx, %2.2x:%2.2x",
++ dev->name, pci_id_tbl[chip_idx].name, ioaddr,
++ pci_bus_number(pdev), pci_devfn(pdev)>>3);
++#else
++ printk(KERN_INFO "%s: %s at 0x%lx, %2.2x:%2.2x",
++ dev->name, pci_id_tbl[chip_idx].name, ioaddr,
++ pdev->bus->number, pdev->devfn>>3);
++#endif
++
++ /* Warning: validate for big-endian machines. */
++ for (i = 0; i < 3; i++)
++ ((u16 *)dev->dev_addr)[i] = le16_to_cpu(eeprom_read(ioaddr, i));
++
++ for (i = 0; i < 5; i++)
++ printk("%2.2x:", dev->dev_addr[i]);
++ printk("%2.2x, IRQ %d.\n", dev->dev_addr[i], irq);
++
++ priv_mem = kmalloc(sizeof(*np) + PRIV_ALIGN, GFP_KERNEL);
++ /* Out of memory is very unlikely. */
++ if (priv_mem == NULL)
++ return NULL;
++
++#ifdef USE_IO_OPS
++ request_region(ioaddr, pci_id_tbl[chip_idx].io_size, dev->name);
++#endif
++
++ /* Reset the chip to erase previous misconfiguration.
++ No hold time required! */
++ writel(0x00000001, ioaddr + PCIBusCfg);
++
++ dev->base_addr = ioaddr;
++ dev->irq = irq;
++
++ /* The descriptor lists must be aligned. */
++ dev->priv = np = (void *)(((long)priv_mem + PRIV_ALIGN) & ~PRIV_ALIGN);
++ memset(np, 0, sizeof(*np));
++ np->priv_addr = priv_mem;
++
++ np->next_module = root_net_dev;
++ root_net_dev = dev;
++
++ np->pci_dev = pdev;
++ np->chip_id = chip_idx;
++ np->drv_flags = pci_id_tbl[chip_idx].drv_flags;
++ np->msg_level = (1 << debug) - 1;
++ np->rx_copybreak = rx_copybreak;
++ np->max_interrupt_work = max_interrupt_work;
++ np->multicast_filter_limit = multicast_filter_limit;
++ np->tx_ring_size = TX_RING_SIZE;
++ np->rx_ring_size = RX_RING_SIZE;
++
++ if (dev->mem_start)
++ option = dev->mem_start;
++
++ if ((card_idx < MAX_UNITS && full_duplex[card_idx] > 0)
++ || (np->drv_flags & AlwaysFDX))
++ np->full_duplex = 1;
++
++ /* The chip-specific entries in the device structure. */
++ dev->open = &netdev_open;
++ dev->hard_start_xmit = &start_tx;
++ dev->stop = &netdev_close;
++ dev->get_stats = &get_stats;
++ dev->set_multicast_list = &set_rx_mode;
++ dev->do_ioctl = &mii_ioctl;
++
++ if (np->drv_flags & CanHaveMII) {
++ int phy, phy_idx = 0;
++ for (phy = 1; phy < 32 && phy_idx < 4; phy++) {
++ int mii_status = mdio_read(dev, phy, 1);
++ if (mii_status != 0xffff && mii_status != 0x0000) {
++ np->phys[phy_idx++] = phy;
++ np->advertising = mdio_read(dev, phy, 4);
++ printk(KERN_INFO "%s: MII PHY found at address %d, status "
++ "0x%4.4x advertising %4.4x.\n",
++ dev->name, phy, mii_status, np->advertising);
++ }
++ }
++ np->mii_cnt = phy_idx;
++ if (phy_idx == 0) {
++ printk(KERN_WARNING "%s: MII PHY not found -- this device may "
++ "not operate correctly.\n"
++ KERN_WARNING "%s: If this is a switch card, explicitly "
++ "force full duplex on this interface.\n",
++ dev->name, dev->name);
++ if (np->drv_flags & FDXOnNoMII) {
++ printk(KERN_INFO "%s: Assuming a switch card, forcing full "
++ "duplex.\n", dev->name);
++ np->full_duplex = np->duplex_lock = 1;
++ }
++ }
++ }
++ /* Allow forcing the media type. */
++ if (np->full_duplex) {
++ printk(KERN_INFO "%s: Set to forced full duplex, autonegotiation"
++ " disabled.\n", dev->name);
++ np->duplex_lock = 1;
++ }
++ if (option > 0) {
++ if (option & 0x220)
++ np->full_duplex = 1;
++ np->default_port = option & 0x3ff;
++ if (np->default_port & 0x330) {
++ np->medialock = 1;
++ printk(KERN_INFO " Forcing %dMbs %s-duplex operation.\n",
++ (option & 0x300 ? 100 : 10),
++ (np->full_duplex ? "full" : "half"));
++ if (np->mii_cnt)
++ mdio_write(dev, np->phys[0], 0,
++ ((option & 0x300) ? 0x2000 : 0) | /* 100mbps? */
++ (np->full_duplex ? 0x0100 : 0)); /* Full duplex? */
++ }
++ }
++
++ return dev;
++}
++
++
++/* Read the EEPROM and MII Management Data I/O (MDIO) interfaces.
++ The Winbond NIC uses serial bit streams generated by the host processor. */
++
++/* Delay between EEPROM clock transitions.
++ This "delay" is to force out buffered PCI writes. */
++#define eeprom_delay(ee_addr) readl(ee_addr)
++
++enum EEPROM_Ctrl_Bits {
++ EE_ShiftClk=0x02, EE_Write0=0x801, EE_Write1=0x805,
++ EE_ChipSelect=0x801, EE_DataIn=0x08,
++};
++
++/* The EEPROM commands always start with 01.. preamble bits.
++ Commands are prepended to the variable-length address. */
++enum EEPROM_Cmds {
++ EE_WriteCmd=(5 << 6), EE_ReadCmd=(6 << 6), EE_EraseCmd=(7 << 6),
++};
++
++static int eeprom_read(long addr, int location)
++{
++ int i;
++ int retval = 0;
++ long ee_addr = addr + EECtrl;
++ int read_cmd = location | EE_ReadCmd;
++
++ writel(EE_ChipSelect, ee_addr);
++ /* Shift the read command bits out. */
++ for (i = 10; i >= 0; i--) {
++ short dataval = (read_cmd & (1 << i)) ? EE_Write1 : EE_Write0;
++ writel(dataval, ee_addr);
++ eeprom_delay(ee_addr);
++ writel(dataval | EE_ShiftClk, ee_addr);
++ eeprom_delay(ee_addr);
++ }
++ writel(EE_ChipSelect, ee_addr);
++ eeprom_delay(ee_addr);
++
++ for (i = 16; i > 0; i--) {
++ writel(EE_ChipSelect | EE_ShiftClk, ee_addr);
++ eeprom_delay(ee_addr);
++ retval = (retval << 1) | ((readl(ee_addr) & EE_DataIn) ? 1 : 0);
++ writel(EE_ChipSelect, ee_addr);
++ eeprom_delay(ee_addr);
++ }
++
++ /* Terminate the EEPROM access. */
++ writel(0, ee_addr);
++ return retval;
++}
++
++/* MII transceiver control section.
++ Read and write the MII registers using software-generated serial
++ MDIO protocol. See the MII specifications or DP83840A data sheet
++ for details.
++
++ The maximum data clock rate is 2.5 Mhz.
++ The timing is decoupled from the processor clock by flushing the write
++ from the CPU write buffer with a following read, and using PCI
++ transaction time. */
++#define mdio_in(mdio_addr) readl(mdio_addr)
++#define mdio_out(value, mdio_addr) writel(value, mdio_addr)
++#define mdio_delay(mdio_addr) readl(mdio_addr)
++
++/* Set iff a MII transceiver on any interface requires mdio preamble.
++ This only set with older tranceivers, so the extra
++ code size of a per-interface flag is not worthwhile. */
++static char mii_preamble_required = 1;
++
++#define MDIO_WRITE0 (MDIO_EnbOutput)
++#define MDIO_WRITE1 (MDIO_DataOut | MDIO_EnbOutput)
++
++/* Generate the preamble required for initial synchronization and
++ a few older transceivers. */
++static void mdio_sync(long mdio_addr)
++{
++ int bits = 32;
++
++ /* Establish sync by sending at least 32 logic ones. */
++ while (--bits >= 0) {
++ mdio_out(MDIO_WRITE1, mdio_addr);
++ mdio_delay(mdio_addr);
++ mdio_out(MDIO_WRITE1 | MDIO_ShiftClk, mdio_addr);
++ mdio_delay(mdio_addr);
++ }
++}
++
++static int mdio_read(struct net_device *dev, int phy_id, int location)
++{
++ long mdio_addr = dev->base_addr + MIICtrl;
++ int mii_cmd = (0xf6 << 10) | (phy_id << 5) | location;
++ int i, retval = 0;
++
++ if (mii_preamble_required)
++ mdio_sync(mdio_addr);
++
++ /* Shift the read command bits out. */
++ for (i = 15; i >= 0; i--) {
++ int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;
++
++ mdio_out(dataval, mdio_addr);
++ mdio_delay(mdio_addr);
++ mdio_out(dataval | MDIO_ShiftClk, mdio_addr);
++ mdio_delay(mdio_addr);
++ }
++ /* Read the two transition, 16 data, and wire-idle bits. */
++ for (i = 20; i > 0; i--) {
++ mdio_out(MDIO_EnbIn, mdio_addr);
++ mdio_delay(mdio_addr);
++ retval = (retval << 1) | ((mdio_in(mdio_addr) & MDIO_DataIn) ? 1 : 0);
++ mdio_out(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
++ mdio_delay(mdio_addr);
++ }
++ return (retval>>1) & 0xffff;
++}
++
++static void mdio_write(struct net_device *dev, int phy_id, int reg, int value)
++{
++ long mdio_addr = dev->base_addr + MIICtrl;
++ int mii_cmd = (0x5002 << 16) | (phy_id << 23) | (reg<<18) | value;
++ int i;
++
++ if (mii_preamble_required)
++ mdio_sync(mdio_addr);
++
++ /* Shift the command bits out. */
++ for (i = 31; i >= 0; i--) {
++ int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;
++
++ mdio_out(dataval, mdio_addr);
++ mdio_delay(mdio_addr);
++ mdio_out(dataval | MDIO_ShiftClk, mdio_addr);
++ mdio_delay(mdio_addr);
++ }
++ /* Clear out extra bits. */
++ for (i = 2; i > 0; i--) {
++ mdio_out(MDIO_EnbIn, mdio_addr);
++ mdio_delay(mdio_addr);
++ mdio_out(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
++ mdio_delay(mdio_addr);
++ }
++ return;
++}
++
++
++static int netdev_open(struct net_device *dev)
++{
++ struct netdev_private *np = (struct netdev_private *)dev->priv;
++ long ioaddr = dev->base_addr;
++ int i;
++
++ writel(0x00000001, ioaddr + PCIBusCfg); /* Reset */
++
++ MOD_INC_USE_COUNT;
++
++ if (request_irq(dev->irq, &intr_handler, SA_SHIRQ, dev->name, dev)) {
++ MOD_DEC_USE_COUNT;
++ return -EAGAIN;
++ }
++
++ if (np->msg_level & NETIF_MSG_IFUP)
++ printk(KERN_DEBUG "%s: w89c840_open() irq %d.\n",
++ dev->name, dev->irq);
++
++ init_ring(dev);
++
++ writel(virt_to_bus(np->rx_ring), ioaddr + RxRingPtr);
++ writel(virt_to_bus(np->tx_ring), ioaddr + TxRingPtr);
++
++ for (i = 0; i < 6; i++)
++ writeb(dev->dev_addr[i], ioaddr + StationAddr + i);
++
++ /* Initialize other registers. */
++ np->csr0 = csr0;
++ writel(np->csr0, ioaddr + PCIBusCfg);
++
++ if (dev->if_port == 0)
++ dev->if_port = np->default_port;
++
++ writel(0, ioaddr + RxStartDemand);
++ np->csr6 = np->full_duplex ? 0x20022202 : 0x20022002;
++ check_duplex(dev);
++ set_rx_mode(dev);
++
++ netif_start_tx_queue(dev);
++
++ /* Clear and Enable interrupts by setting the interrupt mask.
++ See enum intr_status_bits above for bit guide.
++ We omit: TimerInt, IntrRxDied, IntrTxStopped
++ */
++ writel(0x1A0F5, ioaddr + IntrStatus);
++ writel(0x1A0F5, ioaddr + IntrEnable);
++
++ if (np->msg_level & NETIF_MSG_IFUP)
++ printk(KERN_DEBUG "%s: Done netdev_open().\n", dev->name);
++
++ /* Set the timer to check for link beat. */
++ init_timer(&np->timer);
++ np->timer.expires = jiffies + 3*HZ;
++ np->timer.data = (unsigned long)dev;
++ np->timer.function = &netdev_timer; /* timer handler */
++ add_timer(&np->timer);
++
++ return 0;
++}
++
++static void check_duplex(struct net_device *dev)
++{
++ struct netdev_private *np = (struct netdev_private *)dev->priv;
++ int mii_reg5 = mdio_read(dev, np->phys[0], 5);
++ int negotiated = mii_reg5 & np->advertising;
++ int duplex;
++
++ if (np->duplex_lock || mii_reg5 == 0xffff)
++ return;
++ duplex = (negotiated & 0x0100) || (negotiated & 0x01C0) == 0x0040;
++ if (np->full_duplex != duplex) {
++ np->full_duplex = duplex;
++ if (np->msg_level & NETIF_MSG_LINK)
++ printk(KERN_INFO "%s: Setting %s-duplex based on MII #%d "
++ "negotiated capability %4.4x.\n", dev->name,
++ duplex ? "full" : "half", np->phys[0], negotiated);
++ np->csr6 &= ~0x200;
++ np->csr6 |= duplex ? 0x200 : 0;
++ }
++}
++
++static void netdev_timer(unsigned long data)
++{
++ struct net_device *dev = (struct net_device *)data;
++ struct netdev_private *np = (struct netdev_private *)dev->priv;
++ long ioaddr = dev->base_addr;
++ int next_tick = 10*HZ;
++ int old_csr6 = np->csr6;
++ u32 intr_status = readl(ioaddr + IntrStatus);
++
++ if (np->msg_level & NETIF_MSG_TIMER)
++ printk(KERN_DEBUG "%s: Media selection timer tick, status %8.8x "
++ "config %8.8x.\n",
++ dev->name, intr_status, (int)readl(ioaddr + NetworkConfig));
++ /* Check for blocked interrupts. */
++ if (np->polling) {
++ if (intr_status & 0x1ffff) {
++ intr_handler(dev->irq, dev, 0);
++ next_tick = 1;
++ np->polling = 1;
++ } else if (++np->polling > 10*HZ)
++ np->polling = 0;
++ else
++ next_tick = 2;
++ } else if ((intr_status & 0x1ffff)) {
++ np->polling = 1;
++ }
++
++ if (netif_queue_paused(dev) &&
++ np->cur_tx - np->dirty_tx > 1 &&
++ (jiffies - dev->trans_start) > TX_TIMEOUT) {
++ tx_timeout(dev);
++ }
++ check_duplex(dev);
++ if (np->csr6 != old_csr6) {
++ writel(np->csr6 & ~0x0002, ioaddr + NetworkConfig);
++ writel(np->csr6 | 0x2002, ioaddr + NetworkConfig);
++ }
++ np->timer.expires = jiffies + next_tick;
++ add_timer(&np->timer);
++}
++
++static void tx_timeout(struct net_device *dev)
++{
++ struct netdev_private *np = (struct netdev_private *)dev->priv;
++ long ioaddr = dev->base_addr;
++
++ printk(KERN_WARNING "%s: Transmit timed out, status %8.8x,"
++ " resetting...\n", dev->name, (int)readl(ioaddr + IntrStatus));
++
++#ifndef __alpha__
++ if (np->msg_level & NETIF_MSG_TX_ERR) {
++ int i;
++ printk(KERN_DEBUG " Rx ring %p: ", np->rx_ring);
++ for (i = 0; i < np->rx_ring_size; i++)
++ printk(" %8.8x", (unsigned int)np->rx_ring[i].status);
++ printk("\n"KERN_DEBUG" Tx ring %p: ", np->tx_ring);
++ for (i = 0; i < np->tx_ring_size; i++)
++ printk(" %8.8x", np->tx_ring[i].status);
++ printk("\n");
++ }
++#endif
++
++ /* Perhaps we should reinitialize the hardware here. Just trigger a
++ Tx demand for now. */
++ writel(0, ioaddr + TxStartDemand);
++ dev->if_port = 0;
++ /* Stop and restart the chip's Tx processes . */
++
++ dev->trans_start = jiffies;
++ np->stats.tx_errors++;
++ return;
++}
++
++
++/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
++static void init_ring(struct net_device *dev)
++{
++ struct netdev_private *np = (struct netdev_private *)dev->priv;
++ int i;
++
++ np->tx_full = 0;
++ np->cur_tx = np->dirty_tx = 0;
++ np->tx_q_bytes = np->tx_unq_bytes = 0;
++
++ np->cur_rx = np->dirty_rx = 0;
++ np->rx_buf_sz = (dev->mtu <= 1522 ? PKT_BUF_SZ : dev->mtu + 14);
++ np->rx_head_desc = &np->rx_ring[0];
++
++ /* Initialize all Rx descriptors. */
++ for (i = 0; i < np->rx_ring_size; i++) {
++ np->rx_ring[i].length = np->rx_buf_sz;
++ np->rx_ring[i].status = 0;
++ np->rx_ring[i].next_desc = virt_to_bus(&np->rx_ring[i+1]);
++ np->rx_skbuff[i] = 0;
++ }
++ /* Mark the last entry as wrapping the ring. */
++ np->rx_ring[i-1].length |= DescEndRing;
++ np->rx_ring[i-1].next_desc = virt_to_bus(&np->rx_ring[0]);
++
++ /* Fill in the Rx buffers. Handle allocation failure gracefully. */
++ for (i = 0; i < np->rx_ring_size; i++) {
++ struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz);
++ np->rx_skbuff[i] = skb;
++ if (skb == NULL)
++ break;
++ skb->dev = dev; /* Mark as being used by this device. */
++ np->rx_ring[i].buffer1 = virt_to_bus(skb->tail);
++ np->rx_ring[i].status = DescOwn | DescIntr;
++ }
++ np->dirty_rx = (unsigned int)(i - np->rx_ring_size);
++
++ for (i = 0; i < np->tx_ring_size; i++) {
++ np->tx_skbuff[i] = 0;
++ np->tx_ring[i].status = 0;
++ }
++ return;
++}
++
++static int start_tx(struct sk_buff *skb, struct net_device *dev)
++{
++ struct netdev_private *np = (struct netdev_private *)dev->priv;
++ unsigned entry;
++
++ /* Block a timer-based transmit from overlapping. */
++ if (netif_pause_tx_queue(dev) != 0) {
++ /* This watchdog code is redundant with the media monitor timer. */
++ if (jiffies - dev->trans_start > TX_TIMEOUT)
++ tx_timeout(dev);
++ return 1;
++ }
++
++ /* Note: Ordering is important here, set the field with the
++ "ownership" bit last, and only then increment cur_tx. */
++
++ /* Calculate the next Tx descriptor entry. */
++ entry = np->cur_tx % np->tx_ring_size;
++
++ np->tx_skbuff[entry] = skb;
++ np->tx_ring[entry].buffer1 = virt_to_bus(skb->data);
++
++#define one_buffer
++#define BPT 1022
++#if defined(one_buffer)
++ np->tx_ring[entry].length = DescWholePkt | skb->len;
++ if (entry >= np->tx_ring_size-1) /* Wrap ring */
++ np->tx_ring[entry].length |= DescIntr | DescEndRing;
++ np->tx_ring[entry].status = DescOwn;
++ np->cur_tx++;
++#elif defined(two_buffer)
++ if (skb->len > BPT) {
++ unsigned int entry1 = ++np->cur_tx % np->tx_ring_size;
++ np->tx_ring[entry].length = DescStartPkt | BPT;
++ np->tx_ring[entry1].length = DescEndPkt | (skb->len - BPT);
++ np->tx_ring[entry1].buffer1 = virt_to_bus((skb->data) + BPT);
++ np->tx_ring[entry1].status = DescOwn;
++ np->tx_ring[entry].status = DescOwn;
++ if (entry >= np->tx_ring_size-1)
++ np->tx_ring[entry].length |= DescIntr|DescEndRing;
++ else if (entry1 >= np->tx_ring_size-1)
++ np->tx_ring[entry1].length |= DescIntr|DescEndRing;
++ np->cur_tx++;
++ } else {
++ np->tx_ring[entry].length = DescWholePkt | skb->len;
++ if (entry >= np->tx_ring_size-1) /* Wrap ring */
++ np->tx_ring[entry].length |= DescIntr | DescEndRing;
++ np->tx_ring[entry].status = DescOwn;
++ np->cur_tx++;
++ }
++#elif defined(split_buffer)
++ {
++ /* Work around the Tx-FIFO-full bug by splitting our transmit packet
++ into two pieces, the first which may be loaded without overflowing
++ the FIFO, and the second which contains the remainder of the
++ packet. When we get a Tx-done interrupt that frees enough room
++ in the FIFO we mark the remainder of the packet as loadable.
++
++ This has the problem that the Tx descriptors are written both
++ here and in the interrupt handler.
++ */
++
++ int buf1size = TX_FIFO_SIZE - (np->tx_q_bytes - np->tx_unq_bytes);
++ int buf2size = skb->len - buf1size;
++
++ if (buf2size <= 0) { /* We fit into one descriptor. */
++ np->tx_ring[entry].length = DescWholePkt | skb->len;
++ } else { /* We must use two descriptors. */
++ unsigned int entry2;
++ np->tx_ring[entry].length = DescIntr | DescStartPkt | buf1size;
++ if (entry >= np->tx_ring_size-1) { /* Wrap ring */
++ np->tx_ring[entry].length |= DescEndRing;
++ entry2 = 0;
++ } else
++ entry2 = entry + 1;
++ np->cur_tx++;
++ np->tx_ring[entry2].buffer1 =
++ virt_to_bus(skb->data + buf1size);
++ np->tx_ring[entry2].length = DescEndPkt | buf2size;
++ if (entry2 >= np->tx_ring_size-1) /* Wrap ring */
++ np->tx_ring[entry2].length |= DescEndRing;
++ }
++ np->tx_ring[entry].status = DescOwn;
++ np->cur_tx++;
++ }
++#endif
++ np->tx_q_bytes += skb->len;
++ writel(0, dev->base_addr + TxStartDemand);
++
++ /* Work around horrible bug in the chip by marking the queue as full
++ when we do not have FIFO room for a maximum sized packet. */
++ if (np->cur_tx - np->dirty_tx > TX_QUEUE_LEN) {
++ np->tx_full = 1;
++ netif_stop_tx_queue(dev);
++ } else if ((np->drv_flags & HasBrokenTx)
++ && np->tx_q_bytes - np->tx_unq_bytes > TX_BUG_FIFO_LIMIT) {
++ np->tx_full = 1;
++ netif_stop_tx_queue(dev);
++ } else
++ netif_unpause_tx_queue(dev); /* Typical path */
++
++ dev->trans_start = jiffies;
++
++ if (np->msg_level & NETIF_MSG_TX_QUEUED) {
++ printk(KERN_DEBUG "%s: Transmit frame #%d queued in slot %d.\n",
++ dev->name, np->cur_tx, entry);
++ }
++ return 0;
++}
++
++/* The interrupt handler does all of the Rx thread work and cleans up
++ after the Tx thread. */
++static void intr_handler(int irq, void *dev_instance, struct pt_regs *rgs)
++{
++ struct net_device *dev = (struct net_device *)dev_instance;
++ struct netdev_private *np = (struct netdev_private *)dev->priv;
++ long ioaddr = dev->base_addr;
++ int work_limit = np->max_interrupt_work;
++
++ do {
++ u32 intr_status = readl(ioaddr + IntrStatus);
++
++ /* Acknowledge all of the current interrupt sources ASAP. */
++ writel(intr_status & 0x0001ffff, ioaddr + IntrStatus);
++
++ if (np->msg_level & NETIF_MSG_INTR)
++ printk(KERN_DEBUG "%s: Interrupt, status %4.4x.\n",
++ dev->name, intr_status);
++
++ if ((intr_status & (NormalIntr|AbnormalIntr)) == 0
++ || intr_status == 0xffffffff)
++ break;
++
++ if (intr_status & (IntrRxDone | RxNoBuf))
++ netdev_rx(dev);
++
++ for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
++ int entry = np->dirty_tx % np->tx_ring_size;
++ int tx_status = np->tx_ring[entry].status;
++
++ if (tx_status < 0)
++ break;
++ if (np->msg_level & NETIF_MSG_TX_DONE)
++ printk(KERN_DEBUG "%s: Transmit done, Tx status %8.8x.\n",
++ dev->name, tx_status);
++ if (tx_status & 0x8000) { /* There was an error, log it. */
++ if (np->msg_level & NETIF_MSG_TX_ERR)
++ printk(KERN_DEBUG "%s: Transmit error, Tx status %8.8x.\n",
++ dev->name, tx_status);
++ np->stats.tx_errors++;
++ if (tx_status & 0x0104) np->stats.tx_aborted_errors++;
++ if (tx_status & 0x0C80) np->stats.tx_carrier_errors++;
++ if (tx_status & 0x0200) np->stats.tx_window_errors++;
++ if (tx_status & 0x0002) np->stats.tx_fifo_errors++;
++ if ((tx_status & 0x0080) && np->full_duplex == 0)
++ np->stats.tx_heartbeat_errors++;
++#ifdef ETHER_STATS
++ if (tx_status & 0x0100) np->stats.collisions16++;
++#endif
++ } else {
++#ifdef ETHER_STATS
++ if (tx_status & 0x0001) np->stats.tx_deferred++;
++#endif
++#if LINUX_VERSION_CODE > 0x20127
++ np->stats.tx_bytes += np->tx_skbuff[entry]->len;
++#endif
++ np->stats.collisions += (tx_status >> 3) & 15;
++ np->stats.tx_packets++;
++ }
++ /* Free the original skb. */
++ np->tx_unq_bytes += np->tx_skbuff[entry]->len;
++ dev_free_skb_irq(np->tx_skbuff[entry]);
++ np->tx_skbuff[entry] = 0;
++ }
++ if (np->tx_full &&
++ np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4
++ && np->tx_q_bytes - np->tx_unq_bytes < TX_BUG_FIFO_LIMIT) {
++ /* The ring is no longer full, allow new TX entries. */
++ np->tx_full = 0;
++ netif_resume_tx_queue(dev);
++ }
++
++ /* Abnormal error summary/uncommon events handlers. */
++ if (intr_status & (AbnormalIntr | TxFIFOUnderflow | IntrPCIErr |
++ TimerInt | IntrTxStopped))
++ netdev_error(dev, intr_status);
++
++ if (--work_limit < 0) {
++ printk(KERN_WARNING "%s: Too much work at interrupt, "
++ "status=0x%4.4x.\n", dev->name, intr_status);
++ /* Set the timer to re-enable the other interrupts after
++ 10*82usec ticks. */
++ writel(AbnormalIntr | TimerInt, ioaddr + IntrEnable);
++ writel(10, ioaddr + GPTimer);
++ break;
++ }
++ } while (1);
++
++ if (np->msg_level & NETIF_MSG_INTR)
++ printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
++ dev->name, (int)readl(ioaddr + IntrStatus));
++
++ return;
++}
++
++/* This routine is logically part of the interrupt handler, but separated
++ for clarity and better register allocation. */
++static int netdev_rx(struct net_device *dev)
++{
++ struct netdev_private *np = (struct netdev_private *)dev->priv;
++ int entry = np->cur_rx % np->rx_ring_size;
++ int work_limit = np->dirty_rx + np->rx_ring_size - np->cur_rx;
++
++ if (np->msg_level & NETIF_MSG_RX_STATUS) {
++ printk(KERN_DEBUG " In netdev_rx(), entry %d status %4.4x.\n",
++ entry, np->rx_ring[entry].status);
++ }
++
++ /* If EOP is set on the next entry, it's a new packet. Send it up. */
++ while (--work_limit >= 0) {
++ struct w840_rx_desc *desc = np->rx_head_desc;
++ s32 status = desc->status;
++
++ if (np->msg_level & NETIF_MSG_RX_STATUS)
++ printk(KERN_DEBUG " netdev_rx() status was %8.8x.\n",
++ status);
++ if (status < 0)
++ break;
++ if ((status & 0x38008300) != 0x0300) {
++ if ((status & 0x38000300) != 0x0300) {
++ /* Ingore earlier buffers. */
++ if ((status & 0xffff) != 0x7fff) {
++ printk(KERN_WARNING "%s: Oversized Ethernet frame spanned "
++ "multiple buffers, entry %#x status %4.4x!\n",
++ dev->name, np->cur_rx, status);
++ np->stats.rx_length_errors++;
++ }
++ } else if (status & 0x8000) {
++ /* There was a fatal error. */
++ if (np->msg_level & NETIF_MSG_RX_ERR)
++ printk(KERN_DEBUG "%s: Receive error, Rx status %8.8x.\n",
++ dev->name, status);
++ np->stats.rx_errors++; /* end of a packet.*/
++ if (status & 0x0890) np->stats.rx_length_errors++;
++ if (status & 0x004C) np->stats.rx_frame_errors++;
++ if (status & 0x0002) np->stats.rx_crc_errors++;
++ }
++ } else {
++ struct sk_buff *skb;
++ /* Omit the four octet CRC from the length. */
++ int pkt_len = ((status >> 16) & 0x7ff) - 4;
++
++ if (np->msg_level & NETIF_MSG_RX_STATUS)
++ printk(KERN_DEBUG " netdev_rx() normal Rx pkt length %d"
++ " status %x.\n", pkt_len, status);
++ /* Check if the packet is long enough to accept without copying
++ to a minimally-sized skbuff. */
++ if (pkt_len < np->rx_copybreak
++ && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
++ skb->dev = dev;
++ skb_reserve(skb, 2); /* 16 byte align the IP header */
++ /* Call copy + cksum if available. */
++#if HAS_IP_COPYSUM
++ eth_copy_and_sum(skb, np->rx_skbuff[entry]->tail, pkt_len, 0);
++ skb_put(skb, pkt_len);
++#else
++ memcpy(skb_put(skb, pkt_len), np->rx_skbuff[entry]->tail,
++ pkt_len);
++#endif
++ } else {
++ char *temp = skb_put(skb = np->rx_skbuff[entry], pkt_len);
++ np->rx_skbuff[entry] = NULL;
++#ifndef final_version /* Remove after testing. */
++ if (bus_to_virt(desc->buffer1) != temp)
++ printk(KERN_ERR "%s: Internal fault: The skbuff addresses "
++ "do not match in netdev_rx: %p vs. %p / %p.\n",
++ dev->name, bus_to_virt(desc->buffer1),
++ skb->head, temp);
++#endif
++ }
++ skb->protocol = eth_type_trans(skb, dev);
++ netif_rx(skb);
++ dev->last_rx = jiffies;
++ np->stats.rx_packets++;
++#if LINUX_VERSION_CODE > 0x20127
++ np->stats.rx_bytes += pkt_len;
++#endif
++ }
++ entry = (++np->cur_rx) % np->rx_ring_size;
++ np->rx_head_desc = &np->rx_ring[entry];
++ }
++
++ /* Refill the Rx ring buffers. */
++ for (; np->cur_rx - np->dirty_rx > 0; np->dirty_rx++) {
++ struct sk_buff *skb;
++ entry = np->dirty_rx % np->rx_ring_size;
++ if (np->rx_skbuff[entry] == NULL) {
++ skb = dev_alloc_skb(np->rx_buf_sz);
++ np->rx_skbuff[entry] = skb;
++ if (skb == NULL)
++ break; /* Better luck next round. */
++ skb->dev = dev; /* Mark as being used by this device. */
++ np->rx_ring[entry].buffer1 = virt_to_bus(skb->tail);
++ }
++ np->rx_ring[entry].status = DescOwn;
++ }
++
++ return 0;
++}
++
++static void netdev_error(struct net_device *dev, int intr_status)
++{
++ long ioaddr = dev->base_addr;
++ struct netdev_private *np = (struct netdev_private *)dev->priv;
++
++ if (np->msg_level & NETIF_MSG_MISC)
++ printk(KERN_DEBUG "%s: Abnormal event, %8.8x.\n",
++ dev->name, intr_status);
++ if (intr_status == 0xffffffff)
++ return;
++ if (intr_status & TxFIFOUnderflow) {
++ np->csr6 += 0x4000; /* Bump up the Tx threshold */
++ if (np->msg_level & NETIF_MSG_TX_ERR)
++ printk(KERN_DEBUG "%s: Tx underflow, increasing threshold to "
++ "%8.8x.\n", dev->name, np->csr6);
++ writel(np->csr6, ioaddr + NetworkConfig);
++ }
++ if (intr_status & IntrRxDied) { /* Missed a Rx frame. */
++ np->stats.rx_errors++;
++ }
++ if (intr_status & TimerInt) {
++ /* Re-enable other interrupts. */
++ writel(0x1A0F5, ioaddr + IntrEnable);
++ }
++ np->stats.rx_missed_errors += readl(ioaddr + RxMissed) & 0xffff;
++ writel(0, ioaddr + RxStartDemand);
++}
++
++static struct net_device_stats *get_stats(struct net_device *dev)
++{
++ long ioaddr = dev->base_addr;
++ struct netdev_private *np = (struct netdev_private *)dev->priv;
++
++ /* The chip only need report frame silently dropped. */
++ if (netif_running(dev))
++ np->stats.rx_missed_errors += readl(ioaddr + RxMissed) & 0xffff;
++
++ return &np->stats;
++}
++
++static unsigned const ethernet_polynomial = 0x04c11db7U;
++static inline u32 ether_crc(int length, unsigned char *data)
++{
++ int crc = -1;
++
++ while(--length >= 0) {
++ unsigned char current_octet = *data++;
++ int bit;
++ for (bit = 0; bit < 8; bit++, current_octet >>= 1) {
++ crc = (crc << 1) ^
++ ((crc < 0) ^ (current_octet & 1) ? ethernet_polynomial : 0);
++ }
++ }
++ return crc;
++}
++
++static void set_rx_mode(struct net_device *dev)
++{
++ struct netdev_private *np = (struct netdev_private *)dev->priv;
++ long ioaddr = dev->base_addr;
++ u32 mc_filter[2]; /* Multicast hash filter */
++ u32 rx_mode;
++
++ if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
++ /* Unconditionally log net taps. */
++ printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n", dev->name);
++ memset(mc_filter, ~0, sizeof(mc_filter));
++ rx_mode = AcceptBroadcast | AcceptMulticast | AcceptAllPhys;
++ } else if ((dev->mc_count > np->multicast_filter_limit)
++ || (dev->flags & IFF_ALLMULTI)) {
++ /* Too many to match, or accept all multicasts. */
++ memset(mc_filter, 0xff, sizeof(mc_filter));
++ rx_mode = AcceptBroadcast | AcceptMulticast;
++ } else {
++ struct dev_mc_list *mclist;
++ int i;
++ memset(mc_filter, 0, sizeof(mc_filter));
++ for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
++ i++, mclist = mclist->next) {
++ set_bit((ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26) ^ 0x3F,
++ mc_filter);
++ }
++ rx_mode = AcceptBroadcast | AcceptMulticast;
++ }
++ writel(mc_filter[0], ioaddr + MulticastFilter0);
++ writel(mc_filter[1], ioaddr + MulticastFilter1);
++ np->csr6 &= ~0x00F8;
++ np->csr6 |= rx_mode;
++ writel(np->csr6, ioaddr + NetworkConfig);
++}
++
++static int mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
++{
++ struct netdev_private *np = (struct netdev_private *)dev->priv;
++ u16 *data = (u16 *)&rq->ifr_data;
++ u32 *data32 = (void *)&rq->ifr_data;
++
++ switch(cmd) {
++ case 0x8947: case 0x89F0:
++ /* SIOCGMIIPHY: Get the address of the PHY in use. */
++ data[0] = np->phys[0] & 0x1f;
++ /* Fall Through */
++ case 0x8948: case 0x89F1:
++ /* SIOCGMIIREG: Read the specified MII register. */
++ data[3] = mdio_read(dev, data[0] & 0x1f, data[1] & 0x1f);
++ return 0;
++ case 0x8949: case 0x89F2:
++ /* SIOCSMIIREG: Write the specified MII register */
++ if (!capable(CAP_NET_ADMIN))
++ return -EPERM;
++ if (data[0] == np->phys[0]) {
++ u16 value = data[2];
++ switch (data[1]) {
++ case 0:
++ /* Check for autonegotiation on or reset. */
++ np->medialock = (value & 0x9000) ? 0 : 1;
++ if (np->medialock)
++ np->full_duplex = (value & 0x0100) ? 1 : 0;
++ break;
++ case 4: np->advertising = value; break;
++ }
++ /* Perhaps check_duplex(dev), depending on chip semantics. */
++ }
++ mdio_write(dev, data[0] & 0x1f, data[1] & 0x1f, data[2]);
++ return 0;
++ case SIOCGPARAMS:
++ data32[0] = np->msg_level;
++ data32[1] = np->multicast_filter_limit;
++ data32[2] = np->max_interrupt_work;
++ data32[3] = np->rx_copybreak;
++ return 0;
++ case SIOCSPARAMS:
++ if (!capable(CAP_NET_ADMIN))
++ return -EPERM;
++ np->msg_level = data32[0];
++ np->multicast_filter_limit = data32[1];
++ np->max_interrupt_work = data32[2];
++ np->rx_copybreak = data32[3];
++ return 0;
++ default:
++ return -EOPNOTSUPP;
++ }
++}
++
++
++static void empty_rings(struct net_device *dev)
++{
++ struct netdev_private *np = (void *)dev->priv;
++ int i;
++
++ /* Free all the skbuffs in the Rx queue. */
++ for (i = 0; i < np->rx_ring_size; i++) {
++ np->rx_ring[i].status = 0;
++ if (np->rx_skbuff[i]) {
++#if LINUX_VERSION_CODE < 0x20100
++ np->rx_skbuff[i]->free = 1;
++#endif
++ dev_free_skb(np->rx_skbuff[i]);
++ }
++ np->rx_skbuff[i] = 0;
++ }
++ for (i = 0; i < np->tx_ring_size; i++) {
++ if (np->tx_skbuff[i])
++ dev_free_skb(np->tx_skbuff[i]);
++ np->tx_skbuff[i] = 0;
++ }
++}
++
++static int netdev_close(struct net_device *dev)
++{
++ long ioaddr = dev->base_addr;
++ struct netdev_private *np = (struct netdev_private *)dev->priv;
++
++ netif_stop_tx_queue(dev);
++
++ if (np->msg_level & NETIF_MSG_IFDOWN) {
++ printk(KERN_DEBUG "%s: Shutting down ethercard, status was %8.8x "
++ "Config %8.8x.\n", dev->name, (int)readl(ioaddr + IntrStatus),
++ (int)readl(ioaddr + NetworkConfig));
++ printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d, Rx %d / %d.\n",
++ dev->name, np->cur_tx, np->dirty_tx, np->cur_rx, np->dirty_rx);
++ }
++
++ /* Disable interrupts by clearing the interrupt mask. */
++ writel(0x0000, ioaddr + IntrEnable);
++
++ /* Stop the chip's Tx and Rx processes. */
++ writel(np->csr6 &= ~0x20FA, ioaddr + NetworkConfig);
++
++ del_timer(&np->timer);
++ if (readl(ioaddr + NetworkConfig) != 0xffffffff)
++ np->stats.rx_missed_errors += readl(ioaddr + RxMissed) & 0xffff;
++
++#ifdef __i386__
++ if (np->msg_level & NETIF_MSG_IFDOWN) {
++ int i;
++ printk("\n"KERN_DEBUG" Tx ring at %8.8x:\n",
++ (int)virt_to_bus(np->tx_ring));
++ for (i = 0; i < np->tx_ring_size; i++)
++ printk(KERN_DEBUG " #%d desc. %4.4x %8.8x %8.8x.\n",
++ i, np->tx_ring[i].length,
++ np->tx_ring[i].status, np->tx_ring[i].buffer1);
++ printk(KERN_DEBUG "\n" KERN_DEBUG " Rx ring %8.8x:\n",
++ (int)virt_to_bus(np->rx_ring));
++ for (i = 0; i < np->rx_ring_size; i++) {
++ printk(KERN_DEBUG " #%d desc. %4.4x %4.4x %8.8x\n",
++ i, np->rx_ring[i].length,
++ np->rx_ring[i].status, np->rx_ring[i].buffer1);
++ }
++ }
++#endif /* __i386__ debugging only */
++
++ free_irq(dev->irq, dev);
++ empty_rings(dev);
++
++ MOD_DEC_USE_COUNT;
++
++ return 0;
++}
++
++static int winbond_pwr_event(void *dev_instance, int event)
++{
++ struct net_device *dev = dev_instance;
++ struct netdev_private *np = (struct netdev_private *)dev->priv;
++ long ioaddr = dev->base_addr;
++
++ if (np->msg_level & NETIF_MSG_LINK)
++ printk(KERN_DEBUG "%s: Handling power event %d.\n", dev->name, event);
++ switch(event) {
++ case DRV_ATTACH:
++ MOD_INC_USE_COUNT;
++ break;
++ case DRV_SUSPEND: {
++ int csr6 = readl(ioaddr + NetworkConfig);
++ /* Disable interrupts, stop the chip, gather stats. */
++ if (csr6 != 0xffffffff) {
++ int csr8 = readl(ioaddr + RxMissed);
++ writel(0x00000000, ioaddr + IntrEnable);
++ writel(csr6 & ~TxOn & ~RxOn, ioaddr + NetworkConfig);
++ np->stats.rx_missed_errors += (unsigned short)csr8;
++ }
++ empty_rings(dev);
++ break;
++ }
++ case DRV_RESUME:
++ writel(np->csr0, ioaddr + PCIBusCfg);
++ init_ring(dev);
++ writel(virt_to_bus(np->rx_ring), ioaddr + RxRingPtr);
++ writel(virt_to_bus(np->tx_ring), ioaddr + TxRingPtr);
++ writel(0x1A0F5, ioaddr + IntrStatus);
++ writel(0x1A0F5, ioaddr + IntrEnable);
++ writel(np->csr6 | TxOn | RxOn, ioaddr + NetworkConfig);
++ writel(0, ioaddr + RxStartDemand); /* Rx poll demand */
++ set_rx_mode(dev);
++ break;
++ case DRV_DETACH: {
++ struct net_device **devp, **next;
++ if (dev->flags & IFF_UP) {
++ printk(KERN_ERR "%s: Winbond-840 NIC removed while still "
++ "active.\n", dev->name);
++ dev_close(dev);
++ dev->flags &= ~(IFF_UP|IFF_RUNNING);
++ }
++ unregister_netdev(dev);
++ release_region(dev->base_addr, pci_id_tbl[np->chip_id].io_size);
++#ifndef USE_IO_OPS
++ iounmap((char *)dev->base_addr);
++#endif
++ for (devp = &root_net_dev; *devp; devp = next) {
++ next = &((struct netdev_private *)(*devp)->priv)->next_module;
++ if (*devp == dev) {
++ *devp = *next;
++ break;
++ }
++ }
++ if (np->priv_addr)
++ kfree(np->priv_addr);
++ kfree(dev);
++ MOD_DEC_USE_COUNT;
++ break;
++ }
++ default:
++ break;
++ }
++
++ return 0;
++}
++
++
++#ifdef MODULE
++int init_module(void)
++{
++ if (debug >= NETIF_MSG_DRV) /* Emit version even if no cards detected. */
++ printk(KERN_INFO "%s" KERN_INFO "%s", version1, version2);
++ return pci_drv_register(&winbond840_drv_id, NULL);
++}
++
++void cleanup_module(void)
++{
++ struct net_device *next_dev;
++
++ pci_drv_unregister(&winbond840_drv_id);
++
++ /* No need to check MOD_IN_USE, as sys_delete_module() checks. */
++ while (root_net_dev) {
++ struct netdev_private *np = (void *)(root_net_dev->priv);
++ unregister_netdev(root_net_dev);
++#ifdef USE_IO_OPS
++ release_region(root_net_dev->base_addr,
++ pci_id_tbl[np->chip_id].io_size);
++#else
++ iounmap((char *)(root_net_dev->base_addr));
++#endif
++ next_dev = np->next_module;
++ if (np->priv_addr)
++ kfree(np->priv_addr);
++ kfree(root_net_dev);
++ root_net_dev = next_dev;
++ }
++}
++#else
++int winbond840_probe(struct net_device *dev)
++{
++ if (pci_drv_register(&winbond840_drv_id, dev) < 0)
++ return -ENODEV;
++ printk(KERN_INFO "%s" KERN_INFO "%s", version1, version2);
++ return 0;
++}
++#endif /* MODULE */
++
++
++/*
++ * Local variables:
++ * compile-command: "make KERNVER=`uname -r` winbond-840.o"
++ * compile-cmd: "gcc -DMODULE -Wall -Wstrict-prototypes -O6 -c winbond-840.c"
++ * c-indent-level: 4
++ * c-basic-offset: 4
++ * tab-width: 4
++ * End:
++ */
+Index: linux/src/drivers/net/yellowfin.c
+===================================================================
+RCS file: /cvsroot/hurd/gnumach/linux/src/drivers/net/Attic/yellowfin.c,v
+retrieving revision 1.2
+diff -u -r1.2 yellowfin.c
+--- linux/src/drivers/net/yellowfin.c 7 Sep 1999 07:19:16 -0000 1.2
++++ linux/src/drivers/net/yellowfin.c 20 Aug 2004 10:32:55 -0000
+@@ -1,28 +1,47 @@
+ /* yellowfin.c: A Packet Engines G-NIC ethernet driver for linux. */
+ /*
+- Written 1997-1998 by Donald Becker.
++ Written 1997-2003 by Donald Becker.
+
+- This software may be used and distributed according to the terms
+- of the GNU Public License, incorporated herein by reference.
++ This software may be used and distributed according to the terms of
++ the GNU General Public License (GPL), incorporated herein by reference.
++ Drivers based on or derived from this code fall under the GPL and must
++ retain the authorship, copyright and license notice. This file is not
++ a complete program and may only be used when the entire operating
++ system is licensed under the GPL.
+
+ This driver is for the Packet Engines G-NIC PCI Gigabit Ethernet adapter.
+ It also supports the Symbios Logic version of the same chip core.
+
+- The author may be reached as becker@CESDIS.gsfc.nasa.gov, or C/O
+- Center of Excellence in Space Data and Information Sciences
+- Code 930.5, Goddard Space Flight Center, Greenbelt MD 20771
+-
+- Support and updates available at
+- http://cesdis.gsfc.nasa.gov/linux/drivers/yellowfin.html
++ The author may be reached as becker@scyld.com, or C/O
++ Scyld Computing Corporation
++ 914 Bay Ridge Road, Suite 220
++ Annapolis MD 21403
++
++ Support information and updates available at
++ http://www.scyld.com/network/yellowfin.html
++ The information and support mailing lists are based at
++ http://www.scyld.com/mailman/listinfo/
+ */
+
+-static const char *version = "yellowfin.c:v0.99A 4/7/98 becker@cesdis.gsfc.nasa.gov\n";
++/* These identify the driver base version and may not be removed. */
++static const char version1[] =
++"yellowfin.c:v1.10 7/22/2003 Written by Donald Becker <becker@scyld.com>\n";
++static const char version2[] =
++" http://www.scyld.com/network/yellowfin.html\n";
++
++/* The user-configurable values.
++ These may be modified when a driver module is loaded.*/
+
+-/* A few user-configurable values. */
++/* Message enable level: 0..31 = no..all messages. See NETIF_MSG docs. */
++static int debug = 2;
+
++/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
+ static int max_interrupt_work = 20;
+-static int min_pci_latency = 64;
+-static int mtu = 0;
++
++/* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
++ Typical is a 64 element hash table based on the Ethernet CRC. */
++static int multicast_filter_limit = 64;
++
+ #ifdef YF_PROTOTYPE /* Support for prototype hardware errata. */
+ /* System-wide count of bogus-rx frames. */
+ static int bogus_rx = 0;
+@@ -38,109 +57,121 @@
+
+ /* Set the copy breakpoint for the copy-only-tiny-frames scheme.
+ Setting to > 1518 effectively disables this feature. */
+-static const int rx_copybreak = 100;
++static int rx_copybreak = 0;
++
++/* Used to pass the media type, etc.
++ No media types are currently defined. These options exist only for
++ compatibility with other drivers.
++*/
++#define MAX_UNITS 8 /* More are supported, limit only on options */
++static int options[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
++static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
++
++/* Do ugly workaround for GX server chipset errata. */
++static int gx_fix = 0;
++
++/* Operational parameters that are set at compile time. */
+
+ /* Keep the ring sizes a power of two for efficiency.
+ Making the Tx ring too large decreases the effectiveness of channel
+- bonding and packet priority.
+- There are no ill effects from too-large receive rings. */
++ bonding and packet priority, confuses the system network buffer limits,
++ and wastes memory.
++ Too-large receive rings waste memory and confound network buffer limits.
++*/
+ #define TX_RING_SIZE 16
+-#define RX_RING_SIZE 32
++#define TX_QUEUE_SIZE 12 /* Must be > 4 && <= TX_RING_SIZE */
++#define RX_RING_SIZE 64
+
+ /* Operational parameters that usually are not changed. */
+ /* Time in jiffies before concluding the transmitter is hung. */
+-#define TX_TIMEOUT ((2000*HZ)/1000)
++#define TX_TIMEOUT (6*HZ)
++
++/* Allocation size of Rx buffers with normal sized Ethernet frames.
++ Do not change this value without good reason. This is not a limit,
++ but a way to keep a consistent allocation size among drivers.
++ */
++#define PKT_BUF_SZ 1536
++
++#ifndef __KERNEL__
++#define __KERNEL__
++#endif
++#if !defined(__OPTIMIZE__)
++#warning You must compile this file with the correct options!
++#warning See the last lines of the source file.
++#error You must compile this driver with "-O".
++#endif
+
+ #include <linux/config.h>
+-#ifdef MODULE
+-#ifdef MODVERSIONS
+-#include <linux/modversions.h>
++#if defined(CONFIG_SMP) && ! defined(__SMP__)
++#define __SMP__
+ #endif
+-#include <linux/module.h>
++#if defined(MODULE) && defined(CONFIG_MODVERSIONS) && ! defined(MODVERSIONS)
++#define MODVERSIONS
++#endif
++
+ #include <linux/version.h>
+-#else
+-#define MOD_INC_USE_COUNT
+-#define MOD_DEC_USE_COUNT
++#if defined(MODVERSIONS)
++#include <linux/modversions.h>
+ #endif
++#include <linux/module.h>
+
+ #include <linux/kernel.h>
+-#include <linux/sched.h>
+ #include <linux/string.h>
+ #include <linux/timer.h>
+-#include <linux/ptrace.h>
+ #include <linux/errno.h>
+ #include <linux/ioport.h>
++#if LINUX_VERSION_CODE >= 0x20400
++#include <linux/slab.h>
++#else
+ #include <linux/malloc.h>
++#endif
+ #include <linux/interrupt.h>
+ #include <linux/pci.h>
+-#include <linux/bios32.h>
+-#include <asm/processor.h> /* Processor type for cache alignment. */
+-#include <asm/bitops.h>
+-#include <asm/io.h>
+-
+ #include <linux/netdevice.h>
+ #include <linux/etherdevice.h>
+ #include <linux/skbuff.h>
++#include <asm/processor.h> /* Processor type for cache alignment. */
++#include <asm/unaligned.h>
++#include <asm/bitops.h>
++#include <asm/io.h>
+
+-/* Kernel compatibility defines, common to David Hind's PCMCIA package.
+- This is only in the support-all-kernels source code. */
+-#include <linux/version.h> /* Evil, but neccessary */
+-
+-#if defined (LINUX_VERSION_CODE) && LINUX_VERSION_CODE < 0x10300
+-#define RUN_AT(x) (x) /* What to put in timer->expires. */
+-#define DEV_ALLOC_SKB(len) alloc_skb(len, GFP_ATOMIC)
+-#define virt_to_bus(addr) ((unsigned long)addr)
+-#define bus_to_virt(addr) ((void*)addr)
+-
+-#else /* 1.3.0 and later */
+-#define RUN_AT(x) (jiffies + (x))
+-#define DEV_ALLOC_SKB(len) dev_alloc_skb(len + 2)
+-#endif
+-
+-#if defined (LINUX_VERSION_CODE) && LINUX_VERSION_CODE < 0x10338
+-#ifdef MODULE
+-#if !defined(CONFIG_MODVERSIONS) && !defined(__NO_VERSION__)
+-char kernel_version[] = UTS_RELEASE;
+-#endif
++#ifdef INLINE_PCISCAN
++#include "k_compat.h"
+ #else
+-#undef MOD_INC_USE_COUNT
+-#define MOD_INC_USE_COUNT
+-#undef MOD_DEC_USE_COUNT
+-#define MOD_DEC_USE_COUNT
+-#endif
+-#endif /* 1.3.38 */
+-
+-#if (LINUX_VERSION_CODE >= 0x10344)
+-#define NEW_MULTICAST
+-#include <linux/delay.h>
+-#endif
+-#if (LINUX_VERSION_CODE >= 0x20100)
+-char kernel_version[] = UTS_RELEASE;
+-#endif
+-#ifdef SA_SHIRQ
+-#define IRQ(irq, dev_id, pt_regs) (irq, dev_id, pt_regs)
+-#else
+-#define IRQ(irq, dev_id, pt_regs) (irq, pt_regs)
+-#endif
+-#if (LINUX_VERSION_CODE < 0x20123)
+-#define test_and_set_bit(val, addr) set_bit(val, addr)
++#include "pci-scan.h"
++#include "kern_compat.h"
+ #endif
+
+-static const char *card_name = "Yellowfin G-NIC Gbit Ethernet";
++/* Condensed operations for readability. */
++#define virt_to_le32desc(addr) cpu_to_le32(virt_to_bus(addr))
++#define le32desc_to_virt(addr) bus_to_virt(le32_to_cpu(addr))
+
+-/* The PCI I/O space extent. */
+-#define YELLOWFIN_TOTAL_SIZE 0x100
+-
+-#ifdef HAVE_DEVLIST
+-struct netdev_entry yellowfin_drv =
+-{card_name, yellowfin_pci_probe, YELLOWFIN_TOTAL_SIZE, NULL};
++#if (LINUX_VERSION_CODE >= 0x20100) && defined(MODULE)
++char kernel_version[] = UTS_RELEASE;
+ #endif
+
+-#ifdef YELLOWFIN_DEBUG
+-int yellowfin_debug = YELLOWFIN_DEBUG;
+-#else
+-int yellowfin_debug = 1;
+-#endif
++MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
++MODULE_DESCRIPTION("Packet Engines Yellowfin G-NIC Gigabit Ethernet driver");
++MODULE_LICENSE("GPL");
++MODULE_PARM(debug, "i");
++MODULE_PARM(options, "1-" __MODULE_STRING(MAX_UNITS) "i");
++MODULE_PARM(rx_copybreak, "i");
++MODULE_PARM(full_duplex, "1-" __MODULE_STRING(MAX_UNITS) "i");
++MODULE_PARM(multicast_filter_limit, "i");
++MODULE_PARM(max_interrupt_work, "i");
++MODULE_PARM(gx_fix, "i");
++MODULE_PARM_DESC(debug, "Driver message level enable (0-31)");
++MODULE_PARM_DESC(options, "Force transceiver type or fixed speed+duplex");
++MODULE_PARM_DESC(rx_copybreak,
++ "Breakpoint in bytes for copy-only-tiny-frames");
++MODULE_PARM_DESC(full_duplex,
++ "Non-zero to force full duplex, non-negotiated link "
++ "(deprecated).");
++MODULE_PARM_DESC(max_interrupt_work,
++ "Driver maximum events handled per interrupt");
++MODULE_PARM_DESC(multicast_filter_limit,
++ "Multicast addresses before switching to Rx-all-multicast");
++MODULE_PARM_DESC(gx_fix, "Set to work around old GX chipset errata");
+
+ /*
+ Theory of Operation
+@@ -203,29 +234,50 @@
+ IV. Notes
+
+ Thanks to Kim Stearns of Packet Engines for providing a pair of G-NIC boards.
++Thanks to Bruce Faust of Digitalscape for providing both their SYM53C885 board
++and an AlphaStation to verifty the Alpha port!
+
+ IVb. References
+
+ Yellowfin Engineering Design Specification, 4/23/97 Preliminary/Confidential
++Symbios SYM53C885 PCI-SCSI/Fast Ethernet Multifunction Controller Preliminary
++ Data Manual v3.0
++http://cesdis.gsfc.nasa.gov/linux/misc/NWay.html
+ http://cesdis.gsfc.nasa.gov/linux/misc/100mbps.html
+
+ IVc. Errata
+
+-See Packet Engines confidential appendix.
+-
++See Packet Engines confidential appendix (prototype chips only).
+ */
++
+
+-/* A few values that may be tweaked. */
+-#define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/
+
+-#ifndef PCI_VENDOR_ID_PKT_ENG /* To be defined in linux/pci.h */
+-#define PCI_VENDOR_ID_PKT_ENG 0x1000 /* Hmm, likely number.. */
+-#define PCI_DEVICE_ID_YELLOWFIN 0x0702
++static void *yellowfin_probe1(struct pci_dev *pdev, void *init_dev,
++ long ioaddr, int irq, int chip_idx, int fnd_cnt);
++enum capability_flags {
++ HasMII=1, FullTxStatus=2, IsGigabit=4, HasMulticastBug=8, FullRxStatus=16,
++ HasMACAddrBug=32, /* Only on early revs. */
++};
++/* The PCI I/O space extent. */
++#define YELLOWFIN_SIZE 0x100
++#ifdef USE_IO_OPS
++#define PCI_IOTYPE (PCI_USES_MASTER | PCI_USES_IO | PCI_ADDR0)
++#else
++#define PCI_IOTYPE (PCI_USES_MASTER | PCI_USES_MEM | PCI_ADDR1)
+ #endif
+
+-/* The rest of these values should never change. */
++static struct pci_id_info pci_id_tbl[] = {
++ {"Yellowfin G-NIC Gigabit Ethernet", { 0x07021000, 0xffffffff},
++ PCI_IOTYPE, YELLOWFIN_SIZE,
++ FullTxStatus | IsGigabit | HasMulticastBug | HasMACAddrBug},
++ {"Symbios SYM83C885", { 0x07011000, 0xffffffff},
++ PCI_IOTYPE, YELLOWFIN_SIZE, HasMII },
++ {0,},
++};
+
+-static void yellowfin_timer(unsigned long data);
++struct drv_id_info yellowfin_drv_id = {
++ "yellowfin", PCI_HOTSWAP, PCI_CLASS_NETWORK_ETHERNET<<8, pci_id_tbl,
++ yellowfin_probe1, };
+
+ /* Offsets to the Yellowfin registers. Various sizes and alignments. */
+ enum yellowfin_offsets {
+@@ -234,33 +286,45 @@
+ RxCtrl=0x40, RxStatus=0x44, RxPtr=0x4C,
+ RxIntrSel=0x50, RxBranchSel=0x54, RxWaitSel=0x58,
+ EventStatus=0x80, IntrEnb=0x82, IntrClear=0x84, IntrStatus=0x86,
+- ChipRev=0x8C, DMACtrl=0x90, Cnfg=0xA0, RxDepth=0xB8, FlowCtrl=0xBC,
++ ChipRev=0x8C, DMACtrl=0x90, TxThreshold=0x94,
++ Cnfg=0xA0, FrameGap0=0xA2, FrameGap1=0xA4,
++ MII_Cmd=0xA6, MII_Addr=0xA8, MII_Wr_Data=0xAA, MII_Rd_Data=0xAC,
++ MII_Status=0xAE,
++ RxDepth=0xB8, FlowCtrl=0xBC,
+ AddrMode=0xD0, StnAddr=0xD2, HashTbl=0xD8, FIFOcfg=0xF8,
++ EEStatus=0xF0, EECtrl=0xF1, EEAddr=0xF2, EERead=0xF3, EEWrite=0xF4,
++ EEFeature=0xF5,
+ };
+
+-/* The Yellowfin Rx and Tx buffer descriptors. */
++/* The Yellowfin Rx and Tx buffer descriptors.
++ Elements are written as 32 bit for endian portability. */
+ struct yellowfin_desc {
+- u16 request_cnt;
+- u16 cmd;
++ u32 dbdma_cmd;
+ u32 addr;
+ u32 branch_addr;
+- u16 result_cnt;
+- u16 status;
++ u32 result_status;
+ };
+
+ struct tx_status_words {
++#if defined(__powerpc__)
++ u16 tx_errs;
++ u16 tx_cnt;
++ u16 paused;
++ u16 total_tx_cnt;
++#else /* Little endian chips. */
+ u16 tx_cnt;
+ u16 tx_errs;
+ u16 total_tx_cnt;
+ u16 paused;
++#endif
+ };
+
+ /* Bits in yellowfin_desc.cmd */
+ enum desc_cmd_bits {
+- CMD_TX_PKT=0x1000, CMD_RX_BUF=0x2000, CMD_TXSTATUS=0x3000,
+- CMD_NOP=0x6000, CMD_STOP=0x7000,
+- BRANCH_ALWAYS=0x0C, INTR_ALWAYS=0x30, WAIT_ALWAYS=0x03,
+- BRANCH_IFTRUE=0x04,
++ CMD_TX_PKT=0x10000000, CMD_RX_BUF=0x20000000, CMD_TXSTATUS=0x30000000,
++ CMD_NOP=0x60000000, CMD_STOP=0x70000000,
++ BRANCH_ALWAYS=0x0C0000, INTR_ALWAYS=0x300000, WAIT_ALWAYS=0x030000,
++ BRANCH_IFTRUE=0x040000,
+ };
+
+ /* Bits in yellowfin_desc.status */
+@@ -272,227 +336,159 @@
+ IntrTxDone=0x10, IntrTxInvalid=0x20, IntrTxPCIFault=0x40,IntrTxPCIErr=0x80,
+ IntrEarlyRx=0x100, IntrWakeup=0x200, };
+
++#define PRIV_ALIGN 31 /* Required alignment mask */
+ struct yellowfin_private {
+- /* Descriptor rings first for alignment. Tx requires a second descriptor
+- for status. */
++ /* Descriptor rings first for alignment.
++ Tx requires a second descriptor for status. */
+ struct yellowfin_desc rx_ring[RX_RING_SIZE];
+ struct yellowfin_desc tx_ring[TX_RING_SIZE*2];
+- const char *product_name;
+- struct device *next_module;
+- /* The saved address of a sent-in-place packet/buffer, for skfree(). */
+- struct sk_buff* tx_skbuff[TX_RING_SIZE];
+- struct tx_status_words tx_status[TX_RING_SIZE];
++ struct net_device *next_module;
++ void *priv_addr; /* Unaligned address for kfree */
+ /* The addresses of receive-in-place skbuffs. */
+ struct sk_buff* rx_skbuff[RX_RING_SIZE];
+- int chip_id;
+- struct enet_statistics stats;
++ /* The saved address of a sent-in-place packet/buffer, for later free(). */
++ struct sk_buff* tx_skbuff[TX_RING_SIZE];
++ struct tx_status_words tx_status[TX_RING_SIZE];
+ struct timer_list timer; /* Media selection timer. */
+- int in_interrupt;
+- unsigned int cur_rx, cur_tx; /* The next free ring entry */
+- unsigned int dirty_rx, dirty_tx; /* The ring entries to be free()ed. */
++ struct net_device_stats stats;
++ /* Frequently used and paired value: keep adjacent for cache effect. */
++ int msg_level;
++ int chip_id, drv_flags;
++ struct pci_dev *pci_dev;
++ long in_interrupt;
++ int max_interrupt_work;
++
++ struct yellowfin_desc *rx_head_desc;
++ unsigned int cur_rx, dirty_rx; /* Producer/consumer ring indices */
++ unsigned int rx_buf_sz; /* Based on MTU+slack. */
++ int rx_copybreak;
++
++ struct tx_status_words *tx_tail_desc;
++ unsigned int cur_tx, dirty_tx;
++ int tx_threshold;
+ unsigned int tx_full:1; /* The Tx queue is full. */
+ unsigned int full_duplex:1; /* Full-duplex operation requested. */
++ unsigned int duplex_lock:1;
+ unsigned int medialock:1; /* Do not sense media. */
+- unsigned int default_port:4; /* Last dev->if_port value. */
+- u32 pad[4]; /* Used for 32-byte alignment */
++ unsigned int default_port; /* Last dev->if_port value. */
++ /* MII transceiver section. */
++ int mii_cnt; /* MII device addresses. */
++ u16 advertising; /* NWay media advertisement */
++ unsigned char phys[2]; /* MII device addresses. */
++ /* Rx multicast filter. */
++ u16 mc_filter[4];
++ int rx_mode;
++ int multicast_filter_limit;
+ };
+
+-#ifdef MODULE
+-/* Used to pass the media type, etc. */
+-#define MAX_UNITS 8 /* More are supported, limit only on options */
+-static int options[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
+-
+-#if LINUX_VERSION_CODE > 0x20115
+-MODULE_AUTHOR("Donald Becker <becker@cesdis.gsfc.nasa.gov>");
+-MODULE_DESCRIPTION("Packet Engines Yellowfin G-NIC Gigabit Ethernet driver");
+-MODULE_PARM(max_interrupt_work, "i");
+-MODULE_PARM(min_pci_latency, "i");
+-MODULE_PARM(mtu, "i");
+-MODULE_PARM(debug, "i");
+-MODULE_PARM(rx_copybreak, "i");
+-MODULE_PARM(options, "1-" __MODULE_STRING(MAX_UNITS) "i");
++static int read_eeprom(long ioaddr, int location);
++static int mdio_read(long ioaddr, int phy_id, int location);
++static void mdio_write(long ioaddr, int phy_id, int location, int value);
++#ifdef HAVE_PRIVATE_IOCTL
++static int mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+ #endif
+-
+-#endif
+-
+-static struct device *yellowfin_probe1(struct device *dev, int ioaddr, int irq,
+- int chip_id, int options);
+-static int yellowfin_open(struct device *dev);
++static int yellowfin_open(struct net_device *dev);
+ static void yellowfin_timer(unsigned long data);
+-static void yellowfin_tx_timeout(struct device *dev);
+-static void yellowfin_init_ring(struct device *dev);
+-static int yellowfin_start_xmit(struct sk_buff *skb, struct device *dev);
+-static int yellowfin_rx(struct device *dev);
+-static void yellowfin_interrupt IRQ(int irq, void *dev_instance, struct pt_regs *regs);
+-static int yellowfin_close(struct device *dev);
+-static struct enet_statistics *yellowfin_get_stats(struct device *dev);
+-#ifdef NEW_MULTICAST
+-static void set_rx_mode(struct device *dev);
+-#else
+-static void set_rx_mode(struct device *dev, int num_addrs, void *addrs);
+-#endif
++static void yellowfin_tx_timeout(struct net_device *dev);
++static void yellowfin_init_ring(struct net_device *dev);
++static int yellowfin_start_xmit(struct sk_buff *skb, struct net_device *dev);
++static void yellowfin_interrupt(int irq, void *dev_instance, struct pt_regs *regs);
++static int yellowfin_rx(struct net_device *dev);
++static void yellowfin_error(struct net_device *dev, int intr_status);
++static int yellowfin_close(struct net_device *dev);
++static struct net_device_stats *yellowfin_get_stats(struct net_device *dev);
++static void set_rx_mode(struct net_device *dev);
+
+
+
+-#ifdef MODULE
+-/* A list of all installed Yellowfin devices, for removing the driver module. */
+-static struct device *root_yellowfin_dev = NULL;
+-#endif
++/* A list of installed Yellowfin devices, for removing the driver module. */
++static struct net_device *root_yellowfin_dev = NULL;
+
+-int yellowfin_probe(struct device *dev)
++#ifndef MODULE
++int yellowfin_probe(struct net_device *dev)
+ {
+- int cards_found = 0;
+- static int pci_index = 0; /* Static, for multiple probe calls. */
+-
+- /* Ideally we would detect all network cards in slot order. That would
+- be best done a central PCI probe dispatch, which wouldn't work
+- well with the current structure. So instead we detect just the
+- Yellowfin cards in slot order. */
+-
+- if (pcibios_present()) {
+- unsigned char pci_bus, pci_device_fn;
+-
+- for (;pci_index < 0xff; pci_index++) {
+- u8 pci_irq_line, pci_latency;
+- u16 pci_command, vendor, device;
+- u32 pci_ioaddr, chip_idx = 0;
+-
+-#ifdef REVERSE_PROBE_ORDER
+- if (pcibios_find_class (PCI_CLASS_NETWORK_ETHERNET << 8,
+- 0xfe - pci_index,
+- &pci_bus, &pci_device_fn)
+- != PCIBIOS_SUCCESSFUL)
+- continue;
+-#else
+- if (pcibios_find_class (PCI_CLASS_NETWORK_ETHERNET << 8,
+- pci_index,
+- &pci_bus, &pci_device_fn)
+- != PCIBIOS_SUCCESSFUL)
+- break;
+-#endif
+- pcibios_read_config_word(pci_bus, pci_device_fn,
+- PCI_VENDOR_ID, &vendor);
+- pcibios_read_config_word(pci_bus, pci_device_fn,
+- PCI_DEVICE_ID, &device);
+- pcibios_read_config_byte(pci_bus, pci_device_fn,
+- PCI_INTERRUPT_LINE, &pci_irq_line);
+- pcibios_read_config_dword(pci_bus, pci_device_fn,
+- PCI_BASE_ADDRESS_0, &pci_ioaddr);
+- /* Remove I/O space marker in bit 0. */
+- pci_ioaddr &= ~3;
+-
+- if (vendor != PCI_VENDOR_ID_PKT_ENG)
+- continue;
+-
+- if (device != PCI_DEVICE_ID_YELLOWFIN)
+- continue;
+-
+- if (yellowfin_debug > 2)
+- printk("Found Packet Engines Yellowfin G-NIC at I/O %#x, IRQ %d.\n",
+- pci_ioaddr, pci_irq_line);
+-
+- if (check_region(pci_ioaddr, YELLOWFIN_TOTAL_SIZE))
+- continue;
+-
+-#ifdef MODULE
+- dev = yellowfin_probe1(dev, pci_ioaddr, pci_irq_line, chip_idx,
+- cards_found < MAX_UNITS ? options[cards_found] : 0);
+-#else
+- dev = yellowfin_probe1(dev, pci_ioaddr, pci_irq_line, chip_idx,
+- dev ? dev->mem_start : 0);
+-#endif
+-
+- if (dev) {
+- /* Get and check the bus-master and latency values. */
+- pcibios_read_config_word(pci_bus, pci_device_fn,
+- PCI_COMMAND, &pci_command);
+- if ( ! (pci_command & PCI_COMMAND_MASTER)) {
+- printk(" PCI Master Bit has not been set! Setting...\n");
+- pci_command |= PCI_COMMAND_MASTER;
+- pcibios_write_config_word(pci_bus, pci_device_fn,
+- PCI_COMMAND, pci_command);
+- }
+- pcibios_read_config_byte(pci_bus, pci_device_fn,
+- PCI_LATENCY_TIMER, &pci_latency);
+- if (pci_latency < min_pci_latency) {
+- printk(" PCI latency timer (CFLT) is unreasonably low at %d."
+- " Setting to %d clocks.\n",
+- pci_latency, min_pci_latency);
+- pcibios_write_config_byte(pci_bus, pci_device_fn,
+- PCI_LATENCY_TIMER, min_pci_latency);
+- } else if (yellowfin_debug > 1)
+- printk(" PCI latency timer (CFLT) is %#x.\n", pci_latency);
+- dev = 0;
+- cards_found++;
+- }
+- }
+- }
+-
+-#if defined (MODULE)
+- return cards_found;
+-#else
+- return cards_found ? 0 : -ENODEV;
+-#endif
++ if (pci_drv_register(&yellowfin_drv_id, dev) < 0)
++ return -ENODEV;
++ printk(KERN_INFO "%s" KERN_INFO "%s", version1, version2);
++ return 0;
+ }
++#endif
+
+-static struct device *yellowfin_probe1(struct device *dev, int ioaddr, int irq,
+- int chip_id, int options)
++static void *yellowfin_probe1(struct pci_dev *pdev, void *init_dev,
++ long ioaddr, int irq, int chip_idx, int find_cnt)
+ {
+- static int did_version = 0; /* Already printed version info. */
+- struct yellowfin_private *yp;
+- int i;
+-
+- if (yellowfin_debug > 0 && did_version++ == 0)
+- printk(version);
+-
+- dev = init_etherdev(dev, sizeof(struct yellowfin_private));
+-
+- printk("%s: P-E Yellowfin type %8x at %#3x, ",
+- dev->name, inl(ioaddr + ChipRev), ioaddr);
+-
++ struct net_device *dev;
++ struct yellowfin_private *np;
++ void *priv_mem;
++ int i, option = find_cnt < MAX_UNITS ? options[find_cnt] : 0;
++ int drv_flags = pci_id_tbl[chip_idx].drv_flags;
++
++ dev = init_etherdev(init_dev, 0);
++ if (!dev)
++ return NULL;
++
++ printk(KERN_INFO "%s: %s type %8x at 0x%lx, ",
++ dev->name, pci_id_tbl[chip_idx].name, (int)inl(ioaddr + ChipRev),
++ ioaddr);
++
++ if (drv_flags & IsGigabit)
++ for (i = 0; i < 6; i++)
++ dev->dev_addr[i] = inb(ioaddr + StnAddr + i);
++ else {
++ int ee_offset = (read_eeprom(ioaddr, 6) == 0xff ? 0x100 : 0);
++ for (i = 0; i < 6; i++)
++ dev->dev_addr[i] = read_eeprom(ioaddr, ee_offset + i);
++ }
+ for (i = 0; i < 5; i++)
+- printk("%2.2x:", inb(ioaddr + StnAddr + i));
+- printk("%2.2x, IRQ %d.\n", inb(ioaddr + StnAddr + i), irq);
+- for (i = 0; i < 6; i++)
+- dev->dev_addr[i] = inb(ioaddr + StnAddr + i);
++ printk("%2.2x:", dev->dev_addr[i]);
++ printk("%2.2x, IRQ %d.\n", dev->dev_addr[i], irq);
+
+ /* Reset the chip. */
+ outl(0x80000000, ioaddr + DMACtrl);
+
++ /* Make certain elements e.g. descriptor lists are aligned. */
++ priv_mem = kmalloc(sizeof(*np) + PRIV_ALIGN, GFP_KERNEL);
++ /* Check for the very unlikely case of no memory. */
++ if (priv_mem == NULL)
++ return NULL;
+
+ /* We do a request_region() only to register /proc/ioports info. */
+- request_region(ioaddr, YELLOWFIN_TOTAL_SIZE, card_name);
++ request_region(ioaddr, pci_id_tbl[chip_idx].io_size, dev->name);
+
+ dev->base_addr = ioaddr;
+ dev->irq = irq;
+
+- /* Make certain the descriptor lists are aligned. */
+- yp = (void *)(((long)kmalloc(sizeof(*yp), GFP_KERNEL) + 31) & ~31);
+- memset(yp, 0, sizeof(*yp));
+- dev->priv = yp;
++ dev->priv = np = (void *)(((long)priv_mem + PRIV_ALIGN) & ~PRIV_ALIGN);
++ memset(np, 0, sizeof(*np));
++ np->priv_addr = priv_mem;
+
+-#ifdef MODULE
+- yp->next_module = root_yellowfin_dev;
++ np->next_module = root_yellowfin_dev;
+ root_yellowfin_dev = dev;
+-#endif
+
+- yp->chip_id = chip_id;
++ np->pci_dev = pdev;
++ np->chip_id = chip_idx;
++ np->drv_flags = drv_flags;
++ np->msg_level = (1 << debug) - 1;
++ np->rx_copybreak = rx_copybreak;
++ np->max_interrupt_work = max_interrupt_work;
++ np->multicast_filter_limit = multicast_filter_limit;
+
+- yp->full_duplex = 1;
+-#ifdef YELLOWFIN_DEFAULT_MEDIA
+- yp->default_port = YELLOWFIN_DEFAULT_MEDIA;
+-#endif
+-#ifdef YELLOWFIN_NO_MEDIA_SWITCH
+- yp->medialock = 1;
+-#endif
++ if (dev->mem_start)
++ option = dev->mem_start;
+
+ /* The lower four bits are the media type. */
+- if (options > 0) {
+- yp->full_duplex = (options & 16) ? 1 : 0;
+- yp->default_port = options & 15;
+- if (yp->default_port)
+- yp->medialock = 1;
++ if (option > 0) {
++ if (option & 0x220)
++ np->full_duplex = 1;
++ np->default_port = option & 15;
++ if (np->default_port)
++ np->medialock = 1;
+ }
++ if (find_cnt < MAX_UNITS && full_duplex[find_cnt] > 0)
++ np->full_duplex = 1;
++
++ if (np->full_duplex)
++ np->duplex_lock = 1;
+
+ /* The Yellowfin-specific entries in the device structure. */
+ dev->open = &yellowfin_open;
+@@ -500,48 +496,97 @@
+ dev->stop = &yellowfin_close;
+ dev->get_stats = &yellowfin_get_stats;
+ dev->set_multicast_list = &set_rx_mode;
+- if (mtu)
+- dev->mtu = mtu;
++ dev->do_ioctl = &mii_ioctl;
+
+- /* todo: Reset the xcvr interface and turn on heartbeat. */
++ if (np->drv_flags & HasMII) {
++ int phy, phy_idx = 0;
++ for (phy = 0; phy < 32 && phy_idx < 4; phy++) {
++ int mii_status = mdio_read(ioaddr, phy, 1);
++ if (mii_status != 0xffff && mii_status != 0x0000) {
++ np->phys[phy_idx++] = phy;
++ np->advertising = mdio_read(ioaddr, phy, 4);
++ printk(KERN_INFO "%s: MII PHY found at address %d, status "
++ "0x%4.4x advertising %4.4x.\n",
++ dev->name, phy, mii_status, np->advertising);
++ }
++ }
++ np->mii_cnt = phy_idx;
++ }
+
+ return dev;
+ }
+
++static int read_eeprom(long ioaddr, int location)
++{
++ int bogus_cnt = 10000; /* Typical 33Mhz: 1050 ticks */
++
++ outb(location, ioaddr + EEAddr);
++ outb(0x30 | ((location >> 8) & 7), ioaddr + EECtrl);
++ while ((inb(ioaddr + EEStatus) & 0x80) && --bogus_cnt > 0)
++ ;
++ return inb(ioaddr + EERead);
++}
++
++/* MII Management Data I/O accesses.
++ These routines assume the MDIO controller is idle, and do not exit until
++ the command is finished. */
++
++static int mdio_read(long ioaddr, int phy_id, int location)
++{
++ int i;
++
++ outw((phy_id<<8) + location, ioaddr + MII_Addr);
++ outw(1, ioaddr + MII_Cmd);
++ for (i = 10000; i >= 0; i--)
++ if ((inw(ioaddr + MII_Status) & 1) == 0)
++ break;
++ return inw(ioaddr + MII_Rd_Data);
++}
++
++static void mdio_write(long ioaddr, int phy_id, int location, int value)
++{
++ int i;
++
++ outw((phy_id<<8) + location, ioaddr + MII_Addr);
++ outw(value, ioaddr + MII_Wr_Data);
++
++ /* Wait for the command to finish. */
++ for (i = 10000; i >= 0; i--)
++ if ((inw(ioaddr + MII_Status) & 1) == 0)
++ break;
++ return;
++}
++
+
+-static int
+-yellowfin_open(struct device *dev)
++static int yellowfin_open(struct net_device *dev)
+ {
+ struct yellowfin_private *yp = (struct yellowfin_private *)dev->priv;
+- int ioaddr = dev->base_addr;
++ long ioaddr = dev->base_addr;
++ int i;
+
+ /* Reset the chip. */
+ outl(0x80000000, ioaddr + DMACtrl);
+
+-#ifdef SA_SHIRQ
+- if (request_irq(dev->irq, &yellowfin_interrupt, SA_SHIRQ,
+- card_name, dev)) {
+- return -EAGAIN;
+- }
+-#else
+- if (irq2dev_map[dev->irq] != NULL
+- || (irq2dev_map[dev->irq] = dev) == NULL
+- || dev->irq == 0
+- || request_irq(dev->irq, &yellowfin_interrupt, 0, card_name)) {
++ MOD_INC_USE_COUNT;
++
++ if (request_irq(dev->irq, &yellowfin_interrupt, SA_SHIRQ, dev->name,
++ dev)) {
++ MOD_DEC_USE_COUNT;
+ return -EAGAIN;
+ }
+-#endif
+
+- if (yellowfin_debug > 1)
+- printk("%s: yellowfin_open() irq %d.\n", dev->name, dev->irq);
+-
+- MOD_INC_USE_COUNT;
++ if (yp->msg_level & NETIF_MSG_IFUP)
++ printk(KERN_DEBUG "%s: yellowfin_open() irq %d.\n",
++ dev->name, dev->irq);
+
+ yellowfin_init_ring(dev);
+
+ outl(virt_to_bus(yp->rx_ring), ioaddr + RxPtr);
+ outl(virt_to_bus(yp->tx_ring), ioaddr + TxPtr);
+
++ for (i = 0; i < 6; i++)
++ outb(dev->dev_addr[i], ioaddr + StnAddr + i);
++
+ /* Set up various condition 'select' registers.
+ There are no options here. */
+ outl(0x00800080, ioaddr + TxIntrSel); /* Interrupt on Tx abort */
+@@ -558,25 +603,27 @@
+ /* Enable automatic generation of flow control frames, period 0xffff. */
+ outl(0x0030FFFF, ioaddr + FlowCtrl);
+
++ yp->tx_threshold = 32;
++ outl(yp->tx_threshold, ioaddr + TxThreshold);
++
+ if (dev->if_port == 0)
+ dev->if_port = yp->default_port;
+
+- dev->tbusy = 0;
+- dev->interrupt = 0;
+ yp->in_interrupt = 0;
+
+- /* We are always in full-duplex mode with the current chip! */
+- yp->full_duplex = 1;
+-
+ /* Setting the Rx mode will start the Rx process. */
+- outw(0x01CD | (yp->full_duplex ? 2 : 0), ioaddr + Cnfg);
+-#ifdef NEW_MULTICAST
++ if (yp->drv_flags & IsGigabit) {
++ /* We are always in full-duplex mode with gigabit! */
++ yp->full_duplex = 1;
++ outw(0x01CF, ioaddr + Cnfg);
++ } else {
++ outw(0x0018, ioaddr + FrameGap0); /* 0060/4060 for non-MII 10baseT */
++ outw(0x1018, ioaddr + FrameGap1);
++ outw(0x101C | (yp->full_duplex ? 2 : 0), ioaddr + Cnfg);
++ }
++ yp->rx_mode = 0;
+ set_rx_mode(dev);
+-#else
+- set_rx_mode(dev, 0, 0);
+-#endif
+-
+- dev->start = 1;
++ netif_start_tx_queue(dev);
+
+ /* Enable interrupts by setting the interrupt mask. */
+ outw(0x81ff, ioaddr + IntrEnb); /* See enum intr_status_bits */
+@@ -584,13 +631,13 @@
+ outl(0x80008000, ioaddr + RxCtrl); /* Start Rx and Tx channels. */
+ outl(0x80008000, ioaddr + TxCtrl);
+
+- if (yellowfin_debug > 2) {
+- printk("%s: Done yellowfin_open().\n",
++ if (yp->msg_level & NETIF_MSG_IFUP)
++ printk(KERN_DEBUG "%s: Done yellowfin_open().\n",
+ dev->name);
+- }
++
+ /* Set the timer to check for link beat. */
+ init_timer(&yp->timer);
+- yp->timer.expires = RUN_AT((24*HZ)/10); /* 2.4 sec. */
++ yp->timer.expires = jiffies + 3*HZ;
+ yp->timer.data = (unsigned long)dev;
+ yp->timer.function = &yellowfin_timer; /* timer handler */
+ add_timer(&yp->timer);
+@@ -600,183 +647,240 @@
+
+ static void yellowfin_timer(unsigned long data)
+ {
+- struct device *dev = (struct device *)data;
++ struct net_device *dev = (struct net_device *)data;
+ struct yellowfin_private *yp = (struct yellowfin_private *)dev->priv;
+- int ioaddr = dev->base_addr;
+- int next_tick = 0;
++ long ioaddr = dev->base_addr;
++ int next_tick = 60*HZ;
++
++ if (yp->msg_level & NETIF_MSG_TIMER)
++ printk(KERN_DEBUG "%s: Yellowfin timer tick, status %8.8x.\n",
++ dev->name, inw(ioaddr + IntrStatus));
++
++ if (jiffies - dev->trans_start > TX_TIMEOUT
++ && yp->cur_tx - yp->dirty_tx > 1
++ && netif_queue_paused(dev))
++ yellowfin_tx_timeout(dev);
+
+- if (yellowfin_debug > 3) {
+- printk("%s: Yellowfin timer tick, status %8.8x.\n",
+- dev->name, inl(ioaddr + IntrStatus));
+- }
+- if (next_tick) {
+- yp->timer.expires = RUN_AT(next_tick);
+- add_timer(&yp->timer);
++ if (yp->mii_cnt) {
++ int mii_reg1 = mdio_read(ioaddr, yp->phys[0], 1);
++ int mii_reg5 = mdio_read(ioaddr, yp->phys[0], 5);
++ int negotiated = mii_reg5 & yp->advertising;
++ if (yp->msg_level & NETIF_MSG_TIMER)
++ printk(KERN_DEBUG "%s: MII #%d status register is %4.4x, "
++ "link partner capability %4.4x.\n",
++ dev->name, yp->phys[0], mii_reg1, mii_reg5);
++
++ if ( ! yp->duplex_lock &&
++ ((negotiated & 0x0300) == 0x0100
++ || (negotiated & 0x00C0) == 0x0040)) {
++ yp->full_duplex = 1;
++ }
++ outw(0x101C | (yp->full_duplex ? 2 : 0), ioaddr + Cnfg);
++
++ if (mii_reg1 & 0x0004)
++ next_tick = 60*HZ;
++ else
++ next_tick = 3*HZ;
+ }
++
++ yp->timer.expires = jiffies + next_tick;
++ add_timer(&yp->timer);
+ }
+
+-static void yellowfin_tx_timeout(struct device *dev)
++static void yellowfin_tx_timeout(struct net_device *dev)
+ {
+ struct yellowfin_private *yp = (struct yellowfin_private *)dev->priv;
+- int ioaddr = dev->base_addr;
++ long ioaddr = dev->base_addr;
+
+- printk("%s: Yellowfin transmit timed out, status %8.8x, resetting...\n",
+- dev->name, inl(ioaddr));
++ printk(KERN_WARNING "%s: Yellowfin transmit timed out at %d/%d Tx "
++ "status %4.4x, Rx status %4.4x, resetting...\n",
++ dev->name, yp->cur_tx, yp->dirty_tx,
++ (int)inl(ioaddr + TxStatus), (int)inl(ioaddr + RxStatus));
+
+-#ifndef __alpha__
+- {
++ /* Note: these should be KERN_DEBUG. */
++ if (yp->msg_level & NETIF_MSG_TX_ERR) {
+ int i;
+- printk(" Rx ring %8.8x: ", (int)yp->rx_ring);
++ printk(KERN_DEBUG " Rx ring %p: ", yp->rx_ring);
+ for (i = 0; i < RX_RING_SIZE; i++)
+- printk(" %8.8x", (unsigned int)yp->rx_ring[i].status);
+- printk("\n Tx ring %8.8x: ", (int)yp->tx_ring);
++ printk(" %8.8x", yp->rx_ring[i].result_status);
++ printk("\n"KERN_DEBUG" Tx ring %p: ", yp->tx_ring);
+ for (i = 0; i < TX_RING_SIZE; i++)
+- printk(" %4.4x /%4.4x", yp->tx_status[i].tx_errs, yp->tx_ring[i].status);
++ printk(" %4.4x /%8.8x", yp->tx_status[i].tx_errs,
++ yp->tx_ring[i].result_status);
+ printk("\n");
+ }
+-#endif
+
+- /* Perhaps we should reinitialize the hardware here. */
+- dev->if_port = 0;
+- /* Stop and restart the chip's Tx processes . */
+-
+- /* Trigger an immediate transmit demand. */
+-
+- dev->trans_start = jiffies;
+- yp->stats.tx_errors++;
+- return;
+-}
++ /* If the hardware is found to hang regularly, we will update the code
++ to reinitialize the chip here. */
++ dev->if_port = 0;
+
++ /* Wake the potentially-idle transmit channel. */
++ outl(0x10001000, dev->base_addr + TxCtrl);
++ if (yp->cur_tx - yp->dirty_tx < TX_QUEUE_SIZE)
++ netif_unpause_tx_queue(dev);
++
++ dev->trans_start = jiffies;
++ yp->stats.tx_errors++;
++ return;
++}
+
+ /* Initialize the Rx and Tx rings, along with various 'dev' bits. */
+-static void
+-yellowfin_init_ring(struct device *dev)
++static void yellowfin_init_ring(struct net_device *dev)
+ {
+ struct yellowfin_private *yp = (struct yellowfin_private *)dev->priv;
+ int i;
+
+ yp->tx_full = 0;
+ yp->cur_rx = yp->cur_tx = 0;
+- yp->dirty_rx = yp->dirty_tx = 0;
++ yp->dirty_tx = 0;
+
+- for (i = 0; i < RX_RING_SIZE; i++) {
+- struct sk_buff *skb;
+- int pkt_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
++ yp->rx_buf_sz = dev->mtu + 18 + 15;
++ /* Match other driver's allocation size when possible. */
++ if (yp->rx_buf_sz < PKT_BUF_SZ)
++ yp->rx_buf_sz = PKT_BUF_SZ;
++ yp->rx_head_desc = &yp->rx_ring[0];
+
+- yp->rx_ring[i].request_cnt = pkt_buf_sz;
+- yp->rx_ring[i].cmd = CMD_RX_BUF | INTR_ALWAYS;
++ for (i = 0; i < RX_RING_SIZE; i++) {
++ yp->rx_ring[i].dbdma_cmd =
++ cpu_to_le32(CMD_RX_BUF | INTR_ALWAYS | yp->rx_buf_sz);
++ yp->rx_ring[i].branch_addr = virt_to_le32desc(&yp->rx_ring[i+1]);
++ }
++ /* Mark the last entry as wrapping the ring. */
++ yp->rx_ring[i-1].branch_addr = virt_to_le32desc(&yp->rx_ring[0]);
+
+- skb = DEV_ALLOC_SKB(pkt_buf_sz);
+- skb_reserve(skb, 2); /* 16 byte align the IP header. */
++ for (i = 0; i < RX_RING_SIZE; i++) {
++ struct sk_buff *skb = dev_alloc_skb(yp->rx_buf_sz);
+ yp->rx_skbuff[i] = skb;
+ if (skb == NULL)
+- break; /* Bad news! */
++ break;
+ skb->dev = dev; /* Mark as being used by this device. */
+-#if LINUX_VERSION_CODE > 0x10300
+- yp->rx_ring[i].addr = virt_to_bus(skb->tail);
+-#else
+- yp->rx_ring[i].addr = virt_to_bus(skb->data);
+-#endif
+- yp->rx_ring[i].branch_addr = virt_to_bus(&yp->rx_ring[i+1]);
++ skb_reserve(skb, 2); /* 16 byte align the IP header. */
++ yp->rx_ring[i].addr = virt_to_le32desc(skb->tail);
+ }
+- /* Mark the last entry as wrapping the ring. */
+- yp->rx_ring[i-1].cmd = CMD_RX_BUF | INTR_ALWAYS | BRANCH_ALWAYS;
+- yp->rx_ring[i-1].branch_addr = virt_to_bus(&yp->rx_ring[0]);
++ yp->rx_ring[i-1].dbdma_cmd = cpu_to_le32(CMD_STOP);
++ yp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
+
+-/*#define NO_TXSTATS*/
++#define NO_TXSTATS
+ #ifdef NO_TXSTATS
+ /* In this mode the Tx ring needs only a single descriptor. */
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ yp->tx_skbuff[i] = 0;
+- yp->tx_ring[i].cmd = CMD_STOP;
+- yp->tx_ring[i].branch_addr = virt_to_bus(&yp->tx_ring[i+1]);
++ yp->tx_ring[i].dbdma_cmd = cpu_to_le32(CMD_STOP);
++ yp->tx_ring[i].branch_addr = virt_to_le32desc(&yp->tx_ring[i+1]);
+ }
+- yp->tx_ring[--i].cmd = CMD_STOP | BRANCH_ALWAYS; /* Wrap ring */
+- yp->tx_ring[i].branch_addr = virt_to_bus(&yp->tx_ring[0]);
++ /* Wrap ring */
++ yp->tx_ring[--i].dbdma_cmd = cpu_to_le32(CMD_STOP | BRANCH_ALWAYS);
++ yp->tx_ring[i].branch_addr = virt_to_le32desc(&yp->tx_ring[0]);
+ #else
+ /* Tx ring needs a pair of descriptors, the second for the status. */
+ for (i = 0; i < TX_RING_SIZE*2; i++) {
+ yp->tx_skbuff[i/2] = 0;
+- yp->tx_ring[i].cmd = CMD_STOP; /* Branch on Tx error. */
+- yp->tx_ring[i].branch_addr = virt_to_bus(&yp->tx_ring[i+1]);
++ /* Branch on Tx error. */
++ yp->tx_ring[i].dbdma_cmd = cpu_to_le32(CMD_STOP);
++ yp->tx_ring[i].branch_addr = virt_to_le32desc(&yp->tx_ring[i+1]);
+ i++;
+- yp->tx_ring[i].cmd = CMD_TXSTATUS; /* Interrupt, no wait. */
+- yp->tx_ring[i].request_cnt = sizeof(yp->tx_status[i]);
+- yp->tx_ring[i].addr = virt_to_bus(&yp->tx_status[i/2]);
+- yp->tx_ring[i].branch_addr = virt_to_bus(&yp->tx_ring[i+1]);
++ if (yp->flags & FullTxStatus) {
++ yp->tx_ring[i].dbdma_cmd =
++ cpu_to_le32(CMD_TXSTATUS | sizeof(yp->tx_status[i]));
++ yp->tx_ring[i].request_cnt = sizeof(yp->tx_status[i]);
++ yp->tx_ring[i].addr = virt_to_le32desc(&yp->tx_status[i/2]);
++ } else { /* Symbios chips write only tx_errs word. */
++ yp->tx_ring[i].dbdma_cmd =
++ cpu_to_le32(CMD_TXSTATUS | INTR_ALWAYS | 2);
++ yp->tx_ring[i].request_cnt = 2;
++ yp->tx_ring[i].addr = virt_to_le32desc(&yp->tx_status[i/2].tx_errs);
++ }
++ yp->tx_ring[i].branch_addr = virt_to_le32desc(&yp->tx_ring[i+1]);
+ }
+ /* Wrap ring */
+- yp->tx_ring[--i].cmd = CMD_TXSTATUS | BRANCH_ALWAYS | INTR_ALWAYS;
+- yp->tx_ring[i].branch_addr = virt_to_bus(&yp->tx_ring[0]);
++ yp->tx_ring[--i].dbdma_cmd |= cpu_to_le32(BRANCH_ALWAYS | INTR_ALWAYS);
++ yp->tx_ring[i].branch_addr = virt_to_le32desc(&yp->tx_ring[0]);
+ #endif
++ yp->tx_tail_desc = &yp->tx_status[0];
++ return;
+ }
+
+-static int
+-yellowfin_start_xmit(struct sk_buff *skb, struct device *dev)
++static int yellowfin_start_xmit(struct sk_buff *skb, struct net_device *dev)
+ {
+ struct yellowfin_private *yp = (struct yellowfin_private *)dev->priv;
+ unsigned entry;
+
++#if LINUX_VERSION_CODE < 0x20323
+ /* Block a timer-based transmit from overlapping. This could better be
+ done with atomic_swap(1, dev->tbusy), but set_bit() works as well. */
+- if (test_and_set_bit(0, (void*)&dev->tbusy) != 0) {
+- if (jiffies - dev->trans_start < TX_TIMEOUT)
+- return 1;
+- yellowfin_tx_timeout(dev);
++ if (netif_pause_tx_queue(dev) != 0) {
++ /* This watchdog code is redundant with the media monitor timer. */
++ if (jiffies - dev->trans_start > TX_TIMEOUT)
++ yellowfin_tx_timeout(dev);
+ return 1;
+ }
++#endif
+
+- /* Caution: the write order is important here, set the base address
+- with the "ownership" bits last. */
++ /* Note: Ordering is important here, set the field with the
++ "ownership" bit last, and only then increment cur_tx. */
+
+ /* Calculate the next Tx descriptor entry. */
+ entry = yp->cur_tx % TX_RING_SIZE;
+
+ yp->tx_skbuff[entry] = skb;
+
++ if (gx_fix) { /* Note: only works for paddable protocols e.g. IP. */
++ int cacheline_end = (virt_to_bus(skb->data) + skb->len) % 32;
++ /* Fix GX chipset errata. */
++ if (cacheline_end > 24 || cacheline_end == 0)
++ skb->len += 32 - cacheline_end + 1;
++ }
+ #ifdef NO_TXSTATS
+- yp->tx_ring[entry].request_cnt = skb->len;
+- yp->tx_ring[entry].addr = virt_to_bus(skb->data);
+- yp->tx_ring[entry].status = 0;
++ yp->tx_ring[entry].addr = virt_to_le32desc(skb->data);
++ yp->tx_ring[entry].result_status = 0;
+ if (entry >= TX_RING_SIZE-1) {
+- yp->tx_ring[0].cmd = CMD_STOP; /* New stop command. */
+- yp->tx_ring[TX_RING_SIZE-1].cmd = CMD_TX_PKT | BRANCH_ALWAYS;
++ /* New stop command. */
++ yp->tx_ring[0].dbdma_cmd = cpu_to_le32(CMD_STOP);
++ yp->tx_ring[TX_RING_SIZE-1].dbdma_cmd =
++ cpu_to_le32(CMD_TX_PKT|BRANCH_ALWAYS | skb->len);
+ } else {
+- yp->tx_ring[entry+1].cmd = CMD_STOP; /* New stop command. */
+- yp->tx_ring[entry].cmd = CMD_TX_PKT | BRANCH_IFTRUE;
++ yp->tx_ring[entry+1].dbdma_cmd = cpu_to_le32(CMD_STOP);
++ yp->tx_ring[entry].dbdma_cmd =
++ cpu_to_le32(CMD_TX_PKT | BRANCH_IFTRUE | skb->len);
+ }
+ yp->cur_tx++;
+ #else
+ yp->tx_ring[entry<<1].request_cnt = skb->len;
+- yp->tx_ring[entry<<1].addr = virt_to_bus(skb->data);
++ yp->tx_ring[entry<<1].addr = virt_to_le32desc(skb->data);
+ /* The input_last (status-write) command is constant, but we must rewrite
+ the subsequent 'stop' command. */
+
+ yp->cur_tx++;
+ {
+ unsigned next_entry = yp->cur_tx % TX_RING_SIZE;
+- yp->tx_ring[next_entry<<1].cmd = CMD_STOP;
++ yp->tx_ring[next_entry<<1].dbdma_cmd = cpu_to_le32(CMD_STOP);
+ }
+ /* Final step -- overwrite the old 'stop' command. */
+
+- yp->tx_ring[entry<<1].cmd =
+- (entry % 6) == 0 ? CMD_TX_PKT | INTR_ALWAYS | BRANCH_IFTRUE :
+- CMD_TX_PKT | BRANCH_IFTRUE;
++ yp->tx_ring[entry<<1].dbdma_cmd =
++ cpu_to_le32( ((entry % 6) == 0 ? CMD_TX_PKT|INTR_ALWAYS|BRANCH_IFTRUE :
++ CMD_TX_PKT | BRANCH_IFTRUE) | skb->len);
+ #endif
+
+- /* Todo: explicitly flush cache lines here. */
++ /* Non-x86 Todo: explicitly flush cache lines here. */
+
+ /* Wake the potentially-idle transmit channel. */
+ outl(0x10001000, dev->base_addr + TxCtrl);
+
+- if (yp->cur_tx - yp->dirty_tx < TX_RING_SIZE - 1)
+- clear_bit(0, (void*)&dev->tbusy); /* Typical path */
+- else
++ if (yp->cur_tx - yp->dirty_tx >= TX_QUEUE_SIZE) {
++ netif_stop_tx_queue(dev);
+ yp->tx_full = 1;
++ if (yp->cur_tx - (volatile int)yp->dirty_tx < TX_QUEUE_SIZE) {
++ netif_unpause_tx_queue(dev);
++ yp->tx_full = 0;
++ } else
++ netif_stop_tx_queue(dev);
++ } else
++ netif_unpause_tx_queue(dev); /* Typical path */
+ dev->trans_start = jiffies;
+
+- if (yellowfin_debug > 4) {
+- printk("%s: Yellowfin transmit frame #%d queued in slot %d.\n",
++ if (yp->msg_level & NETIF_MSG_TX_QUEUED) {
++ printk(KERN_DEBUG "%s: Yellowfin transmit frame #%d queued in slot %d.\n",
+ dev->name, yp->cur_tx, entry);
+ }
+ return 0;
+@@ -784,316 +888,331 @@
+
+ /* The interrupt handler does all of the Rx thread work and cleans up
+ after the Tx thread. */
+-static void yellowfin_interrupt IRQ(int irq, void *dev_instance, struct pt_regs *regs)
++static void yellowfin_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
+ {
+-#ifdef SA_SHIRQ /* Use the now-standard shared IRQ implementation. */
+- struct device *dev = (struct device *)dev_instance;
+-#else
+- struct device *dev = (struct device *)(irq2dev_map[irq]);
+-#endif
+-
+- struct yellowfin_private *lp;
+- int ioaddr, boguscnt = max_interrupt_work;
++ struct net_device *dev = (struct net_device *)dev_instance;
++ struct yellowfin_private *yp;
++ long ioaddr;
++ int boguscnt = max_interrupt_work;
+
++#ifndef final_version /* Can never occur. */
+ if (dev == NULL) {
+- printk ("yellowfin_interrupt(): irq %d for unknown device.\n", irq);
++ printk (KERN_ERR "yellowfin_interrupt(): irq %d for unknown device.\n", irq);
+ return;
+ }
++#endif
+
+ ioaddr = dev->base_addr;
+- lp = (struct yellowfin_private *)dev->priv;
+- if (test_and_set_bit(0, (void*)&lp->in_interrupt)) {
+- dev->interrupt = 1;
++ yp = (struct yellowfin_private *)dev->priv;
++ if (test_and_set_bit(0, (void*)&yp->in_interrupt)) {
+ printk(KERN_ERR "%s: Re-entering the interrupt handler.\n", dev->name);
+ return;
+ }
+
+ do {
+ u16 intr_status = inw(ioaddr + IntrClear);
+- unsigned dirty_tx = lp->dirty_tx;
+
+- if (yellowfin_debug > 4)
+- printk("%s: Yellowfin interrupt, status %4.4x.\n",
++ if (yp->msg_level & NETIF_MSG_INTR)
++ printk(KERN_DEBUG "%s: Yellowfin interrupt, status %4.4x.\n",
+ dev->name, intr_status);
+
+ if (intr_status == 0)
+ break;
+
+- if (intr_status & (IntrRxDone | IntrEarlyRx))
++ if (intr_status & (IntrRxDone | IntrEarlyRx)) {
+ yellowfin_rx(dev);
++ outl(0x10001000, ioaddr + RxCtrl); /* Wake Rx engine. */
++ }
+
+ #ifdef NO_TXSTATS
+- for (; lp->cur_tx - dirty_tx > 0; dirty_tx++) {
+- int entry = dirty_tx % TX_RING_SIZE;
+- if (lp->tx_ring[entry].status == 0)
++ for (; yp->cur_tx - yp->dirty_tx > 0; yp->dirty_tx++) {
++ int entry = yp->dirty_tx % TX_RING_SIZE;
++ if (yp->tx_ring[entry].result_status == 0)
+ break;
++ yp->stats.tx_packets++;
++#if LINUX_VERSION_CODE > 0x20127
++ yp->stats.tx_bytes += yp->tx_skbuff[entry]->len;
++#endif
+ /* Free the original skb. */
+- dev_kfree_skb(lp->tx_skbuff[entry], FREE_WRITE);
+- lp->tx_skbuff[entry] = 0;
+- lp->stats.tx_packets++;
++ dev_free_skb_irq(yp->tx_skbuff[entry]);
++ yp->tx_skbuff[entry] = 0;
+ }
+- if (lp->tx_full && dev->tbusy
+- && lp->cur_tx - dirty_tx < TX_RING_SIZE - 4) {
++ if (yp->tx_full
++ && yp->cur_tx - yp->dirty_tx < TX_QUEUE_SIZE - 4) {
+ /* The ring is no longer full, clear tbusy. */
+- lp->tx_full = 0;
+- clear_bit(0, (void*)&dev->tbusy);
+- mark_bh(NET_BH);
++ yp->tx_full = 0;
++ netif_resume_tx_queue(dev);
+ }
+- lp->dirty_tx = dirty_tx;
+ #else
+ if (intr_status & IntrTxDone
+- || lp->tx_status[dirty_tx % TX_RING_SIZE].tx_errs) {
++ || yp->tx_tail_desc->tx_errs) {
++ unsigned dirty_tx = yp->dirty_tx;
+
+- for (dirty_tx = lp->dirty_tx; lp->cur_tx - dirty_tx > 0;
++ for (dirty_tx = yp->dirty_tx; yp->cur_tx - dirty_tx > 0;
+ dirty_tx++) {
+ /* Todo: optimize this. */
+ int entry = dirty_tx % TX_RING_SIZE;
+- u16 tx_errs = lp->tx_status[entry].tx_errs;
++ u16 tx_errs = yp->tx_status[entry].tx_errs;
+
++#ifndef final_version
++ if (yp->msg_level & NETIF_MSG_INTR)
++ printk(KERN_DEBUG "%s: Tx queue %d check, Tx status "
++ "%4.4x %4.4x %4.4x %4.4x.\n",
++ dev->name, entry,
++ yp->tx_status[entry].tx_cnt,
++ yp->tx_status[entry].tx_errs,
++ yp->tx_status[entry].total_tx_cnt,
++ yp->tx_status[entry].paused);
++#endif
+ if (tx_errs == 0)
+ break; /* It still hasn't been Txed */
+- if (tx_errs & 0xF8100000) {
++ if (tx_errs & 0xF810) {
+ /* There was an major error, log it. */
+ #ifndef final_version
+- if (yellowfin_debug > 1)
+- printk("%s: Transmit error, Tx status %4.4x.\n",
++ if (yp->msg_level & NETIF_MSG_TX_ERR)
++ printk(KERN_DEBUG "%s: Transmit error, Tx status %4.4x.\n",
+ dev->name, tx_errs);
+ #endif
+- lp->stats.tx_errors++;
+- if (tx_errs & 0xF800) lp->stats.tx_aborted_errors++;
+- if (tx_errs & 0x0800) lp->stats.tx_carrier_errors++;
+- if (tx_errs & 0x2000) lp->stats.tx_window_errors++;
+- if (tx_errs & 0x8000) lp->stats.tx_fifo_errors++;
++ yp->stats.tx_errors++;
++ if (tx_errs & 0xF800) yp->stats.tx_aborted_errors++;
++ if (tx_errs & 0x0800) yp->stats.tx_carrier_errors++;
++ if (tx_errs & 0x2000) yp->stats.tx_window_errors++;
++ if (tx_errs & 0x8000) yp->stats.tx_fifo_errors++;
+ #ifdef ETHER_STATS
+- if (tx_errs & 0x1000) lp->stats.collisions16++;
++ if (tx_errs & 0x1000) yp->stats.collisions16++;
+ #endif
+ } else {
++#ifndef final_version
++ if (yp->msg_level & NETIF_MSG_TX_DONE)
++ printk(KERN_DEBUG "%s: Normal transmit, Tx status %4.4x.\n",
++ dev->name, tx_errs);
++#endif
+ #ifdef ETHER_STATS
+- if (status & 0x0400) lp->stats.tx_deferred++;
++ if (tx_errs & 0x0400) yp->stats.tx_deferred++;
++#endif
++#if LINUX_VERSION_CODE > 0x20127
++ yp->stats.tx_bytes += yp->tx_skbuff[entry]->len;
+ #endif
+- lp->stats.collisions += tx_errs & 15;
+- lp->stats.tx_packets++;
++ yp->stats.collisions += tx_errs & 15;
++ yp->stats.tx_packets++;
+ }
+-
+ /* Free the original skb. */
+- dev_kfree_skb(lp->tx_skbuff[entry], FREE_WRITE);
+- lp->tx_skbuff[entry] = 0;
++ dev_free_skb_irq(yp->tx_skbuff[entry]);
++ yp->tx_skbuff[entry] = 0;
+ /* Mark status as empty. */
+- lp->tx_status[entry].tx_errs = 0;
++ yp->tx_status[entry].tx_errs = 0;
+ }
+
+ #ifndef final_version
+- if (lp->cur_tx - dirty_tx > TX_RING_SIZE) {
+- printk("%s: Out-of-sync dirty pointer, %d vs. %d, full=%d.\n",
+- dev->name, dirty_tx, lp->cur_tx, lp->tx_full);
++ if (yp->cur_tx - dirty_tx > TX_RING_SIZE) {
++ printk(KERN_ERR "%s: Out-of-sync dirty pointer, %d vs. %d, full=%d.\n",
++ dev->name, dirty_tx, yp->cur_tx, yp->tx_full);
+ dirty_tx += TX_RING_SIZE;
+ }
+ #endif
+
+- if (lp->tx_full && dev->tbusy
+- && lp->cur_tx - dirty_tx < TX_RING_SIZE - 2) {
++ if (yp->tx_full
++ && yp->cur_tx - dirty_tx < TX_QUEUE_SIZE - 2) {
+ /* The ring is no longer full, clear tbusy. */
+- lp->tx_full = 0;
+- clear_bit(0, (void*)&dev->tbusy);
+- mark_bh(NET_BH);
++ yp->tx_full = 0;
++ netif_resume_tx_queue(dev);
+ }
+
+- lp->dirty_tx = dirty_tx;
++ yp->dirty_tx = dirty_tx;
++ yp->tx_tail_desc = &yp->tx_status[dirty_tx % TX_RING_SIZE];
+ }
+ #endif
+
+- /* Log errors and other events. */
+- if (intr_status & 0x2ee) { /* Abnormal error summary. */
+- printk("%s: Something Wicked happened! %4.4x.\n",
+- dev->name, intr_status);
+- /* Hmmmmm, it's not clear what to do here. */
+- if (intr_status & (IntrTxPCIErr | IntrTxPCIFault))
+- lp->stats.tx_errors++;
+- if (intr_status & (IntrRxPCIErr | IntrRxPCIFault))
+- lp->stats.rx_errors++;
+- }
++ /* Log errors and other uncommon events. */
++ if (intr_status & 0x2ee) /* Abnormal error summary. */
++ yellowfin_error(dev, intr_status);
++
+ if (--boguscnt < 0) {
+- printk("%s: Too much work at interrupt, status=0x%4.4x.\n",
++ printk(KERN_WARNING "%s: Too much work at interrupt, "
++ "status=0x%4.4x.\n",
+ dev->name, intr_status);
+ break;
+ }
+ } while (1);
+
+- if (yellowfin_debug > 3)
+- printk("%s: exiting interrupt, status=%#4.4x.\n",
++ if (yp->msg_level & NETIF_MSG_INTR)
++ printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
+ dev->name, inw(ioaddr + IntrStatus));
+
+- /* Code that should never be run! Perhaps remove after testing.. */
+- {
+- static int stopit = 10;
+- if (dev->start == 0 && --stopit < 0) {
+- printk("%s: Emergency stop, looping startup interrupt.\n",
+- dev->name);
+-#ifdef SA_SHIRQ
+- free_irq(irq, dev);
+-#else
+- free_irq(irq);
+-#endif
+- }
+- }
+-
+- dev->interrupt = 0;
+- clear_bit(0, (void*)&lp->in_interrupt);
++ clear_bit(0, (void*)&yp->in_interrupt);
+ return;
+ }
+
+ /* This routine is logically part of the interrupt handler, but separated
+ for clarity and better register allocation. */
+-static int
+-yellowfin_rx(struct device *dev)
++static int yellowfin_rx(struct net_device *dev)
+ {
+- struct yellowfin_private *lp = (struct yellowfin_private *)dev->priv;
+ struct yellowfin_private *yp = (struct yellowfin_private *)dev->priv;
+- int entry = lp->cur_rx % RX_RING_SIZE;
+- int boguscnt = 20;
++ int entry = yp->cur_rx % RX_RING_SIZE;
++ int boguscnt = yp->dirty_rx + RX_RING_SIZE - yp->cur_rx;
+
+- if (yellowfin_debug > 4) {
+- printk(" In yellowfin_rx(), entry %d status %4.4x.\n", entry,
+- yp->rx_ring[entry].status);
+- printk(" #%d desc. %4.4x %4.4x %8.8x %4.4x %4.4x.\n",
+- entry, yp->rx_ring[entry].cmd,
+- yp->rx_ring[entry].request_cnt, yp->rx_ring[entry].addr,
+- yp->rx_ring[entry].result_cnt, yp->rx_ring[entry].status);
++ if (yp->msg_level & NETIF_MSG_RX_STATUS) {
++ printk(KERN_DEBUG " In yellowfin_rx(), entry %d status %8.8x.\n",
++ entry, yp->rx_ring[entry].result_status);
++ printk(KERN_DEBUG " #%d desc. %8.8x %8.8x %8.8x.\n",
++ entry, yp->rx_ring[entry].dbdma_cmd, yp->rx_ring[entry].addr,
++ yp->rx_ring[entry].result_status);
+ }
+
+-
+ /* If EOP is set on the next entry, it's a new packet. Send it up. */
+- while (yp->rx_ring[entry].status) {
+- /* Todo: optimize this mess. */
+- u16 desc_status = yp->rx_ring[entry].status;
+- struct yellowfin_desc *desc = &lp->rx_ring[entry];
+- int frm_size = desc->request_cnt - desc->result_cnt;
+- u8 *buf_addr = bus_to_virt(lp->rx_ring[entry].addr);
+- s16 frame_status = *(s16*)&(buf_addr[frm_size - 2]);
+-
+- if (yellowfin_debug > 4)
+- printk(" yellowfin_rx() status was %4.4x.\n", frame_status);
++ while (yp->rx_head_desc->result_status) {
++ struct yellowfin_desc *desc = yp->rx_head_desc;
++ u16 desc_status = le32_to_cpu(desc->result_status) >> 16;
++ int data_size =
++ (le32_to_cpu(desc->dbdma_cmd) - le32_to_cpu(desc->result_status))
++ & 0xffff;
++ u8 *buf_addr = le32desc_to_virt(desc->addr);
++ s16 frame_status = get_unaligned((s16*)&(buf_addr[data_size - 2]));
++
++ if (yp->msg_level & NETIF_MSG_RX_STATUS)
++ printk(KERN_DEBUG " yellowfin_rx() status was %4.4x.\n",
++ frame_status);
+ if (--boguscnt < 0)
+ break;
+ if ( ! (desc_status & RX_EOP)) {
+- printk("%s: Oversized Ethernet frame spanned multiple buffers,"
++ printk(KERN_WARNING "%s: Oversized Ethernet frame spanned multiple buffers,"
+ " status %4.4x!\n", dev->name, desc_status);
+- lp->stats.rx_length_errors++;
+- } else if (frame_status & 0x0038) {
++ yp->stats.rx_length_errors++;
++ } else if ((yp->drv_flags & IsGigabit) && (frame_status & 0x0038)) {
+ /* There was a error. */
+- if (yellowfin_debug > 3)
+- printk(" yellowfin_rx() Rx error was %4.4x.\n", frame_status);
+- lp->stats.rx_errors++;
+- if (frame_status & 0x0060) lp->stats.rx_length_errors++;
+- if (frame_status & 0x0008) lp->stats.rx_frame_errors++;
+- if (frame_status & 0x0010) lp->stats.rx_crc_errors++;
+- if (frame_status < 0) lp->stats.rx_dropped++;
++ if (yp->msg_level & NETIF_MSG_RX_ERR)
++ printk(KERN_DEBUG " yellowfin_rx() Rx error was %4.4x.\n",
++ frame_status);
++ yp->stats.rx_errors++;
++ if (frame_status & 0x0060) yp->stats.rx_length_errors++;
++ if (frame_status & 0x0008) yp->stats.rx_frame_errors++;
++ if (frame_status & 0x0010) yp->stats.rx_crc_errors++;
++ if (frame_status < 0) yp->stats.rx_dropped++;
++ } else if ( !(yp->drv_flags & IsGigabit) &&
++ ((buf_addr[data_size-1] & 0x85) || buf_addr[data_size-2] & 0xC0)) {
++ u8 status1 = buf_addr[data_size-2];
++ u8 status2 = buf_addr[data_size-1];
++ yp->stats.rx_errors++;
++ if (status1 & 0xC0) yp->stats.rx_length_errors++;
++ if (status2 & 0x03) yp->stats.rx_frame_errors++;
++ if (status2 & 0x04) yp->stats.rx_crc_errors++;
++ if (status2 & 0x80) yp->stats.rx_dropped++;
+ #ifdef YF_PROTOTYPE /* Support for prototype hardware errata. */
+- } else if (memcmp(bus_to_virt(lp->rx_ring[entry].addr),
++ } else if ((yp->flags & HasMACAddrBug) &&
++ memcmp(le32desc_to_virt(yp->rx_ring[entry].addr),
+ dev->dev_addr, 6) != 0
+- && memcmp(bus_to_virt(lp->rx_ring[entry].addr),
++ && memcmp(le32desc_to_virt(yp->rx_ring[entry].addr),
+ "\377\377\377\377\377\377", 6) != 0) {
+- printk("%s: Bad frame to %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x.\n",
+- dev->name,
+- ((char *)bus_to_virt(lp->rx_ring[entry].addr))[0],
+- ((char *)bus_to_virt(lp->rx_ring[entry].addr))[1],
+- ((char *)bus_to_virt(lp->rx_ring[entry].addr))[2],
+- ((char *)bus_to_virt(lp->rx_ring[entry].addr))[3],
+- ((char *)bus_to_virt(lp->rx_ring[entry].addr))[4],
+- ((char *)bus_to_virt(lp->rx_ring[entry].addr))[5]);
+- bogus_rx++;
++ if (bogus_rx++ == 0)
++ printk(KERN_WARNING "%s: Bad frame to %2.2x:%2.2x:%2.2x:%2.2x:"
++ "%2.2x:%2.2x.\n",
++ dev->name, buf_addr[0], buf_addr[1], buf_addr[2],
++ buf_addr[3], buf_addr[4], buf_addr[5]);
+ #endif
+ } else {
+- u8 bogus_cnt = buf_addr[frm_size - 8];
+- int pkt_len = frm_size - 8 - bogus_cnt;
+ struct sk_buff *skb;
+- int rx_in_place = 0;
++ int pkt_len = data_size -
++ (yp->chip_id ? 7 : 8 + buf_addr[data_size - 8]);
++ /* To verify: Yellowfin Length should omit the CRC! */
+
+- /* Check if the packet is long enough to just accept without
+- copying to a properly sized skbuff. */
+- if (pkt_len > rx_copybreak) {
+- struct sk_buff *newskb;
+- char *temp;
+-
+- /* Get a fresh skbuff to replace the filled one. */
+- newskb = DEV_ALLOC_SKB(dev->mtu <= 1500 ? PKT_BUF_SZ
+- : dev->mtu + 32);
+- if (newskb == NULL) {
+- skb = 0; /* No memory, drop the packet. */
+- goto memory_squeeze;
+- }
+- /* Pass up the skb already on the Rx ring. */
+- skb = lp->rx_skbuff[entry];
+- temp = skb_put(skb, pkt_len);
+- if (bus_to_virt(lp->rx_ring[entry].addr) != temp)
+- printk("%s: Warning -- the skbuff addresses do not match"
+- " in yellowfin_rx: %p vs. %p / %p.\n", dev->name,
+- bus_to_virt(lp->rx_ring[entry].addr),
++#ifndef final_version
++ if (yp->msg_level & NETIF_MSG_RX_STATUS)
++ printk(KERN_DEBUG " yellowfin_rx() normal Rx pkt length %d"
++ " of %d, bogus_cnt %d.\n",
++ pkt_len, data_size, boguscnt);
++#endif
++ /* Check if the packet is long enough to just pass up the skbuff
++ without copying to a properly sized skbuff. */
++ if (pkt_len > yp->rx_copybreak) {
++ char *temp = skb_put(skb = yp->rx_skbuff[entry], pkt_len);
++ yp->rx_skbuff[entry] = NULL;
++#ifndef final_version /* Remove after testing. */
++ if (le32desc_to_virt(yp->rx_ring[entry].addr) != temp)
++ printk(KERN_ERR "%s: Internal fault: The skbuff addresses "
++ "do not match in yellowfin_rx: %p vs. %p / %p.\n",
++ dev->name,
++ le32desc_to_virt(yp->rx_ring[entry].addr),
+ skb->head, temp);
+- rx_in_place = 1;
+- lp->rx_skbuff[entry] = newskb;
+- newskb->dev = dev;
+- skb_reserve(newskb, 2); /* 16 byte align IP header */
+- lp->rx_ring[entry].addr = virt_to_bus(newskb->tail);
+- } else
+- skb = DEV_ALLOC_SKB(pkt_len + 2);
+- memory_squeeze:
+- if (skb == NULL) {
+- printk("%s: Memory squeeze, deferring packet.\n", dev->name);
+- /* todo: Check that at least two ring entries are free.
+- If not, free one and mark stats->rx_dropped++. */
+- break;
+- }
+- skb->dev = dev;
+- if (! rx_in_place) {
+- skb_reserve(skb, 2); /* 16 byte align the data fields */
+- memcpy(skb_put(skb, pkt_len),
+- bus_to_virt(lp->rx_ring[entry].addr), pkt_len);
+- }
+-#if LINUX_VERSION_CODE > 0x10300
+- skb->protocol = eth_type_trans(skb, dev);
++#endif
++ } else {
++ skb = dev_alloc_skb(pkt_len + 2);
++ if (skb == NULL)
++ break;
++ skb->dev = dev;
++ skb_reserve(skb, 2); /* 16 byte align the IP header */
++#if HAS_IP_COPYSUM
++ eth_copy_and_sum(skb, yp->rx_skbuff[entry]->tail, pkt_len, 0);
++ skb_put(skb, pkt_len);
+ #else
+- skb->len = pkt_len;
++ memcpy(skb_put(skb, pkt_len), yp->rx_skbuff[entry]->tail,
++ pkt_len);
+ #endif
++ }
++ skb->protocol = eth_type_trans(skb, dev);
+ netif_rx(skb);
+- lp->stats.rx_packets++;
++ dev->last_rx = jiffies;
++ yp->stats.rx_packets++;
++#if LINUX_VERSION_CODE > 0x20127
++ yp->stats.rx_bytes += pkt_len;
++#endif
+ }
++ entry = (++yp->cur_rx) % RX_RING_SIZE;
++ yp->rx_head_desc = &yp->rx_ring[entry];
++ }
+
+- /* Mark this entry as being the end-of-list, and the prior entry
+- as now valid. */
+- lp->rx_ring[entry].cmd = CMD_STOP;
+- yp->rx_ring[entry].status = 0;
+- {
+- int prev_entry = entry - 1;
+- if (prev_entry < 0)
+- lp->rx_ring[RX_RING_SIZE - 1].cmd =
+- CMD_RX_BUF | INTR_ALWAYS | BRANCH_ALWAYS;
+- else
+- lp->rx_ring[prev_entry].cmd = CMD_RX_BUF | INTR_ALWAYS;
++ /* Refill the Rx ring buffers. */
++ for (; yp->cur_rx - yp->dirty_rx > 0; yp->dirty_rx++) {
++ entry = yp->dirty_rx % RX_RING_SIZE;
++ if (yp->rx_skbuff[entry] == NULL) {
++ struct sk_buff *skb = dev_alloc_skb(yp->rx_buf_sz);
++ yp->rx_skbuff[entry] = skb;
++ if (skb == NULL)
++ break; /* Better luck next round. */
++ skb->dev = dev; /* Mark as being used by this device. */
++ skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
++ yp->rx_ring[entry].addr = virt_to_le32desc(skb->tail);
+ }
+- entry = (++lp->cur_rx) % RX_RING_SIZE;
++ yp->rx_ring[entry].dbdma_cmd = cpu_to_le32(CMD_STOP);
++ yp->rx_ring[entry].result_status = 0; /* Clear complete bit. */
++ if (entry != 0)
++ yp->rx_ring[entry - 1].dbdma_cmd =
++ cpu_to_le32(CMD_RX_BUF | INTR_ALWAYS | yp->rx_buf_sz);
++ else
++ yp->rx_ring[RX_RING_SIZE - 1].dbdma_cmd =
++ cpu_to_le32(CMD_RX_BUF | INTR_ALWAYS | BRANCH_ALWAYS
++ | yp->rx_buf_sz);
+ }
+- /* todo: restart Rx engine if stopped. For now we just make the Rx ring
+- large enough to avoid this. */
+
+ return 0;
+ }
+
+-static int
+-yellowfin_close(struct device *dev)
++static void yellowfin_error(struct net_device *dev, int intr_status)
++{
++ struct yellowfin_private *yp = (struct yellowfin_private *)dev->priv;
++
++ printk(KERN_ERR "%s: Something Wicked happened! %4.4x.\n",
++ dev->name, intr_status);
++ /* Hmmmmm, it's not clear what to do here. */
++ if (intr_status & (IntrTxPCIErr | IntrTxPCIFault))
++ yp->stats.tx_errors++;
++ if (intr_status & (IntrRxPCIErr | IntrRxPCIFault))
++ yp->stats.rx_errors++;
++}
++
++static int yellowfin_close(struct net_device *dev)
+ {
+- int ioaddr = dev->base_addr;
++ long ioaddr = dev->base_addr;
+ struct yellowfin_private *yp = (struct yellowfin_private *)dev->priv;
+ int i;
+
+- dev->start = 0;
+- dev->tbusy = 1;
++ netif_stop_tx_queue(dev);
+
+- if (yellowfin_debug > 1) {
+- printk("%s: Shutting down ethercard, status was Tx %4.4x Rx %4.4x Int %2.2x.\n",
++ if (yp->msg_level & NETIF_MSG_IFDOWN) {
++ printk(KERN_DEBUG "%s: Shutting down ethercard, status was Tx %4.4x "
++ "Rx %4.4x Int %2.2x.\n",
+ dev->name, inw(ioaddr + TxStatus),
+- inw(ioaddr + RxStatus), inl(ioaddr + IntrStatus));
+- printk("%s: Queue pointers were Tx %d / %d, Rx %d / %d.\n",
++ inw(ioaddr + RxStatus), inw(ioaddr + IntrStatus));
++ printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d, Rx %d / %d.\n",
+ dev->name, yp->cur_tx, yp->dirty_tx, yp->cur_rx, yp->dirty_rx);
+ }
+
+@@ -1106,34 +1225,34 @@
+
+ del_timer(&yp->timer);
+
+-#ifdef __i386__
+- if (yellowfin_debug > 2) {
+- printk("\n Tx ring at %8.8x:\n", (int)virt_to_bus(yp->tx_ring));
++#if defined(__i386__)
++ if (yp->msg_level & NETIF_MSG_IFDOWN) {
++ printk("\n"KERN_DEBUG" Tx ring at %8.8x:\n",
++ (int)virt_to_bus(yp->tx_ring));
+ for (i = 0; i < TX_RING_SIZE*2; i++)
+- printk(" %c #%d desc. %4.4x %4.4x %8.8x %8.8x %4.4x %4.4x.\n",
++ printk(" %c #%d desc. %8.8x %8.8x %8.8x %8.8x.\n",
+ inl(ioaddr + TxPtr) == (long)&yp->tx_ring[i] ? '>' : ' ',
+- i, yp->tx_ring[i].cmd,
+- yp->tx_ring[i].request_cnt, yp->tx_ring[i].addr,
+- yp->tx_ring[i].branch_addr,
+- yp->tx_ring[i].result_cnt, yp->tx_ring[i].status);
+- printk(" Tx status %p:\n", yp->tx_status);
++ i, yp->tx_ring[i].dbdma_cmd, yp->tx_ring[i].addr,
++ yp->tx_ring[i].branch_addr, yp->tx_ring[i].result_status);
++ printk(KERN_DEBUG " Tx status %p:\n", yp->tx_status);
+ for (i = 0; i < TX_RING_SIZE; i++)
+- printk(" #%d status %4.4x %4.4x %4.4x %4.4x.\n",
++ printk(KERN_DEBUG " #%d status %4.4x %4.4x %4.4x %4.4x.\n",
+ i, yp->tx_status[i].tx_cnt, yp->tx_status[i].tx_errs,
+ yp->tx_status[i].total_tx_cnt, yp->tx_status[i].paused);
+
+- printk("\n Rx ring %8.8x:\n", (int)virt_to_bus(yp->rx_ring));
++ printk("\n"KERN_DEBUG " Rx ring %8.8x:\n",
++ (int)virt_to_bus(yp->rx_ring));
+ for (i = 0; i < RX_RING_SIZE; i++) {
+- printk(" %c #%d desc. %4.4x %4.4x %8.8x %4.4x %4.4x\n",
++ printk(KERN_DEBUG " %c #%d desc. %8.8x %8.8x %8.8x\n",
+ inl(ioaddr + RxPtr) == (long)&yp->rx_ring[i] ? '>' : ' ',
+- i, yp->rx_ring[i].cmd,
+- yp->rx_ring[i].request_cnt, yp->rx_ring[i].addr,
+- yp->rx_ring[i].result_cnt, yp->rx_ring[i].status);
+- if (yellowfin_debug > 5) {
+- if (*(u8*)yp->rx_ring[i].addr != 0x69) {
++ i, yp->rx_ring[i].dbdma_cmd, yp->rx_ring[i].addr,
++ yp->rx_ring[i].result_status);
++ if (yp->msg_level & NETIF_MSG_PKTDATA) {
++ if (get_unaligned((u8*)yp->rx_ring[i].addr) != 0x69) {
+ int j;
+ for (j = 0; j < 0x50; j++)
+- printk(" %4.4x", ((u16*)yp->rx_ring[i].addr)[j]);
++ printk(" %4.4x",
++ get_unaligned(((u16*)yp->rx_ring[i].addr) + j));
+ printk("\n");
+ }
+ }
+@@ -1141,34 +1260,29 @@
+ }
+ #endif /* __i386__ debugging only */
+
+-#ifdef SA_SHIRQ
+ free_irq(dev->irq, dev);
+-#else
+- free_irq(dev->irq);
+- irq2dev_map[dev->irq] = 0;
+-#endif
+
+ /* Free all the skbuffs in the Rx queue. */
+ for (i = 0; i < RX_RING_SIZE; i++) {
+- yp->rx_ring[i].cmd = CMD_STOP;
++ yp->rx_ring[i].dbdma_cmd = cpu_to_le32(CMD_STOP);
+ yp->rx_ring[i].addr = 0xBADF00D0; /* An invalid address. */
+ if (yp->rx_skbuff[i]) {
+ #if LINUX_VERSION_CODE < 0x20100
+ yp->rx_skbuff[i]->free = 1;
+ #endif
+- dev_kfree_skb(yp->rx_skbuff[i], FREE_WRITE);
++ dev_free_skb(yp->rx_skbuff[i]);
+ }
+ yp->rx_skbuff[i] = 0;
+ }
+ for (i = 0; i < TX_RING_SIZE; i++) {
+ if (yp->tx_skbuff[i])
+- dev_kfree_skb(yp->tx_skbuff[i], FREE_WRITE);
++ dev_free_skb(yp->tx_skbuff[i]);
+ yp->tx_skbuff[i] = 0;
+ }
+
+ #ifdef YF_PROTOTYPE /* Support for prototype hardware errata. */
+- if (yellowfin_debug > 0) {
+- printk("%s: Received %d frames that we should not have.\n",
++ if (yp->msg_level & NETIF_MSG_IFDOWN) {
++ printk(KERN_DEBUG "%s: Received %d frames that we should not have.\n",
+ dev->name, bogus_rx);
+ }
+ #endif
+@@ -1177,8 +1291,7 @@
+ return 0;
+ }
+
+-static struct enet_statistics *
+-yellowfin_get_stats(struct device *dev)
++static struct net_device_stats *yellowfin_get_stats(struct net_device *dev)
+ {
+ struct yellowfin_private *yp = (struct yellowfin_private *)dev->priv;
+ return &yp->stats;
+@@ -1190,6 +1303,7 @@
+ N.B. Do not use for bulk data, use a table-based routine instead.
+ This is common code and should be moved to net/core/crc.c */
+ static unsigned const ethernet_polynomial_le = 0xedb88320U;
++
+ static inline unsigned ether_crc_le(int length, unsigned char *data)
+ {
+ unsigned int crc = 0xffffffff; /* Initial value. */
+@@ -1208,82 +1322,147 @@
+ }
+
+
+-#ifdef NEW_MULTICAST
+-static void set_rx_mode(struct device *dev)
+-#else
+-static void set_rx_mode(struct device *dev, int num_addrs, void *addrs);
+-#endif
++static void set_rx_mode(struct net_device *dev)
+ {
+- int ioaddr = dev->base_addr;
+- u16 cfg_value = inw(ioaddr + Cnfg);
++ struct yellowfin_private *yp = (struct yellowfin_private *)dev->priv;
++ u16 hash_table[4] = {0, 0, 0, 0};
++ int mc_change = 0;
++ int new_rx_mode, i;
+
+- /* Stop the Rx process to change any value. */
+- outw(cfg_value & ~0x1000, ioaddr + Cnfg);
+ if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
+ /* Unconditionally log net taps. */
+- printk("%s: Promiscuous mode enabled.\n", dev->name);
+- outw(0x000F, ioaddr + AddrMode);
+- } else if ((dev->mc_count > 64) || (dev->flags & IFF_ALLMULTI)) {
++ printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n", dev->name);
++ new_rx_mode = 0x000F;
++ } else if (dev->mc_count > yp->multicast_filter_limit
++ || (dev->flags & IFF_ALLMULTI)) {
+ /* Too many to filter well, or accept all multicasts. */
+- outw(0x000B, ioaddr + AddrMode);
++ new_rx_mode = 0x000B;
+ } else if (dev->mc_count > 0) { /* Must use the multicast hash table. */
+ struct dev_mc_list *mclist;
+- u16 hash_table[4];
+- int i;
+- memset(hash_table, 0, sizeof(hash_table));
++
+ for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
+ i++, mclist = mclist->next) {
+ /* Due to a bug in the early chip versions, multiple filter
+ slots must be set for each address. */
+- set_bit((ether_crc_le(3, mclist->dmi_addr) >> 3) & 0x3f,
+- hash_table);
+- set_bit((ether_crc_le(4, mclist->dmi_addr) >> 3) & 0x3f,
+- hash_table);
+- set_bit((ether_crc_le(5, mclist->dmi_addr) >> 3) & 0x3f,
+- hash_table);
++ if (yp->drv_flags & HasMulticastBug) {
++ set_bit((ether_crc_le(3, mclist->dmi_addr) >> 3) & 0x3f,
++ hash_table);
++ set_bit((ether_crc_le(4, mclist->dmi_addr) >> 3) & 0x3f,
++ hash_table);
++ set_bit((ether_crc_le(5, mclist->dmi_addr) >> 3) & 0x3f,
++ hash_table);
++ }
+ set_bit((ether_crc_le(6, mclist->dmi_addr) >> 3) & 0x3f,
+ hash_table);
+ }
++ if (memcmp(hash_table, yp->mc_filter, sizeof hash_table) != 0)
++ mc_change = 1;
++ new_rx_mode = 0x0003;
++ } else { /* Normal, unicast/broadcast-only mode. */
++ new_rx_mode = 0x0001;
++ }
++
++ /* Stop the Rx process to change any value. */
++ if (yp->rx_mode != new_rx_mode || mc_change) {
++ long ioaddr = dev->base_addr;
++ u16 cfg_value = inw(ioaddr + Cnfg);
++
++ outw(cfg_value & ~0x1000, ioaddr + Cnfg);
++
++ yp->rx_mode = new_rx_mode;
++ outw(new_rx_mode, ioaddr + AddrMode);
++ memcpy(yp->mc_filter, hash_table, sizeof hash_table);
+ /* Copy the hash table to the chip. */
+ for (i = 0; i < 4; i++)
+ outw(hash_table[i], ioaddr + HashTbl + i*2);
+- outw(0x0003, ioaddr + AddrMode);
+- } else { /* Normal, unicast/broadcast-only mode. */
+- outw(0x0001, ioaddr + AddrMode);
++
++ /* Restart the Rx process. */
++ outw(cfg_value | 0x1000, ioaddr + Cnfg);
+ }
+- /* Restart the Rx process. */
+- outw(cfg_value | 0x1000, ioaddr + Cnfg);
+ }
+-
+-#ifdef MODULE
+
+-/* An additional parameter that may be passed in... */
+-static int debug = -1;
+-
+-int
+-init_module(void)
++static int mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+ {
+- int cards_found;
+-
+- if (debug >= 0)
+- yellowfin_debug = debug;
+-
+- root_yellowfin_dev = NULL;
+- cards_found = yellowfin_probe(0);
++ struct yellowfin_private *np = (void *)dev->priv;
++ long ioaddr = dev->base_addr;
++ u16 *data = (u16 *)&rq->ifr_data;
++ u32 *data32 = (void *)&rq->ifr_data;
++
++ switch(cmd) {
++ case 0x8947: case 0x89F0:
++ /* SIOCGMIIPHY: Get the address of the PHY in use. */
++ data[0] = np->phys[0] & 0x1f;
++ /* Fall Through */
++ case 0x8948: case 0x89F1:
++ /* SIOCGMIIREG: Read the specified MII register. */
++ data[3] = mdio_read(ioaddr, data[0] & 0x1f, data[1] & 0x1f);
++ return 0;
++ case 0x8949: case 0x89F2:
++ /* SIOCSMIIREG: Write the specified MII register */
++ if (!capable(CAP_NET_ADMIN))
++ return -EPERM;
++ if (data[0] == np->phys[0]) {
++ u16 value = data[2];
++ switch (data[1]) {
++ case 0:
++ /* Check for autonegotiation on or reset. */
++ np->medialock = (value & 0x9000) ? 0 : 1;
++ if (np->medialock)
++ np->full_duplex = (value & 0x0100) ? 1 : 0;
++ break;
++ case 4: np->advertising = value; break;
++ }
++ /* Perhaps check_duplex(dev), depending on chip semantics. */
++ }
++ mdio_write(ioaddr, data[0] & 0x1f, data[1] & 0x1f, data[2]);
++ return 0;
++ case SIOCGPARAMS:
++ data32[0] = np->msg_level;
++ data32[1] = np->multicast_filter_limit;
++ data32[2] = np->max_interrupt_work;
++ data32[3] = np->rx_copybreak;
++ return 0;
++ case SIOCSPARAMS:
++ if (!capable(CAP_NET_ADMIN))
++ return -EPERM;
++ np->msg_level = data32[0];
++ np->multicast_filter_limit = data32[1];
++ np->max_interrupt_work = data32[2];
++ np->rx_copybreak = data32[3];
++ return 0;
++ default:
++ return -EOPNOTSUPP;
++ }
++}
+
+- return cards_found ? 0 : -ENODEV;
++
++#ifdef MODULE
++int init_module(void)
++{
++ /* Emit version even if no cards detected. */
++ printk(KERN_INFO "%s" KERN_INFO "%s", version1, version2);
++ return pci_drv_register(&yellowfin_drv_id, NULL);
+ }
+
+-void
+-cleanup_module(void)
++void cleanup_module(void)
+ {
+- struct device *next_dev;
++ struct net_device *next_dev;
++
++ pci_drv_unregister(&yellowfin_drv_id);
+
+ /* No need to check MOD_IN_USE, as sys_delete_module() checks. */
+ while (root_yellowfin_dev) {
+- next_dev = ((struct yellowfin_private *)root_yellowfin_dev->priv)->next_module;
++ struct yellowfin_private *np = (void *)(root_yellowfin_dev->priv);
+ unregister_netdev(root_yellowfin_dev);
+- release_region(root_yellowfin_dev->base_addr, YELLOWFIN_TOTAL_SIZE);
++#ifdef USE_IO_OPS
++ release_region(root_yellowfin_dev->base_addr,
++ pci_id_tbl[np->chip_id].io_size);
++#else
++ iounmap((char *)root_yellowfin_dev->base_addr);
++#endif
++ next_dev = np->next_module;
++ if (np->priv_addr)
++ kfree(np->priv_addr);
+ kfree(root_yellowfin_dev);
+ root_yellowfin_dev = next_dev;
+ }
+@@ -1293,8 +1472,9 @@
+
+ /*
+ * Local variables:
+- * compile-command: "gcc -DMODULE -D__KERNEL__ -I/usr/src/linux/net/inet -Wall -Wstrict-prototypes -O6 -c yellowfin.c `[ -f /usr/include/linux/modversions.h ] && echo -DMODVERSIONS`"
+- * SMP-compile-command: "gcc -D__SMP__ -DMODULE -D__KERNEL__ -I/usr/src/linux/net/inet -Wall -Wstrict-prototypes -O6 -c yellowfin.c `[ -f /usr/include/linux/modversions.h ] && echo -DMODVERSIONS`"
++ * compile-command: "make KERNVER=`uname -r` yellowfin.o"
++ * compile-cmd: "gcc -DMODULE -Wall -Wstrict-prototypes -O6 -c yellowfin.c"
++ * simple-compile-command: "gcc -DMODULE -O6 -c yellowfin.c"
+ * c-indent-level: 4
+ * c-basic-offset: 4
+ * tab-width: 4