author    Guillem Jover <guillem@debian.org>  2005-11-10 02:09:52 +0000
committer Guillem Jover <guillem@debian.org>  2005-11-10 02:09:52 +0000
commit    184e4ec281911f24da314b444104379e777c8a1f (patch)
tree      83ab25070cf59b1dc6d69728dc59f2c8c167e2d5
parent    3dec4b6834b4a287bb56f2058dcd273d30b4e945 (diff)
Fix patch. Now it's ready to be integrated upstream.
-rw-r--r--  debian/patches/11_nic_update.patch  5540
1 file changed, 2370 insertions, 3170 deletions
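
Context for the diff below: the updated patch drops the obsolete linux/dev copy of eepro100.c and instead adds PCI device IDs (taken from linux-2.6.14-rc4) to the pci_id_tbl probe table in linux/src/drivers/net/eepro100.c. The following is a minimal standalone C sketch, not part of the patch, of the zero-terminated ID-table pattern the patch extends. The struct layout and the 0x1031 entry mirror the pci_id_info/pci_tbl declarations visible in the diff; find_pci_id_entry() is a hypothetical helper added here only to illustrate how such a table is scanned during probing.

/*
 * Sketch only: zero-terminated PCI ID table as used by the eepro100 probe
 * code in this patch.  find_pci_id_entry() is illustrative, not from the
 * driver.
 */
#include <stdio.h>

typedef unsigned short u16;

struct pci_id_info {
	const char *name;
	u16 vendor_id, device_id;
	int pci_index;
};

#define PCI_VENDOR_ID_INTEL       0x8086
#define PCI_DEVICE_ID_INTEL_82557 0x1229

static struct pci_id_info pci_tbl[] = {
	{ "Intel PCI EtherExpress Pro100 VE 82801CAM",
	  PCI_VENDOR_ID_INTEL, 0x1031, 0 },
	{ "Intel PCI EtherExpress Pro100 82557",
	  PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82557, 0 },
	{ 0, } /* 0 terminated list. */
};

/* Walk the table until the zero terminator; return the matching entry. */
static const struct pci_id_info *
find_pci_id_entry(u16 vendor, u16 device)
{
	int i;

	for (i = 0; pci_tbl[i].name; i++)
		if (pci_tbl[i].vendor_id == vendor
		    && pci_tbl[i].device_id == device)
			return &pci_tbl[i];
	return 0;
}

int main(void)
{
	const struct pci_id_info *p =
		find_pci_id_entry(PCI_VENDOR_ID_INTEL, 0x1031);

	printf("%s\n", p ? p->name : "unknown device");
	return 0;
}
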
diff --git a/debian/patches/11_nic_update.patch b/debian/patches/11_nic_update.patch
index fada77f..8cc4e03 100644
--- a/debian/patches/11_nic_update.patch
+++ b/debian/patches/11_nic_update.patch
@@ -1,8 +1,8 @@
#DPATCHLEVEL=0
-2004-10-25 Guillem Jover <guillem@hadrons.org>
+2005-11-10 Guillem Jover <guillem@hadrons.org>
- * i386/linux/Drivers.in: Renamed winbond-840 driver to winbond_840.
+ * i386/linux/configure.ac: Renamed winbond-840 driver to winbond_840.
Do not include "pci-skeleton.c" in the "net" AC_DRIVER_CLASS.
Enable the starfire, intel_gige and natsemi network drivers. Remove
"CONFIG_" from cb_chim, starfire, sundance, winbond840, hamachi,
@@ -10,22 +10,17 @@
with INTEL_GIGE.
* linux/src/include/asm-i386/cache.h: New file from linux 2.2.26.
* linux/dev/include/linux/malloc.h: Include <asm/cache.h>.
- * linux/src/drivers/net/ns820.c: Copied to ...
- * linux/dev/drivers/net/ns820.c: ... here.
- (netsami_drv_id): Renamed to ...
+ * linux/src/drivers/net/ns820.c (netsami_drv_id): Renamed to ...
(ns820_drv_id): ... this. Fix all callers.
- * linux/src/drivers/net/intel-gige.c: Copied to ...
- * linux/dev/drivers/net/intel-gige.c: ... here.
+ * linux/src/drivers/net/intel-gige.c
(skel_netdev_probe): Renamed to ...
(igige_probe): ... this.
* linux/dev/drivers/net/Space.c: Add conditional probes for natsemi,
ns820, winbond840, hamachi, sundance, starfire, myson803 and
intel-gige drivers.
-
-2004-08-05 Arief M. Utama <arief_mulya@yahoo.com>
-
- * linux/dev/drivers/net/eepro100.c (pci_tbl): Add PCI ID's from
- linux-2.4.23.
+ * linux/dev/drivers/net/eepro100.c: Remove obsoleted file.
+ * linux/src/drivers/net/eepro100.c (pci_id_tbl): Add PCI ID's from
+ linux-2.6.14-rc4.
2004-02-29 Alfred M. Szmidt <ams@kemisten.nu>
@@ -133,106 +128,2295 @@ diff -u -r1.4 Makefile.in
vpath %.c $(linuxsrcdir)/src/drivers/net
Index: linux/dev/drivers/net/eepro100.c
===================================================================
-RCS file: /cvsroot/hurd/gnumach/linux/dev/drivers/net/Attic/eepro100.c,v
-retrieving revision 1.1
-diff -u -r1.1 eepro100.c
---- linux/dev/drivers/net/eepro100.c 17 Aug 2001 23:33:35 -0000 1.1
-+++ linux/dev/drivers/net/eepro100.c 20 Aug 2004 10:32:52 -0000
-@@ -358,6 +358,94 @@
- u16 vendor_id, device_id;
- int pci_index;
- } static pci_tbl[] = {
-+ { "Intel PCI EtherExpress Pro100 VE 82801CAM",
-+ PCI_VENDOR_ID_INTEL, 0x1031,
-+ 0
-+ },
-+ { "Intel PCI EtherExpress Pro100 VE 82801CAM",
-+ PCI_VENDOR_ID_INTEL, 0x1032,
-+ 0
-+ },
-+ { "Intel PCI EtherExpress Pro100 VM 82801CAM",
-+ PCI_VENDOR_ID_INTEL, 0x1033,
-+ 0
-+ },
-+ { "Intel PCI EtherExpress Pro100 VM 82801CAM",
-+ PCI_VENDOR_ID_INTEL, 0x1034,
-+ 0
-+ },
-+ { "Intel PCI Phoneline Network Connection 82562EH",
-+ PCI_VENDOR_ID_INTEL, 0x1035,
-+ 0
-+ },
-+ { "Intel PCI Phoneline Network Connection 82562EH",
-+ PCI_VENDOR_ID_INTEL, 0x1036,
-+ 0
-+ },
-+ { "Intel PCI LAN Controller 82801CAM",
-+ PCI_VENDOR_ID_INTEL, 0x1037,
-+ 0
-+ },
-+ { "Intel PCI EtherExpress Pro100 VM/KM 82801CAM",
-+ PCI_VENDOR_ID_INTEL, 0x1038,
-+ 0
-+ },
-+ { "Intel PCI LAN Controller with 82562ET/EZ PHY 82801DB",
-+ PCI_VENDOR_ID_INTEL, 0x1039,
-+ 0
-+ },
-+ { "Intel PCI LAN Controller with 82562ET/EZ (CNR) PHY 82801DB",
-+ PCI_VENDOR_ID_INTEL, 0x103A,
-+ 0
-+ },
-+ { "Intel PCI LAN Controller with 82562EM/EX PHY",
-+ PCI_VENDOR_ID_INTEL, 0x103B,
-+ 0
-+ },
-+ { "Intel PCI LAN Controller with 82562EM/EX (CNR) PHY 82801DB",
-+ PCI_VENDOR_ID_INTEL, 0x103C,
-+ 0
-+ },
-+ { "Intel PCI EtherExpress Pro100 VE 82801DB",
-+ PCI_VENDOR_ID_INTEL, 0x103D,
-+ 0
-+ },
-+ { "Intel PCI EtherExpress Pro100 VM 82801DB",
-+ PCI_VENDOR_ID_INTEL, 0x103E,
-+ 0
-+ },
-+ { "Intel PCI EtherExpress Pro100 VE 82801EB/ER",
-+ PCI_VENDOR_ID_INTEL, 0x1050,
-+ 0
-+ },
-+ { "Intel PCI Fast Ethernet/CardBus Controller 82551QM",
-+ PCI_VENDOR_ID_INTEL, 0x1059,
-+ 0
-+ },
-+ { "Intel PCI EtherExpress Pro100 82865",
-+ PCI_VENDOR_ID_INTEL, 0x1227,
-+ 0
-+ },
-+ { "Intel PCI EtherExpress Pro100 Smart (i960RP/RD)",
-+ PCI_VENDOR_ID_INTEL, 0x1228,
-+ 0
-+ },
-+ { "Intel PCI LAN0 Controller 82801E",
-+ PCI_VENDOR_ID_INTEL, 0x2459,
-+ 0
-+ },
-+ { "Intel PCI LAN1 Controller 82801E",
-+ PCI_VENDOR_ID_INTEL, 0x245D,
-+ 0
-+ },
-+ { "Intel PCI to PCI Bridge EtherExpress Pro100 Server Adapter",
-+ PCI_VENDOR_ID_INTEL, 0x5200,
-+ 0
-+ },
-+ { "Intel PCI EtherExpress Pro100 Server Adapter",
-+ PCI_VENDOR_ID_INTEL, 0x5201,
-+ 0
-+ },
- { "Intel PCI EtherExpress Pro100 82557",
- PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82557,
- 0
+RCS file: linux/dev/drivers/net/eepro100.c
+diff -N linux/dev/drivers/net/eepro100.c
+--- linux/dev/drivers/net/eepro100.c 17 Aug 2001 23:33:35 -0000 1.1
++++ /dev/null 1 Jan 1970 00:00:00 -0000
+@@ -1,2284 +0,0 @@
+-/* drivers/net/eepro100.c: An Intel i82557-559 Ethernet driver for Linux. */
+-/*
+- NOTICE: this version of the driver is supposed to work with 2.2 kernels.
+- Written 1996-1999 by Donald Becker.
+-
+- This software may be used and distributed according to the terms
+- of the GNU Public License, incorporated herein by reference.
+-
+- This driver is for the Intel EtherExpress Pro100 (Speedo3) design.
+- It should work with all i82557/558/559 boards.
+-
+- To use as a module, use the compile-command at the end of the file.
+-
+- The author may be reached as becker@CESDIS.usra.edu, or C/O
+- Center of Excellence in Space Data and Information Sciences
+- Code 930.5, NASA Goddard Space Flight Center, Greenbelt MD 20771
+- For updates see
+- http://cesdis.gsfc.nasa.gov/linux/drivers/eepro100.html
+- For installation instructions
+- http://cesdis.gsfc.nasa.gov/linux/misc/modules.html
+- There is a Majordomo mailing list based at
+- linux-eepro100@cesdis.gsfc.nasa.gov
+-
+- The driver also contains updates by different kernel developers.
+- This driver clone is maintained by Andrey V. Savochkin <saw@saw.sw.com.sg>.
+- Please use this email address and linux-kernel mailing list for bug reports.
+-
+- Modification history:
+- 2000 Mar 24 Dragan Stancevic <visitor@valinux.com>
+- Disabled FC and ER, to avoid lockups when when we get FCP interrupts.
+- 2000 May 27 Andrey Moruga <moruga@sw.com.sg>
+- Code duplication for 82559ER support was removed.
+- Accurate handling of all supported chips was implemented.
+- Some fixes in 2.3 clone of the driver were ported.
+- 2000 May 30 Dragan Stancevic <visitor@valinux.com> and
+- Andrey Moruga <moruga@sw.com.sg>
+- Honor PortReset timing specification.
+- 2000 Jul 25 Dragan Stancevic <visitor@valinux.com>
+- Changed to MMIO, resized FIFOs, resized rings, changed ISR timeout
+- Problem reported by:
+- Marc MERLIN <merlin@valinux.com>
+- 2000 Nov 15 Dragan Stancevic <visitor@valinux.com>
+- Changed command completion time and added debug info as to which
+- CMD timed out. Problem reported by:
+- "Ulrich Windl" <Ulrich.Windl@rz.uni-regensburg.de>
+-*/
+-
+-#define USE_IO
+-static const char *version =
+-"eepro100.c:v1.09j-t 9/29/99 Donald Becker http://cesdis.gsfc.nasa.gov/linux/drivers/eepro100.html\n"
+-"eepro100.c: $Revision: 1.1 $ 2000/05/31 Modified by Andrey V. Savochkin <saw@saw.sw.com.sg> and others\n"
+-"eepro100.c: VA Linux custom, Dragan Stancevic <visitor@valinux.com> 2000/11/15\n";
+-
+-/* A few user-configurable values that apply to all boards.
+- First set is undocumented and spelled per Intel recommendations. */
+-
+-static int congenb = 0; /* Enable congestion control in the DP83840. */
+-static int txfifo = 0; /* Tx FIFO threshold in 4 byte units, 0-15 */
+-static int rxfifo = 0xF; /* Rx FIFO threshold, default 32 bytes. */
+-/* Tx/Rx DMA burst length, 0-127, 0 == no preemption, tx==128 -> disabled. */
+-static int txdmacount = 128;
+-static int rxdmacount = 0;
+-
+-/* Set the copy breakpoint for the copy-only-tiny-buffer Rx method.
+- Lower values use more memory, but are faster. */
+-#if defined(__alpha__) || defined(__sparc__)
+-/* force copying of all packets to avoid unaligned accesses on Alpha */
+-static int rx_copybreak = 1518;
+-#else
+-static int rx_copybreak = 200;
+-#endif
+-
+-/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
+-static int max_interrupt_work = 200;
+-
+-/* Maximum number of multicast addresses to filter (vs. rx-all-multicast) */
+-static int multicast_filter_limit = 64;
+-
+-/* 'options' is used to pass a transceiver override or full-duplex flag
+- e.g. "options=16" for FD, "options=32" for 100mbps-only. */
+-static int full_duplex[] = {-1, -1, -1, -1, -1, -1, -1, -1};
+-static int options[] = {-1, -1, -1, -1, -1, -1, -1, -1};
+-#ifdef MODULE
+-static int debug = -1; /* The debug level */
+-#endif
+-
+-/* A few values that may be tweaked. */
+-/* The ring sizes should be a power of two for efficiency. */
+-#define TX_RING_SIZE 64
+-#define RX_RING_SIZE 64
+-/* How much slots multicast filter setup may take.
+- Do not descrease without changing set_rx_mode() implementaion. */
+-#define TX_MULTICAST_SIZE 2
+-#define TX_MULTICAST_RESERV (TX_MULTICAST_SIZE*2)
+-/* Actual number of TX packets queued, must be
+- <= TX_RING_SIZE-TX_MULTICAST_RESERV. */
+-#define TX_QUEUE_LIMIT (TX_RING_SIZE-TX_MULTICAST_RESERV)
+-/* Hysteresis marking queue as no longer full. */
+-#define TX_QUEUE_UNFULL (TX_QUEUE_LIMIT-4)
+-
+-/* Operational parameters that usually are not changed. */
+-
+-/* Time in jiffies before concluding the transmitter is hung. */
+-#define TX_TIMEOUT (2*HZ)
+-/* Size of an pre-allocated Rx buffer: <Ethernet MTU> + slack.*/
+-#define PKT_BUF_SZ 1536
+-
+-#if !defined(__OPTIMIZE__) || !defined(__KERNEL__)
+-#warning You must compile this file with the correct options!
+-#warning See the last lines of the source file.
+-#error You must compile this driver with "-O".
+-#endif
+-
+-#include <linux/version.h>
+-#include <linux/module.h>
+-#if defined(MODVERSIONS)
+-#include <linux/modversions.h>
+-#endif
+-
+-#include <linux/kernel.h>
+-#include <linux/string.h>
+-#include <linux/timer.h>
+-#include <linux/errno.h>
+-#include <linux/ioport.h>
+-#include <linux/malloc.h>
+-#include <linux/interrupt.h>
+-#include <linux/pci.h>
+-#include <linux/compatmac.h>
+-#include <asm/spinlock.h>
+-#include <asm/processor.h>
+-#include <asm/bitops.h>
+-#include <asm/io.h>
+-/* #include <asm/unaligned.h> */
+-/* #include <asm/byteorder.h> */
+-#define __LITTLE_ENDIAN
+-#include <asm/hardirq.h>
+-
+-#include <linux/netdevice.h>
+-#include <linux/etherdevice.h>
+-#include <linux/skbuff.h>
+-#include <linux/delay.h>
+-
+-#if defined(MODULE) && (LINUX_VERSION_CODE > 0x20115)
+-MODULE_AUTHOR("Maintainer: Andrey V. Savochkin <saw@saw.sw.com.sg>");
+-MODULE_DESCRIPTION("Intel i82557/i82558 PCI EtherExpressPro driver");
+-MODULE_PARM(debug, "i");
+-MODULE_PARM(options, "1-" __MODULE_STRING(8) "i");
+-MODULE_PARM(full_duplex, "1-" __MODULE_STRING(8) "i");
+-MODULE_PARM(congenb, "i");
+-MODULE_PARM(txfifo, "i");
+-MODULE_PARM(rxfifo, "i");
+-MODULE_PARM(txdmacount, "i");
+-MODULE_PARM(rxdmacount, "i");
+-MODULE_PARM(rx_copybreak, "i");
+-MODULE_PARM(max_interrupt_work, "i");
+-MODULE_PARM(multicast_filter_limit, "i");
+-#endif
+-
+-#if (LINUX_VERSION_CODE >= 0x20100)
+-static char kernel_version[] = UTS_RELEASE;
+-#endif
+-
+-#if LINUX_VERSION_CODE < 0x20123
+-#define hard_smp_processor_id() smp_processor_id()
+-#define test_and_set_bit(val, addr) set_bit(val, addr)
+-#define le16_to_cpu(val) (val)
+-#define le32_to_cpu(val) (val)
+-#define cpu_to_le32(val) (val)
+-#define cpu_to_le16(val) (val)
+-#endif
+-#if LINUX_VERSION_CODE <= 0x20139
+-#define net_device_stats enet_statistics
+-#else
+-#define NETSTATS_VER2
+-#endif
+-#if LINUX_VERSION_CODE < 0x20155
+-/* Grrrr, the PCI code changed, but did not consider CardBus... */
+-#include <linux/bios32.h>
+-#define PCI_SUPPORT_VER1
+-#else
+-#define PCI_SUPPORT_VER2
+-#endif
+-#if LINUX_VERSION_CODE < 0x20159
+-#define dev_free_skb(skb) dev_kfree_skb(skb, FREE_WRITE);
+-#else
+-#define dev_free_skb(skb) dev_kfree_skb(skb);
+-#endif
+-#if ! defined(CAP_NET_ADMIN)
+-#define capable(CAP_XXX) (suser())
+-#endif
+-
+-#define RUN_AT(x) (jiffies + (x))
+-/* Condensed bus+endian portability operations. */
+-#define virt_to_le32desc(addr) cpu_to_le32(virt_to_bus(addr))
+-#define le32desc_to_virt(addr) bus_to_virt(le32_to_cpu(addr))
+-
+-#define net_device device
+-#define pci_base_address(p, n) (p)->base_address[n]
+-
+-#define netif_wake_queue(dev) do { \
+- clear_bit(0, (void*)&dev->tbusy); \
+- mark_bh(NET_BH); \
+- } while(0)
+-#define netif_start_queue(dev) clear_bit(0, (void*)&dev->tbusy)
+-#define netif_stop_queue(dev) set_bit(0, (void*)&dev->tbusy)
+-#ifndef PCI_DEVICE_ID_INTEL_82559ER
+-#define PCI_DEVICE_ID_INTEL_82559ER 0x1209
+-#endif
+-#ifndef PCI_DEVICE_ID_INTEL_ID1029
+-#define PCI_DEVICE_ID_INTEL_ID1029 0x1029
+-#endif
+-#ifndef PCI_DEVICE_ID_INTEL_ID1030
+-#define PCI_DEVICE_ID_INTEL_ID1030 0x1030
+-#endif
+-#ifndef PCI_DEVICE_ID_INTEL_ID2449
+-#define PCI_DEVICE_ID_INTEL_ID2449 0x2449
+-#endif
+-
+-/* The total I/O port extent of the board.
+- The registers beyond 0x18 only exist on the i82558. */
+-#define SPEEDO3_TOTAL_SIZE 0x20
+-
+-int speedo_debug = 1;
+-
+-/*
+- Theory of Operation
+-
+-I. Board Compatibility
+-
+-This device driver is designed for the Intel i82557 "Speedo3" chip, Intel's
+-single-chip fast Ethernet controller for PCI, as used on the Intel
+-EtherExpress Pro 100 adapter.
+-
+-II. Board-specific settings
+-
+-PCI bus devices are configured by the system at boot time, so no jumpers
+-need to be set on the board. The system BIOS should be set to assign the
+-PCI INTA signal to an otherwise unused system IRQ line. While it's
+-possible to share PCI interrupt lines, it negatively impacts performance and
+-only recent kernels support it.
+-
+-III. Driver operation
+-
+-IIIA. General
+-The Speedo3 is very similar to other Intel network chips, that is to say
+-"apparently designed on a different planet". This chips retains the complex
+-Rx and Tx descriptors and multiple buffers pointers as previous chips, but
+-also has simplified Tx and Rx buffer modes. This driver uses the "flexible"
+-Tx mode, but in a simplified lower-overhead manner: it associates only a
+-single buffer descriptor with each frame descriptor.
+-
+-Despite the extra space overhead in each receive skbuff, the driver must use
+-the simplified Rx buffer mode to assure that only a single data buffer is
+-associated with each RxFD. The driver implements this by reserving space
+-for the Rx descriptor at the head of each Rx skbuff.
+-
+-The Speedo-3 has receive and command unit base addresses that are added to
+-almost all descriptor pointers. The driver sets these to zero, so that all
+-pointer fields are absolute addresses.
+-
+-The System Control Block (SCB) of some previous Intel chips exists on the
+-chip in both PCI I/O and memory space. This driver uses the I/O space
+-registers, but might switch to memory mapped mode to better support non-x86
+-processors.
+-
+-IIIB. Transmit structure
+-
+-The driver must use the complex Tx command+descriptor mode in order to
+-have a indirect pointer to the skbuff data section. Each Tx command block
+-(TxCB) is associated with two immediately appended Tx Buffer Descriptor
+-(TxBD). A fixed ring of these TxCB+TxBD pairs are kept as part of the
+-speedo_private data structure for each adapter instance.
+-
+-The newer i82558 explicitly supports this structure, and can read the two
+-TxBDs in the same PCI burst as the TxCB.
+-
+-This ring structure is used for all normal transmit packets, but the
+-transmit packet descriptors aren't long enough for most non-Tx commands such
+-as CmdConfigure. This is complicated by the possibility that the chip has
+-already loaded the link address in the previous descriptor. So for these
+-commands we convert the next free descriptor on the ring to a NoOp, and point
+-that descriptor's link to the complex command.
+-
+-An additional complexity of these non-transmit commands are that they may be
+-added asynchronous to the normal transmit queue, so we disable interrupts
+-whenever the Tx descriptor ring is manipulated.
+-
+-A notable aspect of these special configure commands is that they do
+-work with the normal Tx ring entry scavenge method. The Tx ring scavenge
+-is done at interrupt time using the 'dirty_tx' index, and checking for the
+-command-complete bit. While the setup frames may have the NoOp command on the
+-Tx ring marked as complete, but not have completed the setup command, this
+-is not a problem. The tx_ring entry can be still safely reused, as the
+-tx_skbuff[] entry is always empty for config_cmd and mc_setup frames.
+-
+-Commands may have bits set e.g. CmdSuspend in the command word to either
+-suspend or stop the transmit/command unit. This driver always flags the last
+-command with CmdSuspend, erases the CmdSuspend in the previous command, and
+-then issues a CU_RESUME.
+-Note: Watch out for the potential race condition here: imagine
+- erasing the previous suspend
+- the chip processes the previous command
+- the chip processes the final command, and suspends
+- doing the CU_RESUME
+- the chip processes the next-yet-valid post-final-command.
+-So blindly sending a CU_RESUME is only safe if we do it immediately after
+-after erasing the previous CmdSuspend, without the possibility of an
+-intervening delay. Thus the resume command is always within the
+-interrupts-disabled region. This is a timing dependence, but handling this
+-condition in a timing-independent way would considerably complicate the code.
+-
+-Note: In previous generation Intel chips, restarting the command unit was a
+-notoriously slow process. This is presumably no longer true.
+-
+-IIIC. Receive structure
+-
+-Because of the bus-master support on the Speedo3 this driver uses the new
+-SKBUFF_RX_COPYBREAK scheme, rather than a fixed intermediate receive buffer.
+-This scheme allocates full-sized skbuffs as receive buffers. The value
+-SKBUFF_RX_COPYBREAK is used as the copying breakpoint: it is chosen to
+-trade-off the memory wasted by passing the full-sized skbuff to the queue
+-layer for all frames vs. the copying cost of copying a frame to a
+-correctly-sized skbuff.
+-
+-For small frames the copying cost is negligible (esp. considering that we
+-are pre-loading the cache with immediately useful header information), so we
+-allocate a new, minimally-sized skbuff. For large frames the copying cost
+-is non-trivial, and the larger copy might flush the cache of useful data, so
+-we pass up the skbuff the packet was received into.
+-
+-IV. Notes
+-
+-Thanks to Steve Williams of Intel for arranging the non-disclosure agreement
+-that stated that I could disclose the information. But I still resent
+-having to sign an Intel NDA when I'm helping Intel sell their own product!
+-
+-*/
+-
+-/* This table drives the PCI probe routines. */
+-static struct net_device *speedo_found1(struct pci_dev *pdev, int pci_bus,
+- int pci_devfn, long ioaddr,
+- int chip_idx, int card_idx);
+-
+-#ifdef USE_IO
+-#define SPEEDO_IOTYPE PCI_USES_MASTER|PCI_USES_IO|PCI_ADDR1
+-#define SPEEDO_SIZE 32
+-#else
+-#define SPEEDO_IOTYPE PCI_USES_MASTER|PCI_USES_MEM|PCI_ADDR0
+-#define SPEEDO_SIZE 0x1000
+-#endif
+-
+-enum pci_flags_bit {
+- PCI_USES_IO=1, PCI_USES_MEM=2, PCI_USES_MASTER=4,
+- PCI_ADDR0=0x10<<0, PCI_ADDR1=0x10<<1, PCI_ADDR2=0x10<<2, PCI_ADDR3=0x10<<3,
+-};
+-struct pci_id_info {
+- const char *name;
+- u16 vendor_id, device_id;
+- int pci_index;
+-} static pci_tbl[] = {
+- { "Intel PCI EtherExpress Pro100 82557",
+- PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82557,
+- 0
+- },
+- { "Intel PCI EtherExpress Pro100 82559ER",
+- PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82559ER,
+- 0
+- },
+- { "Intel PCI EtherExpress Pro100 ID1029",
+- PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ID1029,
+- 0
+- },
+- { "Intel Corporation 82559 InBusiness 10/100",
+- PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ID1030,
+- 0
+- },
+- { "Intel PCI EtherExpress Pro100 82562EM",
+- PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ID2449,
+- 0
+- },
+- {0,} /* 0 terminated list. */
+-};
+-
+-static inline unsigned int io_inw(unsigned long port)
+-{
+- return inw(port);
+-}
+-static inline void io_outw(unsigned int val, unsigned long port)
+-{
+- outw(val, port);
+-}
+-
+-#ifndef USE_IO
+-#undef inb
+-#undef inw
+-#undef inl
+-#undef outb
+-#undef outw
+-#undef outl
+-#define inb readb
+-#define inw readw
+-#define inl readl
+-#define outb writeb
+-#define outw writew
+-#define outl writel
+-#endif
+-
+-/* How to wait for the command unit to accept a command.
+- Typically this takes 0 ticks. */
+-static inline void wait_for_cmd_done(long cmd_ioaddr)
+-{
+- int wait = 20000;
+- char cmd_reg1, cmd_reg2;
+- do ;
+- while((cmd_reg1 = inb(cmd_ioaddr)) && (--wait >= 0));
+-
+- /* Last chance to change your mind --Dragan*/
+- if (wait < 0){
+- cmd_reg2 = inb(cmd_ioaddr);
+- if(cmd_reg2){
+- printk(KERN_ALERT "eepro100: cmd_wait for(%#2.2x) timedout with(%#2.2x)!\n",
+- cmd_reg1, cmd_reg2);
+-
+- }
+- }
+-
+-}
+-
+-/* Offsets to the various registers.
+- All accesses need not be longword aligned. */
+-enum speedo_offsets {
+- SCBStatus = 0, SCBCmd = 2, /* Rx/Command Unit command and status. */
+- SCBPointer = 4, /* General purpose pointer. */
+- SCBPort = 8, /* Misc. commands and operands. */
+- SCBflash = 12, SCBeeprom = 14, /* EEPROM and flash memory control. */
+- SCBCtrlMDI = 16, /* MDI interface control. */
+- SCBEarlyRx = 20, /* Early receive byte count. */
+-};
+-/* Commands that can be put in a command list entry. */
+-enum commands {
+- CmdNOp = 0, CmdIASetup = 0x10000, CmdConfigure = 0x20000,
+- CmdMulticastList = 0x30000, CmdTx = 0x40000, CmdTDR = 0x50000,
+- CmdDump = 0x60000, CmdDiagnose = 0x70000,
+- CmdSuspend = 0x40000000, /* Suspend after completion. */
+- CmdIntr = 0x20000000, /* Interrupt after completion. */
+- CmdTxFlex = 0x00080000, /* Use "Flexible mode" for CmdTx command. */
+-};
+-/* Clear CmdSuspend (1<<30) avoiding interference with the card access to the
+- status bits. Previous driver versions used separate 16 bit fields for
+- commands and statuses. --SAW
+- */
+-#if defined(__LITTLE_ENDIAN)
+-#define clear_suspend(cmd) ((__u16 *)&(cmd)->cmd_status)[1] &= ~0x4000
+-#elif defined(__BIG_ENDIAN)
+-#define clear_suspend(cmd) ((__u16 *)&(cmd)->cmd_status)[1] &= ~0x0040
+-#else
+-#error Unsupported byteorder
+-#endif
+-
+-enum SCBCmdBits {
+- SCBMaskCmdDone=0x8000, SCBMaskRxDone=0x4000, SCBMaskCmdIdle=0x2000,
+- SCBMaskRxSuspend=0x1000, SCBMaskEarlyRx=0x0800, SCBMaskFlowCtl=0x0400,
+- SCBTriggerIntr=0x0200, SCBMaskAll=0x0100,
+- /* The rest are Rx and Tx commands. */
+- CUStart=0x0010, CUResume=0x0020, CUStatsAddr=0x0040, CUShowStats=0x0050,
+- CUCmdBase=0x0060, /* CU Base address (set to zero) . */
+- CUDumpStats=0x0070, /* Dump then reset stats counters. */
+- RxStart=0x0001, RxResume=0x0002, RxAbort=0x0004, RxAddrLoad=0x0006,
+- RxResumeNoResources=0x0007,
+-};
+-
+-enum SCBPort_cmds {
+- PortReset=0, PortSelfTest=1, PortPartialReset=2, PortDump=3,
+-};
+-
+-/* The Speedo3 Rx and Tx frame/buffer descriptors. */
+-struct descriptor { /* A generic descriptor. */
+- s32 cmd_status; /* All command and status fields. */
+- u32 link; /* struct descriptor * */
+- unsigned char params[0];
+-};
+-
+-/* The Speedo3 Rx and Tx buffer descriptors. */
+-struct RxFD { /* Receive frame descriptor. */
+- s32 status;
+- u32 link; /* struct RxFD * */
+- u32 rx_buf_addr; /* void * */
+- u32 count;
+-};
+-
+-/* Selected elements of the Tx/RxFD.status word. */
+-enum RxFD_bits {
+- RxComplete=0x8000, RxOK=0x2000,
+- RxErrCRC=0x0800, RxErrAlign=0x0400, RxErrTooBig=0x0200, RxErrSymbol=0x0010,
+- RxEth2Type=0x0020, RxNoMatch=0x0004, RxNoIAMatch=0x0002,
+- TxUnderrun=0x1000, StatusComplete=0x8000,
+-};
+-
+-struct TxFD { /* Transmit frame descriptor set. */
+- s32 status;
+- u32 link; /* void * */
+- u32 tx_desc_addr; /* Always points to the tx_buf_addr element. */
+- s32 count; /* # of TBD (=1), Tx start thresh., etc. */
+- /* This constitutes two "TBD" entries -- we only use one. */
+- u32 tx_buf_addr0; /* void *, frame to be transmitted. */
+- s32 tx_buf_size0; /* Length of Tx frame. */
+- u32 tx_buf_addr1; /* void *, frame to be transmitted. */
+- s32 tx_buf_size1; /* Length of Tx frame. */
+-};
+-
+-/* Multicast filter setting block. --SAW */
+-struct speedo_mc_block {
+- struct speedo_mc_block *next;
+- unsigned int tx;
+- struct descriptor frame __attribute__ ((__aligned__(16)));
+-};
+-
+-/* Elements of the dump_statistics block. This block must be lword aligned. */
+-struct speedo_stats {
+- u32 tx_good_frames;
+- u32 tx_coll16_errs;
+- u32 tx_late_colls;
+- u32 tx_underruns;
+- u32 tx_lost_carrier;
+- u32 tx_deferred;
+- u32 tx_one_colls;
+- u32 tx_multi_colls;
+- u32 tx_total_colls;
+- u32 rx_good_frames;
+- u32 rx_crc_errs;
+- u32 rx_align_errs;
+- u32 rx_resource_errs;
+- u32 rx_overrun_errs;
+- u32 rx_colls_errs;
+- u32 rx_runt_errs;
+- u32 done_marker;
+-};
+-
+-enum Rx_ring_state_bits {
+- RrNoMem=1, RrPostponed=2, RrNoResources=4, RrOOMReported=8,
+-};
+-
+-/* Do not change the position (alignment) of the first few elements!
+- The later elements are grouped for cache locality. */
+-struct speedo_private {
+- struct TxFD tx_ring[TX_RING_SIZE]; /* Commands (usually CmdTxPacket). */
+- struct RxFD *rx_ringp[RX_RING_SIZE]; /* Rx descriptor, used as ring. */
+- /* The addresses of a Tx/Rx-in-place packets/buffers. */
+- struct sk_buff* tx_skbuff[TX_RING_SIZE];
+- struct sk_buff* rx_skbuff[RX_RING_SIZE];
+- struct descriptor *last_cmd; /* Last command sent. */
+- unsigned int cur_tx, dirty_tx; /* The ring entries to be free()ed. */
+- spinlock_t lock; /* Group with Tx control cache line. */
+- u32 tx_threshold; /* The value for txdesc.count. */
+- struct RxFD *last_rxf; /* Last command sent. */
+- unsigned int cur_rx, dirty_rx; /* The next free ring entry */
+- long last_rx_time; /* Last Rx, in jiffies, to handle Rx hang. */
+- const char *product_name;
+- struct net_device *next_module;
+- void *priv_addr; /* Unaligned address for kfree */
+- struct enet_statistics stats;
+- struct speedo_stats lstats;
+- int chip_id;
+- unsigned char pci_bus, pci_devfn, acpi_pwr;
+- struct timer_list timer; /* Media selection timer. */
+- struct speedo_mc_block *mc_setup_head;/* Multicast setup frame list head. */
+- struct speedo_mc_block *mc_setup_tail;/* Multicast setup frame list tail. */
+- int in_interrupt; /* Word-aligned dev->interrupt */
+- char rx_mode; /* Current PROMISC/ALLMULTI setting. */
+- unsigned int tx_full:1; /* The Tx queue is full. */
+- unsigned int full_duplex:1; /* Full-duplex operation requested. */
+- unsigned int flow_ctrl:1; /* Use 802.3x flow control. */
+- unsigned int rx_bug:1; /* Work around receiver hang errata. */
+- unsigned int rx_bug10:1; /* Receiver might hang at 10mbps. */
+- unsigned int rx_bug100:1; /* Receiver might hang at 100mbps. */
+- unsigned char default_port:8; /* Last dev->if_port value. */
+- unsigned char rx_ring_state; /* RX ring status flags. */
+- unsigned short phy[2]; /* PHY media interfaces available. */
+- unsigned short advertising; /* Current PHY advertised caps. */
+- unsigned short partner; /* Link partner caps. */
+-};
+-
+-/* The parameters for a CmdConfigure operation.
+- There are so many options that it would be difficult to document each bit.
+- We mostly use the default or recommended settings. */
+-const char i82557_config_cmd[22] = {
+- 22, 0x08, 0, 0, 0, 0, 0x32, 0x03, 1, /* 1=Use MII 0=Use AUI */
+- 0, 0x2E, 0, 0x60, 0,
+- 0xf2, 0x48, 0, 0x40, 0xf2, 0x80, /* 0x40=Force full-duplex */
+- 0x3f, 0x05, };
+-const char i82558_config_cmd[22] = {
+- 22, 0x08, 0, 1, 0, 0, 0x22, 0x03, 1, /* 1=Use MII 0=Use AUI */
+- 0, 0x2E, 0, 0x60, 0x08, 0x88,
+- 0x68, 0, 0x40, 0xf2, 0x84, /* Disable FC */
+- 0x31, 0x05, };
+-
+-/* PHY media interface chips. */
+-static const char *phys[] = {
+- "None", "i82553-A/B", "i82553-C", "i82503",
+- "DP83840", "80c240", "80c24", "i82555",
+- "unknown-8", "unknown-9", "DP83840A", "unknown-11",
+- "unknown-12", "unknown-13", "unknown-14", "unknown-15", };
+-enum phy_chips { NonSuchPhy=0, I82553AB, I82553C, I82503, DP83840, S80C240,
+- S80C24, I82555, DP83840A=10, };
+-static const char is_mii[] = { 0, 1, 1, 0, 1, 1, 0, 1 };
+-#define EE_READ_CMD (6)
+-
+-static int do_eeprom_cmd(long ioaddr, int cmd, int cmd_len);
+-static int mdio_read(long ioaddr, int phy_id, int location);
+-static int mdio_write(long ioaddr, int phy_id, int location, int value);
+-static int speedo_open(struct net_device *dev);
+-static void speedo_resume(struct net_device *dev);
+-static void speedo_timer(unsigned long data);
+-static void speedo_init_rx_ring(struct net_device *dev);
+-static void speedo_tx_timeout(struct net_device *dev);
+-static int speedo_start_xmit(struct sk_buff *skb, struct net_device *dev);
+-static void speedo_refill_rx_buffers(struct net_device *dev, int force);
+-static int speedo_rx(struct net_device *dev);
+-static void speedo_tx_buffer_gc(struct net_device *dev);
+-static void speedo_interrupt(int irq, void *dev_instance, struct pt_regs *regs);
+-static int speedo_close(struct net_device *dev);
+-static struct enet_statistics *speedo_get_stats(struct net_device *dev);
+-static int speedo_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+-static void set_rx_mode(struct net_device *dev);
+-static void speedo_show_state(struct net_device *dev);
+-
+-
+-
+-#ifdef honor_default_port
+-/* Optional driver feature to allow forcing the transceiver setting.
+- Not recommended. */
+-static int mii_ctrl[8] = { 0x3300, 0x3100, 0x0000, 0x0100,
+- 0x2000, 0x2100, 0x0400, 0x3100};
+-#endif
+-
+-/* A list of all installed Speedo devices, for removing the driver module. */
+-static struct net_device *root_speedo_dev = NULL;
+-
+-int eepro100_init(void)
+-{
+- int cards_found = 0;
+- int chip_idx;
+- struct pci_dev *pdev;
+- struct pci_dev rdev; pdev = &rdev;
+-
+- if (! pcibios_present())
+- return cards_found;
+-
+- for (chip_idx = 0; pci_tbl[chip_idx].name; chip_idx++) {
+- for (; pci_tbl[chip_idx].pci_index < 8; pci_tbl[chip_idx].pci_index++) {
+- unsigned char pci_bus, pci_device_fn, pci_latency;
+- unsigned long pciaddr;
+- long ioaddr;
+- int irq;
+-
+- u16 pci_command, new_command;
+-
+- if (pcibios_find_device(pci_tbl[chip_idx].vendor_id,
+- pci_tbl[chip_idx].device_id,
+- pci_tbl[chip_idx].pci_index, &pci_bus,
+- &pci_device_fn))
+- break;
+- {
+-#if defined(PCI_SUPPORT_VER2)
+- pdev = pci_find_slot(pci_bus, pci_device_fn);
+-#ifdef USE_IO
+- pciaddr = pci_base_address(pdev, 1); /* Use [0] to mem-map */
+-#else
+- pciaddr = pci_base_address(pdev, 0);
+-#endif
+- irq = pdev->irq;
+-#else
+- u32 pci_ioaddr;
+- u8 pci_irq_line;
+-#ifdef USE_IO
+- pcibios_read_config_dword(pci_bus, pci_device_fn,
+- PCI_BASE_ADDRESS_1, &pci_ioaddr);
+-#else
+- pcibios_read_config_dword(pci_bus, pci_device_fn,
+- PCI_BASE_ADDRESS_0, &pci_ioaddr);
+-#endif
+- pcibios_read_config_byte(pci_bus, pci_device_fn,
+- PCI_INTERRUPT_LINE, &pci_irq_line);
+- pciaddr = pci_ioaddr;
+- irq = pci_irq_line;
+- pdev->irq = irq;
+-#endif
+- }
+- /* Remove I/O space marker in bit 0. */
+- if (pciaddr & 1) {
+- ioaddr = pciaddr & ~3UL;
+- if (check_region(ioaddr, 32))
+- continue;
+- } else {
+-#ifdef __sparc__
+- /* ioremap is hosed in 2.2.x on Sparc. */
+- ioaddr = pciaddr & ~0xfUL;
+-#else
+- if ((ioaddr = (long)ioremap(pciaddr & ~0xfUL, 0x1000)) == 0) {
+- printk(KERN_INFO "Failed to map PCI address %#lx.\n",
+- pciaddr);
+- continue;
+- }
+-#endif
+- }
+- if (speedo_debug > 2)
+- printk("Found Intel i82557 PCI Speedo at I/O %#lx, IRQ %d.\n",
+- ioaddr, irq);
+-
+- /* Get and check the bus-master and latency values. */
+- pcibios_read_config_word(pci_bus, pci_device_fn,
+- PCI_COMMAND, &pci_command);
+- new_command = pci_command | PCI_COMMAND_MASTER|PCI_COMMAND_IO;
+- if (pci_command != new_command) {
+- printk(KERN_INFO " The PCI BIOS has not enabled this"
+- " device! Updating PCI command %4.4x->%4.4x.\n",
+- pci_command, new_command);
+- pcibios_write_config_word(pci_bus, pci_device_fn,
+- PCI_COMMAND, new_command);
+- }
+- pcibios_read_config_byte(pci_bus, pci_device_fn,
+- PCI_LATENCY_TIMER, &pci_latency);
+- if (pci_latency < 32) {
+- printk(" PCI latency timer (CFLT) is unreasonably low at %d."
+- " Setting to 32 clocks.\n", pci_latency);
+- pcibios_write_config_byte(pci_bus, pci_device_fn,
+- PCI_LATENCY_TIMER, 32);
+- } else if (speedo_debug > 1)
+- printk(" PCI latency timer (CFLT) is %#x.\n", pci_latency);
+-
+- if (speedo_found1(pdev, pci_bus, pci_device_fn, ioaddr, chip_idx, cards_found))
+- cards_found++;
+- }
+- }
+-
+- return cards_found;
+-}
+-
+-static struct net_device *speedo_found1(struct pci_dev *pdev, int pci_bus,
+- int pci_devfn, long ioaddr,
+- int chip_idx, int card_idx)
+-{
+- struct net_device *dev;
+- struct speedo_private *sp;
+- const char *product;
+- int i, option;
+- u16 eeprom[0x100];
+- int acpi_idle_state = 0;
+-#ifndef MODULE
+- static int did_version = 0; /* Already printed version info. */
+- if (speedo_debug > 0 && did_version++ == 0)
+- printk(version);
+-#endif
+-
+- dev = init_etherdev(NULL, sizeof(struct speedo_private));
+-
+- if (dev->mem_start > 0)
+- option = dev->mem_start;
+- else if (card_idx >= 0 && options[card_idx] >= 0)
+- option = options[card_idx];
+- else
+- option = 0;
+-
+- /* Read the station address EEPROM before doing the reset.
+- Nominally his should even be done before accepting the device, but
+- then we wouldn't have a device name with which to report the error.
+- The size test is for 6 bit vs. 8 bit address serial EEPROMs.
+- */
+- {
+- unsigned long iobase;
+- int read_cmd, ee_size;
+- u16 sum;
+- int j;
+-
+- /* Use IO only to avoid postponed writes and satisfy EEPROM timing
+- requirements. */
+-#if defined(PCI_SUPPORT_VER2)
+- iobase = pci_base_address(pdev, 1) & ~3UL;
+-#else
+- {
+- u32 pci_ioaddr;
+- pcibios_read_config_dword(pci_bus, pci_devfn,
+- PCI_BASE_ADDRESS_1, &pci_ioaddr);
+- iobase = pci_ioaddr & ~3UL;
+- }
+-#endif
+- if ((do_eeprom_cmd(iobase, EE_READ_CMD << 24, 27) & 0xffe0000)
+- == 0xffe0000) {
+- ee_size = 0x100;
+- read_cmd = EE_READ_CMD << 24;
+- } else {
+- ee_size = 0x40;
+- read_cmd = EE_READ_CMD << 22;
+- }
+-
+- for (j = 0, i = 0, sum = 0; i < ee_size; i++) {
+- u16 value = do_eeprom_cmd(iobase, read_cmd | (i << 16), 27);
+- eeprom[i] = value;
+- sum += value;
+- if (i < 3) {
+- dev->dev_addr[j++] = value;
+- dev->dev_addr[j++] = value >> 8;
+- }
+- }
+- if (sum != 0xBABA)
+- printk(KERN_WARNING "%s: Invalid EEPROM checksum %#4.4x, "
+- "check settings before activating this device!\n",
+- dev->name, sum);
+- /* Don't unregister_netdev(dev); as the EEPro may actually be
+- usable, especially if the MAC address is set later.
+- On the other hand, it may be unusable if MDI data is corrupted. */
+- }
+-
+- /* Reset the chip: stop Tx and Rx processes and clear counters.
+- This takes less than 10usec and will easily finish before the next
+- action. */
+- outl(PortReset, ioaddr + SCBPort);
+- inl(ioaddr + SCBPort);
+- /* Honor PortReset timing. */
+- udelay(10);
+-
+- if (eeprom[3] & 0x0100)
+- product = "OEM i82557/i82558 10/100 Ethernet";
+- else
+- product = pci_tbl[chip_idx].name;
+-
+- printk(KERN_INFO "%s: %s, ", dev->name, product);
+-
+- for (i = 0; i < 5; i++)
+- printk("%2.2X:", dev->dev_addr[i]);
+- printk("%2.2X, ", dev->dev_addr[i]);
+-#ifdef USE_IO
+- printk("I/O at %#3lx, ", ioaddr);
+-#endif
+- printk("IRQ %d.\n", pdev->irq);
+-
+-#if 1 || defined(kernel_bloat)
+- /* OK, this is pure kernel bloat. I don't like it when other drivers
+- waste non-pageable kernel space to emit similar messages, but I need
+- them for bug reports. */
+- {
+- const char *connectors[] = {" RJ45", " BNC", " AUI", " MII"};
+- /* The self-test results must be paragraph aligned. */
+- s32 str[6], *volatile self_test_results;
+- int boguscnt = 16000; /* Timeout for set-test. */
+- if ((eeprom[3] & 0x03) != 0x03)
+- printk(KERN_INFO " Receiver lock-up bug exists -- enabling"
+- " work-around.\n");
+- printk(KERN_INFO " Board assembly %4.4x%2.2x-%3.3d, Physical"
+- " connectors present:",
+- eeprom[8], eeprom[9]>>8, eeprom[9] & 0xff);
+- for (i = 0; i < 4; i++)
+- if (eeprom[5] & (1<<i))
+- printk(connectors[i]);
+- printk("\n"KERN_INFO" Primary interface chip %s PHY #%d.\n",
+- phys[(eeprom[6]>>8)&15], eeprom[6] & 0x1f);
+- if (eeprom[7] & 0x0700)
+- printk(KERN_INFO " Secondary interface chip %s.\n",
+- phys[(eeprom[7]>>8)&7]);
+- if (((eeprom[6]>>8) & 0x3f) == DP83840
+- || ((eeprom[6]>>8) & 0x3f) == DP83840A) {
+- int mdi_reg23 = mdio_read(ioaddr, eeprom[6] & 0x1f, 23) | 0x0422;
+- if (congenb)
+- mdi_reg23 |= 0x0100;
+- printk(KERN_INFO" DP83840 specific setup, setting register 23 to %4.4x.\n",
+- mdi_reg23);
+- mdio_write(ioaddr, eeprom[6] & 0x1f, 23, mdi_reg23);
+- }
+- if ((option >= 0) && (option & 0x70)) {
+- printk(KERN_INFO " Forcing %dMbs %s-duplex operation.\n",
+- (option & 0x20 ? 100 : 10),
+- (option & 0x10 ? "full" : "half"));
+- mdio_write(ioaddr, eeprom[6] & 0x1f, 0,
+- ((option & 0x20) ? 0x2000 : 0) | /* 100mbps? */
+- ((option & 0x10) ? 0x0100 : 0)); /* Full duplex? */
+- }
+-
+- /* Perform a system self-test. */
+- self_test_results = (s32*) ((((long) str) + 15) & ~0xf);
+- self_test_results[0] = 0;
+- self_test_results[1] = -1;
+- outl(virt_to_bus(self_test_results) | PortSelfTest, ioaddr + SCBPort);
+- do {
+- udelay(10);
+- } while (self_test_results[1] == -1 && --boguscnt >= 0);
+-
+- if (boguscnt < 0) { /* Test optimized out. */
+- printk(KERN_ERR "Self test failed, status %8.8x:\n"
+- KERN_ERR " Failure to initialize the i82557.\n"
+- KERN_ERR " Verify that the card is a bus-master"
+- " capable slot.\n",
+- self_test_results[1]);
+- } else
+- printk(KERN_INFO " General self-test: %s.\n"
+- KERN_INFO " Serial sub-system self-test: %s.\n"
+- KERN_INFO " Internal registers self-test: %s.\n"
+- KERN_INFO " ROM checksum self-test: %s (%#8.8x).\n",
+- self_test_results[1] & 0x1000 ? "failed" : "passed",
+- self_test_results[1] & 0x0020 ? "failed" : "passed",
+- self_test_results[1] & 0x0008 ? "failed" : "passed",
+- self_test_results[1] & 0x0004 ? "failed" : "passed",
+- self_test_results[0]);
+- }
+-#endif /* kernel_bloat */
+-
+- outl(PortReset, ioaddr + SCBPort);
+- inl(ioaddr + SCBPort);
+- /* Honor PortReset timing. */
+- udelay(10);
+-
+- /* We do a request_region() only to register /proc/ioports info. */
+- request_region(ioaddr, SPEEDO3_TOTAL_SIZE, "Intel Speedo3 Ethernet");
+-
+- dev->base_addr = ioaddr;
+- dev->irq = pdev->irq;
+-
+- sp = dev->priv;
+- if (dev->priv == NULL) {
+- void *mem = kmalloc(sizeof(*sp), GFP_KERNEL);
+- dev->priv = sp = mem; /* Cache align here if kmalloc does not. */
+- sp->priv_addr = mem;
+- }
+- memset(sp, 0, sizeof(*sp));
+- sp->next_module = root_speedo_dev;
+- root_speedo_dev = dev;
+-
+- sp->pci_bus = pci_bus;
+- sp->pci_devfn = pci_devfn;
+- sp->chip_id = chip_idx;
+- sp->acpi_pwr = acpi_idle_state;
+-
+- sp->full_duplex = option >= 0 && (option & 0x10) ? 1 : 0;
+- if (card_idx >= 0) {
+- if (full_duplex[card_idx] >= 0)
+- sp->full_duplex = full_duplex[card_idx];
+- }
+- sp->default_port = option >= 0 ? (option & 0x0f) : 0;
+-
+- sp->phy[0] = eeprom[6];
+- sp->phy[1] = eeprom[7];
+- sp->rx_bug = (eeprom[3] & 0x03) == 3 ? 0 : 1;
+-
+- if (sp->rx_bug)
+- printk(KERN_INFO " Receiver lock-up workaround activated.\n");
+-
+- /* The Speedo-specific entries in the device structure. */
+- dev->open = &speedo_open;
+- dev->hard_start_xmit = &speedo_start_xmit;
+-#if defined(HAS_NETIF_QUEUE)
+- dev->tx_timeout = &speedo_tx_timeout;
+- dev->watchdog_timeo = TX_TIMEOUT;
+-#endif
+- dev->stop = &speedo_close;
+- dev->get_stats = &speedo_get_stats;
+- dev->set_multicast_list = &set_rx_mode;
+- dev->do_ioctl = &speedo_ioctl;
+-
+- return dev;
+-}
+-
+-/* Serial EEPROM section.
+- A "bit" grungy, but we work our way through bit-by-bit :->. */
+-/* EEPROM_Ctrl bits. */
+-#define EE_SHIFT_CLK 0x01 /* EEPROM shift clock. */
+-#define EE_CS 0x02 /* EEPROM chip select. */
+-#define EE_DATA_WRITE 0x04 /* EEPROM chip data in. */
+-#define EE_DATA_READ 0x08 /* EEPROM chip data out. */
+-#define EE_ENB (0x4800 | EE_CS)
+-#define EE_WRITE_0 0x4802
+-#define EE_WRITE_1 0x4806
+-#define EE_OFFSET SCBeeprom
+-
+-/* The fixes for the code were kindly provided by Dragan Stancevic
+- <visitor@valinux.com> to strictly follow Intel specifications of EEPROM
+- access timing.
+- The publicly available sheet 64486302 (sec. 3.1) specifies 1us access
+- interval for serial EEPROM. However, it looks like that there is an
+- additional requirement dictating larger udelay's in the code below.
+- 2000/05/24 SAW */
+-static int do_eeprom_cmd(long ioaddr, int cmd, int cmd_len)
+-{
+- unsigned retval = 0;
+- long ee_addr = ioaddr + SCBeeprom;
+-
+- io_outw(EE_ENB, ee_addr); udelay(2);
+- io_outw(EE_ENB | EE_SHIFT_CLK, ee_addr); udelay(2);
+-
+- /* Shift the command bits out. */
+- do {
+- short dataval = (cmd & (1 << cmd_len)) ? EE_WRITE_1 : EE_WRITE_0;
+- io_outw(dataval, ee_addr); udelay(2);
+- io_outw(dataval | EE_SHIFT_CLK, ee_addr); udelay(2);
+- retval = (retval << 1) | ((io_inw(ee_addr) & EE_DATA_READ) ? 1 : 0);
+- } while (--cmd_len >= 0);
+- io_outw(EE_ENB, ee_addr); udelay(2);
+-
+- /* Terminate the EEPROM access. */
+- io_outw(EE_ENB & ~EE_CS, ee_addr);
+- return retval;
+-}
+-
+-static int mdio_read(long ioaddr, int phy_id, int location)
+-{
+- int val, boguscnt = 64*10; /* <64 usec. to complete, typ 27 ticks */
+- outl(0x08000000 | (location<<16) | (phy_id<<21), ioaddr + SCBCtrlMDI);
+- do {
+- val = inl(ioaddr + SCBCtrlMDI);
+- if (--boguscnt < 0) {
+- printk(KERN_ERR " mdio_read() timed out with val = %8.8x.\n", val);
+- break;
+- }
+- } while (! (val & 0x10000000));
+- return val & 0xffff;
+-}
+-
+-static int mdio_write(long ioaddr, int phy_id, int location, int value)
+-{
+- int val, boguscnt = 64*10; /* <64 usec. to complete, typ 27 ticks */
+- outl(0x04000000 | (location<<16) | (phy_id<<21) | value,
+- ioaddr + SCBCtrlMDI);
+- do {
+- val = inl(ioaddr + SCBCtrlMDI);
+- if (--boguscnt < 0) {
+- printk(KERN_ERR" mdio_write() timed out with val = %8.8x.\n", val);
+- break;
+- }
+- } while (! (val & 0x10000000));
+- return val & 0xffff;
+-}
+-
+-
+-static int
+-speedo_open(struct net_device *dev)
+-{
+- struct speedo_private *sp = (struct speedo_private *)dev->priv;
+- long ioaddr = dev->base_addr;
+-
+- if (speedo_debug > 1)
+- printk(KERN_DEBUG "%s: speedo_open() irq %d.\n", dev->name, dev->irq);
+-
+- MOD_INC_USE_COUNT;
+-
+- /* Set up the Tx queue early.. */
+- sp->cur_tx = 0;
+- sp->dirty_tx = 0;
+- sp->last_cmd = 0;
+- sp->tx_full = 0;
+- sp->lock = (spinlock_t) SPIN_LOCK_UNLOCKED;
+- sp->in_interrupt = 0;
+-
+- /* .. we can safely take handler calls during init. */
+- if (request_irq(dev->irq, &speedo_interrupt, SA_SHIRQ, dev->name, dev)) {
+- MOD_DEC_USE_COUNT;
+- return -EAGAIN;
+- }
+-
+- dev->if_port = sp->default_port;
+-
+-#ifdef oh_no_you_dont_unless_you_honour_the_options_passed_in_to_us
+- /* Retrigger negotiation to reset previous errors. */
+- if ((sp->phy[0] & 0x8000) == 0) {
+- int phy_addr = sp->phy[0] & 0x1f ;
+- /* Use 0x3300 for restarting NWay, other values to force xcvr:
+- 0x0000 10-HD
+- 0x0100 10-FD
+- 0x2000 100-HD
+- 0x2100 100-FD
+- */
+-#ifdef honor_default_port
+- mdio_write(ioaddr, phy_addr, 0, mii_ctrl[dev->default_port & 7]);
+-#else
+- mdio_write(ioaddr, phy_addr, 0, 0x3300);
+-#endif
+- }
+-#endif
+-
+- speedo_init_rx_ring(dev);
+-
+- /* Fire up the hardware. */
+- outw(SCBMaskAll, ioaddr + SCBCmd);
+- speedo_resume(dev);
+-
+- dev->interrupt = 0;
+- dev->start = 1;
+- netif_start_queue(dev);
+-
+- /* Setup the chip and configure the multicast list. */
+- sp->mc_setup_head = NULL;
+- sp->mc_setup_tail = NULL;
+- sp->flow_ctrl = sp->partner = 0;
+- sp->rx_mode = -1; /* Invalid -> always reset the mode. */
+- set_rx_mode(dev);
+- if ((sp->phy[0] & 0x8000) == 0)
+- sp->advertising = mdio_read(ioaddr, sp->phy[0] & 0x1f, 4);
+-
+- if (speedo_debug > 2) {
+- printk(KERN_DEBUG "%s: Done speedo_open(), status %8.8x.\n",
+- dev->name, inw(ioaddr + SCBStatus));
+- }
+-
+- /* Set the timer. The timer serves a dual purpose:
+- 1) to monitor the media interface (e.g. link beat) and perhaps switch
+- to an alternate media type
+- 2) to monitor Rx activity, and restart the Rx process if the receiver
+- hangs. */
+- init_timer(&sp->timer);
+- sp->timer.expires = RUN_AT((24*HZ)/10); /* 2.4 sec. */
+- sp->timer.data = (unsigned long)dev;
+- sp->timer.function = &speedo_timer; /* timer handler */
+- add_timer(&sp->timer);
+-
+- /* No need to wait for the command unit to accept here. */
+- if ((sp->phy[0] & 0x8000) == 0)
+- mdio_read(ioaddr, sp->phy[0] & 0x1f, 0);
+-
+- return 0;
+-}
+-
+-/* Start the chip hardware after a full reset. */
+-static void speedo_resume(struct net_device *dev)
+-{
+- struct speedo_private *sp = (struct speedo_private *)dev->priv;
+- long ioaddr = dev->base_addr;
+-
+- /* Start with a Tx threshold of 256 (0x..20.... 8 byte units). */
+- sp->tx_threshold = 0x01208000;
+-
+- /* Set the segment registers to '0'. */
+- wait_for_cmd_done(ioaddr + SCBCmd);
+- outl(0, ioaddr + SCBPointer);
+- /* impose a delay to avoid a bug */
+- inl(ioaddr + SCBPointer);
+- udelay(10);
+- outb(RxAddrLoad, ioaddr + SCBCmd);
+- wait_for_cmd_done(ioaddr + SCBCmd);
+- outb(CUCmdBase, ioaddr + SCBCmd);
+- wait_for_cmd_done(ioaddr + SCBCmd);
+-
+- /* Load the statistics block and rx ring addresses. */
+- outl(virt_to_bus(&sp->lstats), ioaddr + SCBPointer);
+- outb(CUStatsAddr, ioaddr + SCBCmd);
+- sp->lstats.done_marker = 0;
+- wait_for_cmd_done(ioaddr + SCBCmd);
+-
+- if (sp->rx_ringp[sp->cur_rx % RX_RING_SIZE] == NULL) {
+- if (speedo_debug > 2)
+- printk(KERN_DEBUG "%s: NULL cur_rx in speedo_resume().\n",
+- dev->name);
+- } else {
+- outl(virt_to_bus(sp->rx_ringp[sp->cur_rx % RX_RING_SIZE]),
+- ioaddr + SCBPointer);
+- outb(RxStart, ioaddr + SCBCmd);
+- wait_for_cmd_done(ioaddr + SCBCmd);
+- }
+-
+- outb(CUDumpStats, ioaddr + SCBCmd);
+-
+- /* Fill the first command with our physical address. */
+- {
+- struct descriptor *ias_cmd;
+-
+- ias_cmd =
+- (struct descriptor *)&sp->tx_ring[sp->cur_tx++ % TX_RING_SIZE];
+- /* Avoid a bug(?!) here by marking the command already completed. */
+- ias_cmd->cmd_status = cpu_to_le32((CmdSuspend | CmdIASetup) | 0xa000);
+- ias_cmd->link =
+- virt_to_le32desc(&sp->tx_ring[sp->cur_tx % TX_RING_SIZE]);
+- memcpy(ias_cmd->params, dev->dev_addr, 6);
+- sp->last_cmd = ias_cmd;
+- }
+-
+- /* Start the chip's Tx process and unmask interrupts. */
+- wait_for_cmd_done(ioaddr + SCBCmd);
+- outl(virt_to_bus(&sp->tx_ring[sp->dirty_tx % TX_RING_SIZE]),
+- ioaddr + SCBPointer);
+- /* We are not ACK-ing FCP and ER in the interrupt handler yet so they should
+- remain masked --Dragan */
+- outw(CUStart | SCBMaskEarlyRx | SCBMaskFlowCtl, ioaddr + SCBCmd);
+-}
+-
+-/* Media monitoring and control. */
+-static void speedo_timer(unsigned long data)
+-{
+- struct net_device *dev = (struct net_device *)data;
+- struct speedo_private *sp = (struct speedo_private *)dev->priv;
+- long ioaddr = dev->base_addr;
+- int phy_num = sp->phy[0] & 0x1f;
+-
+- /* We have MII and lost link beat. */
+- if ((sp->phy[0] & 0x8000) == 0) {
+- int partner = mdio_read(ioaddr, phy_num, 5);
+- if (partner != sp->partner) {
+- int flow_ctrl = sp->advertising & partner & 0x0400 ? 1 : 0;
+- if (speedo_debug > 2) {
+- printk(KERN_DEBUG "%s: Link status change.\n", dev->name);
+- printk(KERN_DEBUG "%s: Old partner %x, new %x, adv %x.\n",
+- dev->name, sp->partner, partner, sp->advertising);
+- }
+- sp->partner = partner;
+- if (flow_ctrl != sp->flow_ctrl) {
+- sp->flow_ctrl = flow_ctrl;
+- sp->rx_mode = -1; /* Trigger a reload. */
+- }
+- /* Clear sticky bit. */
+- mdio_read(ioaddr, phy_num, 1);
+- /* If link beat has returned... */
+- if (mdio_read(ioaddr, phy_num, 1) & 0x0004)
+- dev->flags |= IFF_RUNNING;
+- else
+- dev->flags &= ~IFF_RUNNING;
+- }
+- }
+- if (speedo_debug > 3) {
+- printk(KERN_DEBUG "%s: Media control tick, status %4.4x.\n",
+- dev->name, inw(ioaddr + SCBStatus));
+- }
+- if (sp->rx_mode < 0 ||
+- (sp->rx_bug && jiffies - sp->last_rx_time > 2*HZ)) {
+- /* We haven't received a packet in a Long Time. We might have been
+- bitten by the receiver hang bug. This can be cleared by sending
+- a set multicast list command. */
+- if (speedo_debug > 2)
+- printk(KERN_DEBUG "%s: Sending a multicast list set command"
+- " from a timer routine.\n", dev->name);
+- set_rx_mode(dev);
+- }
+- /* We must continue to monitor the media. */
+- sp->timer.expires = RUN_AT(2*HZ); /* 2.0 sec. */
+- add_timer(&sp->timer);
+-}
+-
+-static void speedo_show_state(struct net_device *dev)
+-{
+- struct speedo_private *sp = (struct speedo_private *)dev->priv;
+-#if 0
+- long ioaddr = dev->base_addr;
+- int phy_num = sp->phy[0] & 0x1f;
+-#endif
+- int i;
+-
+- /* Print a few items for debugging. */
+- if (speedo_debug > 0) {
+- int i;
+- printk(KERN_DEBUG "%s: Tx ring dump, Tx queue %u / %u:\n", dev->name,
+- sp->cur_tx, sp->dirty_tx);
+- for (i = 0; i < TX_RING_SIZE; i++)
+- printk(KERN_DEBUG "%s: %c%c%2d %8.8x.\n", dev->name,
+- i == sp->dirty_tx % TX_RING_SIZE ? '*' : ' ',
+- i == sp->cur_tx % TX_RING_SIZE ? '=' : ' ',
+- i, sp->tx_ring[i].status);
+- }
+- printk(KERN_DEBUG "%s: Printing Rx ring"
+- " (next to receive into %u, dirty index %u).\n",
+- dev->name, sp->cur_rx, sp->dirty_rx);
+-
+- for (i = 0; i < RX_RING_SIZE; i++)
+- printk(KERN_DEBUG "%s: %c%c%c%2d %8.8x.\n", dev->name,
+- sp->rx_ringp[i] == sp->last_rxf ? 'l' : ' ',
+- i == sp->dirty_rx % RX_RING_SIZE ? '*' : ' ',
+- i == sp->cur_rx % RX_RING_SIZE ? '=' : ' ',
+- i, (sp->rx_ringp[i] != NULL) ?
+- (unsigned)sp->rx_ringp[i]->status : 0);
+-
+-#if 0
+- for (i = 0; i < 16; i++) {
+- /* FIXME: what does it mean? --SAW */
+- if (i == 6) i = 21;
+- printk(KERN_DEBUG "%s: PHY index %d register %d is %4.4x.\n",
+- dev->name, phy_num, i, mdio_read(ioaddr, phy_num, i));
+- }
+-#endif
+-
+-}
+-
+-/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
+-static void
+-speedo_init_rx_ring(struct net_device *dev)
+-{
+- struct speedo_private *sp = (struct speedo_private *)dev->priv;
+- struct RxFD *rxf, *last_rxf = NULL;
+- int i;
+-
+- sp->cur_rx = 0;
+-
+- for (i = 0; i < RX_RING_SIZE; i++) {
+- struct sk_buff *skb;
+- skb = dev_alloc_skb(PKT_BUF_SZ + sizeof(struct RxFD));
+- sp->rx_skbuff[i] = skb;
+- if (skb == NULL)
+- break; /* OK. Just initially short of Rx bufs. */
+- skb->dev = dev; /* Mark as being used by this device. */
+- rxf = (struct RxFD *)skb->tail;
+- sp->rx_ringp[i] = rxf;
+- skb_reserve(skb, sizeof(struct RxFD));
+- if (last_rxf)
+- last_rxf->link = virt_to_le32desc(rxf);
+- last_rxf = rxf;
+- rxf->status = cpu_to_le32(0x00000001); /* '1' is flag value only. */
+- rxf->link = 0; /* None yet. */
+- /* This field unused by i82557. */
+- rxf->rx_buf_addr = 0xffffffff;
+- rxf->count = cpu_to_le32(PKT_BUF_SZ << 16);
+- }
+- sp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
+- /* Mark the last entry as end-of-list. */
+- last_rxf->status = cpu_to_le32(0xC0000002); /* '2' is flag value only. */
+- sp->last_rxf = last_rxf;
+-}
+-
+-static void speedo_purge_tx(struct net_device *dev)
+-{
+- struct speedo_private *sp = (struct speedo_private *)dev->priv;
+- int entry;
+-
+- while ((int)(sp->cur_tx - sp->dirty_tx) > 0) {
+- entry = sp->dirty_tx % TX_RING_SIZE;
+- if (sp->tx_skbuff[entry]) {
+- sp->stats.tx_errors++;
+- dev_free_skb(sp->tx_skbuff[entry]);
+- sp->tx_skbuff[entry] = 0;
+- }
+- sp->dirty_tx++;
+- }
+- while (sp->mc_setup_head != NULL) {
+- struct speedo_mc_block *t;
+- if (speedo_debug > 1)
+- printk(KERN_DEBUG "%s: freeing mc frame.\n", dev->name);
+- t = sp->mc_setup_head->next;
+- kfree(sp->mc_setup_head);
+- sp->mc_setup_head = t;
+- }
+- sp->mc_setup_tail = NULL;
+- sp->tx_full = 0;
+- netif_wake_queue(dev);
+-}
+-
+-static void reset_mii(struct net_device *dev)
+-{
+- struct speedo_private *sp = (struct speedo_private *)dev->priv;
+- long ioaddr = dev->base_addr;
+- /* Reset the MII transceiver, suggested by Fred Young @ scalable.com. */
+- if ((sp->phy[0] & 0x8000) == 0) {
+- int phy_addr = sp->phy[0] & 0x1f;
+- int advertising = mdio_read(ioaddr, phy_addr, 4);
+- int mii_bmcr = mdio_read(ioaddr, phy_addr, 0);
+- mdio_write(ioaddr, phy_addr, 0, 0x0400);
+- mdio_write(ioaddr, phy_addr, 1, 0x0000);
+- mdio_write(ioaddr, phy_addr, 4, 0x0000);
+- mdio_write(ioaddr, phy_addr, 0, 0x8000);
+-#ifdef honor_default_port
+- mdio_write(ioaddr, phy_addr, 0, mii_ctrl[dev->default_port & 7]);
+-#else
+- mdio_read(ioaddr, phy_addr, 0);
+- mdio_write(ioaddr, phy_addr, 0, mii_bmcr);
+- mdio_write(ioaddr, phy_addr, 4, advertising);
+-#endif
+- }
+-}
+-
+-static void speedo_tx_timeout(struct net_device *dev)
+-{
+- struct speedo_private *sp = (struct speedo_private *)dev->priv;
+- long ioaddr = dev->base_addr;
+- int status = inw(ioaddr + SCBStatus);
+- unsigned long flags;
+-
+- printk(KERN_WARNING "%s: Transmit timed out: status %4.4x "
+- " %4.4x at %d/%d command %8.8x.\n",
+- dev->name, status, inw(ioaddr + SCBCmd),
+- sp->dirty_tx, sp->cur_tx,
+- sp->tx_ring[sp->dirty_tx % TX_RING_SIZE].status);
+-
+- /* Trigger a stats dump to give time before the reset. */
+- speedo_get_stats(dev);
+-
+- speedo_show_state(dev);
+-#if 0
+- if ((status & 0x00C0) != 0x0080
+- && (status & 0x003C) == 0x0010) {
+- /* Only the command unit has stopped. */
+- printk(KERN_WARNING "%s: Trying to restart the transmitter...\n",
+- dev->name);
+- outl(virt_to_bus(&sp->tx_ring[sp->dirty_tx % TX_RING_SIZE]),
+- ioaddr + SCBPointer);
+- outw(CUStart, ioaddr + SCBCmd);
+- reset_mii(dev);
+- } else {
+-#else
+- {
+-#endif
+- start_bh_atomic();
+- /* Ensure that timer routine doesn't run! */
+- del_timer(&sp->timer);
+- end_bh_atomic();
+- /* Reset the Tx and Rx units. */
+- outl(PortReset, ioaddr + SCBPort);
+- /* We may get spurious interrupts here. But I don't think that they
+- may do much harm. 1999/12/09 SAW */
+- udelay(10);
+- /* Disable interrupts. */
+- outw(SCBMaskAll, ioaddr + SCBCmd);
+- synchronize_irq();
+- speedo_tx_buffer_gc(dev);
+- /* Free as much as possible.
+- It helps to recover from a hang because of out-of-memory.
+- It also simplifies speedo_resume() in case TX ring is full or
+- close-to-be full. */
+- speedo_purge_tx(dev);
+- speedo_refill_rx_buffers(dev, 1);
+- spin_lock_irqsave(&sp->lock, flags);
+- speedo_resume(dev);
+- sp->rx_mode = -1;
+- dev->trans_start = jiffies;
+- spin_unlock_irqrestore(&sp->lock, flags);
+- set_rx_mode(dev); /* it takes the spinlock itself --SAW */
+- /* Reset MII transceiver. Do it before starting the timer to serialize
+- mdio_xxx operations. Yes, it's a paranoya :-) 2000/05/09 SAW */
+- reset_mii(dev);
+- sp->timer.expires = RUN_AT(2*HZ);
+- add_timer(&sp->timer);
+- }
+- return;
+-}
+-
+-static int
+-speedo_start_xmit(struct sk_buff *skb, struct net_device *dev)
+-{
+- struct speedo_private *sp = (struct speedo_private *)dev->priv;
+- long ioaddr = dev->base_addr;
+- int entry;
+-
+-#if ! defined(HAS_NETIF_QUEUE)
+- if (test_bit(0, (void*)&dev->tbusy) != 0) {
+- int tickssofar = jiffies - dev->trans_start;
+- if (tickssofar < TX_TIMEOUT - 2)
+- return 1;
+- if (tickssofar < TX_TIMEOUT) {
+- /* Reap sent packets from the full Tx queue. */
+- unsigned long flags;
+- /* Take a spinlock to make wait_for_cmd_done and sending the
+- command atomic. --SAW */
+- spin_lock_irqsave(&sp->lock, flags);
+- wait_for_cmd_done(ioaddr + SCBCmd);
+- outw(SCBTriggerIntr, ioaddr + SCBCmd);
+- spin_unlock_irqrestore(&sp->lock, flags);
+- return 1;
+- }
+- speedo_tx_timeout(dev);
+- return 1;
+- }
+-#endif
+-
+- { /* Prevent interrupts from changing the Tx ring from underneath us. */
+- unsigned long flags;
+-
+- spin_lock_irqsave(&sp->lock, flags);
+-
+- /* Check if there are enough space. */
+- if ((int)(sp->cur_tx - sp->dirty_tx) >= TX_QUEUE_LIMIT) {
+- printk(KERN_ERR "%s: incorrect tbusy state, fixed.\n", dev->name);
+- netif_stop_queue(dev);
+- sp->tx_full = 1;
+- spin_unlock_irqrestore(&sp->lock, flags);
+- return 1;
+- }
+-
+- /* Calculate the Tx descriptor entry. */
+- entry = sp->cur_tx++ % TX_RING_SIZE;
+-
+- sp->tx_skbuff[entry] = skb;
+- sp->tx_ring[entry].status =
+- cpu_to_le32(CmdSuspend | CmdTx | CmdTxFlex);
+- if (!(entry & ((TX_RING_SIZE>>2)-1)))
+- sp->tx_ring[entry].status |= cpu_to_le32(CmdIntr);
+- sp->tx_ring[entry].link =
+- virt_to_le32desc(&sp->tx_ring[sp->cur_tx % TX_RING_SIZE]);
+- sp->tx_ring[entry].tx_desc_addr =
+- virt_to_le32desc(&sp->tx_ring[entry].tx_buf_addr0);
+- /* The data region is always in one buffer descriptor. */
+- sp->tx_ring[entry].count = cpu_to_le32(sp->tx_threshold);
+- sp->tx_ring[entry].tx_buf_addr0 = virt_to_le32desc(skb->data);
+- sp->tx_ring[entry].tx_buf_size0 = cpu_to_le32(skb->len);
+- /* Trigger the command unit resume. */
+- wait_for_cmd_done(ioaddr + SCBCmd);
+- clear_suspend(sp->last_cmd);
+- /* We want the time window between clearing suspend flag on the previous
+- command and resuming CU to be as small as possible.
+- Interrupts in between are very undesired. --SAW */
+- outb(CUResume, ioaddr + SCBCmd);
+- sp->last_cmd = (struct descriptor *)&sp->tx_ring[entry];
+-
+- /* Leave room for set_rx_mode(). If there is no more space than reserved
+- for multicast filter mark the ring as full. */
+- if ((int)(sp->cur_tx - sp->dirty_tx) >= TX_QUEUE_LIMIT) {
+- netif_stop_queue(dev);
+- sp->tx_full = 1;
+- }
+-
+- spin_unlock_irqrestore(&sp->lock, flags);
+- }
+-
+- dev->trans_start = jiffies;
+-
+- return 0;
+-}
+-
+-static void speedo_tx_buffer_gc(struct net_device *dev)
+-{
+- unsigned int dirty_tx;
+- struct speedo_private *sp = (struct speedo_private *)dev->priv;
+-
+- dirty_tx = sp->dirty_tx;
+- while ((int)(sp->cur_tx - dirty_tx) > 0) {
+- int entry = dirty_tx % TX_RING_SIZE;
+- int status = le32_to_cpu(sp->tx_ring[entry].status);
+-
+- if (speedo_debug > 5)
+- printk(KERN_DEBUG " scavenge candidate %d status %4.4x.\n",
+- entry, status);
+- if ((status & StatusComplete) == 0)
+- break; /* It still hasn't been processed. */
+- if (status & TxUnderrun)
+- if (sp->tx_threshold < 0x01e08000) {
+- if (speedo_debug > 2)
+- printk(KERN_DEBUG "%s: TX underrun, threshold adjusted.\n",
+- dev->name);
+- sp->tx_threshold += 0x00040000;
+- }
+- /* Free the original skb. */
+- if (sp->tx_skbuff[entry]) {
+- sp->stats.tx_packets++; /* Count only user packets. */
+- /* sp->stats.tx_bytes += sp->tx_skbuff[entry]->len; */
+- dev_free_skb(sp->tx_skbuff[entry]);
+- sp->tx_skbuff[entry] = 0;
+- }
+- dirty_tx++;
+- }
+-
+- if (speedo_debug && (int)(sp->cur_tx - dirty_tx) > TX_RING_SIZE) {
+- printk(KERN_ERR "out-of-sync dirty pointer, %d vs. %d,"
+- " full=%d.\n",
+- dirty_tx, sp->cur_tx, sp->tx_full);
+- dirty_tx += TX_RING_SIZE;
+- }
+-
+- while (sp->mc_setup_head != NULL
+- && (int)(dirty_tx - sp->mc_setup_head->tx - 1) > 0) {
+- struct speedo_mc_block *t;
+- if (speedo_debug > 1)
+- printk(KERN_DEBUG "%s: freeing mc frame.\n", dev->name);
+- t = sp->mc_setup_head->next;
+- kfree(sp->mc_setup_head);
+- sp->mc_setup_head = t;
+- }
+- if (sp->mc_setup_head == NULL)
+- sp->mc_setup_tail = NULL;
+-
+- sp->dirty_tx = dirty_tx;
+-}
+-
+-/* The interrupt handler does all of the Rx thread work and cleans up
+- after the Tx thread. */
+-static void speedo_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
+-{
+- struct net_device *dev = (struct net_device *)dev_instance;
+- struct speedo_private *sp;
+- long ioaddr, boguscnt = max_interrupt_work;
+- unsigned short status;
+-
+-#ifndef final_version
+- if (dev == NULL) {
+- printk(KERN_ERR "speedo_interrupt(): irq %d for unknown device.\n", irq);
+- return;
+- }
+-#endif
+-
+- ioaddr = dev->base_addr;
+- sp = (struct speedo_private *)dev->priv;
+-
+-#ifndef final_version
+- /* A lock to prevent simultaneous entry on SMP machines. */
+- if (test_and_set_bit(0, (void*)&sp->in_interrupt)) {
+- printk(KERN_ERR"%s: SMP simultaneous entry of an interrupt handler.\n",
+- dev->name);
+- sp->in_interrupt = 0; /* Avoid halting machine. */
+- return;
+- }
+- dev->interrupt = 1;
+-#endif
+-
+- do {
+- status = inw(ioaddr + SCBStatus);
+- /* Acknowledge all of the current interrupt sources ASAP. */
+- /* Will change from 0xfc00 to 0xff00 when we start handling
+- FCP and ER interrupts --Dragan */
+- outw(status & 0xfc00, ioaddr + SCBStatus);
+-
+- if (speedo_debug > 3)
+- printk(KERN_DEBUG "%s: interrupt status=%#4.4x.\n",
+- dev->name, status);
+-
+- if ((status & 0xfc00) == 0)
+- break;
+-
+- /* Always check if all rx buffers are allocated. --SAW */
+- speedo_refill_rx_buffers(dev, 0);
+-
+- if ((status & 0x5000) || /* Packet received, or Rx error. */
+- (sp->rx_ring_state&(RrNoMem|RrPostponed)) == RrPostponed)
+- /* Need to gather the postponed packet. */
+- speedo_rx(dev);
+-
+- if (status & 0x1000) {
+- spin_lock(&sp->lock);
+- if ((status & 0x003c) == 0x0028) { /* No more Rx buffers. */
+- struct RxFD *rxf;
+- printk(KERN_WARNING "%s: card reports no RX buffers.\n",
+- dev->name);
+- rxf = sp->rx_ringp[sp->cur_rx % RX_RING_SIZE];
+- if (rxf == NULL) {
+- if (speedo_debug > 2)
+- printk(KERN_DEBUG
+- "%s: NULL cur_rx in speedo_interrupt().\n",
+- dev->name);
+- sp->rx_ring_state |= RrNoMem|RrNoResources;
+- } else if (rxf == sp->last_rxf) {
+- if (speedo_debug > 2)
+- printk(KERN_DEBUG
+- "%s: cur_rx is last in speedo_interrupt().\n",
+- dev->name);
+- sp->rx_ring_state |= RrNoMem|RrNoResources;
+- } else
+- outb(RxResumeNoResources, ioaddr + SCBCmd);
+- } else if ((status & 0x003c) == 0x0008) { /* No resources. */
+- struct RxFD *rxf;
+- printk(KERN_WARNING "%s: card reports no resources.\n",
+- dev->name);
+- rxf = sp->rx_ringp[sp->cur_rx % RX_RING_SIZE];
+- if (rxf == NULL) {
+- if (speedo_debug > 2)
+- printk(KERN_DEBUG
+- "%s: NULL cur_rx in speedo_interrupt().\n",
+- dev->name);
+- sp->rx_ring_state |= RrNoMem|RrNoResources;
+- } else if (rxf == sp->last_rxf) {
+- if (speedo_debug > 2)
+- printk(KERN_DEBUG
+- "%s: cur_rx is last in speedo_interrupt().\n",
+- dev->name);
+- sp->rx_ring_state |= RrNoMem|RrNoResources;
+- } else {
+- /* Restart the receiver. */
+- outl(virt_to_bus(sp->rx_ringp[sp->cur_rx % RX_RING_SIZE]),
+- ioaddr + SCBPointer);
+- outb(RxStart, ioaddr + SCBCmd);
+- }
+- }
+- sp->stats.rx_errors++;
+- spin_unlock(&sp->lock);
+- }
+-
+- if ((sp->rx_ring_state&(RrNoMem|RrNoResources)) == RrNoResources) {
+- printk(KERN_WARNING
+- "%s: restart the receiver after a possible hang.\n",
+- dev->name);
+- spin_lock(&sp->lock);
+- /* Restart the receiver.
+- I'm not sure if it's always right to restart the receiver
+- here but I don't know another way to prevent receiver hangs.
+- 1999/12/25 SAW */
+- outl(virt_to_bus(sp->rx_ringp[sp->cur_rx % RX_RING_SIZE]),
+- ioaddr + SCBPointer);
+- outb(RxStart, ioaddr + SCBCmd);
+- sp->rx_ring_state &= ~RrNoResources;
+- spin_unlock(&sp->lock);
+- }
+-
+- /* User interrupt, Command/Tx unit interrupt or CU not active. */
+- if (status & 0xA400) {
+- spin_lock(&sp->lock);
+- speedo_tx_buffer_gc(dev);
+- if (sp->tx_full
+- && (int)(sp->cur_tx - sp->dirty_tx) < TX_QUEUE_UNFULL) {
+- /* The ring is no longer full. */
+- sp->tx_full = 0;
+- netif_wake_queue(dev); /* Attention: under a spinlock. --SAW */
+- }
+- spin_unlock(&sp->lock);
+- }
+-
+- if (--boguscnt < 0) {
+- printk(KERN_ERR "%s: Too much work at interrupt, status=0x%4.4x.\n",
+- dev->name, status);
+- /* Clear all interrupt sources. */
+- /* Will change from 0xfc00 to 0xff00 when we start handling
+- FCP and ER interrupts --Dragan */
+- outl(0xfc00, ioaddr + SCBStatus);
+- break;
+- }
+- } while (1);
+-
+- if (speedo_debug > 3)
+- printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
+- dev->name, inw(ioaddr + SCBStatus));
+-
+- dev->interrupt = 0;
+- clear_bit(0, (void*)&sp->in_interrupt);
+- return;
+-}
+-
+-static inline struct RxFD *speedo_rx_alloc(struct net_device *dev, int entry)
+-{
+- struct speedo_private *sp = (struct speedo_private *)dev->priv;
+- struct RxFD *rxf;
+- struct sk_buff *skb;
+- /* Get a fresh skbuff to replace the consumed one. */
+- skb = dev_alloc_skb(PKT_BUF_SZ + sizeof(struct RxFD));
+- sp->rx_skbuff[entry] = skb;
+- if (skb == NULL) {
+- sp->rx_ringp[entry] = NULL;
+- return NULL;
+- }
+- rxf = sp->rx_ringp[entry] = (struct RxFD *)skb->tail;
+- skb->dev = dev;
+- skb_reserve(skb, sizeof(struct RxFD));
+- rxf->rx_buf_addr = virt_to_bus(skb->tail);
+- return rxf;
+-}
+-
+-static inline void speedo_rx_link(struct net_device *dev, int entry,
+- struct RxFD *rxf)
+-{
+- struct speedo_private *sp = (struct speedo_private *)dev->priv;
+- rxf->status = cpu_to_le32(0xC0000001); /* '1' for driver use only. */
+- rxf->link = 0; /* None yet. */
+- rxf->count = cpu_to_le32(PKT_BUF_SZ << 16);
+- sp->last_rxf->link = virt_to_le32desc(rxf);
+- sp->last_rxf->status &= cpu_to_le32(~0xC0000000);
+- sp->last_rxf = rxf;
+-}
+-
+-static int speedo_refill_rx_buf(struct net_device *dev, int force)
+-{
+- struct speedo_private *sp = (struct speedo_private *)dev->priv;
+- int entry;
+- struct RxFD *rxf;
+-
+- entry = sp->dirty_rx % RX_RING_SIZE;
+- if (sp->rx_skbuff[entry] == NULL) {
+- rxf = speedo_rx_alloc(dev, entry);
+- if (rxf == NULL) {
+- unsigned int forw;
+- int forw_entry;
+- if (speedo_debug > 2 || !(sp->rx_ring_state & RrOOMReported)) {
+- printk(KERN_WARNING "%s: can't fill rx buffer (force %d)!\n",
+- dev->name, force);
+- speedo_show_state(dev);
+- sp->rx_ring_state |= RrOOMReported;
+- }
+- if (!force)
+- return -1; /* Better luck next time! */
+- /* Borrow an skb from one of next entries. */
+- for (forw = sp->dirty_rx + 1; forw != sp->cur_rx; forw++)
+- if (sp->rx_skbuff[forw % RX_RING_SIZE] != NULL)
+- break;
+- if (forw == sp->cur_rx)
+- return -1;
+- forw_entry = forw % RX_RING_SIZE;
+- sp->rx_skbuff[entry] = sp->rx_skbuff[forw_entry];
+- sp->rx_skbuff[forw_entry] = NULL;
+- rxf = sp->rx_ringp[forw_entry];
+- sp->rx_ringp[forw_entry] = NULL;
+- sp->rx_ringp[entry] = rxf;
+- }
+- } else {
+- rxf = sp->rx_ringp[entry];
+- }
+- speedo_rx_link(dev, entry, rxf);
+- sp->dirty_rx++;
+- sp->rx_ring_state &= ~(RrNoMem|RrOOMReported); /* Mark the progress. */
+- return 0;
+-}
+-
+-static void speedo_refill_rx_buffers(struct net_device *dev, int force)
+-{
+- struct speedo_private *sp = (struct speedo_private *)dev->priv;
+-
+- /* Refill the RX ring. */
+- while ((int)(sp->cur_rx - sp->dirty_rx) > 0 &&
+- speedo_refill_rx_buf(dev, force) != -1);
+-}
+-
+-static int
+-speedo_rx(struct net_device *dev)
+-{
+- struct speedo_private *sp = (struct speedo_private *)dev->priv;
+- int entry = sp->cur_rx % RX_RING_SIZE;
+- int status;
+- int rx_work_limit = sp->dirty_rx + RX_RING_SIZE - sp->cur_rx;
+- int alloc_ok = 1;
+-
+- if (speedo_debug > 4)
+- printk(KERN_DEBUG " In speedo_rx().\n");
+- /* If we own the next entry, it's a new packet. Send it up. */
+- while (sp->rx_ringp[entry] != NULL &&
+- (status = le32_to_cpu(sp->rx_ringp[entry]->status)) & RxComplete) {
+- int pkt_len = le32_to_cpu(sp->rx_ringp[entry]->count) & 0x3fff;
+-
+- if (--rx_work_limit < 0)
+- break;
+-
+- /* Check for a rare out-of-memory case: the current buffer is
+- the last buffer allocated in the RX ring. --SAW */
+- if (sp->last_rxf == sp->rx_ringp[entry]) {
+- /* Postpone the packet. It'll be reaped at an interrupt when this
+- packet is no longer the last packet in the ring. */
+- if (speedo_debug > 2)
+- printk(KERN_DEBUG "%s: RX packet postponed!\n",
+- dev->name);
+- sp->rx_ring_state |= RrPostponed;
+- break;
+- }
+-
+- if (speedo_debug > 4)
+- printk(KERN_DEBUG " speedo_rx() status %8.8x len %d.\n", status,
+- pkt_len);
+- if ((status & (RxErrTooBig|RxOK|0x0f90)) != RxOK) {
+- if (status & RxErrTooBig)
+- printk(KERN_ERR "%s: Ethernet frame overran the Rx buffer, "
+- "status %8.8x!\n", dev->name, status);
+- else if (! (status & RxOK)) {
+- /* There was a fatal error. This *should* be impossible. */
+- sp->stats.rx_errors++;
+- printk(KERN_ERR "%s: Anomalous event in speedo_rx(), "
+- "status %8.8x.\n",
+- dev->name, status);
+- }
+- } else {
+- struct sk_buff *skb;
+-
+- /* Check if the packet is long enough to just accept without
+- copying to a properly sized skbuff. */
+- if (pkt_len < rx_copybreak
+- && (skb = dev_alloc_skb(pkt_len + 2)) != 0) {
+- skb->dev = dev;
+- skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
+- /* 'skb_put()' points to the start of sk_buff data area. */
+-#if !defined(__alpha__)
+- /* Packet is in one chunk -- we can copy + cksum. */
+- eth_copy_and_sum(skb, sp->rx_skbuff[entry]->tail, pkt_len, 0);
+- skb_put(skb, pkt_len);
+-#else
+- memcpy(skb_put(skb, pkt_len), sp->rx_skbuff[entry]->tail,
+- pkt_len);
+-#endif
+- } else {
+- /* Pass up the already-filled skbuff. */
+- skb = sp->rx_skbuff[entry];
+- if (skb == NULL) {
+- printk(KERN_ERR "%s: Inconsistent Rx descriptor chain.\n",
+- dev->name);
+- break;
+- }
+- sp->rx_skbuff[entry] = NULL;
+- skb_put(skb, pkt_len);
+- sp->rx_ringp[entry] = NULL;
+- }
+- skb->protocol = eth_type_trans(skb, dev);
+- netif_rx(skb);
+- sp->stats.rx_packets++;
+- /* sp->stats.rx_bytes += pkt_len; */
+- }
+- entry = (++sp->cur_rx) % RX_RING_SIZE;
+- sp->rx_ring_state &= ~RrPostponed;
+- /* Refill the recently taken buffers.
+- Do it one-by-one to handle traffic bursts better. */
+- if (alloc_ok && speedo_refill_rx_buf(dev, 0) == -1)
+- alloc_ok = 0;
+- }
+-
+- /* Try hard to refill the recently taken buffers. */
+- speedo_refill_rx_buffers(dev, 1);
+-
+- sp->last_rx_time = jiffies;
+-
+- return 0;
+-}
+-
+-static int
+-speedo_close(struct net_device *dev)
+-{
+- long ioaddr = dev->base_addr;
+- struct speedo_private *sp = (struct speedo_private *)dev->priv;
+- int i;
+-
+- dev->start = 0;
+- netif_stop_queue(dev);
+-
+- if (speedo_debug > 1)
+- printk(KERN_DEBUG "%s: Shutting down ethercard, status was %4.4x.\n",
+- dev->name, inw(ioaddr + SCBStatus));
+-
+- /* Shut off the media monitoring timer. */
+- start_bh_atomic();
+- del_timer(&sp->timer);
+- end_bh_atomic();
+-
+- /* Shutting down the chip nicely fails to disable flow control. So.. */
+- outl(PortPartialReset, ioaddr + SCBPort);
+-
+- free_irq(dev->irq, dev);
+-
+- /* Print a few items for debugging. */
+- if (speedo_debug > 3)
+- speedo_show_state(dev);
+-
+- /* Free all the skbuffs in the Rx and Tx queues. */
+- for (i = 0; i < RX_RING_SIZE; i++) {
+- struct sk_buff *skb = sp->rx_skbuff[i];
+- sp->rx_skbuff[i] = 0;
+- /* Clear the Rx descriptors. */
+- if (skb)
+- dev_free_skb(skb);
+- }
+-
+- for (i = 0; i < TX_RING_SIZE; i++) {
+- struct sk_buff *skb = sp->tx_skbuff[i];
+- sp->tx_skbuff[i] = 0;
+- /* Clear the Tx descriptors. */
+- if (skb)
+- dev_free_skb(skb);
+- }
+-
+- /* Free multicast setting blocks. */
+- for (i = 0; sp->mc_setup_head != NULL; i++) {
+- struct speedo_mc_block *t;
+- t = sp->mc_setup_head->next;
+- kfree(sp->mc_setup_head);
+- sp->mc_setup_head = t;
+- }
+- sp->mc_setup_tail = NULL;
+- if (speedo_debug > 0)
+- printk(KERN_DEBUG "%s: %d multicast blocks dropped.\n", dev->name, i);
+-
+- MOD_DEC_USE_COUNT;
+-
+- return 0;
+-}
+-
+-/* The Speedo-3 has an especially awkward and unusable method of getting
+- statistics out of the chip. It takes an unpredictable length of time
+- for the dump-stats command to complete. To avoid a busy-wait loop we
+- update the stats with the previous dump results, and then trigger a
+- new dump.
+-
+- These problems are mitigated by the current /proc implementation, which
+- calls this routine first to judge the output length, and then to emit the
+- output.
+-
+- Oh, and incoming frames are dropped while executing dump-stats!
+- */
+-static struct enet_statistics *
+-speedo_get_stats(struct net_device *dev)
+-{
+- struct speedo_private *sp = (struct speedo_private *)dev->priv;
+- long ioaddr = dev->base_addr;
+-
+- /* Update only if the previous dump finished. */
+- if (sp->lstats.done_marker == le32_to_cpu(0xA007)) {
+- sp->stats.tx_aborted_errors += le32_to_cpu(sp->lstats.tx_coll16_errs);
+- sp->stats.tx_window_errors += le32_to_cpu(sp->lstats.tx_late_colls);
+- sp->stats.tx_fifo_errors += le32_to_cpu(sp->lstats.tx_underruns);
+- sp->stats.tx_fifo_errors += le32_to_cpu(sp->lstats.tx_lost_carrier);
+- /*sp->stats.tx_deferred += le32_to_cpu(sp->lstats.tx_deferred);*/
+- sp->stats.collisions += le32_to_cpu(sp->lstats.tx_total_colls);
+- sp->stats.rx_crc_errors += le32_to_cpu(sp->lstats.rx_crc_errs);
+- sp->stats.rx_frame_errors += le32_to_cpu(sp->lstats.rx_align_errs);
+- sp->stats.rx_over_errors += le32_to_cpu(sp->lstats.rx_resource_errs);
+- sp->stats.rx_fifo_errors += le32_to_cpu(sp->lstats.rx_overrun_errs);
+- sp->stats.rx_length_errors += le32_to_cpu(sp->lstats.rx_runt_errs);
+- sp->lstats.done_marker = 0x0000;
+- if (dev->start) {
+- unsigned long flags;
+- /* Take a spinlock to make wait_for_cmd_done and sending the
+- command atomic. --SAW */
+- spin_lock_irqsave(&sp->lock, flags);
+- wait_for_cmd_done(ioaddr + SCBCmd);
+- outb(CUDumpStats, ioaddr + SCBCmd);
+- spin_unlock_irqrestore(&sp->lock, flags);
+- }
+- }
+- return &sp->stats;
+-}
+-
+-static int speedo_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+-{
+- struct speedo_private *sp = (struct speedo_private *)dev->priv;
+- long ioaddr = dev->base_addr;
+- u16 *data = (u16 *)&rq->ifr_data;
+- int phy = sp->phy[0] & 0x1f;
+-
+- switch(cmd) {
+- case SIOCDEVPRIVATE: /* Get the address of the PHY in use. */
+- data[0] = phy;
+- case SIOCDEVPRIVATE+1: /* Read the specified MII register. */
+- /* FIXME: these operations need to be serialized with MDIO
+- access from the timeout handler.
+- They are currently serialized only with MDIO access from the
+- timer routine. 2000/05/09 SAW */
+- start_bh_atomic();
+- data[3] = mdio_read(ioaddr, data[0], data[1]);
+- end_bh_atomic();
+- return 0;
+- case SIOCDEVPRIVATE+2: /* Write the specified MII register */
+- if (!capable(CAP_NET_ADMIN))
+- return -EPERM;
+- start_bh_atomic();
+- mdio_write(ioaddr, data[0], data[1], data[2]);
+- end_bh_atomic();
+- return 0;
+- default:
+- return -EOPNOTSUPP;
+- }
+-}
+-
+-/* Set or clear the multicast filter for this adaptor.
+- This is very ugly with Intel chips -- we usually have to execute an
+- entire configuration command, plus process a multicast command.
+- This is complicated. We must put a large configuration command and
+- an arbitrarily-sized multicast command in the transmit list.
+- To minimize the disruption -- the previous command might have already
+- loaded the link -- we convert the current command block, normally a Tx
+- command, into a no-op and link it to the new command.
+-*/
+-static void set_rx_mode(struct net_device *dev)
+-{
+- struct speedo_private *sp = (struct speedo_private *)dev->priv;
+- long ioaddr = dev->base_addr;
+- struct descriptor *last_cmd;
+- char new_rx_mode;
+- unsigned long flags;
+- int entry, i;
+-
+- if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
+- new_rx_mode = 3;
+- } else if ((dev->flags & IFF_ALLMULTI) ||
+- dev->mc_count > multicast_filter_limit) {
+- new_rx_mode = 1;
+- } else
+- new_rx_mode = 0;
+-
+- if (speedo_debug > 3)
+- printk(KERN_DEBUG "%s: set_rx_mode %d -> %d\n", dev->name,
+- sp->rx_mode, new_rx_mode);
+-
+- if ((int)(sp->cur_tx - sp->dirty_tx) > TX_RING_SIZE - TX_MULTICAST_SIZE) {
+- /* The Tx ring is full -- don't add anything! Hope the mode will be
+- * set again later. */
+- sp->rx_mode = -1;
+- return;
+- }
+-
+- if (new_rx_mode != sp->rx_mode) {
+- u8 *config_cmd_data;
+-
+- spin_lock_irqsave(&sp->lock, flags);
+- entry = sp->cur_tx++ % TX_RING_SIZE;
+- last_cmd = sp->last_cmd;
+- sp->last_cmd = (struct descriptor *)&sp->tx_ring[entry];
+-
+- sp->tx_skbuff[entry] = 0; /* Redundant. */
+- sp->tx_ring[entry].status = cpu_to_le32(CmdSuspend | CmdConfigure);
+- sp->tx_ring[entry].link =
+- virt_to_le32desc(&sp->tx_ring[(entry + 1) % TX_RING_SIZE]);
+- config_cmd_data = (void *)&sp->tx_ring[entry].tx_desc_addr;
+- /* Construct a full CmdConfig frame. */
+- memcpy(config_cmd_data, i82558_config_cmd, sizeof(i82558_config_cmd));
+- config_cmd_data[1] = (txfifo << 4) | rxfifo;
+- config_cmd_data[4] = rxdmacount;
+- config_cmd_data[5] = txdmacount + 0x80;
+- config_cmd_data[15] |= (new_rx_mode & 2) ? 1 : 0;
+- /* 0x80 doesn't disable FC 0x84 does.
+- Disable Flow control since we are not ACK-ing any FC interrupts
+- for now. --Dragan */
+- config_cmd_data[19] = 0x84;
+- config_cmd_data[19] |= sp->full_duplex ? 0x40 : 0;
+- config_cmd_data[21] = (new_rx_mode & 1) ? 0x0D : 0x05;
+- if (sp->phy[0] & 0x8000) { /* Use the AUI port instead. */
+- config_cmd_data[15] |= 0x80;
+- config_cmd_data[8] = 0;
+- }
+- /* Trigger the command unit resume. */
+- wait_for_cmd_done(ioaddr + SCBCmd);
+- clear_suspend(last_cmd);
+- outb(CUResume, ioaddr + SCBCmd);
+- if ((int)(sp->cur_tx - sp->dirty_tx) >= TX_QUEUE_LIMIT) {
+- netif_stop_queue(dev);
+- sp->tx_full = 1;
+- }
+- spin_unlock_irqrestore(&sp->lock, flags);
+- }
+-
+- if (new_rx_mode == 0 && dev->mc_count < 4) {
+- /* The simple case of 0-3 multicast list entries occurs often, and
+- fits within one tx_ring[] entry. */
+- struct dev_mc_list *mclist;
+- u16 *setup_params, *eaddrs;
+-
+- spin_lock_irqsave(&sp->lock, flags);
+- entry = sp->cur_tx++ % TX_RING_SIZE;
+- last_cmd = sp->last_cmd;
+- sp->last_cmd = (struct descriptor *)&sp->tx_ring[entry];
+-
+- sp->tx_skbuff[entry] = 0;
+- sp->tx_ring[entry].status = cpu_to_le32(CmdSuspend | CmdMulticastList);
+- sp->tx_ring[entry].link =
+- virt_to_le32desc(&sp->tx_ring[(entry + 1) % TX_RING_SIZE]);
+- sp->tx_ring[entry].tx_desc_addr = 0; /* Really MC list count. */
+- setup_params = (u16 *)&sp->tx_ring[entry].tx_desc_addr;
+- *setup_params++ = cpu_to_le16(dev->mc_count*6);
+- /* Fill in the multicast addresses. */
+- for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
+- i++, mclist = mclist->next) {
+- eaddrs = (u16 *)mclist->dmi_addr;
+- *setup_params++ = *eaddrs++;
+- *setup_params++ = *eaddrs++;
+- *setup_params++ = *eaddrs++;
+- }
+-
+- wait_for_cmd_done(ioaddr + SCBCmd);
+- clear_suspend(last_cmd);
+- /* Immediately trigger the command unit resume. */
+- outb(CUResume, ioaddr + SCBCmd);
+-
+- if ((int)(sp->cur_tx - sp->dirty_tx) >= TX_QUEUE_LIMIT) {
+- netif_stop_queue(dev);
+- sp->tx_full = 1;
+- }
+- spin_unlock_irqrestore(&sp->lock, flags);
+- } else if (new_rx_mode == 0) {
+- struct dev_mc_list *mclist;
+- u16 *setup_params, *eaddrs;
+- struct speedo_mc_block *mc_blk;
+- struct descriptor *mc_setup_frm;
+- int i;
+-
+- mc_blk = kmalloc(sizeof(*mc_blk) + 2 + multicast_filter_limit*6,
+- GFP_ATOMIC);
+- if (mc_blk == NULL) {
+- printk(KERN_ERR "%s: Failed to allocate a setup frame.\n",
+- dev->name);
+- sp->rx_mode = -1; /* We failed, try again. */
+- return;
+- }
+- mc_blk->next = NULL;
+- mc_setup_frm = &mc_blk->frame;
+-
+- /* Fill the setup frame. */
+- if (speedo_debug > 1)
+- printk(KERN_DEBUG "%s: Constructing a setup frame at %p.\n",
+- dev->name, mc_setup_frm);
+- mc_setup_frm->cmd_status =
+- cpu_to_le32(CmdSuspend | CmdIntr | CmdMulticastList);
+- /* Link set below. */
+- setup_params = (u16 *)&mc_setup_frm->params;
+- *setup_params++ = cpu_to_le16(dev->mc_count*6);
+- /* Fill in the multicast addresses. */
+- for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
+- i++, mclist = mclist->next) {
+- eaddrs = (u16 *)mclist->dmi_addr;
+- *setup_params++ = *eaddrs++;
+- *setup_params++ = *eaddrs++;
+- *setup_params++ = *eaddrs++;
+- }
+-
+- /* Disable interrupts while playing with the Tx Cmd list. */
+- spin_lock_irqsave(&sp->lock, flags);
+-
+- if (sp->mc_setup_tail)
+- sp->mc_setup_tail->next = mc_blk;
+- else
+- sp->mc_setup_head = mc_blk;
+- sp->mc_setup_tail = mc_blk;
+- mc_blk->tx = sp->cur_tx;
+-
+- entry = sp->cur_tx++ % TX_RING_SIZE;
+- last_cmd = sp->last_cmd;
+- sp->last_cmd = mc_setup_frm;
+-
+- /* Change the command to a NoOp, pointing to the CmdMulti command. */
+- sp->tx_skbuff[entry] = 0;
+- sp->tx_ring[entry].status = cpu_to_le32(CmdNOp);
+- sp->tx_ring[entry].link = virt_to_le32desc(mc_setup_frm);
+-
+- /* Set the link in the setup frame. */
+- mc_setup_frm->link =
+- virt_to_le32desc(&(sp->tx_ring[(entry+1) % TX_RING_SIZE]));
+-
+- wait_for_cmd_done(ioaddr + SCBCmd);
+- clear_suspend(last_cmd);
+- /* Immediately trigger the command unit resume. */
+- outb(CUResume, ioaddr + SCBCmd);
+-
+- if ((int)(sp->cur_tx - sp->dirty_tx) >= TX_QUEUE_LIMIT) {
+- netif_stop_queue(dev);
+- sp->tx_full = 1;
+- }
+- spin_unlock_irqrestore(&sp->lock, flags);
+-
+- if (speedo_debug > 5)
+- printk(" CmdMCSetup frame length %d in entry %d.\n",
+- dev->mc_count, entry);
+- }
+-
+- sp->rx_mode = new_rx_mode;
+-}
+-
+-#ifdef MODULE
+-
+-int init_module(void)
+-{
+- int cards_found;
+-
+- if (debug >= 0 && speedo_debug != debug)
+- printk(KERN_INFO "eepro100.c: Debug level is %d.\n", debug);
+- if (debug >= 0)
+- speedo_debug = debug;
+- /* Always emit the version message. */
+- if (speedo_debug)
+- printk(KERN_INFO "%s", version);
+-
+- cards_found = eepro100_init();
+- if (cards_found <= 0) {
+- printk(KERN_INFO "eepro100: No cards found, driver not installed.\n");
+- return -ENODEV;
+- }
+- return 0;
+-}
+-
+-void
+-cleanup_module(void)
+-{
+- struct net_device *next_dev;
+-
+- /* No need to check MOD_IN_USE, as sys_delete_module() checks. */
+- while (root_speedo_dev) {
+- struct speedo_private *sp = (void *)root_speedo_dev->priv;
+- unregister_netdev(root_speedo_dev);
+- release_region(root_speedo_dev->base_addr, SPEEDO3_TOTAL_SIZE);
+-#ifndef USE_IO
+- iounmap((char *)root_speedo_dev->base_addr);
+-#endif
+- next_dev = sp->next_module;
+- if (sp->priv_addr)
+- kfree(sp->priv_addr);
+- kfree(root_speedo_dev);
+- root_speedo_dev = next_dev;
+- }
+-}
+-
+-#else /* not MODULE */
+-
+-int eepro100_probe(void)
+-{
+- int cards_found = 0;
+-
+- cards_found = eepro100_init();
+-
+- if (speedo_debug > 0 && cards_found)
+- printk(version);
+-
+- return cards_found ? 0 : -ENODEV;
+-}
+-#endif /* MODULE */
+-
+-/*
+- * Local variables:
+- * compile-command: "gcc -DMODULE -D__KERNEL__ -I/usr/src/linux/net/inet -Wall -Wstrict-prototypes -O6 -c eepro100.c `[ -f /usr/include/linux/modversions.h ] && echo -DMODVERSIONS`"
+- * SMP-compile-command: "gcc -D__SMP__ -DMODULE -D__KERNEL__ -I/usr/src/linux/net/inet -Wall -Wstrict-prototypes -O6 -c eepro100.c `[ -f /usr/include/linux/modversions.h ] && echo -DMODVERSIONS`"
+- * c-indent-level: 4
+- * c-basic-offset: 4
+- * tab-width: 4
+- * End:
+- */
Index: linux/dev/include/linux/modversions.h
===================================================================
RCS file: linux/dev/include/linux/modversions.h
@@ -3597,8 +5781,8 @@ Index: linux/src/drivers/net/eepro100.c
RCS file: /cvsroot/hurd/gnumach/linux/src/drivers/net/Attic/eepro100.c,v
retrieving revision 1.2
diff -u -r1.2 eepro100.c
---- linux/src/drivers/net/eepro100.c 18 Aug 2001 00:56:42 -0000 1.2
-+++ linux/src/drivers/net/eepro100.c 20 Aug 2004 10:32:53 -0000
+--- linux/src/drivers/net/eepro100.c 18 Aug 2001 00:56:42 -0000 1.2
++++ linux/src/drivers/net/eepro100.c 10 Nov 2005 00:43:18 -0000
@@ -1,146 +1,166 @@
/* drivers/net/eepro100.c: An Intel i82557-559 Ethernet driver for Linux. */
/*
@@ -3999,7 +6183,7 @@ diff -u -r1.2 eepro100.c
#define SPEEDO_IOTYPE PCI_USES_MASTER|PCI_USES_IO|PCI_ADDR1
#define SPEEDO_SIZE 32
#else
-@@ -312,48 +320,48 @@
+@@ -312,48 +320,70 @@
#define SPEEDO_SIZE 0x1000
#endif
@@ -4034,6 +6218,10 @@ diff -u -r1.2 eepro100.c
- },
- {0,} /* 0 terminated list. */
+struct pci_id_info static pci_id_tbl[] = {
++ {"Intel PCI EtherExpress Pro100 82865", { 0x12278086, 0xffffffff,},
++ SPEEDO_IOTYPE, SPEEDO_SIZE, 0, },
++ {"Intel PCI EtherExpress Pro100 Smart (i960RP/RD)",
++ { 0x12288086, 0xffffffff,}, SPEEDO_IOTYPE, SPEEDO_SIZE, 0, },
+ {"Intel i82559 rev 8", { 0x12298086, ~0, 0,0, 8,0xff},
+ SPEEDO_IOTYPE, SPEEDO_SIZE, HasChksum, },
+ {"Intel PCI EtherExpress Pro100", { 0x12298086, 0xffffffff,},
@@ -4046,8 +6234,20 @@ diff -u -r1.2 eepro100.c
+ SPEEDO_IOTYPE, SPEEDO_SIZE, 0, },
+ {"Intel Pro/100 V Network", { 0x24498086, 0xffffffff,},
+ SPEEDO_IOTYPE, SPEEDO_SIZE, 0, },
++ {"Intel PCI LAN0 Controller 82801E", { 0x24598086, 0xffffffff,},
++ SPEEDO_IOTYPE, SPEEDO_SIZE, 0, },
++ {"Intel PCI LAN1 Controller 82801E", { 0x245D8086, 0xffffffff,},
++ SPEEDO_IOTYPE, SPEEDO_SIZE, 0, },
+ {"Intel Pro/100 VE (type 1031)", { 0x10318086, 0xffffffff,},
+ SPEEDO_IOTYPE, SPEEDO_SIZE, 0, },
++ {"Intel Pro/100 VE (type 1032)", { 0x10328086, 0xffffffff,},
++ SPEEDO_IOTYPE, SPEEDO_SIZE, 0, },
++ {"Intel Pro/100 VE (type 1033)", { 0x10338086, 0xffffffff,},
++ SPEEDO_IOTYPE, SPEEDO_SIZE, 0, },
++ {"Intel Pro/100 VE (type 1034)", { 0x10348086, 0xffffffff,},
++ SPEEDO_IOTYPE, SPEEDO_SIZE, 0, },
++ {"Intel Pro/100 VE (type 1035)", { 0x10358086, 0xffffffff,},
++ SPEEDO_IOTYPE, SPEEDO_SIZE, 0, },
+ {"Intel Pro/100 VM (type 1038)", { 0x10388086, 0xffffffff,},
+ SPEEDO_IOTYPE, SPEEDO_SIZE, 0, },
+ {"Intel Pro/100 VM (type 1039)", { 0x10398086, 0xffffffff,},
@@ -4061,8 +6261,14 @@ diff -u -r1.2 eepro100.c
+ SPEEDO_IOTYPE, SPEEDO_SIZE, 0, },
+ {"Intel Pro/100 VE (type 103D)", { 0x103d8086, 0xffffffff,},
+ SPEEDO_IOTYPE, SPEEDO_SIZE, 0, },
++ {"Intel Pro/100 VE (type 103E)", { 0x103e8086, 0xffffffff,},
++ SPEEDO_IOTYPE, SPEEDO_SIZE, 0, },
+ {"Intel EtherExpress Pro/100 865G Northbridge type 1051",
+ { 0x10518086, 0xffffffff,}, SPEEDO_IOTYPE, SPEEDO_SIZE, 0, },
++ {"Intel PCI to PCI Bridge EtherExpress Pro100 Server Adapter",
++ { 0x52008086, 0xffffffff,}, SPEEDO_IOTYPE, SPEEDO_SIZE, 0, },
++ {"Intel PCI EtherExpress Pro100 Server Adapter",
++ { 0x52018086, 0xffffffff,}, SPEEDO_IOTYPE, SPEEDO_SIZE, 0, },
+ {"Intel Pro/100 VM (unknown type series 1030)",
+ { 0x10308086, 0xfff0ffff,}, SPEEDO_IOTYPE, SPEEDO_SIZE, 0, },
+ {"Intel Pro/100 (unknown type series 1050)",
@@ -4087,7 +6293,7 @@ diff -u -r1.2 eepro100.c
#undef inb
#undef inw
#undef inl
-@@ -368,27 +376,6 @@
+@@ -368,27 +398,6 @@
#define outl writel
#endif
@@ -4115,7 +6321,7 @@ diff -u -r1.2 eepro100.c
/* Offsets to the various registers.
All accesses need not be longword aligned. */
enum speedo_offsets {
-@@ -408,28 +395,36 @@
+@@ -408,28 +417,36 @@
CmdIntr = 0x20000000, /* Interrupt after completion. */
CmdTxFlex = 0x00080000, /* Use "Flexible mode" for CmdTx command. */
};
@@ -4170,7 +6376,7 @@ diff -u -r1.2 eepro100.c
};
enum SCBPort_cmds {
-@@ -437,9 +432,9 @@
+@@ -437,9 +454,9 @@
};
/* The Speedo3 Rx and Tx frame/buffer descriptors. */
@@ -4183,7 +6389,7 @@ diff -u -r1.2 eepro100.c
unsigned char params[0];
};
-@@ -464,18 +459,11 @@
+@@ -464,18 +481,11 @@
u32 link; /* void * */
u32 tx_desc_addr; /* Always points to the tx_buf_addr element. */
s32 count; /* # of TBD (=1), Tx start thresh., etc. */
@@ -4205,7 +6411,7 @@ diff -u -r1.2 eepro100.c
};
/* Elements of the dump_statistics block. This block must be lword aligned. */
-@@ -499,48 +487,70 @@
+@@ -499,48 +509,70 @@
u32 done_marker;
};
@@ -4291,7 +6497,7 @@ diff -u -r1.2 eepro100.c
};
/* The parameters for a CmdConfigure operation.
-@@ -554,10 +564,10 @@
+@@ -554,10 +586,10 @@
const char i82558_config_cmd[22] = {
22, 0x08, 0, 1, 0, 0, 0x22, 0x03, 1, /* 1=Use MII 0=Use AUI */
0, 0x2E, 0, 0x60, 0x08, 0x88,
@@ -4304,7 +6510,7 @@ diff -u -r1.2 eepro100.c
static const char *phys[] = {
"None", "i82553-A/B", "i82553-C", "i82503",
"DP83840", "80c240", "80c24", "i82555",
-@@ -566,10 +576,12 @@
+@@ -566,10 +598,12 @@
enum phy_chips { NonSuchPhy=0, I82553AB, I82553C, I82503, DP83840, S80C240,
S80C24, I82555, DP83840A=10, };
static const char is_mii[] = { 0, 1, 1, 0, 1, 1, 0, 1 };
@@ -4318,7 +6524,7 @@ diff -u -r1.2 eepro100.c
static int mdio_write(long ioaddr, int phy_id, int location, int value);
static int speedo_open(struct net_device *dev);
static void speedo_resume(struct net_device *dev);
-@@ -577,15 +589,12 @@
+@@ -577,15 +611,12 @@
static void speedo_init_rx_ring(struct net_device *dev);
static void speedo_tx_timeout(struct net_device *dev);
static int speedo_start_xmit(struct sk_buff *skb, struct net_device *dev);
@@ -4335,7 +6541,7 @@ diff -u -r1.2 eepro100.c
-@@ -599,112 +608,28 @@
+@@ -599,112 +630,28 @@
/* A list of all installed Speedo devices, for removing the driver module. */
static struct net_device *root_speedo_dev = NULL;
@@ -4457,7 +6663,7 @@ diff -u -r1.2 eepro100.c
/* Read the station address EEPROM before doing the reset.
   Nominally this should even be done before accepting the device, but
-@@ -712,15 +637,11 @@
+@@ -712,15 +659,11 @@
The size test is for 6 bit vs. 8 bit address serial EEPROMs.
*/
{
@@ -4476,7 +6682,7 @@ diff -u -r1.2 eepro100.c
== 0xffe0000) {
ee_size = 0x100;
read_cmd = EE_READ_CMD << 24;
-@@ -729,8 +650,8 @@
+@@ -729,8 +672,8 @@
read_cmd = EE_READ_CMD << 22;
}
@@ -4487,7 +6693,7 @@ diff -u -r1.2 eepro100.c
eeprom[i] = value;
sum += value;
if (i < 3) {
-@@ -743,45 +664,41 @@
+@@ -743,45 +686,41 @@
"check settings before activating this device!\n",
dev->name, sum);
/* Don't unregister_netdev(dev); as the EEPro may actually be
@@ -4550,7 +6756,7 @@ diff -u -r1.2 eepro100.c
printk(KERN_INFO " Board assembly %4.4x%2.2x-%3.3d, Physical"
" connectors present:",
eeprom[8], eeprom[9]>>8, eeprom[9] & 0xff);
-@@ -795,24 +712,42 @@
+@@ -795,24 +734,42 @@
phys[(eeprom[7]>>8)&7]);
if (((eeprom[6]>>8) & 0x3f) == DP83840
|| ((eeprom[6]>>8) & 0x3f) == DP83840A) {
@@ -4601,7 +6807,7 @@ diff -u -r1.2 eepro100.c
self_test_results[0] = 0;
self_test_results[1] = -1;
outl(virt_to_bus(self_test_results) | PortSelfTest, ioaddr + SCBPort);
-@@ -840,37 +775,36 @@
+@@ -840,37 +797,36 @@
#endif /* kernel_bloat */
outl(PortReset, ioaddr + SCBPort);
@@ -4654,7 +6860,7 @@ diff -u -r1.2 eepro100.c
sp->phy[0] = eeprom[6];
sp->phy[1] = eeprom[7];
-@@ -882,10 +816,6 @@
+@@ -882,10 +838,6 @@
/* The Speedo-specific entries in the device structure. */
dev->open = &speedo_open;
dev->hard_start_xmit = &speedo_start_xmit;
@@ -4665,7 +6871,7 @@ diff -u -r1.2 eepro100.c
dev->stop = &speedo_close;
dev->get_stats = &speedo_get_stats;
dev->set_multicast_list = &set_rx_mode;
-@@ -893,6 +823,50 @@
+@@ -893,6 +845,50 @@
return dev;
}
@@ -4716,7 +6922,7 @@ diff -u -r1.2 eepro100.c
/* Serial EEPROM section.
A "bit" grungy, but we work our way through bit-by-bit :->. */
-@@ -906,43 +880,48 @@
+@@ -906,43 +902,48 @@
#define EE_WRITE_1 0x4806
#define EE_OFFSET SCBeeprom
@@ -4781,7 +6987,7 @@ diff -u -r1.2 eepro100.c
break;
}
} while (! (val & 0x10000000));
-@@ -971,10 +950,11 @@
+@@ -971,10 +972,11 @@
struct speedo_private *sp = (struct speedo_private *)dev->priv;
long ioaddr = dev->base_addr;
@@ -4796,7 +7002,7 @@ diff -u -r1.2 eepro100.c
/* Set up the Tx queue early.. */
sp->cur_tx = 0;
-@@ -982,19 +962,16 @@
+@@ -982,19 +984,16 @@
sp->last_cmd = 0;
sp->tx_full = 0;
sp->lock = (spinlock_t) SPIN_LOCK_UNLOCKED;
@@ -4823,7 +7029,7 @@ diff -u -r1.2 eepro100.c
int phy_addr = sp->phy[0] & 0x1f ;
/* Use 0x3300 for restarting NWay, other values to force xcvr:
0x0000 10-HD
-@@ -1008,31 +985,31 @@
+@@ -1008,31 +1007,31 @@
mdio_write(ioaddr, phy_addr, 0, 0x3300);
#endif
}
@@ -4869,7 +7075,7 @@ diff -u -r1.2 eepro100.c
/* Set the timer. The timer serves a dual purpose:
1) to monitor the media interface (e.g. link beat) and perhaps switch
-@@ -1040,15 +1017,14 @@
+@@ -1040,15 +1039,14 @@
2) to monitor Rx activity, and restart the Rx process if the receiver
hangs. */
init_timer(&sp->timer);
@@ -4887,7 +7093,7 @@ diff -u -r1.2 eepro100.c
return 0;
}
-@@ -1058,60 +1034,57 @@
+@@ -1058,60 +1056,57 @@
struct speedo_private *sp = (struct speedo_private *)dev->priv;
long ioaddr = dev->base_addr;
@@ -4977,7 +7183,7 @@ diff -u -r1.2 eepro100.c
}
/* Media monitoring and control. */
-@@ -1121,90 +1094,116 @@
+@@ -1121,90 +1116,116 @@
struct speedo_private *sp = (struct speedo_private *)dev->priv;
long ioaddr = dev->base_addr;
int phy_num = sp->phy[0] & 0x1f;
@@ -5134,7 +7340,7 @@ diff -u -r1.2 eepro100.c
}
-@@ -1217,10 +1216,18 @@
+@@ -1217,10 +1238,18 @@
int i;
sp->cur_rx = 0;
@@ -5154,7 +7360,7 @@ diff -u -r1.2 eepro100.c
sp->rx_skbuff[i] = skb;
if (skb == NULL)
break; /* OK. Just initially short of Rx bufs. */
-@@ -1233,9 +1240,13 @@
+@@ -1233,9 +1262,13 @@
last_rxf = rxf;
rxf->status = cpu_to_le32(0x00000001); /* '1' is flag value only. */
rxf->link = 0; /* None yet. */
@@ -5170,7 +7376,7 @@ diff -u -r1.2 eepro100.c
}
sp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
/* Mark the last entry as end-of-list. */
-@@ -1243,121 +1254,86 @@
+@@ -1243,121 +1276,86 @@
sp->last_rxf = last_rxf;
}
@@ -5343,7 +7549,7 @@ diff -u -r1.2 eepro100.c
static int
speedo_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
-@@ -1365,154 +1341,82 @@
+@@ -1365,154 +1363,82 @@
long ioaddr = dev->base_addr;
int entry;
@@ -5528,7 +7734,7 @@ diff -u -r1.2 eepro100.c
#ifndef final_version
/* A lock to prevent simultaneous entry on SMP machines. */
if (test_and_set_bit(0, (void*)&sp->in_interrupt)) {
-@@ -1521,211 +1425,108 @@
+@@ -1521,211 +1447,108 @@
sp->in_interrupt = 0; /* Avoid halting machine. */
return;
}
@@ -5806,7 +8012,7 @@ diff -u -r1.2 eepro100.c
static int
speedo_rx(struct net_device *dev)
{
-@@ -1733,63 +1534,48 @@
+@@ -1733,63 +1556,48 @@
int entry = sp->cur_rx % RX_RING_SIZE;
int status;
int rx_work_limit = sp->dirty_rx + RX_RING_SIZE - sp->cur_rx;
@@ -5881,7 +8087,7 @@ diff -u -r1.2 eepro100.c
/* Pass up the already-filled skbuff. */
skb = sp->rx_skbuff[entry];
if (skb == NULL) {
-@@ -1798,27 +1584,64 @@
+@@ -1798,27 +1606,64 @@
break;
}
sp->rx_skbuff[entry] = NULL;
@@ -5955,7 +8161,7 @@ diff -u -r1.2 eepro100.c
return 0;
}
-@@ -1829,34 +1652,33 @@
+@@ -1829,34 +1674,33 @@
struct speedo_private *sp = (struct speedo_private *)dev->priv;
int i;
@@ -6002,7 +8208,7 @@ diff -u -r1.2 eepro100.c
}
for (i = 0; i < TX_RING_SIZE; i++) {
-@@ -1866,18 +1688,17 @@
+@@ -1866,18 +1710,17 @@
if (skb)
dev_free_skb(skb);
}
@@ -6030,7 +8236,7 @@ diff -u -r1.2 eepro100.c
MOD_DEC_USE_COUNT;
return 0;
-@@ -1895,8 +1716,7 @@
+@@ -1895,8 +1738,7 @@
Oh, and incoming frames are dropped while executing dump-stats!
*/
@@ -6040,7 +8246,7 @@ diff -u -r1.2 eepro100.c
{
struct speedo_private *sp = (struct speedo_private *)dev->priv;
long ioaddr = dev->base_addr;
-@@ -1915,14 +1735,9 @@
+@@ -1915,14 +1757,9 @@
sp->stats.rx_fifo_errors += le32_to_cpu(sp->lstats.rx_overrun_errs);
sp->stats.rx_length_errors += le32_to_cpu(sp->lstats.rx_runt_errs);
sp->lstats.done_marker = 0x0000;
@@ -6057,7 +8263,7 @@ diff -u -r1.2 eepro100.c
}
}
return &sp->stats;
-@@ -1933,26 +1748,68 @@
+@@ -1933,26 +1770,68 @@
struct speedo_private *sp = (struct speedo_private *)dev->priv;
long ioaddr = dev->base_addr;
u16 *data = (u16 *)&rq->ifr_data;
@@ -6139,7 +8345,7 @@ diff -u -r1.2 eepro100.c
return 0;
default:
return -EOPNOTSUPP;
-@@ -1978,21 +1835,18 @@
+@@ -1978,21 +1857,18 @@
int entry, i;
if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
@@ -6169,7 +8375,7 @@ diff -u -r1.2 eepro100.c
return;
}
-@@ -2000,40 +1854,41 @@
+@@ -2000,40 +1876,41 @@
u8 *config_cmd_data;
spin_lock_irqsave(&sp->lock, flags);
@@ -6223,7 +8429,7 @@ diff -u -r1.2 eepro100.c
}
if (new_rx_mode == 0 && dev->mc_count < 4) {
-@@ -2043,14 +1898,16 @@
+@@ -2043,14 +1920,16 @@
u16 *setup_params, *eaddrs;
spin_lock_irqsave(&sp->lock, flags);
@@ -6241,7 +8447,7 @@ diff -u -r1.2 eepro100.c
sp->tx_ring[entry].tx_desc_addr = 0; /* Really MC list count. */
setup_params = (u16 *)&sp->tx_ring[entry].tx_desc_addr;
*setup_params++ = cpu_to_le16(dev->mc_count*6);
-@@ -2063,38 +1920,45 @@
+@@ -2063,38 +1942,45 @@
*setup_params++ = *eaddrs++;
}
@@ -6307,7 +8513,7 @@ diff -u -r1.2 eepro100.c
mc_setup_frm->cmd_status =
cpu_to_le32(CmdSuspend | CmdIntr | CmdMulticastList);
/* Link set below. */
-@@ -2111,81 +1975,125 @@
+@@ -2111,81 +1997,125 @@
/* Disable interrupts while playing with the Tx Cmd list. */
spin_lock_irqsave(&sp->lock, flags);
@@ -6469,7 +8675,7 @@ diff -u -r1.2 eepro100.c
next_dev = sp->next_module;
if (sp->priv_addr)
kfree(sp->priv_addr);
-@@ -2194,25 +2102,30 @@
+@@ -2194,25 +2124,30 @@
}
}
@@ -9950,8 +12156,8 @@ Index: linux/src/drivers/net/intel-gige.c
RCS file: linux/src/drivers/net/intel-gige.c
diff -N linux/src/drivers/net/intel-gige.c
--- /dev/null 1 Jan 1970 00:00:00 -0000
-+++ linux/src/drivers/net/intel-gige.c 20 Aug 2004 10:32:53 -0000
-@@ -0,0 +1,1451 @@
++++ linux/src/drivers/net/intel-gige.c 10 Nov 2005 00:43:19 -0000
+@@ -0,0 +1,1450 @@
+/* intel-gige.c: A Linux device driver for Intel Gigabit Ethernet adapters. */
+/*
+ Written 2000-2002 by Donald Becker.
@@ -10412,8 +12618,7 @@ diff -N linux/src/drivers/net/intel-gige.c
+static struct net_device *root_net_dev = NULL;
+
+#ifndef MODULE
-+/* You *must* rename this! */
-+int skel_netdev_probe(struct net_device *dev)
++int igige_probe(struct net_device *dev)
+{
+ if (pci_drv_register(&igige_drv_id, dev) < 0)
+ return -ENODEV;
@@ -15604,7 +17809,7 @@ Index: linux/src/drivers/net/ns820.c
RCS file: linux/src/drivers/net/ns820.c
diff -N linux/src/drivers/net/ns820.c
--- /dev/null 1 Jan 1970 00:00:00 -0000
-+++ linux/src/drivers/net/ns820.c 20 Aug 2004 10:32:54 -0000
++++ linux/src/drivers/net/ns820.c 10 Nov 2005 00:43:21 -0000
@@ -0,0 +1,1547 @@
+/* ns820.c: A Linux Gigabit Ethernet driver for the NatSemi DP83820 series. */
+/*
@@ -15897,7 +18102,7 @@ diff -N linux/src/drivers/net/ns820.c
+ {0,}, /* 0 terminated list. */
+};
+
-+struct drv_id_info natsemi_drv_id = {
++struct drv_id_info ns820_drv_id = {
+ "ns820", PCI_HOTSWAP, PCI_CLASS_NETWORK_ETHERNET<<8, pci_id_tbl,
+ ns820_probe1, power_event };
+
@@ -16044,7 +18249,7 @@ diff -N linux/src/drivers/net/ns820.c
+#ifndef MODULE
+int ns820_probe(struct net_device *dev)
+{
-+ if (pci_drv_register(&natsemi_drv_id, dev) < 0)
++ if (pci_drv_register(&ns820_drv_id, dev) < 0)
+ return -ENODEV;
+ printk(KERN_INFO "%s" KERN_INFO "%s", version1, version2);
+ return 0;
@@ -17114,7 +19319,7 @@ diff -N linux/src/drivers/net/ns820.c
+ register_driver(&etherdev_ops);
+ return 0;
+#else
-+ return pci_drv_register(&natsemi_drv_id, NULL);
++ return pci_drv_register(&ns820_drv_id, NULL);
+#endif
+}
+
@@ -17125,7 +19330,7 @@ diff -N linux/src/drivers/net/ns820.c
+#ifdef CARDBUS
+ unregister_driver(&etherdev_ops);
+#else
-+ pci_drv_unregister(&natsemi_drv_id);
++ pci_drv_unregister(&ns820_drv_id);
+#endif
+
+ /* No need to check MOD_IN_USE, as sys_delete_module() checks. */
@@ -34681,3011 +36886,6 @@ diff -u -r1.2 yellowfin.c
* c-indent-level: 4
* c-basic-offset: 4
* tab-width: 4
-diff -Naur linux/dev/drivers/net/intel-gige.c linux/dev/drivers/net/intel-gige.c
---- linux/dev/drivers/net/intel-gige.c 1970-01-01 01:00:00.000000000 +0100
-+++ linux/dev/drivers/net/intel-gige.c 2004-10-25 06:20:37.000000000 +0200
-@@ -0,0 +1,1450 @@
-+/* intel-gige.c: A Linux device driver for Intel Gigabit Ethernet adapters. */
-+/*
-+ Written 2000-2002 by Donald Becker.
-+ Copyright Scyld Computing Corporation.
-+
-+ This software may be used and distributed according to the terms of
-+ the GNU General Public License (GPL), incorporated herein by reference.
-+ You should have received a copy of the GPL with this file.
-+ Drivers based on or derived from this code fall under the GPL and must
-+ retain the authorship, copyright and license notice. This file is not
-+ a complete program and may only be used when the entire operating
-+ system is licensed under the GPL.
-+
-+ The author may be reached as becker@scyld.com, or C/O
-+ Scyld Computing Corporation
-+ 410 Severn Ave., Suite 210
-+ Annapolis MD 21403
-+
-+ Support information and updates available at
-+ http://www.scyld.com/network/ethernet.html
-+*/
-+
-+/* These identify the driver base version and may not be removed. */
-+static const char version1[] =
-+"intel-gige.c:v0.14 11/17/2002 Written by Donald Becker <becker@scyld.com>\n";
-+static const char version2[] =
-+" http://www.scyld.com/network/ethernet.html\n";
-+
-+/* Automatically extracted configuration info:
-+probe-func: igige_probe
-+config-in: tristate 'Intel PCI Gigabit Ethernet support' CONFIG_IGIGE
-+
-+c-help-name: Intel PCI Gigabit Ethernet support
-+c-help-symbol: CONFIG_IGIGE
-+c-help: This driver is for the Intel PCI Gigabit Ethernet
-+c-help: adapter series.
-+c-help: More specific information and updates are available from
-+c-help: http://www.scyld.com/network/drivers.html
-+*/
-+
-+/* The user-configurable values.
-+ These may be modified when a driver module is loaded.*/
-+
-+/* Message enable level: 0..31 = no..all messages. See NETIF_MSG docs. */
-+static int debug = 2;
-+
-+/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
-+static int max_interrupt_work = 20;
-+
-+/* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
-+ This chip has a 16 element perfect filter, and an unusual 4096 bit
-+ hash filter based directly on address bits, not the Ethernet CRC.
-+ It is costly to recalculate a large, frequently changing table.
-+ However, even a large table may be useful in some nearly-static environments.
-+*/
-+static int multicast_filter_limit = 15;
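
The comment above describes a 4096-bit hash table indexed directly by address bits rather than by the Ethernet CRC. The fragment below is only a sketch of that idea: the 12-bit window taken from the top of the address, the 128 x 32-bit table layout, and the helper name are assumptions made for illustration, not taken from this driver or from chip documentation (it also assumes the kernel's u32 type).

	/* Hypothetical sketch: set one bit of a 4096-bit (128 x 32-bit)
	 * multicast table, indexing straight from destination-address bits
	 * instead of a CRC.  The exact bit window is an assumption. */
	static void example_set_mc_bit(u32 mta[128], const unsigned char *addr)
	{
		/* Take 12 bits from the top of the 48-bit Ethernet address. */
		unsigned int index = ((addr[5] << 4) | (addr[4] >> 4)) & 0x0fff;

		mta[index >> 5] |= 1U << (index & 31);
	}
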
-+
-+/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
-+ Setting to > 1518 effectively disables this feature. */
-+static int rx_copybreak = 0;
-+
-+/* Used to pass the media type, etc.
-+ The media type is passed in 'options[]'. The full_duplex[] table only
-+ allows the duplex to be forced on, implicitly disabling autonegotiation.
-+ Setting the entry to zero still allows a link to autonegotiate to full
-+ duplex.
-+*/
-+#define MAX_UNITS 8 /* More are supported, limit only on options */
-+static int options[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
-+static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
-+
-+/* The delay before announcing a Rx or Tx has completed. */
-+static int rx_intr_holdoff = 0;
-+static int tx_intr_holdoff = 128;
-+
-+/* Operational parameters that are set at compile time. */
-+
-+/* Keep the ring sizes a power of two to avoid divides.
-+ The compiler will convert <unsigned>'%'<2^N> into a bit mask.
-+ Making the Tx ring too large decreases the effectiveness of channel
-+ bonding and packet priority.
-+ There are no ill effects from too-large receive rings. */
-+#if ! defined(final_version) /* Stress the driver. */
-+#define TX_RING_SIZE 8
-+#define TX_QUEUE_LEN 5
-+#define RX_RING_SIZE 4
-+#else
-+#define TX_RING_SIZE 16
-+#define TX_QUEUE_LEN 10 /* Limit ring entries actually used. */
-+#define RX_RING_SIZE 32
-+#endif
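
A quick illustration of the power-of-two remark above (this helper is not part of the driver): with the ring sizes fixed at a power of two, index expressions such as sp->cur_tx % TX_RING_SIZE need no division at all.

	/* Example only: modulo by a power-of-two ring size reduces to a mask,
	 * so 'cur % 16' compiles to 'cur & 15' -- no divide instruction. */
	#define EXAMPLE_RING_SIZE 16		/* must be a power of two */

	static unsigned int example_ring_index(unsigned int cur)
	{
		return cur % EXAMPLE_RING_SIZE;	/* i.e. cur & (EXAMPLE_RING_SIZE - 1) */
	}
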
-+
-+/* Operational parameters that usually are not changed. */
-+/* Time in jiffies before concluding the transmitter is hung. */
-+#define TX_TIMEOUT (6*HZ)
-+
-+/* Allocation size of Rx buffers with normal sized Ethernet frames.
-+ Do not change this value without good reason. This is not a limit,
-+ but a way to keep a consistent allocation size among drivers.
-+ */
-+#define PKT_BUF_SZ 1536
-+
-+#ifndef __KERNEL__
-+#define __KERNEL__
-+#endif
-+#if !defined(__OPTIMIZE__)
-+#warning You must compile this file with the correct options!
-+#warning See the last lines of the source file.
-+#error You must compile this driver with "-O".
-+#endif
-+
-+/* Include files, designed to support most kernel versions 2.0.0 and later. */
-+#include <linux/config.h>
-+#if defined(CONFIG_SMP) && ! defined(__SMP__)
-+#define __SMP__
-+#endif
-+#if defined(MODULE) && defined(CONFIG_MODVERSIONS) && ! defined(MODVERSIONS)
-+#define MODVERSIONS
-+#endif
-+
-+#include <linux/version.h>
-+#if defined(MODVERSIONS)
-+#include <linux/modversions.h>
-+#endif
-+#include <linux/module.h>
-+
-+#include <linux/kernel.h>
-+#include <linux/string.h>
-+#include <linux/timer.h>
-+#include <linux/errno.h>
-+#include <linux/ioport.h>
-+#if LINUX_VERSION_CODE >= 0x20400
-+#include <linux/slab.h>
-+#else
-+#include <linux/malloc.h>
-+#endif
-+#include <linux/interrupt.h>
-+#include <linux/pci.h>
-+#include <linux/netdevice.h>
-+#include <linux/etherdevice.h>
-+#include <linux/skbuff.h>
-+#include <asm/processor.h> /* Processor type for cache alignment. */
-+#include <asm/bitops.h>
-+#include <asm/io.h>
-+
-+#ifdef INLINE_PCISCAN
-+#include "k_compat.h"
-+#else
-+#include "pci-scan.h"
-+#include "kern_compat.h"
-+#endif
-+
-+/* Condensed operations for readability. */
-+#define virt_to_le32desc(addr) cpu_to_le32(virt_to_bus(addr))
-+#define le32desc_to_virt(addr) bus_to_virt(le32_to_cpu(addr))
-+
-+#if (LINUX_VERSION_CODE >= 0x20100) && defined(MODULE)
-+char kernel_version[] = UTS_RELEASE;
-+#endif
-+
-+MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
-+MODULE_DESCRIPTION("Intel Gigabit Ethernet driver");
-+MODULE_LICENSE("GPL");
-+MODULE_PARM(debug, "i");
-+MODULE_PARM(options, "1-" __MODULE_STRING(MAX_UNITS) "i");
-+MODULE_PARM(rx_copybreak, "i");
-+MODULE_PARM(full_duplex, "1-" __MODULE_STRING(MAX_UNITS) "i");
-+MODULE_PARM(multicast_filter_limit, "i");
-+MODULE_PARM(max_interrupt_work, "i");
-+MODULE_PARM_DESC(debug, "Driver message level (0-31)");
-+MODULE_PARM_DESC(options, "Force transceiver type or fixed speed+duplex");
-+MODULE_PARM_DESC(max_interrupt_work,
-+ "Driver maximum events handled per interrupt");
-+MODULE_PARM_DESC(full_duplex,
-+ "Non-zero to set forced full duplex (deprecated).");
-+MODULE_PARM_DESC(rx_copybreak,
-+ "Breakpoint in bytes for copy-only-tiny-frames");
-+MODULE_PARM_DESC(multicast_filter_limit,
-+ "Multicast addresses before switching to Rx-all-multicast");
-+
-+/*
-+ Theory of Operation
-+
-+I. Board Compatibility
-+
-+This driver is for the Intel Gigabit Ethernet adapter.
-+
-+II. Board-specific settings
-+
-+III. Driver operation
-+
-+IIIa. Descriptor Rings
-+
-+This driver uses two statically allocated fixed-size descriptor arrays
-+treated as rings by the hardware. The ring sizes are set at compile time
-+by RX/TX_RING_SIZE.
-+
-+IIIb/c. Transmit/Receive Structure
-+
-+This driver uses a zero-copy receive and transmit scheme.
-+The driver allocates full frame size skbuffs for the Rx ring buffers at
-+open() time and passes the skb->data field to the chip as receive data
-+buffers. When an incoming frame is less than RX_COPYBREAK bytes long,
-+a fresh skbuff is allocated and the frame is copied to the new skbuff.
-+When the incoming frame is larger, the skbuff is passed directly up the
-+protocol stack. Buffers consumed this way are replaced by newly allocated
-+skbuffs in a later phase of receives.
-+
-+The RX_COPYBREAK value is chosen to trade-off the memory wasted by
-+using a full-sized skbuff for small frames vs. the copying costs of larger
-+frames. New boards are typically used in generously configured machines
-+and the underfilled buffers have negligible impact compared to the benefit of
-+a single allocation size, so the default value of zero results in never
-+copying packets. When copying is done, the cost is usually mitigated by using
-+a combined copy/checksum routine. Copying also preloads the cache, which is
-+most useful with small frames.
-+
-+A subtle aspect of the operation is that the IP header at offset 14 in an
-+ethernet frame isn't longword aligned for further processing.
-+When unaligned buffers are permitted by the hardware (and always on copies)
-+frames are put into the skbuff at an offset of "+2", 16-byte aligning
-+the IP header.
-+
-+IIId. Synchronization
-+
-+The driver runs as two independent, single-threaded flows of control.
-+One is the send-packet routine which is single-threaded by the queue
-+layer. The other thread is the interrupt handler, which is single
-+threaded by the hardware and interrupt handling software.
-+
-+The send packet thread has partial control over the Tx ring. At the
-+start of a transmit attempt netif_pause_tx_queue(dev) is called. If the
-+transmit attempt fills the Tx queue controlled by the chip, the driver
-+informs the software queue layer by not calling
-+netif_unpause_tx_queue(dev) on exit.
-+
-+The interrupt handler has exclusive control over the Rx ring and records stats
-+from the Tx ring. After reaping the stats, it marks the Tx queue entry as
-+empty by incrementing the dirty_tx mark. Iff the 'lp->tx_full' flag is set, it
-+clears both the tx_full and tbusy flags.
-+
-+IIId. SMP semantics
-+
-+The following are serialized with respect to each other via the "xmit_lock".
-+ dev->hard_start_xmit() Transmit a packet
-+ dev->tx_timeout() Transmit watchdog for stuck Tx
-+ dev->set_multicast_list() Set the receive filter.
-+Note: The Tx timeout watchdog code is implemented by the timer routine in
-+kernels up to 2.2.*. In 2.4.* and later the timeout code is part of the
-+driver interface.
-+
-+The following fall under the global kernel lock. The module will not be
-+unloaded during the call, unless a call with a potential reschedule e.g.
-+kmalloc() is called. No other synchronization assertion is made.
-+ dev->open()
-+ dev->do_ioctl()
-+ dev->get_stats()
-+Caution: The lock for dev->open() is commonly broken with request_irq() or
-+kmalloc(). It is best to avoid any lock-breaking call in do_ioctl() and
-+get_stats(), or additional module locking code must be implemented.
-+
-+The following is self-serialized (no simultaneous entry)
-+ A handler registered with request_irq().
-+
-+IV. Notes
-+
-+IVb. References
-+
-+Intel has also released a Linux driver for this product, "e1000".
-+
-+IVc. Errata
-+
-+*/
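
The copy-only-tiny-frames policy in section IIIb/c above comes down to a single length test at receive time. The sketch below restates that test using the calls that appear elsewhere in this patch (dev_alloc_skb, skb_reserve, eth_copy_and_sum, skb_put); the helper name and its exact interface are illustrative, not part of this driver.

	/* Sketch of the rx_copybreak decision described above.  Returns the
	 * skbuff to hand to netif_rx(); when the return value is the ring
	 * buffer itself, the caller must refill that ring slot with a fresh
	 * skbuff afterwards. */
	static struct sk_buff *example_rx_copybreak(struct net_device *dev,
						    struct sk_buff *ring_skb,
						    int pkt_len, int copybreak)
	{
		struct sk_buff *skb;

		if (pkt_len < copybreak
			&& (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
			skb->dev = dev;
			skb_reserve(skb, 2);	/* 16-byte align the IP header */
			eth_copy_and_sum(skb, ring_skb->tail, pkt_len, 0);
			skb_put(skb, pkt_len);
			return skb;		/* ring_skb stays in the Rx ring */
		}
		/* Large frame: pass the ring buffer itself up the stack. */
		skb_put(ring_skb, pkt_len);
		return ring_skb;
	}
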
-+
-+
-+
-+static void *igige_probe1(struct pci_dev *pdev, void *init_dev,
-+ long ioaddr, int irq, int chip_idx, int find_cnt);
-+static int netdev_pwr_event(void *dev_instance, int event);
-+enum chip_capability_flags { CanHaveMII=1, };
-+#define PCI_IOTYPE ()
-+
-+static struct pci_id_info pci_id_tbl[] = {
-+ {"Intel Gigabit Ethernet adapter", {0x10008086, 0xffffffff, },
-+ PCI_USES_MASTER | PCI_USES_MEM | PCI_ADDR0, 0x1ffff, 0},
-+ {0,}, /* 0 terminated list. */
-+};
-+
-+struct drv_id_info igige_drv_id = {
-+ "intel-gige", PCI_HOTSWAP, PCI_CLASS_NETWORK_ETHERNET<<8, pci_id_tbl,
-+ igige_probe1, netdev_pwr_event };
-+
-+/* This hardware only has a PCI memory space BAR, not I/O space. */
-+#ifdef USE_IO_OPS
-+#error This driver only works with PCI memory space access.
-+#endif
-+
-+/* Offsets to the device registers.
-+*/
-+enum register_offsets {
-+ ChipCtrl=0x00, ChipStatus=0x08, EECtrl=0x10,
-+ FlowCtrlAddrLo=0x028, FlowCtrlAddrHi=0x02c, FlowCtrlType=0x030,
-+ VLANetherType=0x38,
-+
-+ RxAddrCAM=0x040,
-+ IntrStatus=0x0C0, /* Interrupt, Clear on Read, AKA ICR */
-+ IntrEnable=0x0D0, /* Set enable mask when '1' AKA IMS */
-+ IntrDisable=0x0D8, /* Clear enable mask when '1' */
-+
-+ RxControl=0x100,
-+ RxQ0IntrDelay=0x108, /* Rx list #0 interrupt delay timer. */
-+ RxRingPtr=0x110, /* Rx Desc. list #0 base address, 64bits */
-+ RxRingLen=0x118, /* Num bytes of Rx descriptors in ring. */
-+ RxDescHead=0x120,
-+ RxDescTail=0x128,
-+
-+ RxQ1IntrDelay=0x130, /* Rx list #1 interrupt delay timer. */
-+ RxRing1Ptr=0x138, /* Rx Desc. list #1 base address, 64bits */
-+ RxRing1Len=0x140, /* Num bytes of Rx descriptors in ring. */
-+ RxDesc1Head=0x148,
-+ RxDesc1Tail=0x150,
-+
-+ FlowCtrlTimer=0x170, FlowCtrlThrshHi=0x160, FlowCtrlThrshLo=0x168,
-+ TxConfigReg=0x178,
-+ RxConfigReg=0x180,
-+ MulticastArray=0x200,
-+
-+ TxControl=0x400,
-+ TxQState=0x408, /* 64 bit queue state */
-+ TxIPG=0x410, /* Inter-Packet Gap */
-+ TxRingPtr=0x420, TxRingLen=0x428,
-+ TxDescHead=0x430, TxDescTail=0x438, TxIntrDelay=0x440,
-+
-+ RxCRCErrs=0x4000, RxMissed=0x4010,
-+
-+ TxStatus=0x408,
-+ RxStatus=0x180,
-+};
-+
-+/* Bits in the interrupt status/mask registers. */
-+enum intr_status_bits {
-+ IntrTxDone=0x0001, /* Tx packet queued */
-+ IntrLinkChange=0x0004, /* Link Status Change */
-+ IntrRxSErr=0x0008, /* Rx Symbol/Sequence error */
-+ IntrRxEmpty=0x0010, /* Rx queue 0 Empty */
-+ IntrRxQ1Empty=0x0020, /* Rx queue 1 Empty */
-+ IntrRxDone=0x0080, /* Rx Done, Queue 0*/
-+ IntrRxDoneQ1=0x0100, /* Rx Done, Queue 0*/
-+ IntrPCIErr=0x0200, /* PCI Bus Error */
-+
-+ IntrTxEmpty=0x0002, /* Guess */
-+ StatsMax=0x1000, /* Unknown */
-+};
-+
-+/* Bits in the RxFilterMode register. */
-+enum rx_mode_bits {
-+ RxCtrlReset=0x01, RxCtrlEnable=0x02, RxCtrlAllUnicast=0x08,
-+ RxCtrlAllMulticast=0x10,
-+ RxCtrlLoopback=0xC0, /* We never configure loopback */
-+ RxCtrlAcceptBroadcast=0x8000,
-+ /* Aliased names.*/
-+ AcceptAllPhys=0x08, AcceptAllMulticast=0x10, AcceptBroadcast=0x8000,
-+ AcceptMyPhys=0,
-+ AcceptMulticast=0,
-+};
-+
-+/* The Rx and Tx buffer descriptors. */
-+struct rx_desc {
-+ u32 buf_addr;
-+ u32 buf_addr_hi;
-+ u32 csum_length; /* Checksum and length */
-+ u32 status; /* Errors and status. */
-+};
-+
-+struct tx_desc {
-+ u32 buf_addr;
-+ u32 buf_addr_hi;
-+ u32 cmd_length;
-+ u32 status; /* And errors */
-+};
-+
-+/* Bits in tx_desc.cmd_length */
-+enum tx_cmd_bits {
-+ TxDescEndPacket=0x02000000, TxCmdIntrDelay=0x80000000,
-+ TxCmdAddCRC=0x02000000, TxCmdDoTx=0x13000000,
-+};
-+enum tx_status_bits {
-+ TxDescDone=0x0001, TxDescEndPkt=0x0002,
-+};
-+
-+/* Bits in tx_desc.status */
-+enum rx_status_bits {
-+ RxDescDone=0x0001, RxDescEndPkt=0x0002,
-+};
-+
-+
-+#define PRIV_ALIGN 15 /* Required alignment mask */
-+/* Use __attribute__((aligned (L1_CACHE_BYTES))) to maintain alignment
-+ within the structure. */
-+struct netdev_private {
-+ struct net_device *next_module; /* Link for devices of this type. */
-+ void *priv_addr; /* Unaligned address for kfree */
-+ const char *product_name;
-+ /* The addresses of receive-in-place skbuffs. */
-+ struct sk_buff* rx_skbuff[RX_RING_SIZE];
-+ /* The saved address of a sent-in-place packet/buffer, for later free(). */
-+ struct sk_buff* tx_skbuff[TX_RING_SIZE];
-+ struct net_device_stats stats;
-+ struct timer_list timer; /* Media monitoring timer. */
-+ /* Keep frequently used values adjacent for cache effect. */
-+ int msg_level;
-+ int chip_id, drv_flags;
-+ struct pci_dev *pci_dev;
-+ int max_interrupt_work;
-+ int intr_enable;
-+ long in_interrupt; /* Word-long for SMP locks. */
-+
-+ struct rx_desc *rx_ring;
-+ struct rx_desc *rx_head_desc;
-+ unsigned int cur_rx, dirty_rx; /* Producer/consumer ring indices */
-+ unsigned int rx_buf_sz; /* Based on MTU+slack. */
-+ int rx_copybreak;
-+
-+ struct tx_desc *tx_ring;
-+ unsigned int cur_tx, dirty_tx;
-+ unsigned int tx_full:1; /* The Tx queue is full. */
-+
-+ unsigned int rx_mode;
-+ unsigned int tx_config;
-+ int multicast_filter_limit;
-+ /* These values track the transceiver/media in use. */
-+ unsigned int full_duplex:1; /* Full-duplex operation requested. */
-+ unsigned int duplex_lock:1;
-+ unsigned int medialock:1; /* Do not sense media. */
-+ unsigned int default_port; /* Last dev->if_port value. */
-+};
-+
-+static int eeprom_read(long ioaddr, int location);
-+static int netdev_open(struct net_device *dev);
-+static int change_mtu(struct net_device *dev, int new_mtu);
-+static void check_duplex(struct net_device *dev);
-+static void netdev_timer(unsigned long data);
-+static void tx_timeout(struct net_device *dev);
-+static void init_ring(struct net_device *dev);
-+static int start_tx(struct sk_buff *skb, struct net_device *dev);
-+static void intr_handler(int irq, void *dev_instance, struct pt_regs *regs);
-+static void netdev_error(struct net_device *dev, int intr_status);
-+static int netdev_rx(struct net_device *dev);
-+static void netdev_error(struct net_device *dev, int intr_status);
-+static void set_rx_mode(struct net_device *dev);
-+static struct net_device_stats *get_stats(struct net_device *dev);
-+static int mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
-+static int netdev_close(struct net_device *dev);
-+
-+
-+
-+/* A list of our installed devices, for removing the driver module. */
-+static struct net_device *root_net_dev = NULL;
-+
-+#ifndef MODULE
-+int igige_probe(struct net_device *dev)
-+{
-+ if (pci_drv_register(&igige_drv_id, dev) < 0)
-+ return -ENODEV;
-+ printk(KERN_INFO "%s" KERN_INFO "%s", version1, version2);
-+ return 0;
-+}
-+#endif
-+
-+static void *igige_probe1(struct pci_dev *pdev, void *init_dev,
-+ long ioaddr, int irq, int chip_idx, int card_idx)
-+{
-+ struct net_device *dev;
-+ struct netdev_private *np;
-+ void *priv_mem;
-+ int i, option = card_idx < MAX_UNITS ? options[card_idx] : 0;
-+
-+ dev = init_etherdev(init_dev, 0);
-+ if (!dev)
-+ return NULL;
-+
-+ printk(KERN_INFO "%s: %s at 0x%lx, ",
-+ dev->name, pci_id_tbl[chip_idx].name, ioaddr);
-+
-+ for (i = 0; i < 3; i++)
-+ ((u16*)dev->dev_addr)[i] = le16_to_cpu(eeprom_read(ioaddr, i));
-+ for (i = 0; i < 5; i++)
-+ printk("%2.2x:", dev->dev_addr[i]);
-+ printk("%2.2x, IRQ %d.\n", dev->dev_addr[i], irq);
-+
-+ /* Make certain elements e.g. descriptor lists are aligned. */
-+ priv_mem = kmalloc(sizeof(*np) + PRIV_ALIGN, GFP_KERNEL);
-+ /* Check for the very unlikely case of no memory. */
-+ if (priv_mem == NULL)
-+ return NULL;
-+
-+ /* Do bogusness checks before this point.
-+ We do a request_region() only to register /proc/ioports info. */
-+ request_region(ioaddr, pci_id_tbl[chip_idx].io_size, dev->name);
-+
-+ /* Reset the chip to erase previous misconfiguration. */
-+ writel(0x04000000, ioaddr + ChipCtrl);
-+
-+ dev->base_addr = ioaddr;
-+ dev->irq = irq;
-+
-+ dev->priv = np = (void *)(((long)priv_mem + PRIV_ALIGN) & ~PRIV_ALIGN);
-+ memset(np, 0, sizeof(*np));
-+ np->priv_addr = priv_mem;
-+
-+ np->next_module = root_net_dev;
-+ root_net_dev = dev;
-+
-+ np->pci_dev = pdev;
-+ np->chip_id = chip_idx;
-+ np->drv_flags = pci_id_tbl[chip_idx].drv_flags;
-+ np->msg_level = (1 << debug) - 1;
-+ np->rx_copybreak = rx_copybreak;
-+ np->max_interrupt_work = max_interrupt_work;
-+ np->multicast_filter_limit = multicast_filter_limit;
-+
-+ if (dev->mem_start)
-+ option = dev->mem_start;
-+
-+ /* The lower four bits are the media type. */
-+ if (option > 0) {
-+ if (option & 0x2220)
-+ np->full_duplex = 1;
-+ np->default_port = option & 0x3330;
-+ if (np->default_port)
-+ np->medialock = 1;
-+ }
-+ if (card_idx < MAX_UNITS && full_duplex[card_idx] > 0)
-+ np->full_duplex = 1;
-+
-+ if (np->full_duplex)
-+ np->duplex_lock = 1;
-+
-+#if ! defined(final_version) /* Dump the EEPROM contents during development. */
-+ if (np->msg_level & NETIF_MSG_MISC) {
-+ int sum = 0;
-+ for (i = 0; i < 0x40; i++) {
-+ int eeval = eeprom_read(ioaddr, i);
-+ printk("%4.4x%s", eeval, i % 16 != 15 ? " " : "\n");
-+ sum += eeval;
-+ }
-+ printk(KERN_DEBUG "%s: EEPROM checksum %4.4X (expected value 0xBABA).\n",
-+ dev->name, sum & 0xffff);
-+ }
-+#endif
-+
-+ /* The chip-specific entries in the device structure. */
-+ dev->open = &netdev_open;
-+ dev->hard_start_xmit = &start_tx;
-+ dev->stop = &netdev_close;
-+ dev->get_stats = &get_stats;
-+ dev->set_multicast_list = &set_rx_mode;
-+ dev->do_ioctl = &mii_ioctl;
-+ dev->change_mtu = &change_mtu;
-+
-+ /* Turn off VLAN and clear the VLAN filter. */
-+ writel(0x04000000, ioaddr + VLANetherType);
-+ for (i = 0x600; i < 0x800; i+=4)
-+ writel(0, ioaddr + i);
-+ np->tx_config = 0x80000020;
-+ writel(np->tx_config, ioaddr + TxConfigReg);
-+ {
-+ int eeword10 = eeprom_read(ioaddr, 10);
-+ writel(((eeword10 & 0x01e0) << 17) | ((eeword10 & 0x0010) << 3),
-+ ioaddr + ChipCtrl);
-+ }
-+
-+ return dev;
-+}
-+
-+
-+/* Read the EEPROM interface with a serial bit stream generated by the
-+   host processor.
-+   The example below is for the common 93c46 EEPROM, 64 16-bit words. */
-+
-+/* Delay between EEPROM clock transitions.
-+   This effectively flushes the write cache to prevent quick double-writes.
-+*/
-+#define eeprom_delay(ee_addr) readl(ee_addr)
-+
-+enum EEPROM_Ctrl_Bits {
-+ EE_ShiftClk=0x01, EE_ChipSelect=0x02, EE_DataIn=0x08, EE_DataOut=0x04,
-+};
-+#define EE_Write0 (EE_ChipSelect)
-+#define EE_Write1 (EE_ChipSelect | EE_DataOut)
-+
-+/* The EEPROM commands include the always-set leading bit. */
-+enum EEPROM_Cmds { EE_WriteCmd=5, EE_ReadCmd=6, EE_EraseCmd=7, };
-+
-+static int eeprom_read(long addr, int location)
-+{
-+ int i;
-+ int retval = 0;
-+ long ee_addr = addr + EECtrl;
-+	int read_cmd = ((EE_ReadCmd<<6) | location) << 16;
-+ int cmd_len = 2+6+16;
-+ u32 baseval = readl(ee_addr) & ~0x0f;
-+
-+ writel(EE_Write0 | baseval, ee_addr);
-+
-+ /* Shift the read command bits out. */
-+ for (i = cmd_len; i >= 0; i--) {
-+ int dataval = baseval |
-+ ((read_cmd & (1 << i)) ? EE_Write1 : EE_Write0);
-+ writel(dataval, ee_addr);
-+ eeprom_delay(ee_addr);
-+ writel(dataval | EE_ShiftClk, ee_addr);
-+ eeprom_delay(ee_addr);
-+ retval = (retval << 1) | ((readl(ee_addr) & EE_DataIn) ? 1 : 0);
-+ }
-+
-+ /* Terminate the EEPROM access. */
-+ writel(baseval | EE_Write0, ee_addr);
-+ writel(baseval & ~EE_ChipSelect, ee_addr);
-+ return retval;
-+}
-+
-+
-+
-+static int netdev_open(struct net_device *dev)
-+{
-+ struct netdev_private *np = (struct netdev_private *)dev->priv;
-+ long ioaddr = dev->base_addr;
-+
-+ /* Some chips may need to be reset. */
-+
-+ MOD_INC_USE_COUNT;
-+
-+ if (np->tx_ring == 0)
-+ np->tx_ring = (void *)get_free_page(GFP_KERNEL);
-+ if (np->tx_ring == 0)
-+ return -ENOMEM;
-+ if (np->rx_ring == 0)
-+ np->rx_ring = (void *)get_free_page(GFP_KERNEL);
-+	if (np->rx_ring == 0) {	/* Rx ring allocation failed; release the Tx ring. */
-+ free_page((long)np->tx_ring);
-+ return -ENOMEM;
-+ }
-+
-+	/* Note that both request_irq() and init_ring() call kmalloc(), which
-+	   can break the global kernel lock protecting this routine. */
-+ if (request_irq(dev->irq, &intr_handler, SA_SHIRQ, dev->name, dev)) {
-+ MOD_DEC_USE_COUNT;
-+ return -EAGAIN;
-+ }
-+
-+ if (np->msg_level & NETIF_MSG_IFUP)
-+ printk(KERN_DEBUG "%s: netdev_open() irq %d.\n",
-+ dev->name, dev->irq);
-+
-+ init_ring(dev);
-+
-+ writel(0, ioaddr + RxControl);
-+ writel(virt_to_bus(np->rx_ring), ioaddr + RxRingPtr);
-+#if ADDRLEN == 64
-+ writel(virt_to_bus(np->rx_ring) >> 32, ioaddr + RxRingPtr + 4);
-+#else
-+ writel(0, ioaddr + RxRingPtr + 4);
-+#endif
-+
-+ writel(RX_RING_SIZE * sizeof(struct rx_desc), ioaddr + RxRingLen);
-+ writel(0x80000000 | rx_intr_holdoff, ioaddr + RxQ0IntrDelay);
-+ writel(0, ioaddr + RxDescHead);
-+ writel(np->dirty_rx + RX_RING_SIZE, ioaddr + RxDescTail);
-+
-+ /* Zero the unused Rx ring #1. */
-+ writel(0, ioaddr + RxQ1IntrDelay);
-+ writel(0, ioaddr + RxRing1Ptr);
-+ writel(0, ioaddr + RxRing1Ptr + 4);
-+ writel(0, ioaddr + RxRing1Len);
-+ writel(0, ioaddr + RxDesc1Head);
-+ writel(0, ioaddr + RxDesc1Tail);
-+
-+ /* Use 0x002000FA for half duplex. */
-+ writel(0x000400FA, ioaddr + TxControl);
-+
-+ writel(virt_to_bus(np->tx_ring), ioaddr + TxRingPtr);
-+#if ADDRLEN == 64
-+ writel(virt_to_bus(np->tx_ring) >> 32, ioaddr + TxRingPtr + 4);
-+#else
-+ writel(0, ioaddr + TxRingPtr + 4);
-+#endif
-+
-+ writel(TX_RING_SIZE * sizeof(struct tx_desc), ioaddr + TxRingLen);
-+ writel(0, ioaddr + TxDescHead);
-+ writel(0, ioaddr + TxDescTail);
-+ writel(0, ioaddr + TxQState);
-+ writel(0, ioaddr + TxQState + 4);
-+
-+ /* Set IPG register with Ethernet standard values. */
-+ writel(0x00A0080A, ioaddr + TxIPG);
-+ /* The delay before announcing a Tx has completed. */
-+ writel(tx_intr_holdoff, ioaddr + TxIntrDelay);
-+
-+ writel(((u32*)dev->dev_addr)[0], ioaddr + RxAddrCAM);
-+ writel(0x80000000 | ((((u32*)dev->dev_addr)[1]) & 0xffff),
-+ ioaddr + RxAddrCAM + 4);
-+
-+ /* Initialize other registers. */
-+ /* Configure the PCI bus bursts and FIFO thresholds. */
-+
-+ if (dev->if_port == 0)
-+ dev->if_port = np->default_port;
-+
-+ np->in_interrupt = 0;
-+
-+ np->rx_mode = RxCtrlEnable;
-+ set_rx_mode(dev);
-+
-+ /* Tx mode */
-+ np->tx_config = 0x80000020;
-+ writel(np->tx_config, ioaddr + TxConfigReg);
-+
-+ /* Flow control */
-+ writel(0x00C28001, ioaddr + FlowCtrlAddrLo);
-+ writel(0x00000100, ioaddr + FlowCtrlAddrHi);
-+ writel(0x8808, ioaddr + FlowCtrlType);
-+ writel(0x0100, ioaddr + FlowCtrlTimer);
-+ writel(0x8000, ioaddr + FlowCtrlThrshHi);
-+ writel(0x4000, ioaddr + FlowCtrlThrshLo);
-+
-+ netif_start_tx_queue(dev);
-+
-+ /* Enable interrupts by setting the interrupt mask. */
-+ writel(IntrTxDone | IntrLinkChange | IntrRxDone | IntrPCIErr
-+ | IntrRxEmpty | IntrRxSErr, ioaddr + IntrEnable);
-+
-+ /* writel(1, dev->base_addr + RxCmd);*/
-+
-+ if (np->msg_level & NETIF_MSG_IFUP)
-+ printk(KERN_DEBUG "%s: Done netdev_open(), status: %x Rx %x Tx %x.\n",
-+ dev->name, (int)readl(ioaddr + ChipStatus),
-+ (int)readl(ioaddr + RxStatus), (int)readl(ioaddr + TxStatus));
-+
-+ /* Set the timer to check for link beat. */
-+ init_timer(&np->timer);
-+ np->timer.expires = jiffies + 3*HZ;
-+ np->timer.data = (unsigned long)dev;
-+ np->timer.function = &netdev_timer; /* timer handler */
-+ add_timer(&np->timer);
-+
-+ return 0;
-+}
-+
-+/* Update for jumbo frames...
-+ Changing the MTU while active is not allowed.
-+ */
-+static int change_mtu(struct net_device *dev, int new_mtu)
-+{
-+ if ((new_mtu < 68) || (new_mtu > 1500))
-+ return -EINVAL;
-+ if (netif_running(dev))
-+ return -EBUSY;
-+ dev->mtu = new_mtu;
-+ return 0;
-+}
-+
-+static void check_duplex(struct net_device *dev)
-+{
-+ struct netdev_private *np = (struct netdev_private *)dev->priv;
-+ long ioaddr = dev->base_addr;
-+ int chip_ctrl = readl(ioaddr + ChipCtrl);
-+ int rx_cfg = readl(ioaddr + RxConfigReg);
-+ int tx_cfg = readl(ioaddr + TxConfigReg);
-+#if 0
-+ int chip_status = readl(ioaddr + ChipStatus);
-+#endif
-+
-+ if (np->msg_level & NETIF_MSG_LINK)
-+ printk(KERN_DEBUG "%s: Link changed status. Ctrl %x rxcfg %8.8x "
-+ "txcfg %8.8x.\n",
-+ dev->name, chip_ctrl, rx_cfg, tx_cfg);
-+ if (np->medialock) {
-+ if (np->full_duplex)
-+ ;
-+ }
-+ /* writew(new_tx_mode, ioaddr + TxMode); */
-+}
-+
-+static void netdev_timer(unsigned long data)
-+{
-+ struct net_device *dev = (struct net_device *)data;
-+ struct netdev_private *np = (struct netdev_private *)dev->priv;
-+ long ioaddr = dev->base_addr;
-+ int next_tick = 10*HZ;
-+
-+ if (np->msg_level & NETIF_MSG_TIMER) {
-+ printk(KERN_DEBUG "%s: Media selection timer tick, status %8.8x, "
-+ "Tx %x Rx %x.\n",
-+ dev->name, (int)readl(ioaddr + ChipStatus),
-+ (int)readl(ioaddr + TxStatus), (int)readl(ioaddr + RxStatus));
-+ }
-+ /* This will either have a small false-trigger window or will not catch
-+ tbusy incorrectly set when the queue is empty. */
-+ if ((jiffies - dev->trans_start) > TX_TIMEOUT &&
-+ (np->cur_tx - np->dirty_tx > 0 ||
-+ netif_queue_paused(dev)) ) {
-+ tx_timeout(dev);
-+ }
-+ check_duplex(dev);
-+ np->timer.expires = jiffies + next_tick;
-+ add_timer(&np->timer);
-+}
-+
-+static void tx_timeout(struct net_device *dev)
-+{
-+ struct netdev_private *np = (struct netdev_private *)dev->priv;
-+ long ioaddr = dev->base_addr;
-+
-+ printk(KERN_WARNING "%s: Transmit timed out, status %8.8x,"
-+ " resetting...\n", dev->name, (int)readl(ioaddr + ChipStatus));
-+
-+#ifndef __alpha__
-+ if (np->msg_level & NETIF_MSG_TX_ERR) {
-+ int i;
-+ printk(KERN_DEBUG " Tx registers: ");
-+ for (i = 0x400; i < 0x444; i += 8)
-+ printk(" %8.8x", (int)readl(ioaddr + i));
-+ printk("\n"KERN_DEBUG " Rx ring %p: ", np->rx_ring);
-+ for (i = 0; i < RX_RING_SIZE; i++)
-+ printk(" %8.8x", (unsigned int)np->rx_ring[i].status);
-+ printk("\n"KERN_DEBUG" Tx ring %p: ", np->tx_ring);
-+ for (i = 0; i < TX_RING_SIZE; i++)
-+ printk(" %4.4x", np->tx_ring[i].status);
-+ printk("\n");
-+ }
-+#endif
-+
-+ /* Perhaps we should reinitialize the hardware here. */
-+ dev->if_port = 0;
-+	/* Stop and restart the chip's Tx processes. */
-+
-+ /* Trigger an immediate transmit demand. */
-+
-+ dev->trans_start = jiffies;
-+ np->stats.tx_errors++;
-+ return;
-+}
-+
-+
-+/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
-+static void init_ring(struct net_device *dev)
-+{
-+ struct netdev_private *np = (struct netdev_private *)dev->priv;
-+ int i;
-+
-+ np->tx_full = 0;
-+ np->cur_rx = np->cur_tx = 0;
-+ np->dirty_rx = np->dirty_tx = 0;
-+
-+ np->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
-+ np->rx_head_desc = &np->rx_ring[0];
-+
-+ /* Initialize all Rx descriptors. */
-+ for (i = 0; i < RX_RING_SIZE; i++) {
-+ np->rx_skbuff[i] = 0;
-+ }
-+
-+ /* The number of ring descriptors is set by the ring length register,
-+ thus the chip does not use 'next_desc' chains. */
-+
-+ /* Fill in the Rx buffers. Allocation failures are acceptable. */
-+ for (i = 0; i < RX_RING_SIZE; i++) {
-+ struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz);
-+ np->rx_skbuff[i] = skb;
-+ if (skb == NULL)
-+ break;
-+ skb->dev = dev; /* Mark as being used by this device. */
-+ skb_reserve(skb, 2); /* 16 byte align the IP header. */
-+ np->rx_ring[i].buf_addr = virt_to_le32desc(skb->tail);
-+ np->rx_ring[i].buf_addr_hi = 0;
-+ np->rx_ring[i].status = 0;
-+ }
-+ np->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
-+
-+ for (i = 0; i < TX_RING_SIZE; i++) {
-+ np->tx_skbuff[i] = 0;
-+ np->tx_ring[i].status = 0;
-+ }
-+ return;
-+}
-+
-+static int start_tx(struct sk_buff *skb, struct net_device *dev)
-+{
-+ struct netdev_private *np = (struct netdev_private *)dev->priv;
-+ unsigned entry;
-+
-+	/* Block a timer-based transmit from overlapping. This happens when
-+	   packets are presumed lost, and we use this to check the Tx status. */
-+ if (netif_pause_tx_queue(dev) != 0) {
-+ /* This watchdog code is redundant with the media monitor timer. */
-+ if (jiffies - dev->trans_start > TX_TIMEOUT)
-+ tx_timeout(dev);
-+ return 1;
-+ }
-+
-+ /* Calculate the next Tx descriptor entry. */
-+ entry = np->cur_tx % TX_RING_SIZE;
-+
-+ np->tx_skbuff[entry] = skb;
-+
-+ /* Note: Descriptors may be uncached. Write each field only once. */
-+ np->tx_ring[entry].buf_addr = virt_to_le32desc(skb->data);
-+ np->tx_ring[entry].buf_addr_hi = 0;
-+ np->tx_ring[entry].cmd_length = cpu_to_le32(TxCmdDoTx | skb->len);
-+ np->tx_ring[entry].status = 0;
-+
-+	/* On non-cache-coherent architectures, explicitly flush the descriptor and packet:
-+ cache_flush(np->tx_ring[entry], sizeof np->tx_ring[entry]);
-+ cache_flush(skb->data, skb->len);
-+ */
-+
-+ np->cur_tx++;
-+ if (np->cur_tx - np->dirty_tx >= TX_QUEUE_LEN - 1) {
-+ np->tx_full = 1;
-+ /* Check for a just-cleared queue. */
-+ if (np->cur_tx - (volatile int)np->dirty_tx < TX_QUEUE_LEN - 2) {
-+ netif_unpause_tx_queue(dev);
-+ np->tx_full = 0;
-+ } else
-+ netif_stop_tx_queue(dev);
-+ } else
-+ netif_unpause_tx_queue(dev); /* Typical path */
-+
-+ /* Inform the chip we have another Tx. */
-+ if (np->msg_level & NETIF_MSG_TX_QUEUED)
-+ printk(KERN_DEBUG "%s: Tx queued to slot %d, desc tail now %d "
-+ "writing %d.\n",
-+ dev->name, entry, (int)readl(dev->base_addr + TxDescTail),
-+ np->cur_tx % TX_RING_SIZE);
-+ writel(np->cur_tx % TX_RING_SIZE, dev->base_addr + TxDescTail);
-+
-+ dev->trans_start = jiffies;
-+
-+ if (np->msg_level & NETIF_MSG_TX_QUEUED) {
-+ printk(KERN_DEBUG "%s: Transmit frame #%d (%x) queued in slot %d.\n",
-+ dev->name, np->cur_tx, (int)virt_to_bus(&np->tx_ring[entry]),
-+ entry);
-+ }
-+ return 0;
-+}
-+
-+/* The interrupt handler does all of the Rx thread work and cleans up
-+ after the Tx thread. */
-+static void intr_handler(int irq, void *dev_instance, struct pt_regs *rgs)
-+{
-+ struct net_device *dev = (struct net_device *)dev_instance;
-+ struct netdev_private *np;
-+ long ioaddr;
-+ int work_limit;
-+
-+ ioaddr = dev->base_addr;
-+ np = (struct netdev_private *)dev->priv;
-+ work_limit = np->max_interrupt_work;
-+
-+#if defined(__i386__) && LINUX_VERSION_CODE < 0x020300
-+ /* A lock to prevent simultaneous entry bug on Intel SMP machines. */
-+ if (test_and_set_bit(0, (void*)&dev->interrupt)) {
-+ printk(KERN_ERR"%s: SMP simultaneous entry of an interrupt handler.\n",
-+ dev->name);
-+ dev->interrupt = 0; /* Avoid halting machine. */
-+ return;
-+ }
-+#endif
-+
-+ do {
-+ u32 intr_status = readl(ioaddr + IntrStatus);
-+
-+ if (np->msg_level & NETIF_MSG_INTR)
-+ printk(KERN_DEBUG "%s: Interrupt, status %4.4x.\n",
-+ dev->name, intr_status);
-+
-+ if (intr_status == 0 || intr_status == 0xffffffff)
-+ break;
-+
-+ if (intr_status & IntrRxDone)
-+ netdev_rx(dev);
-+
-+ for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
-+ int entry = np->dirty_tx % TX_RING_SIZE;
-+ if (np->tx_ring[entry].status == 0)
-+ break;
-+ if (np->msg_level & NETIF_MSG_TX_DONE)
-+ printk(KERN_DEBUG "%s: Transmit done, Tx status %8.8x.\n",
-+ dev->name, np->tx_ring[entry].status);
-+ np->stats.tx_packets++;
-+#if LINUX_VERSION_CODE > 0x20127
-+ np->stats.tx_bytes += np->tx_skbuff[entry]->len;
-+#endif
-+ /* Free the original skb. */
-+ dev_free_skb_irq(np->tx_skbuff[entry]);
-+ np->tx_skbuff[entry] = 0;
-+ }
-+ /* Note the 4 slot hysteresis to mark the queue non-full. */
-+ if (np->tx_full && np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
-+ /* The ring is no longer full, allow new TX entries. */
-+ np->tx_full = 0;
-+ netif_resume_tx_queue(dev);
-+ }
-+
-+ /* Abnormal error summary/uncommon events handlers. */
-+ if (intr_status & (IntrPCIErr | IntrLinkChange | StatsMax))
-+ netdev_error(dev, intr_status);
-+
-+ if (--work_limit < 0) {
-+ printk(KERN_WARNING "%s: Too much work at interrupt, "
-+ "status=0x%4.4x.\n",
-+ dev->name, intr_status);
-+ break;
-+ }
-+ } while (1);
-+
-+ if (np->msg_level & NETIF_MSG_INTR)
-+ printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
-+ dev->name, (int)readl(ioaddr + IntrStatus));
-+
-+#if defined(__i386__) && LINUX_VERSION_CODE < 0x020300
-+ clear_bit(0, (void*)&dev->interrupt);
-+#endif
-+ return;
-+}
-+
-+/* This routine is logically part of the interrupt handler, but separated
-+ for clarity and better register allocation. */
-+static int netdev_rx(struct net_device *dev)
-+{
-+ struct netdev_private *np = (struct netdev_private *)dev->priv;
-+ int entry = np->cur_rx % RX_RING_SIZE;
-+ int boguscnt = np->dirty_rx + RX_RING_SIZE - np->cur_rx;
-+
-+ if (np->msg_level & NETIF_MSG_RX_STATUS) {
-+ printk(KERN_DEBUG " In netdev_rx(), entry %d status %4.4x.\n",
-+ entry, np->rx_ring[entry].status);
-+ }
-+
-+ /* If EOP is set on the next entry, it's a new packet. Send it up. */
-+ while (np->rx_head_desc->status & cpu_to_le32(RxDescDone)) {
-+ struct rx_desc *desc = np->rx_head_desc;
-+ u32 desc_status = le32_to_cpu(desc->status);
-+ int data_size = le32_to_cpu(desc->csum_length);
-+
-+ if (np->msg_level & NETIF_MSG_RX_STATUS)
-+ printk(KERN_DEBUG " netdev_rx() status was %8.8x.\n",
-+ desc_status);
-+ if (--boguscnt < 0)
-+ break;
-+ if ( ! (desc_status & RxDescEndPkt)) {
-+ printk(KERN_WARNING "%s: Oversized Ethernet frame spanned "
-+ "multiple buffers, entry %#x length %d status %4.4x!\n",
-+ dev->name, np->cur_rx, data_size, desc_status);
-+ np->stats.rx_length_errors++;
-+ } else {
-+ struct sk_buff *skb;
-+ /* Reported length should omit the CRC. */
-+ int pkt_len = (data_size & 0xffff) - 4;
-+
-+#ifndef final_version
-+ if (np->msg_level & NETIF_MSG_RX_STATUS)
-+ printk(KERN_DEBUG " netdev_rx() normal Rx pkt length %d"
-+ " of %d, bogus_cnt %d.\n",
-+ pkt_len, data_size, boguscnt);
-+#endif
-+ /* Check if the packet is long enough to accept without copying
-+ to a minimally-sized skbuff. */
-+ if (pkt_len < np->rx_copybreak
-+ && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
-+ skb->dev = dev;
-+ skb_reserve(skb, 2); /* 16 byte align the IP header */
-+#if HAS_IP_COPYSUM /* Call copy + cksum if available. */
-+ eth_copy_and_sum(skb, np->rx_skbuff[entry]->tail, pkt_len, 0);
-+ skb_put(skb, pkt_len);
-+#else
-+ memcpy(skb_put(skb, pkt_len), np->rx_skbuff[entry]->tail,
-+ pkt_len);
-+#endif
-+ } else {
-+ char *temp = skb_put(skb = np->rx_skbuff[entry], pkt_len);
-+ np->rx_skbuff[entry] = NULL;
-+#ifndef final_version /* Remove after testing. */
-+ if (le32desc_to_virt(np->rx_ring[entry].buf_addr) != temp)
-+ printk(KERN_ERR "%s: Internal fault: The skbuff addresses "
-+ "do not match in netdev_rx: %p vs. %p / %p.\n",
-+ dev->name,
-+ le32desc_to_virt(np->rx_ring[entry].buf_addr),
-+ skb->head, temp);
-+#endif
-+ }
-+#ifndef final_version /* Remove after testing. */
-+ /* You will want this info for the initial debug. */
-+ if (np->msg_level & NETIF_MSG_PKTDATA)
-+ printk(KERN_DEBUG " Rx data %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:"
-+ "%2.2x %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x %2.2x%2.2x "
-+ "%d.%d.%d.%d.\n",
-+ skb->data[0], skb->data[1], skb->data[2], skb->data[3],
-+ skb->data[4], skb->data[5], skb->data[6], skb->data[7],
-+ skb->data[8], skb->data[9], skb->data[10],
-+ skb->data[11], skb->data[12], skb->data[13],
-+ skb->data[14], skb->data[15], skb->data[16],
-+ skb->data[17]);
-+#endif
-+ skb->protocol = eth_type_trans(skb, dev);
-+ /* Note: checksum -> skb->ip_summed = CHECKSUM_UNNECESSARY; */
-+ netif_rx(skb);
-+ dev->last_rx = jiffies;
-+ np->stats.rx_packets++;
-+#if LINUX_VERSION_CODE > 0x20127
-+ np->stats.rx_bytes += pkt_len;
-+#endif
-+ }
-+ entry = (++np->cur_rx) % RX_RING_SIZE;
-+ np->rx_head_desc = &np->rx_ring[entry];
-+ }
-+
-+ /* Refill the Rx ring buffers. */
-+ for (; np->cur_rx - np->dirty_rx > 0; np->dirty_rx++) {
-+ struct sk_buff *skb;
-+ entry = np->dirty_rx % RX_RING_SIZE;
-+ if (np->rx_skbuff[entry] == NULL) {
-+ skb = dev_alloc_skb(np->rx_buf_sz);
-+ np->rx_skbuff[entry] = skb;
-+ if (skb == NULL)
-+ break; /* Better luck next round. */
-+ skb->dev = dev; /* Mark as being used by this device. */
-+ skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
-+ np->rx_ring[entry].buf_addr = virt_to_le32desc(skb->tail);
-+ }
-+ np->rx_ring[entry].status = 0;
-+ }
-+
-+ /* Restart Rx engine if stopped. */
-+ /* writel(1, dev->base_addr + RxCmd); */
-+ return 0;
-+}
-+
-+static void netdev_error(struct net_device *dev, int intr_status)
-+{
-+ long ioaddr = dev->base_addr;
-+ struct netdev_private *np = (struct netdev_private *)dev->priv;
-+
-+ if (intr_status & IntrLinkChange) {
-+ int chip_ctrl = readl(ioaddr + ChipCtrl);
-+ if (np->msg_level & NETIF_MSG_LINK)
-+ printk(KERN_ERR "%s: Link changed: Autonegotiation on-going.\n",
-+ dev->name);
-+ if (chip_ctrl & 1)
-+ netif_link_up(dev);
-+ else
-+ netif_link_down(dev);
-+ check_duplex(dev);
-+ }
-+ if (intr_status & StatsMax) {
-+ get_stats(dev);
-+ }
-+ if ((intr_status & ~(IntrLinkChange|StatsMax))
-+ && (np->msg_level & NETIF_MSG_DRV))
-+ printk(KERN_ERR "%s: Something Wicked happened! %4.4x.\n",
-+ dev->name, intr_status);
-+ /* Hmmmmm, it's not clear how to recover from PCI faults. */
-+ if (intr_status & IntrPCIErr)
-+ np->stats.tx_fifo_errors++;
-+}
-+
-+static struct net_device_stats *get_stats(struct net_device *dev)
-+{
-+ long ioaddr = dev->base_addr;
-+ struct netdev_private *np = (struct netdev_private *)dev->priv;
-+ int crc_errs = readl(ioaddr + RxCRCErrs);
-+
-+ if (crc_errs != 0xffffffff) {
-+ /* We need not lock this segment of code for SMP.
-+ The non-atomic-add vulnerability is very small
-+ and statistics are non-critical. */
-+ np->stats.rx_crc_errors += readl(ioaddr + RxCRCErrs);
-+ np->stats.rx_missed_errors += readl(ioaddr + RxMissed);
-+ }
-+
-+ return &np->stats;
-+}
-+
-+/* The little-endian AUTODIN II ethernet CRC calculations.
-+ A big-endian version is also available.
-+ This is slow but compact code. Do not use this routine for bulk data,
-+ use a table-based routine instead.
-+ This is common code and should be moved to net/core/crc.c.
-+ Chips may use the upper or lower CRC bits, and may reverse and/or invert
-+ them. Select the endian-ness that results in minimal calculations.
-+*/
-+static unsigned const ethernet_polynomial_le = 0xedb88320U;
-+static inline unsigned ether_crc_le(int length, unsigned char *data)
-+{
-+ unsigned int crc = 0xffffffff; /* Initial value. */
-+ while(--length >= 0) {
-+ unsigned char current_octet = *data++;
-+ int bit;
-+ for (bit = 8; --bit >= 0; current_octet >>= 1) {
-+ if ((crc ^ current_octet) & 1) {
-+ crc >>= 1;
-+ crc ^= ethernet_polynomial_le;
-+ } else
-+ crc >>= 1;
-+ }
-+ }
-+ return crc;
-+}
-+
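
As the routine above notes, bulk data would use a table-based CRC instead. A minimal illustrative sketch follows (not part of this patch; the table and helper names are hypothetical), built on the same reflected polynomial and initial value so it returns identical results:

static u32 crc32_le_table[256];		/* hypothetical helper table, not in the driver */

static void ether_crc_le_init_table(void)
{
	u32 crc;
	int i, bit;

	for (i = 0; i < 256; i++) {
		crc = i;
		for (bit = 0; bit < 8; bit++)
			crc = (crc & 1) ? (crc >> 1) ^ ethernet_polynomial_le : crc >> 1;
		crc32_le_table[i] = crc;
	}
}

static inline unsigned ether_crc_le_bulk(int length, unsigned char *data)
{
	unsigned int crc = 0xffffffff;	/* Same initial value as ether_crc_le(). */

	while (--length >= 0)
		crc = (crc >> 8) ^ crc32_le_table[(crc ^ *data++) & 0xff];
	return crc;			/* No final inversion, matching the routine above. */
}

The 1 KB table trades memory for one lookup, one shift and one XOR per octet.
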
-+static void set_rx_mode(struct net_device *dev)
-+{
-+ long ioaddr = dev->base_addr;
-+ struct netdev_private *np = (struct netdev_private *)dev->priv;
-+ u32 new_mc_filter[128]; /* Multicast filter table */
-+ u32 new_rx_mode = np->rx_mode;
-+
-+ if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
-+ /* Unconditionally log net taps. */
-+ printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n", dev->name);
-+ new_rx_mode |=
-+ RxCtrlAcceptBroadcast | RxCtrlAllMulticast | RxCtrlAllUnicast;
-+ } else if ((dev->mc_count > np->multicast_filter_limit)
-+ || (dev->flags & IFF_ALLMULTI)) {
-+ /* Too many to match, or accept all multicasts. */
-+ new_rx_mode &= ~RxCtrlAllUnicast;
-+ new_rx_mode |= RxCtrlAcceptBroadcast | RxCtrlAllMulticast;
-+ } else {
-+ struct dev_mc_list *mclist;
-+ int i;
-+ memset(new_mc_filter, 0, sizeof(new_mc_filter));
-+ for (i = 0, mclist = dev->mc_list; mclist && i < 15;
-+ i++, mclist = mclist->next) {
-+ writel(((u32*)mclist->dmi_addr)[0], ioaddr + RxAddrCAM + 8 + i*8);
-+ writel((((u32*)mclist->dmi_addr)[1] & 0xffff) | 0x80000000,
-+ ioaddr + RxAddrCAM + 12 + i*8);
-+ }
-+ for (; mclist && i < dev->mc_count; i++, mclist = mclist->next) {
-+ set_bit(((u32*)mclist->dmi_addr)[1] & 0xfff,
-+ new_mc_filter);
-+ }
-+		new_rx_mode &= ~(RxCtrlAllUnicast | RxCtrlAllMulticast);
-+ new_rx_mode |= RxCtrlAcceptBroadcast;
-+ if (dev->mc_count > 15)
-+ for (i = 0; i < 128; i++)
-+ writel(new_mc_filter[i], ioaddr + MulticastArray + (i<<2));
-+ }
-+ if (np->rx_mode != new_rx_mode)
-+ writel(np->rx_mode = new_rx_mode, ioaddr + RxControl);
-+}
-+
-+static int mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
-+{
-+ struct netdev_private *np = (struct netdev_private *)dev->priv;
-+ u32 *data32 = (void *)&rq->ifr_data;
-+
-+ switch(cmd) {
-+ case SIOCGPARAMS:
-+ data32[0] = np->msg_level;
-+ data32[1] = np->multicast_filter_limit;
-+ data32[2] = np->max_interrupt_work;
-+ data32[3] = np->rx_copybreak;
-+ return 0;
-+ case SIOCSPARAMS:
-+ if (!capable(CAP_NET_ADMIN))
-+ return -EPERM;
-+ np->msg_level = data32[0];
-+ np->multicast_filter_limit = data32[1];
-+ np->max_interrupt_work = data32[2];
-+ np->rx_copybreak = data32[3];
-+ return 0;
-+ default:
-+ return -EOPNOTSUPP;
-+ }
-+}
-+
-+static int netdev_close(struct net_device *dev)
-+{
-+ long ioaddr = dev->base_addr;
-+ struct netdev_private *np = (struct netdev_private *)dev->priv;
-+ int i;
-+
-+ netif_stop_tx_queue(dev);
-+
-+ if (np->msg_level & NETIF_MSG_IFDOWN) {
-+ printk(KERN_DEBUG "%s: Shutting down ethercard, status was Tx %4.4x "
-+ "Rx %4.4x Int %2.2x.\n",
-+ dev->name, (int)readl(ioaddr + TxStatus),
-+ (int)readl(ioaddr + RxStatus), (int)readl(ioaddr + IntrStatus));
-+ printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d, Rx %d / %d.\n",
-+ dev->name, np->cur_tx, np->dirty_tx, np->cur_rx, np->dirty_rx);
-+ }
-+
-+ /* Disable interrupts by clearing the interrupt mask. */
-+ writel(~0, ioaddr + IntrDisable);
-+ readl(ioaddr + IntrStatus);
-+
-+ /* Reset everything. */
-+ writel(0x04000000, ioaddr + ChipCtrl);
-+
-+ del_timer(&np->timer);
-+
-+#ifdef __i386__
-+ if (np->msg_level & NETIF_MSG_IFDOWN) {
-+ printk("\n"KERN_DEBUG" Tx ring at %8.8x:\n",
-+ (int)virt_to_bus(np->tx_ring));
-+ for (i = 0; i < TX_RING_SIZE; i++)
-+ printk(" #%d desc. buf %8.8x, length %8.8x, status %8.8x.\n",
-+ i, np->tx_ring[i].buf_addr, np->tx_ring[i].cmd_length,
-+ np->tx_ring[i].status);
-+ printk("\n"KERN_DEBUG " Rx ring %8.8x:\n",
-+ (int)virt_to_bus(np->rx_ring));
-+ for (i = 0; i < RX_RING_SIZE; i++) {
-+ printk(KERN_DEBUG " #%d desc. %4.4x %4.4x %8.8x\n",
-+ i, np->rx_ring[i].csum_length,
-+ np->rx_ring[i].status, np->rx_ring[i].buf_addr);
-+ if (np->rx_ring[i].buf_addr) {
-+ if (*(u8*)np->rx_skbuff[i]->tail != 0x69) {
-+ u16 *pkt_buf = (void *)np->rx_skbuff[i]->tail;
-+ int j;
-+ for (j = 0; j < 0x50; j++)
-+ printk(" %4.4x", pkt_buf[j]);
-+ printk("\n");
-+ }
-+ }
-+ }
-+ }
-+#endif /* __i386__ debugging only */
-+
-+ free_irq(dev->irq, dev);
-+
-+ /* Free all the skbuffs in the Rx queue. */
-+ for (i = 0; i < RX_RING_SIZE; i++) {
-+ np->rx_ring[i].status = 0;
-+ np->rx_ring[i].buf_addr = 0xBADF00D0; /* An invalid address. */
-+ if (np->rx_skbuff[i]) {
-+#if LINUX_VERSION_CODE < 0x20100
-+ np->rx_skbuff[i]->free = 1;
-+#endif
-+ dev_free_skb(np->rx_skbuff[i]);
-+ }
-+ np->rx_skbuff[i] = 0;
-+ }
-+ for (i = 0; i < TX_RING_SIZE; i++) {
-+ if (np->tx_skbuff[i])
-+ dev_free_skb(np->tx_skbuff[i]);
-+ np->tx_skbuff[i] = 0;
-+ }
-+
-+ MOD_DEC_USE_COUNT;
-+
-+ return 0;
-+}
-+
-+static int netdev_pwr_event(void *dev_instance, int event)
-+{
-+ struct net_device *dev = dev_instance;
-+ struct netdev_private *np = (struct netdev_private *)dev->priv;
-+ long ioaddr = dev->base_addr;
-+
-+ if (np->msg_level & NETIF_MSG_LINK)
-+ printk(KERN_DEBUG "%s: Handling power event %d.\n", dev->name, event);
-+ switch(event) {
-+ case DRV_ATTACH:
-+ MOD_INC_USE_COUNT;
-+ break;
-+ case DRV_SUSPEND:
-+ /* Disable interrupts, stop Tx and Rx. */
-+ writel(~0, ioaddr + IntrDisable);
-+ /* writel(2, ioaddr + RxCmd); */
-+ /* writew(2, ioaddr + TxCmd); */
-+ break;
-+ case DRV_RESUME:
-+ /* This is incomplete: the actions are very chip specific. */
-+ set_rx_mode(dev);
-+ break;
-+ case DRV_DETACH: {
-+ struct net_device **devp, **next;
-+ if (dev->flags & IFF_UP) {
-+ /* Some, but not all, kernel versions close automatically. */
-+ dev_close(dev);
-+ dev->flags &= ~(IFF_UP|IFF_RUNNING);
-+ }
-+ unregister_netdev(dev);
-+ release_region(dev->base_addr, pci_id_tbl[np->chip_id].io_size);
-+ iounmap((char *)dev->base_addr);
-+ for (devp = &root_net_dev; *devp; devp = next) {
-+ next = &((struct netdev_private *)(*devp)->priv)->next_module;
-+ if (*devp == dev) {
-+ *devp = *next;
-+ break;
-+ }
-+ }
-+ if (np->priv_addr)
-+ kfree(np->priv_addr);
-+ kfree(dev);
-+ MOD_DEC_USE_COUNT;
-+ break;
-+ }
-+ }
-+
-+ return 0;
-+}
-+
-+
-+#ifdef MODULE
-+int init_module(void)
-+{
-+ /* Emit version even if no cards detected. */
-+ printk(KERN_INFO "%s" KERN_INFO "%s", version1, version2);
-+ return pci_drv_register(&igige_drv_id, NULL);
-+}
-+
-+void cleanup_module(void)
-+{
-+ struct net_device *next_dev;
-+
-+ pci_drv_unregister(&igige_drv_id);
-+
-+ /* No need to check MOD_IN_USE, as sys_delete_module() checks. */
-+ while (root_net_dev) {
-+ struct netdev_private *np = (void *)(root_net_dev->priv);
-+ unregister_netdev(root_net_dev);
-+ release_region(root_net_dev->base_addr,
-+ pci_id_tbl[np->chip_id].io_size);
-+ iounmap((char *)(root_net_dev->base_addr));
-+ next_dev = np->next_module;
-+		if (np->tx_ring)
-+			free_page((long)np->tx_ring);
-+		if (np->rx_ring)
-+			free_page((long)np->rx_ring);
-+ if (np->priv_addr)
-+ kfree(np->priv_addr);
-+ kfree(root_net_dev);
-+ root_net_dev = next_dev;
-+ }
-+}
-+
-+#endif /* MODULE */
-+
-+/*
-+ * Local variables:
-+ * compile-command: "make KERNVER=`uname -r` intel-gige.o"
-+ * compile-cmd: "gcc -DMODULE -Wall -Wstrict-prototypes -O6 -c intel-gige.c"
-+ * simple-compile-command: "gcc -DMODULE -O6 -c intel-gige.c"
-+ * c-indent-level: 4
-+ * c-basic-offset: 4
-+ * tab-width: 4
-+ * End:
-+ */
-diff -Naur linux/dev/drivers/net/ns820.c linux/dev/drivers/net/ns820.c
---- linux/dev/drivers/net/ns820.c 1970-01-01 01:00:00.000000000 +0100
-+++ linux/dev/drivers/net/ns820.c 2004-10-25 05:46:15.000000000 +0200
-@@ -0,0 +1,1547 @@
-+/* ns820.c: A Linux Gigabit Ethernet driver for the NatSemi DP83820 series. */
-+/*
-+ Written/copyright 1999-2003 by Donald Becker.
-+ Copyright 2002-2003 by Scyld Computing Corporation.
-+
-+ This software may be used and distributed according to the terms of
-+ the GNU General Public License (GPL), incorporated herein by reference.
-+ Drivers based on or derived from this code fall under the GPL and must
-+ retain the authorship, copyright and license notice. This file is not
-+ a complete program and may only be used when the entire operating
-+   system is licensed under the GPL. A license for use under other terms may
-+   be available. Contact the original author for details.
-+
-+ The original author may be reached as becker@scyld.com, or at
-+ Scyld Computing Corporation
-+ 914 Bay Ridge Road, Suite 220
-+ Annapolis MD 21403
-+
-+ Support information and updates available at
-+ http://www.scyld.com/network/natsemi.html
-+ The information and support mailing lists are based at
-+ http://www.scyld.com/mailman/listinfo/
-+*/
-+
-+/* These identify the driver base version and may not be removed. */
-+static const char version1[] =
-+"ns820.c:v1.03a 8/09/2003 Written by Donald Becker <becker@scyld.com>\n";
-+static const char version2[] =
-+" http://www.scyld.com/network/natsemi.html\n";
-+/* Updated to recommendations in pci-skeleton v2.13. */
-+
-+/* Automatically extracted configuration info:
-+probe-func: ns820_probe
-+config-in: tristate 'National Semiconductor DP8382x series PCI Ethernet support' CONFIG_NATSEMI820
-+
-+c-help-name: National Semiconductor DP8382x series PCI Ethernet support
-+c-help-symbol: CONFIG_NATSEMI820
-+c-help: This driver is for the National Semiconductor DP83820 Gigabit Ethernet
-+c-help: adapter series.
-+c-help: More specific information and updates are available from
-+c-help: http://www.scyld.com/network/natsemi.html
-+*/
-+
-+/* The user-configurable values.
-+ These may be modified when a driver module is loaded.*/
-+
-+/* Message enable level: 0..31 = no..all messages. See NETIF_MSG docs. */
-+static int debug = 2;
-+
-+/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
-+static int max_interrupt_work = 20;
-+
-+/* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
-+ This chip uses a 2048 element hash table based on the Ethernet CRC.
-+ Previous natsemi chips had unreliable multicast filter circuitry.
-+   To work around an observed problem, set this value to '0',
-+ which will immediately switch to Rx-all-multicast.
-+ */
-+static int multicast_filter_limit = 100;
-+
-+/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
-+ Setting to > 1518 effectively disables this feature.
-+ This chip can only receive into aligned buffers, so architectures such
-+ as the Alpha AXP might benefit from a copy-align.
-+*/
-+static int rx_copybreak = 0;
-+
-+/* Used to pass the media type, etc.
-+ Both 'options[]' and 'full_duplex[]' should exist for driver
-+   interoperability; however, setting full_duplex[] is deprecated.
-+   The media type is usually passed in 'options[]'.
-+   The default is autonegotiation for speed and duplex.
-+ This should rarely be overridden.
-+ Use option values 0x10/0x20 for 10Mbps, 0x100,0x200 for 100Mbps.
-+ Use option values 0x10 and 0x100 for forcing half duplex fixed speed.
-+ Use option values 0x20 and 0x200 for forcing full duplex operation.
-+ Use 0x1000 or 0x2000 for gigabit.
-+*/
-+#define MAX_UNITS 8 /* More are supported, limit only on options */
-+static int options[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
-+static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
-+
-+/* Operational parameters that are set at compile time. */
-+
-+/* Keep the ring sizes a power of two for compile efficiency.
-+ Understand the implications before changing these settings!
-+ The compiler will convert <unsigned>'%'<2^N> into a bit mask.
-+ Making the Tx ring too large decreases the effectiveness of channel
-+ bonding and packet priority, confuses the system network buffer limits,
-+ and wastes memory.
-+ Too-large receive rings waste memory and confound network buffer limits.
-+*/
-+#define TX_RING_SIZE 16
-+#define TX_QUEUE_LEN 10 /* Limit ring entries actually used, min 4. */
-+#define RX_RING_SIZE 64
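
To make the remark about '%' concrete, a hypothetical helper (illustrative only, not in the driver) shows how a power-of-two ring size lets the compiler turn the modulo into a mask:

/* Illustrative only: with TX_RING_SIZE a power of two, the modulo below
   compiles to a single AND with (TX_RING_SIZE - 1). */
static inline unsigned int tx_ring_slot(unsigned int cur_tx)
{
	return cur_tx % TX_RING_SIZE;
}
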
-+
-+/* Operational parameters that usually are not changed. */
-+/* Time in jiffies before concluding the transmitter is hung.
-+ Re-autonegotiation may take up to 3 seconds.
-+ */
-+#define TX_TIMEOUT (6*HZ)
-+
-+/* Allocation size of Rx buffers with normal sized Ethernet frames.
-+ Do not change this value without good reason. This is not a limit,
-+ but a way to keep a consistent allocation size among drivers.
-+ */
-+#define PKT_BUF_SZ 1536
-+
-+#ifndef __KERNEL__
-+#define __KERNEL__
-+#endif
-+#if !defined(__OPTIMIZE__)
-+#warning You must compile this file with the correct options!
-+#warning See the last lines of the source file.
-+#error You must compile this driver with "-O".
-+#endif
-+
-+/* Include files, designed to support most kernel versions 2.0.0 and later. */
-+#include <linux/config.h>
-+#if defined(CONFIG_SMP) && ! defined(__SMP__)
-+#define __SMP__
-+#endif
-+#if defined(MODULE) && defined(CONFIG_MODVERSIONS) && ! defined(MODVERSIONS)
-+#define MODVERSIONS
-+#endif
-+
-+#include <linux/version.h>
-+#if defined(MODVERSIONS)
-+#include <linux/modversions.h>
-+#endif
-+#include <linux/module.h>
-+
-+#include <linux/kernel.h>
-+#include <linux/string.h>
-+#include <linux/timer.h>
-+#include <linux/errno.h>
-+#include <linux/ioport.h>
-+#if LINUX_VERSION_CODE >= 0x20400
-+#include <linux/slab.h>
-+#else
-+#include <linux/malloc.h>
-+#endif
-+#include <linux/interrupt.h>
-+#include <linux/pci.h>
-+#include <linux/netdevice.h>
-+#include <linux/etherdevice.h>
-+#include <linux/skbuff.h>
-+#include <asm/processor.h> /* Processor type for cache alignment. */
-+#include <asm/bitops.h>
-+#include <asm/io.h>
-+
-+#ifdef INLINE_PCISCAN
-+#include "k_compat.h"
-+#else
-+#include "pci-scan.h"
-+#include "kern_compat.h"
-+#endif
-+
-+#if (LINUX_VERSION_CODE >= 0x20100) && defined(MODULE)
-+char kernel_version[] = UTS_RELEASE;
-+#endif
-+
-+MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
-+MODULE_DESCRIPTION("National Semiconductor DP83820 series PCI Ethernet driver");
-+MODULE_LICENSE("GPL");
-+MODULE_PARM(debug, "i");
-+MODULE_PARM(options, "1-" __MODULE_STRING(MAX_UNITS) "i");
-+MODULE_PARM(max_interrupt_work, "i");
-+MODULE_PARM(full_duplex, "1-" __MODULE_STRING(MAX_UNITS) "i");
-+MODULE_PARM(rx_copybreak, "i");
-+MODULE_PARM(multicast_filter_limit, "i");
-+MODULE_PARM_DESC(debug, "Driver message level (0-31)");
-+MODULE_PARM_DESC(options, "Force transceiver type or fixed speed+duplex");
-+MODULE_PARM_DESC(max_interrupt_work,
-+ "Driver maximum events handled per interrupt");
-+MODULE_PARM_DESC(full_duplex,
-+ "Non-zero to force full duplex, non-negotiated link "
-+ "(deprecated).");
-+MODULE_PARM_DESC(rx_copybreak,
-+ "Breakpoint in bytes for copy-only-tiny-frames");
-+MODULE_PARM_DESC(multicast_filter_limit,
-+ "Multicast addresses before switching to Rx-all-multicast");
-+
-+/*
-+ Theory of Operation
-+
-+I. Board Compatibility
-+
-+This driver is designed for the National Semiconductor DP83820 10/100/1000
-+Ethernet NIC. It is superficially similar to the 810 series "natsemi.c"
-+driver; however, the register layout, descriptor layout and element
-+length of the new chip series are different.
-+
-+II. Board-specific settings
-+
-+This driver requires the PCI interrupt line to be configured.
-+It honors the EEPROM-set values.
-+
-+III. Driver operation
-+
-+IIIa. Ring buffers
-+
-+This driver uses two statically allocated fixed-size descriptor lists
-+formed into rings by a branch from the final descriptor to the beginning of
-+the list. The ring sizes are set at compile time by RX/TX_RING_SIZE.
-+The NatSemi design uses a 'next descriptor' pointer that the driver forms
-+into a list, thus rings can be arbitrarily sized. Before changing the
-+ring sizes you should understand the flow and cache effects of the
-+full/available/empty hysteresis.
-+
-+IIIb/c. Transmit/Receive Structure
-+
-+This driver uses a zero-copy receive and transmit scheme.
-+The driver allocates full frame size skbuffs for the Rx ring buffers at
-+open() time and passes the skb->data field to the chip as receive data
-+buffers. When an incoming frame is less than RX_COPYBREAK bytes long,
-+a fresh skbuff is allocated and the frame is copied to the new skbuff.
-+When the incoming frame is larger, the skbuff is passed directly up the
-+protocol stack. Buffers consumed this way are replaced by newly allocated
-+skbuffs in a later phase of receives.
-+
-+The RX_COPYBREAK value is chosen to trade-off the memory wasted by
-+using a full-sized skbuff for small frames vs. the copying costs of larger
-+frames. New boards are typically used in generously configured machines
-+and the underfilled buffers have negligible impact compared to the benefit of
-+a single allocation size, so the default value of zero results in never
-+copying packets. When copying is done, the cost is usually mitigated by using
-+a combined copy/checksum routine. Copying also preloads the cache, which is
-+most useful with small frames.
-+
-+A subtle aspect of the operation is that unaligned buffers are not permitted
-+by the hardware. Thus the IP header at offset 14 in an ethernet frame isn't
-+longword aligned for further processing. On copies, frames are put into the
-+skbuff at an offset of "+2", 16-byte aligning the IP header.
-+
-+IIId. Synchronization
-+
-+The driver runs as two independent, single-threaded flows of control. One
-+is the send-packet routine, which enforces single-threaded use by the
-+dev->tbusy flag. The other thread is the interrupt handler, which is single
-+threaded by the hardware and interrupt handling software.
-+
-+The send packet thread has partial control over the Tx ring and 'dev->tbusy'
-+flag. It sets the tbusy flag whenever it's queuing a Tx packet. If the next
-+queue slot is empty, it clears the tbusy flag when finished; otherwise it sets
-+the 'lp->tx_full' flag.
-+
-+The interrupt handler has exclusive control over the Rx ring and records stats
-+from the Tx ring. After reaping the stats, it marks the Tx queue entry as
-+empty by incrementing the dirty_tx mark. Iff the 'lp->tx_full' flag is set, it
-+clears both the tx_full and tbusy flags.
-+
-+IV. Notes
-+
-+The NatSemi 820 series PCI gigabit chips are very common on low-cost NICs.
-+The '821 appears to be the same as '820 chip, only with pins for the upper
-+32 bits marked "N/C".
-+
-+IVb. References
-+
-+http://www.scyld.com/expert/100mbps.html
-+http://www.scyld.com/expert/NWay.html
-+The NatSemi dp83820 datasheet is available: search www.natsemi.com
-+
-+IVc. Errata
-+
-+None characterised.
-+
-+*/
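
As a hedged illustration of the copybreak scheme from section IIIb/c, the sketch below mirrors the decision that netdev_rx() makes for each received frame; it is not part of the patch, and the function and parameter names are hypothetical:

/* Sketch only: small frames are copied into a fresh skbuff so the full-sized
   ring buffer can be reused; large frames are handed up zero-copy and the
   caller refills the empty ring slot in a later phase of receive processing. */
static void rx_copybreak_sketch(struct net_device *dev, struct sk_buff *ring_skb,
				int pkt_len, int rx_copybreak)
{
	struct sk_buff *skb;

	if (pkt_len < rx_copybreak
		&& (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
		/* Small frame: copy into a minimally sized skbuff so the
		   full-sized ring buffer stays in place for reuse. */
		skb->dev = dev;
		skb_reserve(skb, 2);		/* 16-byte align the IP header. */
		memcpy(skb_put(skb, pkt_len), ring_skb->tail, pkt_len);
	} else {
		/* Large frame: hand the ring skbuff up the stack zero-copy. */
		skb = ring_skb;
		skb_put(skb, pkt_len);
	}
	skb->protocol = eth_type_trans(skb, dev);
	netif_rx(skb);
}
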
-+
-+
-+
-+static void *ns820_probe1(struct pci_dev *pdev, void *init_dev,
-+ long ioaddr, int irq, int chip_idx, int find_cnt);
-+static int power_event(void *dev_instance, int event);
-+enum chip_capability_flags {FDXActiveLow=1, InvertGbXcvrPwr=2, };
-+#ifdef USE_IO_OPS
-+#define PCI_IOTYPE (PCI_USES_MASTER | PCI_USES_IO | PCI_ADDR0)
-+#else
-+#define PCI_IOTYPE (PCI_USES_MASTER | PCI_USES_MEM | PCI_ADDR1)
-+#endif
-+
-+static struct pci_id_info pci_id_tbl[] = {
-+ { "D-Link DGE-500T (DP83820)",
-+ { 0x0022100B, 0xffffffff, 0x49001186, 0xffffffff, },
-+ PCI_IOTYPE, 256, FDXActiveLow},
-+ {"NatSemi DP83820", { 0x0022100B, 0xffffffff },
-+ PCI_IOTYPE, 256, 0},
-+ {0,}, /* 0 terminated list. */
-+};
-+
-+struct drv_id_info ns820_drv_id = {
-+ "ns820", PCI_HOTSWAP, PCI_CLASS_NETWORK_ETHERNET<<8, pci_id_tbl,
-+ ns820_probe1, power_event };
-+
-+/* Offsets to the device registers.
-+ Unlike software-only systems, device drivers interact with complex hardware.
-+ It's not useful to define symbolic names for every register bit in the
-+ device. Please do not change these names without good reason.
-+*/
-+enum register_offsets {
-+ ChipCmd=0x00, ChipConfig=0x04, EECtrl=0x08, PCIBusCfg=0x0C,
-+ IntrStatus=0x10, IntrMask=0x14, IntrEnable=0x18, IntrHoldoff=0x1C,
-+ TxRingPtr=0x20, TxRingPtrHi=0x24, TxConfig=0x28,
-+ RxRingPtr=0x30, RxRingPtrHi=0x34, RxConfig=0x38,
-+ WOLCmd=0x40, PauseCmd=0x44, RxFilterAddr=0x48, RxFilterData=0x4C,
-+ BootRomAddr=0x50, BootRomData=0x54, ChipRevReg=0x58,
-+ StatsCtrl=0x5C, RxPktErrs=0x60, RxMissed=0x68, RxCRCErrs=0x64,
-+};
-+
-+/* Bits in ChipCmd. */
-+enum ChipCmdBits {
-+ ChipReset=0x100, SoftIntr=0x80, RxReset=0x20, TxReset=0x10,
-+ RxOff=0x08, RxOn=0x04, TxOff=0x02, TxOn=0x01,
-+};
-+
-+/* Bits in ChipConfig. */
-+enum ChipConfigBits {
-+ CfgLinkGood=0x80000000, CfgFDX=0x10000000,
-+ CfgXcrReset=0x0400, CfgXcrOff=0x0200,
-+};
-+
-+/* Bits in the interrupt status/mask registers. */
-+enum intr_status_bits {
-+ IntrRxDone=0x0001, IntrRxIntr=0x0002, IntrRxErr=0x0004, IntrRxEarly=0x0008,
-+ IntrRxIdle=0x0010, IntrRxOverrun=0x0020,
-+ IntrTxDone=0x0040, IntrTxIntr=0x0080, IntrTxErr=0x0100,
-+ IntrTxIdle=0x0200, IntrTxUnderrun=0x0400,
-+ StatsMax=0x0800, IntrDrv=0x1000, WOLPkt=0x2000, LinkChange=0x4000,
-+ RxStatusOverrun=0x10000,
-+ RxResetDone=0x00200000, TxResetDone=0x00400000,
-+ IntrPCIErr=0x001E0000,
-+ IntrNormalSummary=0x0251, IntrAbnormalSummary=0xED20,
-+};
-+
-+/* Bits in the RxMode register. */
-+enum rx_mode_bits {
-+ AcceptErr=0x20, AcceptRunt=0x10,
-+ AcceptBroadcast=0xC0000000,
-+ AcceptMulticast=0x00200000, AcceptAllMulticast=0x20000000,
-+ AcceptAllPhys=0x10000000, AcceptMyPhys=0x08000000,
-+};
-+
-+/* The Rx and Tx buffer descriptors. */
-+/* Note that using only 32 bit fields simplifies conversion to big-endian
-+ architectures. */
-+struct netdev_desc {
-+#if ADDRLEN == 64
-+	u64 next_desc;
-+	u64 buf_addr;
-+#else
-+	u32 next_desc;
-+	u32 buf_addr;
-+#endif
-+ s32 cmd_status;
-+ u32 vlan_status;
-+};
-+
-+/* Bits in network_desc.status */
-+enum desc_status_bits {
-+ DescOwn=0x80000000, DescMore=0x40000000, DescIntr=0x20000000,
-+ DescNoCRC=0x10000000,
-+ DescPktOK=0x08000000, RxTooLong=0x00400000,
-+};
-+
-+#define PRIV_ALIGN 15 /* Required alignment mask */
-+struct netdev_private {
-+ /* Descriptor rings first for alignment. */
-+ struct netdev_desc rx_ring[RX_RING_SIZE];
-+ struct netdev_desc tx_ring[TX_RING_SIZE];
-+ struct net_device *next_module; /* Link for devices of this type. */
-+ void *priv_addr; /* Unaligned address for kfree */
-+ const char *product_name;
-+ /* The addresses of receive-in-place skbuffs. */
-+ struct sk_buff* rx_skbuff[RX_RING_SIZE];
-+ /* The saved address of a sent-in-place packet/buffer, for later free(). */
-+ struct sk_buff* tx_skbuff[TX_RING_SIZE];
-+ struct net_device_stats stats;
-+ struct timer_list timer; /* Media monitoring timer. */
-+ /* Frequently used values: keep some adjacent for cache effect. */
-+ int msg_level;
-+ int chip_id, drv_flags;
-+ struct pci_dev *pci_dev;
-+ long in_interrupt; /* Word-long for SMP locks. */
-+ int max_interrupt_work;
-+ int intr_enable;
-+ unsigned int restore_intr_enable:1; /* Set if temporarily masked. */
-+	unsigned int rx_q_empty:1;			/* Set when out of skbuffs. */
-+
-+ struct netdev_desc *rx_head_desc;
-+ unsigned int cur_rx, dirty_rx; /* Producer/consumer ring indices */
-+ unsigned int rx_buf_sz; /* Based on MTU+slack. */
-+ int rx_copybreak;
-+
-+ unsigned int cur_tx, dirty_tx;
-+ unsigned int tx_full:1; /* The Tx queue is full. */
-+ /* These values keep track of the transceiver/media in use. */
-+ unsigned int full_duplex:1; /* Full-duplex operation requested. */
-+ unsigned int duplex_lock:1;
-+ unsigned int medialock:1; /* Do not sense media. */
-+ unsigned int default_port; /* Last dev->if_port value. */
-+ /* Rx filter. */
-+ u32 cur_rx_mode;
-+ u32 rx_filter[16];
-+ int multicast_filter_limit;
-+ /* FIFO and PCI burst thresholds. */
-+ int tx_config, rx_config;
-+ /* MII transceiver section. */
-+ u16 advertising; /* NWay media advertisement */
-+};
-+
-+static int eeprom_read(long ioaddr, int location);
-+static void mdio_sync(long mdio_addr);
-+static int mdio_read(struct net_device *dev, int phy_id, int location);
-+static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
-+static int netdev_open(struct net_device *dev);
-+static void check_duplex(struct net_device *dev);
-+static void netdev_timer(unsigned long data);
-+static void tx_timeout(struct net_device *dev);
-+static int rx_ring_fill(struct net_device *dev);
-+static void init_ring(struct net_device *dev);
-+static int start_tx(struct sk_buff *skb, struct net_device *dev);
-+static void intr_handler(int irq, void *dev_instance, struct pt_regs *regs);
-+static void netdev_error(struct net_device *dev, int intr_status);
-+static int netdev_rx(struct net_device *dev);
-+static void netdev_error(struct net_device *dev, int intr_status);
-+static void set_rx_mode(struct net_device *dev);
-+static struct net_device_stats *get_stats(struct net_device *dev);
-+static int mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
-+static int netdev_close(struct net_device *dev);
-+
-+
-+
-+/* A list of our installed devices, for removing the driver module. */
-+static struct net_device *root_net_dev = NULL;
-+
-+#ifndef MODULE
-+int ns820_probe(struct net_device *dev)
-+{
-+ if (pci_drv_register(&ns820_drv_id, dev) < 0)
-+ return -ENODEV;
-+ printk(KERN_INFO "%s" KERN_INFO "%s", version1, version2);
-+ return 0;
-+}
-+#endif
-+
-+static void *ns820_probe1(struct pci_dev *pdev, void *init_dev,
-+ long ioaddr, int irq, int chip_idx, int card_idx)
-+{
-+ struct net_device *dev;
-+ struct netdev_private *np;
-+ void *priv_mem;
-+ int i, option = card_idx < MAX_UNITS ? options[card_idx] : 0;
-+
-+ dev = init_etherdev(init_dev, 0);
-+ if (!dev)
-+ return NULL;
-+
-+ /* Perhaps NETIF_MSG_PROBE */
-+ printk(KERN_INFO "%s: %s at 0x%lx, ",
-+ dev->name, pci_id_tbl[chip_idx].name, ioaddr);
-+
-+ for (i = 0; i < 3; i++)
-+ ((u16 *)dev->dev_addr)[i] = le16_to_cpu(eeprom_read(ioaddr, 12 - i));
-+ for (i = 0; i < 5; i++)
-+ printk("%2.2x:", dev->dev_addr[i]);
-+ printk("%2.2x, IRQ %d.\n", dev->dev_addr[i], irq);
-+
-+ /* Reset the chip to erase previous misconfiguration. */
-+ writel(ChipReset, ioaddr + ChipCmd);
-+ /* Power up Xcvr. */
-+ writel(~CfgXcrOff & readl(ioaddr + ChipConfig), ioaddr + ChipConfig);
-+
-+ /* Make certain elements e.g. descriptor lists are aligned. */
-+ priv_mem = kmalloc(sizeof(*np) + PRIV_ALIGN, GFP_KERNEL);
-+ /* Check for the very unlikely case of no memory. */
-+ if (priv_mem == NULL)
-+ return NULL;
-+
-+ dev->base_addr = ioaddr;
-+ dev->irq = irq;
-+
-+ dev->priv = np = (void *)(((long)priv_mem + PRIV_ALIGN) & ~PRIV_ALIGN);
-+ memset(np, 0, sizeof(*np));
-+ np->priv_addr = priv_mem;
-+
-+ np->next_module = root_net_dev;
-+ root_net_dev = dev;
-+
-+ np->pci_dev = pdev;
-+ np->chip_id = chip_idx;
-+ np->drv_flags = pci_id_tbl[chip_idx].drv_flags;
-+ np->msg_level = (1 << debug) - 1;
-+ np->rx_copybreak = rx_copybreak;
-+ np->max_interrupt_work = max_interrupt_work;
-+ np->multicast_filter_limit = multicast_filter_limit;
-+
-+ if (dev->mem_start)
-+ option = dev->mem_start;
-+
-+ /* The lower four bits are the media type. */
-+ if (option > 0) {
-+ if (option & 0x220)
-+ np->full_duplex = 1;
-+ np->default_port = option & 0x33ff;
-+ if (np->default_port & 0x330)
-+ np->medialock = 1;
-+ }
-+ if (card_idx < MAX_UNITS && full_duplex[card_idx] > 0)
-+ np->full_duplex = 1;
-+
-+ if (np->full_duplex) {
-+ if (np->msg_level & NETIF_MSG_PROBE)
-+ printk(KERN_INFO "%s: Set to forced full duplex, autonegotiation"
-+ " disabled.\n", dev->name);
-+ np->duplex_lock = 1;
-+ }
-+
-+ /* The chip-specific entries in the device structure. */
-+ dev->open = &netdev_open;
-+ dev->hard_start_xmit = &start_tx;
-+ dev->stop = &netdev_close;
-+ dev->get_stats = &get_stats;
-+ dev->set_multicast_list = &set_rx_mode;
-+ dev->do_ioctl = &mii_ioctl;
-+
-+ /* Allow forcing the media type. */
-+ if (option > 0) {
-+ if (option & 0x220)
-+ np->full_duplex = 1;
-+ np->default_port = option & 0x3ff;
-+ if (np->default_port & 0x330) {
-+ np->medialock = 1;
-+ if (np->msg_level & NETIF_MSG_PROBE)
-+ printk(KERN_INFO " Forcing %dMbs %s-duplex operation.\n",
-+ (option & 0x300 ? 100 : 10),
-+ (np->full_duplex ? "full" : "half"));
-+ mdio_write(dev, 1, 0,
-+ ((option & 0x300) ? 0x2000 : 0) | /* 100mbps? */
-+ (np->full_duplex ? 0x0100 : 0)); /* Full duplex? */
-+ }
-+ }
-+
-+ return dev;
-+}
-+
-+
-+/* Read the EEPROM and MII Management Data I/O (MDIO) interfaces.
-+ The EEPROM code is for the common 93c06/46 EEPROMs with 6 bit addresses.
-+   For EEPROMs with 8/10 bit addresses, update to the code in other drivers.
-+*/
-+
-+/* Delay between EEPROM clock transitions.
-+ This "delay" forces out buffered PCI writes, which is sufficient to meet
-+ the timing requirements of most EEPROMs.
-+*/
-+#define eeprom_delay(ee_addr) readl(ee_addr)
-+
-+enum EEPROM_Ctrl_Bits {
-+ EE_ShiftClk=0x04, EE_DataIn=0x01, EE_ChipSelect=0x08, EE_DataOut=0x02,
-+};
-+#define EE_Write0 (EE_ChipSelect)
-+#define EE_Write1 (EE_ChipSelect | EE_DataIn)
-+
-+/* The EEPROM commands include the 01 preamble. */
-+enum EEPROM_Cmds {
-+ EE_WriteCmd=5, EE_ReadCmd=6, EE_EraseCmd=7,
-+};
-+
-+static int eeprom_read(long addr, int location)
-+{
-+ long eeprom_addr = addr + EECtrl;
-+ int read_cmd = (EE_ReadCmd << 6) | location;
-+ int retval = 0;
-+ int i;
-+
-+ writel(EE_Write0, eeprom_addr);
-+
-+ /* Shift the read command bits out. */
-+ for (i = 10; i >= 0; i--) {
-+ int dataval = (read_cmd & (1 << i)) ? EE_Write1 : EE_Write0;
-+ writel(dataval, eeprom_addr);
-+ eeprom_delay(eeprom_addr);
-+ writel(dataval | EE_ShiftClk, eeprom_addr);
-+ eeprom_delay(eeprom_addr);
-+ }
-+ writel(EE_ChipSelect, eeprom_addr);
-+ eeprom_delay(eeprom_addr);
-+
-+ for (i = 15; i >= 0; i--) {
-+ writel(EE_ChipSelect | EE_ShiftClk, eeprom_addr);
-+ eeprom_delay(eeprom_addr);
-+ retval |= (readl(eeprom_addr) & EE_DataOut) ? 1 << i : 0;
-+ writel(EE_ChipSelect, eeprom_addr);
-+ eeprom_delay(eeprom_addr);
-+ }
-+
-+ /* Terminate the EEPROM access. */
-+ writel(EE_Write0, eeprom_addr);
-+ writel(0, eeprom_addr);
-+ return retval;
-+}
-+
-+/* MII transceiver control section.
-+ Read and write MII registers using software-generated serial MDIO
-+ protocol. See the MII specifications or DP83840A data sheet for details.
-+
-+   The maximum data clock rate is 2.5 MHz. To meet minimum timing we
-+ must flush writes to the PCI bus with a PCI read. */
-+#define mdio_delay(mdio_addr) readl(mdio_addr)
-+
-+/* Set iff a MII transceiver on any interface requires mdio preamble.
-+   This is only set with older transceivers, so the extra
-+ code size of a per-interface flag is not worthwhile. */
-+static char mii_preamble_required = 0;
-+
-+enum mii_reg_bits {
-+ MDIO_ShiftClk=0x0040, MDIO_Data=0x0010, MDIO_EnbOutput=0x0020,
-+};
-+#define MDIO_EnbIn (0)
-+#define MDIO_WRITE0 (MDIO_EnbOutput)
-+#define MDIO_WRITE1 (MDIO_Data | MDIO_EnbOutput)
-+
-+/* Generate the preamble required for initial synchronization and
-+ a few older transceivers. */
-+static void mdio_sync(long mdio_addr)
-+{
-+ int bits = 32;
-+
-+ /* Establish sync by sending at least 32 logic ones. */
-+ while (--bits >= 0) {
-+ writel(MDIO_WRITE1, mdio_addr);
-+ mdio_delay(mdio_addr);
-+ writel(MDIO_WRITE1 | MDIO_ShiftClk, mdio_addr);
-+ mdio_delay(mdio_addr);
-+ }
-+}
-+
-+static int mdio_read(struct net_device *dev, int phy_id, int location)
-+{
-+ long mdio_addr = dev->base_addr + EECtrl;
-+ int mii_cmd = (0xf6 << 10) | (phy_id << 5) | location;
-+ int i, retval = 0;
-+
-+ if (mii_preamble_required)
-+ mdio_sync(mdio_addr);
-+
-+ /* Shift the read command bits out. */
-+ for (i = 15; i >= 0; i--) {
-+ int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;
-+
-+ writel(dataval, mdio_addr);
-+ mdio_delay(mdio_addr);
-+ writel(dataval | MDIO_ShiftClk, mdio_addr);
-+ mdio_delay(mdio_addr);
-+ }
-+ /* Read the two transition, 16 data, and wire-idle bits. */
-+ for (i = 19; i > 0; i--) {
-+ writel(MDIO_EnbIn, mdio_addr);
-+ mdio_delay(mdio_addr);
-+ retval = (retval << 1) | ((readl(mdio_addr) & MDIO_Data) ? 1 : 0);
-+ writel(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
-+ mdio_delay(mdio_addr);
-+ }
-+ return (retval>>1) & 0xffff;
-+}
-+
-+static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
-+{
-+ long mdio_addr = dev->base_addr + EECtrl;
-+ int mii_cmd = (0x5002 << 16) | (phy_id << 23) | (location<<18) | value;
-+ int i;
-+
-+ if (mii_preamble_required)
-+ mdio_sync(mdio_addr);
-+
-+ /* Shift the command bits out. */
-+ for (i = 31; i >= 0; i--) {
-+ int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;
-+
-+ writel(dataval, mdio_addr);
-+ mdio_delay(mdio_addr);
-+ writel(dataval | MDIO_ShiftClk, mdio_addr);
-+ mdio_delay(mdio_addr);
-+ }
-+ /* Clear out extra bits. */
-+ for (i = 2; i > 0; i--) {
-+ writel(MDIO_EnbIn, mdio_addr);
-+ mdio_delay(mdio_addr);
-+ writel(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
-+ mdio_delay(mdio_addr);
-+ }
-+ return;
-+}
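The 32-bit command that mdio_write() above shifts out appears to follow the standard IEEE 802.3 clause-22 write frame. As a minimal standalone sketch (hypothetical phy_id, register and value), the following decodes that word field by field:

    /* Illustration only: the 0x5002 constant in mdio_write() supplies the
       start bits "01", the write opcode "01" and the turnaround "10"; the
       PHY address, register address and data are merged in around them. */
    #include <stdio.h>

    int main(void)
    {
        int phy_id = 1, location = 0, value = 0x2100;   /* hypothetical values */
        unsigned int cmd = (0x5002 << 16) | (phy_id << 23) | (location << 18) | value;

        printf("ST=%u%u OP=%u%u PHY=%u REG=%u TA=%u%u DATA=0x%04x\n",
               (cmd >> 31) & 1, (cmd >> 30) & 1,        /* start bits:  01         */
               (cmd >> 29) & 1, (cmd >> 28) & 1,        /* opcode:      01 = write */
               (cmd >> 23) & 0x1f, (cmd >> 18) & 0x1f,  /* PHY and register address */
               (cmd >> 17) & 1, (cmd >> 16) & 1,        /* turnaround:  10         */
               cmd & 0xffff);                           /* 16 data bits            */
        return 0;
    }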
-+
-+static int netdev_open(struct net_device *dev)
-+{
-+ struct netdev_private *np = (struct netdev_private *)dev->priv;
-+ long ioaddr = dev->base_addr;
-+ int i;
-+ u32 intr_status = readl(ioaddr + IntrStatus);
-+
-+ /* We have not yet encountered a case where we need to reset the chip. */
-+
-+ MOD_INC_USE_COUNT;
-+
-+ if (request_irq(dev->irq, &intr_handler, SA_SHIRQ, dev->name, dev)) {
-+ MOD_DEC_USE_COUNT;
-+ return -EAGAIN;
-+ }
-+
-+ /* Power up Xcvr. */
-+ writel((~CfgXcrOff & readl(ioaddr + ChipConfig)) | 0x00400000,
-+ ioaddr + ChipConfig);
-+ if (np->msg_level & NETIF_MSG_IFUP)
-+ printk(KERN_DEBUG "%s: netdev_open() irq %d intr_status %8.8x.\n",
-+ dev->name, dev->irq, intr_status);
-+
-+ init_ring(dev);
-+
-+#if defined(ADDR_64BITS) && defined(__alpha__)
-+ writel(virt_to_bus(np->rx_ring) >> 32, ioaddr + RxRingPtrHi);
-+ writel(virt_to_bus(np->tx_ring) >> 32, ioaddr + TxRingPtrHi);
-+#else
-+ writel(0, ioaddr + RxRingPtrHi);
-+ writel(0, ioaddr + TxRingPtrHi);
-+#endif
-+ writel(virt_to_bus(np->rx_ring), ioaddr + RxRingPtr);
-+ writel(virt_to_bus(np->tx_ring), ioaddr + TxRingPtr);
-+
-+ for (i = 0; i < 6; i += 2) {
-+ writel(i, ioaddr + RxFilterAddr);
-+ writel(dev->dev_addr[i] + (dev->dev_addr[i+1] << 8),
-+ ioaddr + RxFilterData);
-+ }
-+
-+ /* Initialize other registers. */
-+ /* Configure the PCI bus bursts and FIFO thresholds. */
-+ /* Configure for standard, in-spec Ethernet. */
-+
-+ if (np->full_duplex ||
-+ ((readl(ioaddr + ChipConfig) & CfgFDX) == 0) ^
-+ ((np->drv_flags & FDXActiveLow) != 0)) {
-+ np->tx_config = 0xD0801002;
-+ np->rx_config = 0x10000020;
-+ } else {
-+ np->tx_config = 0x10801002;
-+ np->rx_config = 0x0020;
-+ }
-+ if (dev->mtu > 1500)
-+ np->rx_config |= 0x08000000;
-+ writel(np->tx_config, ioaddr + TxConfig);
-+ writel(np->rx_config, ioaddr + RxConfig);
-+ if (np->msg_level & NETIF_MSG_IFUP)
-+ printk(KERN_DEBUG "%s: Setting TxConfig to %8.8x.\n",
-+ dev->name, (int)readl(ioaddr + TxConfig));
-+
-+ if (dev->if_port == 0)
-+ dev->if_port = np->default_port;
-+
-+ np->in_interrupt = 0;
-+
-+ check_duplex(dev);
-+ set_rx_mode(dev);
-+ netif_start_tx_queue(dev);
-+
-+ /* Enable interrupts by setting the interrupt mask. */
-+ np->intr_enable = IntrNormalSummary | IntrAbnormalSummary | 0x1f;
-+ writel(np->intr_enable, ioaddr + IntrMask);
-+ writel(1, ioaddr + IntrEnable);
-+
-+ writel(RxOn | TxOn, ioaddr + ChipCmd);
-+ writel(4, ioaddr + StatsCtrl); /* Clear Stats */
-+
-+ if (np->msg_level & NETIF_MSG_IFUP)
-+ printk(KERN_DEBUG "%s: Done netdev_open(), status: %x.\n",
-+ dev->name, (int)readl(ioaddr + ChipCmd));
-+
-+ /* Set the timer to check for link beat. */
-+ init_timer(&np->timer);
-+ np->timer.expires = jiffies + 3*HZ;
-+ np->timer.data = (unsigned long)dev;
-+ np->timer.function = &netdev_timer; /* timer handler */
-+ add_timer(&np->timer);
-+
-+ return 0;
-+}
-+
-+static void check_duplex(struct net_device *dev)
-+{
-+ struct netdev_private *np = (struct netdev_private *)dev->priv;
-+ long ioaddr = dev->base_addr;
-+ int duplex;
-+
-+ if (np->duplex_lock)
-+ return;
-+ duplex = readl(ioaddr + ChipConfig) & CfgFDX ? 1 : 0;
-+ if (np->full_duplex != duplex) {
-+ np->full_duplex = duplex;
-+ if (np->msg_level & NETIF_MSG_LINK)
-+ printk(KERN_INFO "%s: Setting %s-duplex based on negotiated link"
-+ " capability.\n", dev->name,
-+ duplex ? "full" : "half");
-+ if (duplex) {
-+ np->rx_config |= 0x10000000;
-+ np->tx_config |= 0xC0000000;
-+ } else {
-+ np->rx_config &= ~0x10000000;
-+ np->tx_config &= ~0xC0000000;
-+ }
-+ writel(np->tx_config, ioaddr + TxConfig);
-+ writel(np->rx_config, ioaddr + RxConfig);
-+ if (np->msg_level & NETIF_MSG_LINK)
-+ printk(KERN_DEBUG "%s: Setting TxConfig to %8.8x (%8.8x).\n",
-+ dev->name, np->tx_config, (int)readl(ioaddr + TxConfig));
-+ }
-+}
-+
-+static void netdev_timer(unsigned long data)
-+{
-+ struct net_device *dev = (struct net_device *)data;
-+ struct netdev_private *np = (struct netdev_private *)dev->priv;
-+ long ioaddr = dev->base_addr;
-+ int next_tick = 10*HZ;
-+
-+ if (np->msg_level & NETIF_MSG_TIMER)
-+ printk(KERN_DEBUG "%s: Driver monitor timer tick, status %8.8x.\n",
-+ dev->name, (int)readl(ioaddr + ChipConfig));
-+ if (np->rx_q_empty) {
-+ /* Trigger an interrupt to refill. */
-+ writel(SoftIntr, ioaddr + ChipCmd);
-+ }
-+ if (netif_queue_paused(dev) &&
-+ np->cur_tx - np->dirty_tx > 1 &&
-+ (jiffies - dev->trans_start) > TX_TIMEOUT) {
-+ tx_timeout(dev);
-+ }
-+ check_duplex(dev);
-+ np->timer.expires = jiffies + next_tick;
-+ add_timer(&np->timer);
-+}
-+
-+static void tx_timeout(struct net_device *dev)
-+{
-+ struct netdev_private *np = (struct netdev_private *)dev->priv;
-+ long ioaddr = dev->base_addr;
-+
-+ printk(KERN_WARNING "%s: Transmit timed out, status %8.8x,"
-+ " resetting...\n", dev->name, (int)readl(ioaddr + TxRingPtr));
-+
-+ if (np->msg_level & NETIF_MSG_TX_ERR) {
-+ int i;
-+ printk(KERN_DEBUG " Rx ring %p: ", np->rx_ring);
-+ for (i = 0; i < RX_RING_SIZE; i++)
-+ printk(" %8.8x", (unsigned int)np->rx_ring[i].cmd_status);
-+ printk("\n"KERN_DEBUG" Tx ring %p: ", np->tx_ring);
-+ for (i = 0; i < TX_RING_SIZE; i++)
-+ printk(" %4.4x", np->tx_ring[i].cmd_status);
-+ printk("\n");
-+ }
-+
-+ /* Perhaps we should reinitialize the hardware here. */
-+ dev->if_port = 0;
-+ /* Stop and restart the chip's Tx processes. */
-+
-+ /* Trigger an immediate transmit demand. */
-+
-+ dev->trans_start = jiffies;
-+ np->stats.tx_errors++;
-+ return;
-+}
-+
-+/* Refill the Rx ring buffers, returning non-zero if not full. */
-+static int rx_ring_fill(struct net_device *dev)
-+{
-+ struct netdev_private *np = (struct netdev_private *)dev->priv;
-+ unsigned int entry;
-+
-+ for (; np->cur_rx - np->dirty_rx > 0; np->dirty_rx++) {
-+ entry = np->dirty_rx % RX_RING_SIZE;
-+ if (np->rx_skbuff[entry] == NULL) {
-+ struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz);
-+ np->rx_skbuff[entry] = skb;
-+ if (skb == NULL)
-+ return 1; /* Better luck next time. */
-+ skb->dev = dev; /* Mark as being used by this device. */
-+ np->rx_ring[entry].buf_addr = virt_to_bus(skb->tail);
-+ }
-+ np->rx_ring[entry].cmd_status = cpu_to_le32(DescIntr | np->rx_buf_sz);
-+ }
-+ return 0;
-+}
-+
-+/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
-+static void init_ring(struct net_device *dev)
-+{
-+ struct netdev_private *np = (struct netdev_private *)dev->priv;
-+ int i;
-+
-+ np->tx_full = 0;
-+ np->cur_rx = np->cur_tx = 0;
-+ np->dirty_rx = np->dirty_tx = 0;
-+
-+ /* MAX(PKT_BUF_SZ, dev->mtu + 8); */
-+ /* I know you _want_ to change this without understanding it. Don't. */
-+ np->rx_buf_sz = (dev->mtu <= 1532 ? PKT_BUF_SZ : dev->mtu + 8);
-+ np->rx_head_desc = &np->rx_ring[0];
-+
-+ /* Initialize all Rx descriptors. */
-+ for (i = 0; i < RX_RING_SIZE; i++) {
-+ np->rx_ring[i].next_desc = virt_to_bus(&np->rx_ring[i+1]);
-+ np->rx_ring[i].cmd_status = cpu_to_le32(DescOwn);
-+ np->rx_skbuff[i] = 0;
-+ }
-+ /* Mark the last entry as wrapping the ring. */
-+ np->rx_ring[i-1].next_desc = virt_to_bus(&np->rx_ring[0]);
-+
-+ for (i = 0; i < TX_RING_SIZE; i++) {
-+ np->tx_skbuff[i] = 0;
-+ np->tx_ring[i].next_desc = virt_to_bus(&np->tx_ring[i+1]);
-+ np->tx_ring[i].cmd_status = 0;
-+ }
-+ np->tx_ring[i-1].next_desc = virt_to_bus(&np->tx_ring[0]);
-+
-+ /* Fill in the Rx buffers.
-+ Allocation failure just leaves a "negative" np->dirty_rx. */
-+ np->dirty_rx = (unsigned int)(0 - RX_RING_SIZE);
-+ rx_ring_fill(dev);
-+
-+ return;
-+}
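The "negative" np->dirty_rx trick mentioned in the comment above is easy to miss. A minimal standalone sketch (RING_SIZE stands in for RX_RING_SIZE) of how unsigned wrap-around makes cur_rx - dirty_rx count the slots that still need buffers:

    /* Illustration only: the "negative" dirty_rx trick used by init_ring()
       and rx_ring_fill().  With unsigned arithmetic, cur_rx - dirty_rx is
       the number of ring slots that still need an skbuff, even though
       dirty_rx starts "below zero". */
    #include <stdio.h>

    #define RING_SIZE 32

    int main(void)
    {
        unsigned int cur_rx = 0;
        unsigned int dirty_rx = (unsigned int)(0 - RING_SIZE);   /* "negative" start */

        printf("slots to fill at open: %u\n", cur_rx - dirty_rx);    /* 32 */

        dirty_rx += 30;                        /* pretend 30 allocations succeeded */
        printf("slots still empty:     %u\n", cur_rx - dirty_rx);    /* 2 */
        return 0;
    }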
-+
-+static int start_tx(struct sk_buff *skb, struct net_device *dev)
-+{
-+ struct netdev_private *np = (struct netdev_private *)dev->priv;
-+ unsigned int entry;
-+
-+ /* Block a timer-based transmit from overlapping. This happens when
-+ packets are presumed lost, and we use this to check the Tx status. */
-+ if (netif_pause_tx_queue(dev) != 0) {
-+ /* This watchdog code is redundant with the media monitor timer. */
-+ if (jiffies - dev->trans_start > TX_TIMEOUT)
-+ tx_timeout(dev);
-+ return 1;
-+ }
-+
-+ /* Note: Ordering is important here; set the field with the
-+ "ownership" bit last, and only then increment cur_tx.
-+ No spinlock is needed for either Tx or Rx.
-+ */
-+
-+ /* Calculate the next Tx descriptor entry. */
-+ entry = np->cur_tx % TX_RING_SIZE;
-+
-+ np->tx_skbuff[entry] = skb;
-+
-+ np->tx_ring[entry].buf_addr = virt_to_bus(skb->data);
-+ np->tx_ring[entry].cmd_status = cpu_to_le32(DescOwn|DescIntr | skb->len);
-+ np->cur_tx++;
-+
-+ /* StrongARM: Explicitly cache flush np->tx_ring and skb->data,skb->len. */
-+
-+ if (np->cur_tx - np->dirty_tx >= TX_QUEUE_LEN - 1) {
-+ np->tx_full = 1;
-+ /* Check for a just-cleared queue. */
-+ if (np->cur_tx - (volatile unsigned int)np->dirty_tx
-+ < TX_QUEUE_LEN - 4) {
-+ np->tx_full = 0;
-+ netif_unpause_tx_queue(dev);
-+ } else
-+ netif_stop_tx_queue(dev);
-+ } else
-+ netif_unpause_tx_queue(dev); /* Typical path */
-+ /* Wake the potentially-idle transmit channel. */
-+ writel(TxOn, dev->base_addr + ChipCmd);
-+
-+ dev->trans_start = jiffies;
-+
-+ if (np->msg_level & NETIF_MSG_TX_QUEUED) {
-+ printk(KERN_DEBUG "%s: Transmit frame #%d queued in slot %d.\n",
-+ dev->name, np->cur_tx, entry);
-+ }
-+ return 0;
-+}
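The ownership-bit ordering that the comment inside start_tx() insists on is the whole locking story for this ring. Below is a minimal sketch of the same single-producer handoff in isolation; DescOwn and the descriptor layout are simplified stand-ins, not the driver's, and a real driver would also need a write barrier between the two stores.

    /* Illustration only: the lockless handoff start_tx() relies on.  The
       producer fills in the descriptor first and publishes the "owned by
       the NIC" bit last, so the consumer (the interrupt handler) never
       sees a half-built entry. */
    #include <stdint.h>
    #include <stdio.h>

    #define DescOwn 0x80000000u

    struct tx_desc {
        uint32_t buf_addr;
        volatile uint32_t cmd_status;
    };

    static void producer_queue(struct tx_desc *d, uint32_t addr, uint32_t len)
    {
        d->buf_addr = addr;                  /* 1: everything except ownership   */
        d->cmd_status = DescOwn | len;       /* 2: ownership bit published last  */
    }

    static int consumer_done(const struct tx_desc *d)
    {
        return (d->cmd_status & DescOwn) == 0;   /* reclaim only completed entries */
    }

    int main(void)
    {
        struct tx_desc d = { 0, 0 };
        producer_queue(&d, 0x1000, 64);
        printf("done yet? %d\n", consumer_done(&d));  /* 0 until the NIC clears DescOwn */
        return 0;
    }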
-+
-+/* The interrupt handler does all of the Rx thread work and cleans up
-+ after the Tx thread. */
-+static void intr_handler(int irq, void *dev_instance, struct pt_regs *rgs)
-+{
-+ struct net_device *dev = (struct net_device *)dev_instance;
-+ struct netdev_private *np;
-+ long ioaddr;
-+ int boguscnt;
-+
-+#ifndef final_version /* Can never occur. */
-+ if (dev == NULL) {
-+ printk (KERN_ERR "Netdev interrupt handler(): IRQ %d for unknown "
-+ "device.\n", irq);
-+ return;
-+ }
-+#endif
-+
-+ ioaddr = dev->base_addr;
-+ np = (struct netdev_private *)dev->priv;
-+ boguscnt = np->max_interrupt_work;
-+
-+#if defined(__i386__) && LINUX_VERSION_CODE < 0x020300
-+ /* A lock to prevent a simultaneous-entry bug on Intel SMP machines. */
-+ if (test_and_set_bit(0, (void*)&dev->interrupt)) {
-+ printk(KERN_ERR"%s: SMP simultaneous entry of an interrupt handler.\n",
-+ dev->name);
-+ dev->interrupt = 0; /* Avoid halting machine. */
-+ return;
-+ }
-+#endif
-+
-+ do {
-+ u32 intr_status = readl(ioaddr + IntrStatus);
-+
-+ if (np->msg_level & NETIF_MSG_INTR)
-+ printk(KERN_DEBUG "%s: Interrupt, status %8.8x.\n",
-+ dev->name, intr_status);
-+
-+ if (intr_status == 0 || intr_status == 0xffffffff)
-+ break;
-+
-+ /* Acknowledge all of the current interrupt sources ASAP.
-+ Nominally the read above accomplishes this, but... */
-+ writel(intr_status & 0x001ffff, ioaddr + IntrStatus);
-+
-+ if (intr_status & (IntrRxDone | IntrRxIntr)) {
-+ netdev_rx(dev);
-+ np->rx_q_empty = rx_ring_fill(dev);
-+ }
-+
-+ if (intr_status & (IntrRxIdle | IntrDrv)) {
-+ unsigned int old_dirty_rx = np->dirty_rx;
-+ if (rx_ring_fill(dev) == 0)
-+ np->rx_q_empty = 0;
-+ /* Restart Rx engine iff we did add a buffer. */
-+ if (np->dirty_rx != old_dirty_rx)
-+ writel(RxOn, dev->base_addr + ChipCmd);
-+ }
-+
-+ for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
-+ int entry = np->dirty_tx % TX_RING_SIZE;
-+ if (np->msg_level & NETIF_MSG_INTR)
-+ printk(KERN_DEBUG "%s: Tx entry %d @%p status %8.8x.\n",
-+ dev->name, entry, &np->tx_ring[entry],
-+ np->tx_ring[entry].cmd_status);
-+ if (np->tx_ring[entry].cmd_status & cpu_to_le32(DescOwn))
-+ break;
-+ if (np->tx_ring[entry].cmd_status & cpu_to_le32(0x08000000)) {
-+ if (np->msg_level & NETIF_MSG_TX_DONE)
-+ printk(KERN_DEBUG "%s: Transmit done, Tx status %8.8x.\n",
-+ dev->name, np->tx_ring[entry].cmd_status);
-+ np->stats.tx_packets++;
-+#if LINUX_VERSION_CODE > 0x20127
-+ np->stats.tx_bytes += np->tx_skbuff[entry]->len;
-+#endif
-+ } else { /* Various Tx errors */
-+ int tx_status = le32_to_cpu(np->tx_ring[entry].cmd_status);
-+ if (tx_status & 0x04010000) np->stats.tx_aborted_errors++;
-+ if (tx_status & 0x02000000) np->stats.tx_fifo_errors++;
-+ if (tx_status & 0x01000000) np->stats.tx_carrier_errors++;
-+ if (tx_status & 0x00200000) np->stats.tx_window_errors++;
-+ if (np->msg_level & NETIF_MSG_TX_ERR)
-+ printk(KERN_DEBUG "%s: Transmit error, Tx status %8.8x.\n",
-+ dev->name, tx_status);
-+ np->stats.tx_errors++;
-+ }
-+ /* Free the original skb. */
-+ dev_free_skb_irq(np->tx_skbuff[entry]);
-+ np->tx_skbuff[entry] = 0;
-+ }
-+ /* Note the 4 slot hysteresis to mark the queue non-full. */
-+ if (np->tx_full
-+ && np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
-+ /* The ring is no longer full, allow new TX entries. */
-+ np->tx_full = 0;
-+ netif_resume_tx_queue(dev);
-+ }
-+
-+ /* Abnormal error summary/uncommon events handlers. */
-+ if (intr_status & IntrAbnormalSummary)
-+ netdev_error(dev, intr_status);
-+
-+ if (--boguscnt < 0) {
-+ printk(KERN_WARNING "%s: Too much work at interrupt, "
-+ "status=0x%4.4x.\n",
-+ dev->name, intr_status);
-+ np->restore_intr_enable = 1;
-+ break;
-+ }
-+ } while (1);
-+
-+ if (np->msg_level & NETIF_MSG_INTR)
-+ printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
-+ dev->name, (int)readl(ioaddr + IntrStatus));
-+
-+#if defined(__i386__) && LINUX_VERSION_CODE < 0x020300
-+ clear_bit(0, (void*)&dev->interrupt);
-+#endif
-+ return;
-+}
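The "4 slot hysteresis" noted in the Tx-reclaim loop above mirrors the check in start_tx(). A minimal sketch of the stop/wake policy in isolation (QUEUE_LEN stands in for TX_QUEUE_LEN) may make the intent clearer: a single completed packet is not enough to wake a stopped queue, which avoids stop/wake thrashing.

    /* Illustration only: stop when the ring is (nearly) full, wake only
       once at least four slots have been reclaimed. */
    #include <stdio.h>

    #define QUEUE_LEN 16

    static int queue_stopped;

    static void after_tx_queued(unsigned int cur_tx, unsigned int dirty_tx)
    {
        if (cur_tx - dirty_tx >= QUEUE_LEN - 1)
            queue_stopped = 1;               /* ring (nearly) full: stop       */
    }

    static void after_tx_reclaimed(unsigned int cur_tx, unsigned int dirty_tx)
    {
        if (queue_stopped && cur_tx - dirty_tx < QUEUE_LEN - 4)
            queue_stopped = 0;               /* 4-slot hysteresis before wake  */
    }

    int main(void)
    {
        after_tx_queued(15, 0);              /* 15 outstanding -> stop          */
        after_tx_reclaimed(15, 2);           /* 13 outstanding -> still stopped */
        after_tx_reclaimed(15, 4);           /* 11 outstanding -> wake          */
        printf("stopped = %d\n", queue_stopped);
        return 0;
    }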
-+
-+/* This routine is logically part of the interrupt handler, but separated
-+ for clarity and better register allocation. */
-+static int netdev_rx(struct net_device *dev)
-+{
-+ struct netdev_private *np = (struct netdev_private *)dev->priv;
-+ int entry = np->cur_rx % RX_RING_SIZE;
-+ int boguscnt = np->dirty_rx + RX_RING_SIZE - np->cur_rx;
-+ s32 desc_status = le32_to_cpu(np->rx_head_desc->cmd_status);
-+
-+ /* If the driver owns the next entry it's a new packet. Send it up. */
-+ while (desc_status < 0) { /* e.g. & DescOwn */
-+ if (np->msg_level & NETIF_MSG_RX_STATUS)
-+ printk(KERN_DEBUG " In netdev_rx() entry %d status was %8.8x.\n",
-+ entry, desc_status);
-+ if (--boguscnt < 0)
-+ break;
-+ if ((desc_status & (DescMore|DescPktOK|RxTooLong)) != DescPktOK) {
-+ if (desc_status & DescMore) {
-+ printk(KERN_WARNING "%s: Oversized(?) Ethernet frame spanned "
-+ "multiple buffers, entry %#x status %x.\n",
-+ dev->name, np->cur_rx, desc_status);
-+ np->stats.rx_length_errors++;
-+ } else {
-+ /* There was an error. */
-+ if (np->msg_level & NETIF_MSG_RX_ERR)
-+ printk(KERN_DEBUG " netdev_rx() Rx error was %8.8x.\n",
-+ desc_status);
-+ np->stats.rx_errors++;
-+ if (desc_status & 0x06000000) np->stats.rx_over_errors++;
-+ if (desc_status & 0x00600000) np->stats.rx_length_errors++;
-+ if (desc_status & 0x00140000) np->stats.rx_frame_errors++;
-+ if (desc_status & 0x00080000) np->stats.rx_crc_errors++;
-+ }
-+ } else {
-+ struct sk_buff *skb;
-+ int pkt_len = (desc_status & 0x0fff) - 4; /* Omit CRC size. */
-+ /* Check if the packet is long enough to accept without copying
-+ to a minimally-sized skbuff. */
-+ if (pkt_len < np->rx_copybreak
-+ && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
-+ skb->dev = dev;
-+ skb_reserve(skb, 2); /* 16 byte align the IP header */
-+#if HAS_IP_COPYSUM
-+ eth_copy_and_sum(skb, np->rx_skbuff[entry]->tail, pkt_len, 0);
-+ skb_put(skb, pkt_len);
-+#else
-+ memcpy(skb_put(skb, pkt_len), np->rx_skbuff[entry]->tail,
-+ pkt_len);
-+#endif
-+ } else {
-+ skb_put(skb = np->rx_skbuff[entry], pkt_len);
-+ np->rx_skbuff[entry] = NULL;
-+ }
-+#ifndef final_version /* Remove after testing. */
-+ /* You will want this info for the initial debug. */
-+ if (np->msg_level & NETIF_MSG_PKTDATA)
-+ printk(KERN_DEBUG " Rx data %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:"
-+ "%2.2x %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x %2.2x%2.2x "
-+ "%d.%d.%d.%d.\n",
-+ skb->data[0], skb->data[1], skb->data[2], skb->data[3],
-+ skb->data[4], skb->data[5], skb->data[6], skb->data[7],
-+ skb->data[8], skb->data[9], skb->data[10],
-+ skb->data[11], skb->data[12], skb->data[13],
-+ skb->data[14], skb->data[15], skb->data[16],
-+ skb->data[17]);
-+#endif
-+ skb->protocol = eth_type_trans(skb, dev);
-+ /* W/ hardware checksum: skb->ip_summed = CHECKSUM_UNNECESSARY; */
-+ netif_rx(skb);
-+ dev->last_rx = jiffies;
-+ np->stats.rx_packets++;
-+#if LINUX_VERSION_CODE > 0x20127
-+ np->stats.rx_bytes += pkt_len;
-+#endif
-+ }
-+ entry = (++np->cur_rx) % RX_RING_SIZE;
-+ np->rx_head_desc = &np->rx_ring[entry];
-+ desc_status = le32_to_cpu(np->rx_head_desc->cmd_status);
-+ }
-+
-+ /* Refill is now done in the main interrupt loop. */
-+ return 0;
-+}
-+
-+static void netdev_error(struct net_device *dev, int intr_status)
-+{
-+ struct netdev_private *np = (struct netdev_private *)dev->priv;
-+ long ioaddr = dev->base_addr;
-+
-+ if (intr_status & LinkChange) {
-+ int chip_config = readl(ioaddr + ChipConfig);
-+ if (np->msg_level & NETIF_MSG_LINK)
-+ printk(KERN_NOTICE "%s: Link changed: Autonegotiation advertising"
-+ " %4.4x partner %4.4x.\n", dev->name,
-+ (int)readl(ioaddr + 0x90), (int)readl(ioaddr + 0x94));
-+ if (chip_config & CfgLinkGood)
-+ netif_link_up(dev);
-+ else
-+ netif_link_down(dev);
-+ check_duplex(dev);
-+ }
-+ if (intr_status & StatsMax) {
-+ get_stats(dev);
-+ }
-+ if (intr_status & IntrTxUnderrun) {
-+ /* Increase the Tx threshold, 32 byte units. */
-+ if ((np->tx_config & 0x3f) < 62)
-+ np->tx_config += 2; /* +64 bytes */
-+ writel(np->tx_config, ioaddr + TxConfig);
-+ }
-+ if (intr_status & WOLPkt) {
-+ int wol_status = readl(ioaddr + WOLCmd);
-+ printk(KERN_NOTICE "%s: Link wake-up event %8.8x.\n",
-+ dev->name, wol_status);
-+ }
-+ if (intr_status & (RxStatusOverrun | IntrRxOverrun)) {
-+ if (np->msg_level & NETIF_MSG_DRV)
-+ printk(KERN_ERR "%s: Rx overflow! ns820 %8.8x.\n",
-+ dev->name, intr_status);
-+ np->stats.rx_fifo_errors++;
-+ }
-+ if (intr_status & ~(LinkChange|StatsMax|RxResetDone|TxResetDone|
-+ RxStatusOverrun|0xA7ff)) {
-+ if (np->msg_level & NETIF_MSG_DRV)
-+ printk(KERN_ERR "%s: Something Wicked happened! ns820 %8.8x.\n",
-+ dev->name, intr_status);
-+ }
-+ /* Hmmmmm, it's not clear how to recover from PCI faults. */
-+ if (intr_status & IntrPCIErr) {
-+ np->stats.tx_fifo_errors++;
-+ np->stats.rx_fifo_errors++;
-+ }
-+}
-+
-+static struct net_device_stats *get_stats(struct net_device *dev)
-+{
-+ long ioaddr = dev->base_addr;
-+ struct netdev_private *np = (struct netdev_private *)dev->priv;
-+ int crc_errs = readl(ioaddr + RxCRCErrs);
-+
-+ if (crc_errs != 0xffffffff) {
-+ /* We need not lock this segment of code for SMP.
-+ There is no atomic-add vulnerability for most CPUs,
-+ and statistics are non-critical. */
-+ /* The chip need only report frames it silently dropped. */
-+ np->stats.rx_crc_errors += crc_errs;
-+ np->stats.rx_missed_errors += readl(ioaddr + RxMissed);
-+ }
-+
-+ return &np->stats;
-+}
-+
-+/* The little-endian AUTODIN II ethernet CRC calculations.
-+ A big-endian version is also available.
-+   This is slow but compact code. Do not use this routine for bulk data;
-+ use a table-based routine instead.
-+ This is common code and should be moved to net/core/crc.c.
-+ Chips may use the upper or lower CRC bits, and may reverse and/or invert
-+ them. Select the endian-ness that results in minimal calculations.
-+*/
-+static unsigned const ethernet_polynomial_le = 0xedb88320U;
-+static inline unsigned ether_crc_le(int length, unsigned char *data)
-+{
-+ unsigned int crc = 0xffffffff; /* Initial value. */
-+ while(--length >= 0) {
-+ unsigned char current_octet = *data++;
-+ int bit;
-+ for (bit = 8; --bit >= 0; current_octet >>= 1) {
-+ if ((crc ^ current_octet) & 1) {
-+ crc >>= 1;
-+ crc ^= ethernet_polynomial_le;
-+ } else
-+ crc >>= 1;
-+ }
-+ }
-+ return crc;
-+}
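The comment above recommends a table-based routine for bulk data. As a sketch of what that could look like (the names here are hypothetical, not an existing kernel API), the reflected-table variant below uses the same AUTODIN II polynomial and produces the same value as ether_crc_le():

    /* Illustration only: table-driven little-endian CRC-32.  One table
       lookup per byte instead of eight bit-by-bit steps. */
    #include <stdio.h>

    static unsigned int crc32_le_table[256];

    static void crc32_le_init(void)
    {
        unsigned int i, j, crc;

        for (i = 0; i < 256; i++) {
            crc = i;
            for (j = 0; j < 8; j++)
                crc = (crc >> 1) ^ ((crc & 1) ? 0xedb88320U : 0);
            crc32_le_table[i] = crc;
        }
    }

    static unsigned int ether_crc_le_by_table(int length, const unsigned char *data)
    {
        unsigned int crc = 0xffffffff;       /* same initial value as above */

        while (--length >= 0)
            crc = (crc >> 8) ^ crc32_le_table[(crc ^ *data++) & 0xff];
        return crc;
    }

    int main(void)
    {
        unsigned char mac[6] = { 0x00, 0x40, 0x05, 0x12, 0x34, 0x56 };  /* hypothetical */

        crc32_le_init();
        printf("crc = %08x\n", ether_crc_le_by_table(6, mac));
        return 0;
    }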
-+
-+static void set_rx_mode(struct net_device *dev)
-+{
-+ long ioaddr = dev->base_addr;
-+ struct netdev_private *np = (struct netdev_private *)dev->priv;
-+ u8 mc_filter[64]; /* Multicast hash filter */
-+ u32 rx_mode;
-+
-+ if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
-+ /* Unconditionally log net taps. */
-+ printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n", dev->name);
-+ rx_mode = AcceptBroadcast | AcceptAllMulticast | AcceptAllPhys
-+ | AcceptMyPhys;
-+ } else if ((dev->mc_count > np->multicast_filter_limit)
-+ || (dev->flags & IFF_ALLMULTI)) {
-+ rx_mode = AcceptBroadcast | AcceptAllMulticast | AcceptMyPhys;
-+ } else {
-+ struct dev_mc_list *mclist;
-+ int i;
-+ memset(mc_filter, 0, sizeof(mc_filter));
-+ for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
-+ i++, mclist = mclist->next) {
-+ set_bit(ether_crc_le(ETH_ALEN, mclist->dmi_addr) & 0x7ff,
-+ mc_filter);
-+ }
-+ rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
-+ for (i = 0; i < 64; i += 2) {
-+ writel(rx_mode + 0x200 + i, ioaddr + RxFilterAddr);
-+ writel((mc_filter[i+1]<<8) + mc_filter[i], ioaddr + RxFilterData);
-+ }
-+ }
-+ writel(rx_mode, ioaddr + RxFilterAddr);
-+ np->cur_rx_mode = rx_mode;
-+}
-+
-+static int mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
-+{
-+ struct netdev_private *np = (struct netdev_private *)dev->priv;
-+ u16 *data = (u16 *)&rq->ifr_data;
-+ u32 *data32 = (void *)&rq->ifr_data;
-+
-+ switch(cmd) {
-+ case 0x8947: case 0x89F0:
-+ /* SIOCGMIIPHY: Get the address of the PHY in use. */
-+ data[0] = 1;
-+ /* Fall Through */
-+ case 0x8948: case 0x89F1:
-+ /* SIOCGMIIREG: Read the specified MII register. */
-+ data[3] = mdio_read(dev, data[0] & 0x1f, data[1] & 0x1f);
-+ return 0;
-+ case 0x8949: case 0x89F2:
-+ /* SIOCSMIIREG: Write the specified MII register */
-+ if (!capable(CAP_NET_ADMIN))
-+ return -EPERM;
-+ if (data[0] == 1) {
-+ u16 miireg = data[1] & 0x1f;
-+ u16 value = data[2];
-+ switch (miireg) {
-+ case 0:
-+ /* Check for autonegotiation on or reset. */
-+ np->duplex_lock = (value & 0x9000) ? 0 : 1;
-+ if (np->duplex_lock)
-+ np->full_duplex = (value & 0x0100) ? 1 : 0;
-+ break;
-+ case 4: np->advertising = value; break;
-+ }
-+ }
-+ mdio_write(dev, data[0] & 0x1f, data[1] & 0x1f, data[2]);
-+ return 0;
-+ case SIOCGPARAMS:
-+ data32[0] = np->msg_level;
-+ data32[1] = np->multicast_filter_limit;
-+ data32[2] = np->max_interrupt_work;
-+ data32[3] = np->rx_copybreak;
-+ return 0;
-+ case SIOCSPARAMS:
-+ if (!capable(CAP_NET_ADMIN))
-+ return -EPERM;
-+ np->msg_level = data32[0];
-+ np->multicast_filter_limit = data32[1];
-+ np->max_interrupt_work = data32[2];
-+ np->rx_copybreak = data32[3];
-+ return 0;
-+ default:
-+ return -EOPNOTSUPP;
-+ }
-+}
-+
-+static int netdev_close(struct net_device *dev)
-+{
-+ long ioaddr = dev->base_addr;
-+ struct netdev_private *np = (struct netdev_private *)dev->priv;
-+ int i;
-+
-+ netif_stop_tx_queue(dev);
-+
-+ if (np->msg_level & NETIF_MSG_IFDOWN) {
-+ printk(KERN_DEBUG "%s: Shutting down ethercard, status was %4.4x "
-+ "Int %2.2x.\n",
-+ dev->name, (int)readl(ioaddr + ChipCmd),
-+ (int)readl(ioaddr + IntrStatus));
-+ printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d, Rx %d / %d.\n",
-+ dev->name, np->cur_tx, np->dirty_tx, np->cur_rx, np->dirty_rx);
-+ }
-+
-+ /* We don't want the timer to re-start anything. */
-+ del_timer(&np->timer);
-+
-+ /* Disable interrupts using the mask. */
-+ writel(0, ioaddr + IntrMask);
-+ writel(0, ioaddr + IntrEnable);
-+ writel(2, ioaddr + StatsCtrl); /* Freeze Stats */
-+
-+ /* Stop the chip's Tx and Rx processes. */
-+ writel(RxOff | TxOff, ioaddr + ChipCmd);
-+
-+ get_stats(dev);
-+
-+#ifdef __i386__
-+ if (np->msg_level & NETIF_MSG_IFDOWN) {
-+ printk("\n"KERN_DEBUG" Tx ring at %8.8x:\n",
-+ (int)virt_to_bus(np->tx_ring));
-+ for (i = 0; i < TX_RING_SIZE; i++)
-+ printk(" #%d desc. %8.8x %8.8x.\n",
-+ i, np->tx_ring[i].cmd_status, (u32)np->tx_ring[i].buf_addr);
-+ printk("\n"KERN_DEBUG " Rx ring %8.8x:\n",
-+ (int)virt_to_bus(np->rx_ring));
-+ for (i = 0; i < RX_RING_SIZE; i++) {
-+ printk(KERN_DEBUG " #%d desc. %8.8x %8.8x\n",
-+ i, np->rx_ring[i].cmd_status, (u32)np->rx_ring[i].buf_addr);
-+ }
-+ }
-+#endif /* __i386__ debugging only */
-+
-+ free_irq(dev->irq, dev);
-+
-+ /* Free all the skbuffs in the Rx queue. */
-+ for (i = 0; i < RX_RING_SIZE; i++) {
-+ np->rx_ring[i].cmd_status = 0;
-+ np->rx_ring[i].buf_addr = 0xBADF00D0; /* An invalid address. */
-+ if (np->rx_skbuff[i]) {
-+#if LINUX_VERSION_CODE < 0x20100
-+ np->rx_skbuff[i]->free = 1;
-+#endif
-+ dev_free_skb(np->rx_skbuff[i]);
-+ }
-+ np->rx_skbuff[i] = 0;
-+ }
-+ for (i = 0; i < TX_RING_SIZE; i++) {
-+ if (np->tx_skbuff[i])
-+ dev_free_skb(np->tx_skbuff[i]);
-+ np->tx_skbuff[i] = 0;
-+ }
-+
-+ /* Power down Xcvr. */
-+ writel(CfgXcrOff | readl(ioaddr + ChipConfig), ioaddr + ChipConfig);
-+
-+ MOD_DEC_USE_COUNT;
-+
-+ return 0;
-+}
-+
-+static int power_event(void *dev_instance, int event)
-+{
-+ struct net_device *dev = dev_instance;
-+ struct netdev_private *np = (struct netdev_private *)dev->priv;
-+ long ioaddr = dev->base_addr;
-+
-+ if (np->msg_level & NETIF_MSG_LINK)
-+ printk(KERN_DEBUG "%s: Handling power event %d.\n", dev->name, event);
-+ switch(event) {
-+ case DRV_ATTACH:
-+ MOD_INC_USE_COUNT;
-+ break;
-+ case DRV_SUSPEND:
-+ /* Disable interrupts, freeze stats, stop Tx and Rx. */
-+ writel(0, ioaddr + IntrEnable);
-+ writel(2, ioaddr + StatsCtrl);
-+ writel(RxOff | TxOff, ioaddr + ChipCmd);
-+ writel(CfgXcrOff | readl(ioaddr + ChipConfig), ioaddr + ChipConfig);
-+ break;
-+ case DRV_RESUME:
-+ /* This is incomplete: the open() actions should be repeated. */
-+ writel(~CfgXcrOff & readl(ioaddr + ChipConfig), ioaddr + ChipConfig);
-+ set_rx_mode(dev);
-+ writel(np->intr_enable, ioaddr + IntrEnable);
-+ writel(1, ioaddr + IntrEnable);
-+ writel(RxOn | TxOn, ioaddr + ChipCmd);
-+ break;
-+ case DRV_DETACH: {
-+ struct net_device **devp, **next;
-+ if (dev->flags & IFF_UP) {
-+ /* Some, but not all, kernel versions close automatically. */
-+ dev_close(dev);
-+ dev->flags &= ~(IFF_UP|IFF_RUNNING);
-+ }
-+ unregister_netdev(dev);
-+ release_region(dev->base_addr, pci_id_tbl[np->chip_id].io_size);
-+ for (devp = &root_net_dev; *devp; devp = next) {
-+ next = &((struct netdev_private *)(*devp)->priv)->next_module;
-+ if (*devp == dev) {
-+ *devp = *next;
-+ break;
-+ }
-+ }
-+ if (np->priv_addr)
-+ kfree(np->priv_addr);
-+ kfree(dev);
-+ MOD_DEC_USE_COUNT;
-+ break;
-+ }
-+ }
-+
-+ return 0;
-+}
-+
-+
-+#ifdef MODULE
-+int init_module(void)
-+{
-+ /* Emit version even if no cards detected. */
-+ printk(KERN_INFO "%s" KERN_INFO "%s", version1, version2);
-+#ifdef CARDBUS
-+ register_driver(&etherdev_ops);
-+ return 0;
-+#else
-+ return pci_drv_register(&ns820_drv_id, NULL);
-+#endif
-+}
-+
-+void cleanup_module(void)
-+{
-+ struct net_device *next_dev;
-+
-+#ifdef CARDBUS
-+ unregister_driver(&etherdev_ops);
-+#else
-+ pci_drv_unregister(&ns820_drv_id);
-+#endif
-+
-+ /* No need to check MOD_IN_USE, as sys_delete_module() checks. */
-+ while (root_net_dev) {
-+ struct netdev_private *np = (void *)(root_net_dev->priv);
-+ unregister_netdev(root_net_dev);
-+ iounmap((char *)root_net_dev->base_addr);
-+ next_dev = np->next_module;
-+ if (np->priv_addr)
-+ kfree(np->priv_addr);
-+ kfree(root_net_dev);
-+ root_net_dev = next_dev;
-+ }
-+}
-+
-+#endif /* MODULE */
-+
-+/*
-+ * Local variables:
-+ * compile-command: "make KERNVER=`uname -r` ns820.o"
-+ * compile-cmd: "gcc -DMODULE -Wall -Wstrict-prototypes -O6 -c ns820.c"
-+ * simple-compile-command: "gcc -DMODULE -O6 -c ns820.c"
-+ * c-indent-level: 4
-+ * c-basic-offset: 4
-+ * tab-width: 4
-+ * End:
-+ */
diff -Naur linux/dev/drivers/net/Space.c linux/dev/drivers/net/Space.c
--- linux/dev/drivers/net/Space.c 1999-09-07 09:19:06.000000000 +0200
+++ linux/dev/drivers/net/Space.c 2004-10-25 06:29:49.000000000 +0200