Bug Summary

File: obj-scan-build/../linux/src/drivers/net/lance.c
Location: line 1275, column 4
Description: Function call argument is an uninitialized value
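
The flagged call is the outw() at line 1275, column 4, in set_multicast_list(): it writes multicast_table[i] out to the chip. The array is filled by the memset() at line 1272, but that memset() is a kernel-header macro that expands to __constant_c_and_count_memset()/__memset_generic(), and the checker likely does not track the stores made inside that expansion, so the later read of multicast_table[i] is reported as uninitialized. The report is therefore probably a false positive triggered by the macro expansion rather than a genuinely missing initialization. Below is a minimal sketch of one way to make the initialization explicit and silence the warning; the helper name is hypothetical and not part of the driver source:

    /* Hypothetical sketch: fill the four multicast filter words with an
       explicit loop instead of the memset() macro, so every element read
       by the outw() loop at line 1275 is visibly initialized. */
    static void fill_multicast_table(short multicast_table[4], int num_addrs)
    {
        int i;
        short fill = (num_addrs == 0) ? 0x0000 : (short)0xffff;  /* same bytes the memset() writes */

        for (i = 0; i < 4; i++)
            multicast_table[i] = fill;
    }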

Annotated Source Code

1/* lance.c: An AMD LANCE/PCnet ethernet driver for Linux. */
2/*
3 Written/copyright 1993-1998 by Donald Becker.
4
5 Copyright 1993 United States Government as represented by the
6 Director, National Security Agency.
7 This software may be used and distributed according to the terms
8 of the GNU Public License, incorporated herein by reference.
9
10 This driver is for the Allied Telesis AT1500 and HP J2405A, and should work
11 with most other LANCE-based bus-master (NE2100/NE2500) ethercards.
12
13 The author may be reached as becker@CESDIS.gsfc.nasa.gov, or C/O
14 Center of Excellence in Space Data and Information Sciences
15 Code 930.5, Goddard Space Flight Center, Greenbelt MD 20771
16
17 Fixing alignment problem with 1.3.* kernel and some minor changes
18 by Andrey V. Savochkin, 1996.
19
 20 Problems or questions may be sent to Donald Becker (see above) or to
21 Andrey Savochkin -- saw@shade.msu.ru or
22 Laboratory of Computation Methods,
23 Department of Mathematics and Mechanics,
24 Moscow State University,
25 Leninskye Gory, Moscow 119899
26
 27 But I should inform you that I'm not an expert on the LANCE card,
 28 and it may happen that you will receive no answer to your mail
 29 to Donald Becker. I didn't receive any answer to any of my letters
 30 to him. Who knows why... But maybe you are more lucky? ;->
31 SAW
32
33 Thomas Bogendoerfer (tsbogend@bigbug.franken.de):
34 - added support for Linux/Alpha, but removed most of it, because
35 it worked only for the PCI chip.
36 - added hook for the 32bit lance driver
37 - added PCnetPCI II (79C970A) to chip table
38 Paul Gortmaker (gpg109@rsphy1.anu.edu.au):
39 - hopefully fix above so Linux/Alpha can use ISA cards too.
40 8/20/96 Fixed 7990 autoIRQ failure and reversed unneeded alignment -djb
41 v1.12 10/27/97 Module support -djb
42 v1.14 2/3/98 Module support modified, made PCI support optional -djb
43*/
44
45static const char *version = "lance.c:v1.14 2/3/1998 dplatt@3do.com, becker@cesdis.gsfc.nasa.gov\n";
46
47#ifdef MODULE
48#ifdef MODVERSIONS
49#include <linux/modversions.h>
50#endif
51#include <linux/module.h>
52#include <linux/version.h>
53#else
54#define MOD_INC_USE_COUNT
55#define MOD_DEC_USE_COUNT
56#endif
57
58#include <linux/config.h>
59#include <linux/kernel.h>
60#include <linux/sched.h>
61#include <linux/string.h>
62#include <linux/ptrace.h>
63#include <linux/errno.h>
64#include <linux/ioport.h>
65#include <linux/malloc.h>
66#include <linux/interrupt.h>
67#include <linux/pci.h>
68#include <linux/bios32.h>
69#include <asm/bitops.h>
70#include <asm/io.h>
71#include <asm/dma.h>
72
73#include <linux/netdevice.h>
74#include <linux/etherdevice.h>
75#include <linux/skbuff.h>
76
77static unsigned int lance_portlist[] = { 0x300, 0x320, 0x340, 0x360, 0};
78int lance_probe(struct devicelinux_device *dev);
79int lance_probe1(struct devicelinux_device *dev, int ioaddr, int irq, int options);
80
81#ifdef HAVE_DEVLIST
82struct netdev_entry lance_drv =
83{"lance", lance_probe1, LANCE_TOTAL_SIZE0x18, lance_portlist};
84#endif
85
86#ifdef LANCE_DEBUG
87int lance_debug = LANCE_DEBUG;
88#else
89int lance_debug = 1;
90#endif
91
92/*
93 Theory of Operation
94
95I. Board Compatibility
96
97This device driver is designed for the AMD 79C960, the "PCnet-ISA
98single-chip ethernet controller for ISA". This chip is used in a wide
99variety of boards from vendors such as Allied Telesis, HP, Kingston,
100and Boca. This driver is also intended to work with older AMD 7990
101designs, such as the NE1500 and NE2100, and newer 79C961. For convenience,
102I use the name LANCE to refer to all of the AMD chips, even though it properly
103refers only to the original 7990.
104
105II. Board-specific settings
106
107The driver is designed to work with the boards that use the faster
108bus-master mode, rather than in shared memory mode. (Only older designs
109have on-board buffer memory needed to support the slower shared memory mode.)
110
111Most ISA boards have jumpered settings for the I/O base, IRQ line, and DMA
112channel. This driver probes the likely base addresses:
113{0x300, 0x320, 0x340, 0x360}.
114After the board is found it generates a DMA-timeout interrupt and uses
115autoIRQ to find the IRQ line. The DMA channel can be set with the low bits
116of the otherwise-unused dev->mem_start value (aka PARAM1). If unset it is
117probed for by enabling each free DMA channel in turn and checking if
118initialization succeeds.
119
120The HP-J2405A board is an exception: with this board it is easy to read the
121EEPROM-set values for the base, IRQ, and DMA. (Of course you must already
122_know_ the base address -- that field is for writing the EEPROM.)
123
124III. Driver operation
125
126IIIa. Ring buffers
127The LANCE uses ring buffers of Tx and Rx descriptors. Each entry describes
128the base and length of the data buffer, along with status bits. The length
129of these buffers is set by LANCE_LOG_{RX,TX}_BUFFERS, which is log_2() of
130the buffer length (rather than being directly the buffer length) for
131implementation ease. The current values are 2 (Tx) and 4 (Rx), which leads to
132ring sizes of 4 (Tx) and 16 (Rx). Increasing the number of ring entries
133needlessly uses extra space and reduces the chance that an upper layer will
134be able to reorder queued Tx packets based on priority. Decreasing the number
135of entries makes it more difficult to achieve back-to-back packet transmission
136and increases the chance that Rx ring will overflow. (Consider the worst case
137of receiving back-to-back minimum-sized packets.)
138
139The LANCE has the capability to "chain" both Rx and Tx buffers, but this driver
140statically allocates full-sized (slightly oversized -- PKT_BUF_SZ) buffers to
141avoid the administrative overhead. For the Rx side this avoids dynamically
142allocating full-sized buffers "just in case", at the expense of a
143memory-to-memory data copy for each packet received. For most systems this
144is a good tradeoff: the Rx buffer will always be in low memory, the copy
145is inexpensive, and it primes the cache for later packet processing. For Tx
146the buffers are only used when needed as low-memory bounce buffers.
147
148IIIB. 16M memory limitations.
149For the ISA bus master mode all structures used directly by the LANCE,
150the initialization block, Rx and Tx rings, and data buffers, must be
151accessible from the ISA bus, i.e. in the lower 16M of real memory.
152This is a problem for current Linux kernels on >16M machines. The network
153devices are initialized after memory initialization, and the kernel doles out
154memory from the top of memory downward. The current solution is to have a
155special network initialization routine that's called before memory
156initialization; this will eventually be generalized for all network devices.
157As mentioned before, low-memory "bounce-buffers" are used when needed.
158
159IIIC. Synchronization
160The driver runs as two independent, single-threaded flows of control. One
161is the send-packet routine, which enforces single-threaded use by the
162dev->tbusy flag. The other thread is the interrupt handler, which is single
163threaded by the hardware and other software.
164
165The send packet thread has partial control over the Tx ring and 'dev->tbusy'
166flag. It sets the tbusy flag whenever it's queuing a Tx packet. If the next
167queue slot is empty, it clears the tbusy flag when finished otherwise it sets
168the 'lp->tx_full' flag.
169
170The interrupt handler has exclusive control over the Rx ring and records stats
171from the Tx ring. (The Tx-done interrupt can't be selectively turned off, so
172we can't avoid the interrupt overhead by having the Tx routine reap the Tx
173stats.) After reaping the stats, it marks the queue entry as empty by setting
174the 'base' to zero. Iff the 'lp->tx_full' flag is set, it clears both the
175tx_full and tbusy flags.
176
177*/
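/*
 * A minimal illustrative sketch (not part of lance.c) of how the ring
 * geometry described above is handed to the chip: the log2 ring size from
 * LANCE_LOG_{RX,TX}_BUFFERS is packed into bits 29-31 of the init-block
 * ring pointers, next to the low 24 bits of the ring's bus address; this
 * is what the *_RING_LEN_BITS macros and the assignments near line 538 do.
 * Variable names below are illustrative only:
 *
 *     u32 tx_ring_word = ((u32)virt_to_bus(lp->tx_ring) & 0x00ffffff)
 *                        | (LANCE_LOG_TX_BUFFERS << 29);
 *     lp->init_block.tx_ring = tx_ring_word;
 */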
178
179/* Set the number of Tx and Rx buffers, using Log_2(# buffers).
180 Reasonable default values are 16 Tx buffers, and 16 Rx buffers.
181 That translates to 4 and 4 (16 == 2^^4).
182 This is a compile-time option for efficiency.
183 */
184#ifndef LANCE_LOG_TX_BUFFERS4
185#define LANCE_LOG_TX_BUFFERS4 4
186#define LANCE_LOG_RX_BUFFERS4 4
187#endif
188
189#define TX_RING_SIZE(1 << (4)) (1 << (LANCE_LOG_TX_BUFFERS4))
190#define TX_RING_MOD_MASK((1 << (4)) - 1) (TX_RING_SIZE(1 << (4)) - 1)
191#define TX_RING_LEN_BITS((4) << 29) ((LANCE_LOG_TX_BUFFERS4) << 29)
192
193#define RX_RING_SIZE(1 << (4)) (1 << (LANCE_LOG_RX_BUFFERS4))
194#define RX_RING_MOD_MASK((1 << (4)) - 1) (RX_RING_SIZE(1 << (4)) - 1)
195#define RX_RING_LEN_BITS((4) << 29) ((LANCE_LOG_RX_BUFFERS4) << 29)
196
197#define PKT_BUF_SZ1544 1544
198
199/* Offsets from base I/O address. */
200#define LANCE_DATA0x10 0x10
201#define LANCE_ADDR0x12 0x12
202#define LANCE_RESET0x14 0x14
203#define LANCE_BUS_IF0x16 0x16
204#define LANCE_TOTAL_SIZE0x18 0x18
205
206/* The LANCE Rx and Tx ring descriptors. */
207struct lance_rx_head {
208 s32 base;
209 s16 buf_length; /* This length is 2s complement (negative)! */
210 s16 msg_length; /* This length is "normal". */
211};
212
213struct lance_tx_head {
214 s32 base;
215 s16 length; /* Length is 2s complement (negative)! */
216 s16 misc;
217};
218
219/* The LANCE initialization block, described in databook. */
220struct lance_init_block {
221 u16 mode; /* Pre-set mode (reg. 15) */
222 u8 phys_addr[6]; /* Physical ethernet address */
223 u32 filter[2]; /* Multicast filter (unused). */
224 /* Receive and transmit ring base, along with extra bits. */
225 u32 rx_ring; /* Tx and Rx ring base pointers */
226 u32 tx_ring;
227};
228
229struct lance_private {
230 /* The Tx and Rx ring entries must be aligned on 8-byte boundaries. */
231 struct lance_rx_head rx_ring[RX_RING_SIZE(1 << (4))];
232 struct lance_tx_head tx_ring[TX_RING_SIZE(1 << (4))];
233 struct lance_init_block init_block;
234 const char *name;
235 /* The saved address of a sent-in-place packet/buffer, for skfree(). */
236 struct sk_buff* tx_skbuff[TX_RING_SIZE(1 << (4))];
237 /* The addresses of receive-in-place skbuffs. */
238 struct sk_buff* rx_skbuff[RX_RING_SIZE(1 << (4))];
239 unsigned long rx_buffs; /* Address of Rx and Tx buffers. */
240 /* Tx low-memory "bounce buffer" address. */
241 char (*tx_bounce_buffs)[PKT_BUF_SZ1544];
242 int cur_rx, cur_tx; /* The next free ring entry */
243 int dirty_rx, dirty_tx; /* The ring entries to be free()ed. */
244 int dma;
245 struct enet_statistics stats;
246 unsigned char chip_version; /* See lance_chip_type. */
247 char tx_full;
248 unsigned long lock;
249};
250
251#define LANCE_MUST_PAD0x00000001 0x00000001
252#define LANCE_ENABLE_AUTOSELECT0x00000002 0x00000002
253#define LANCE_MUST_REINIT_RING0x00000004 0x00000004
254#define LANCE_MUST_UNRESET0x00000008 0x00000008
255#define LANCE_HAS_MISSED_FRAME0x00000010 0x00000010
256
257/* A mapping from the chip ID number to the part number and features.
258 These are from the datasheets -- in real life the '970 version
259 reportedly has the same ID as the '965. */
260static struct lance_chip_type {
261 int id_number;
262 const char *name;
263 int flags;
264} chip_table[] = {
265 {0x0000, "LANCE 7990", /* Ancient lance chip. */
266 LANCE_MUST_PAD0x00000001 + LANCE_MUST_UNRESET0x00000008},
267 {0x0003, "PCnet/ISA 79C960", /* 79C960 PCnet/ISA. */
268 LANCE_ENABLE_AUTOSELECT0x00000002 + LANCE_MUST_REINIT_RING0x00000004 +
269 LANCE_HAS_MISSED_FRAME0x00000010},
270 {0x2260, "PCnet/ISA+ 79C961", /* 79C961 PCnet/ISA+, Plug-n-Play. */
271 LANCE_ENABLE_AUTOSELECT0x00000002 + LANCE_MUST_REINIT_RING0x00000004 +
272 LANCE_HAS_MISSED_FRAME0x00000010},
273 {0x2420, "PCnet/PCI 79C970", /* 79C970 or 79C974 PCnet-SCSI, PCI. */
274 LANCE_ENABLE_AUTOSELECT0x00000002 + LANCE_MUST_REINIT_RING0x00000004 +
275 LANCE_HAS_MISSED_FRAME0x00000010},
276 /* Bug: the PCnet/PCI actually uses the PCnet/VLB ID number, so just call
277 it the PCnet32. */
278 {0x2430, "PCnet32", /* 79C965 PCnet for VL bus. */
279 LANCE_ENABLE_AUTOSELECT0x00000002 + LANCE_MUST_REINIT_RING0x00000004 +
280 LANCE_HAS_MISSED_FRAME0x00000010},
281 {0x2621, "PCnet/PCI-II 79C970A", /* 79C970A PCInetPCI II. */
282 LANCE_ENABLE_AUTOSELECT0x00000002 + LANCE_MUST_REINIT_RING0x00000004 +
283 LANCE_HAS_MISSED_FRAME0x00000010},
284 {0x0, "PCnet (unknown)",
285 LANCE_ENABLE_AUTOSELECT0x00000002 + LANCE_MUST_REINIT_RING0x00000004 +
286 LANCE_HAS_MISSED_FRAME0x00000010},
287};
288
289enum {OLD_LANCE = 0, PCNET_ISA=1, PCNET_ISAP=2, PCNET_PCI=3, PCNET_VLB=4, PCNET_PCI_II=5, LANCE_UNKNOWN=6};
290
291/* Non-zero only if the current card is a PCI with BIOS-set IRQ. */
292static unsigned char pci_irq_line = 0;
293
294/* Non-zero if lance_probe1() needs to allocate low-memory bounce buffers.
295 Assume yes until we know the memory size. */
296static unsigned char lance_need_isa_bounce_buffers = 1;
297
298static int lance_open(struct devicelinux_device *dev);
299static int lance_open_fail(struct devicelinux_device *dev);
300static void lance_init_ring(struct devicelinux_device *dev, int mode);
301static int lance_start_xmit(struct sk_buff *skb, struct devicelinux_device *dev);
302static int lance_rx(struct devicelinux_device *dev);
303static void lance_interrupt(int irq, void *dev_id, struct pt_regs *regs);
304static int lance_close(struct devicelinux_device *dev);
305static struct enet_statistics *lance_get_stats(struct devicelinux_device *dev);
306static void set_multicast_list(struct devicelinux_device *dev);
307
308
309
310#ifdef MODULE
311#define MAX_CARDS 8 /* Max number of interfaces (cards) per module */
312#define IF_NAMELEN 8 /* # of chars for storing dev->name */
313
314static int io[MAX_CARDS] = { 0, };
315static int dma[MAX_CARDS] = { 0, };
316static int irq[MAX_CARDS] = { 0, };
317
318static char ifnames[MAX_CARDS][IF_NAMELEN] = { {0, }, };
319static struct devicelinux_device dev_lance[MAX_CARDS] =
320{{
321 0, /* device name is inserted by linux/drivers/net/net_init.c */
322 0, 0, 0, 0,
323 0, 0,
324 0, 0, 0, NULL((void *) 0), NULL((void *) 0)}};
325
326int init_module(void)
327{
328 int this_dev, found = 0;
329
330 for (this_dev = 0; this_dev < MAX_CARDS; this_dev++) {
331 struct devicelinux_device *dev = &dev_lance[this_dev];
332 dev->name = ifnames[this_dev];
333 dev->irq = irq[this_dev];
334 dev->base_addr = io[this_dev];
335 dev->dma = dma[this_dev];
336 dev->init = lance_probe;
337 if (io[this_dev] == 0) {
338 if (this_dev != 0) break; /* only complain once */
339 printk(KERN_NOTICE"<5>" "lance.c: Module autoprobing not allowed. Append \"io=0xNNN\" value(s).\n");
340 return -EPERM1;
341 }
342 if (register_netdev(dev) != 0) {
343 printk(KERN_WARNING"<4>" "lance.c: No PCnet/LANCE card found (i/o = 0x%x).\n", io[this_dev]);
344 if (found != 0) return 0; /* Got at least one. */
345 return -ENXIO6;
346 }
347 found++;
348 }
349
350 return 0;
351}
352
353void
354cleanup_module(void)
355{
356 int this_dev;
357
358 for (this_dev = 0; this_dev < MAX_CARDS; this_dev++) {
359 struct devicelinux_device *dev = &dev_lance[this_dev];
360 if (dev->priv != NULL((void *) 0)) {
361 kfreelinux_kfree(dev->priv);
362 dev->priv = NULL((void *) 0);
363 free_dma(dev->dma);
364 release_region(dev->base_addr, LANCE_TOTAL_SIZE0x18);
365 unregister_netdev(dev);
366 }
367 }
368}
369#endif /* MODULE */
370
371/* Starting in v2.1.*, the LANCE/PCnet probe is now similar to the other
372 board probes now that kmalloc() can allocate ISA DMA-able regions.
373 This also allows the LANCE driver to be used as a module.
374 */
375int lance_probe(struct devicelinux_device *dev)
376{
377 int *port, result;
378
379 if (high_memory <= 16*1024*1024)
380 lance_need_isa_bounce_buffers = 0;
381
382#if defined(CONFIG_PCI1)
383 if (pcibios_present()) {
384 int pci_index;
385 if (lance_debug > 1)
386 printk("lance.c: PCI bios is present, checking for devices...\n");
387 for (pci_index = 0; pci_index < 8; pci_index++) {
388 unsigned char pci_bus, pci_device_fn;
389 unsigned int pci_ioaddr;
390 unsigned short pci_command;
391
392 if (pcibios_find_device (PCI_VENDOR_ID_AMD0x1022,
393 PCI_DEVICE_ID_AMD_LANCE0x2000, pci_index,
394 &pci_bus, &pci_device_fn) != 0)
395 break;
396 pcibios_read_config_byte(pci_bus, pci_device_fn,
397 PCI_INTERRUPT_LINE0x3c, &pci_irq_line);
398 pcibios_read_config_dword(pci_bus, pci_device_fn,
399 PCI_BASE_ADDRESS_00x10, &pci_ioaddr);
400 /* Remove I/O space marker in bit 0. */
401 pci_ioaddr &= ~3;
402 /* PCI Spec 2.1 states that it is either the driver or PCI card's
403 * responsibility to set the PCI Master Enable Bit if needed.
404 * (From Mark Stockton <marks@schooner.sys.hou.compaq.com>)
405 */
406 pcibios_read_config_word(pci_bus, pci_device_fn,
407 PCI_COMMAND0x04, &pci_command);
408 if ( ! (pci_command & PCI_COMMAND_MASTER0x4)) {
409 printk("PCI Master Bit has not been set. Setting...\n");
410 pci_command |= PCI_COMMAND_MASTER0x4;
411 pcibios_write_config_word(pci_bus, pci_device_fn,
412 PCI_COMMAND0x04, pci_command);
413 }
414 printk("Found PCnet/PCI at %#x, irq %d.\n",
415 pci_ioaddr, pci_irq_line);
416 result = lance_probe1(dev, pci_ioaddr, pci_irq_line, 0);
417 pci_irq_line = 0;
418 if (!result) return 0;
419 }
420 }
421#endif /* defined(CONFIG_PCI) */
422
423 for (port = lance_portlist; *port; port++) {
424 int ioaddr = *port;
425
426 if ( check_region(ioaddr, LANCE_TOTAL_SIZE0x18) == 0) {
427 /* Detect "normal" 0x57 0x57 and the NI6510EB 0x52 0x44
428 signatures w/ minimal I/O reads */
429 char offset15, offset14 = inb(ioaddr + 14)((__builtin_constant_p((ioaddr + 14)) && (ioaddr + 14
) < 256) ? __inbc(ioaddr + 14) : __inb(ioaddr + 14))
;
430
431 if ((offset14 == 0x52 || offset14 == 0x57) &&
432 ((offset15 = inb(ioaddr + 15)((__builtin_constant_p((ioaddr + 15)) && (ioaddr + 15
) < 256) ? __inbc(ioaddr + 15) : __inb(ioaddr + 15))
) == 0x57 || offset15 == 0x44)) {
433 result = lance_probe1(dev, ioaddr, 0, 0);
434 if ( !result ) return 0;
435 }
436 }
437 }
438 return -ENODEV19;
439}
440
441int lance_probe1(struct devicelinux_device *dev, int ioaddr, int irq, int options)
442{
443 struct lance_private *lp;
444 short dma_channels; /* Mark spuriously-busy DMA channels */
445 int i, reset_val, lance_version;
446 const char *chipname;
447 /* Flags for specific chips or boards. */
448 unsigned char hpJ2405A = 0; /* HP ISA adaptor */
449 int hp_builtin = 0; /* HP on-board ethernet. */
450 static int did_version = 0; /* Already printed version info. */
451
452 /* First we look for special cases.
453 Check for HP's on-board ethernet by looking for 'HP' in the BIOS.
454 There are two HP versions, check the BIOS for the configuration port.
455 This method provided by L. Julliard, Laurent_Julliard@grenoble.hp.com.
456 */
457 if (readw(0x000f0102)(*(volatile unsigned short *) (0x000f0102)) == 0x5048) {
458 static const short ioaddr_table[] = { 0x300, 0x320, 0x340, 0x360};
459 int hp_port = (readl(0x000f00f1)(*(volatile unsigned int *) (0x000f00f1)) & 1) ? 0x499 : 0x99;
460 /* We can have boards other than the built-in! Verify this is on-board. */
461 if ((inb(hp_port)((__builtin_constant_p((hp_port)) && (hp_port) < 256
) ? __inbc(hp_port) : __inb(hp_port))
& 0xc0) == 0x80
462 && ioaddr_table[inb(hp_port)((__builtin_constant_p((hp_port)) && (hp_port) < 256
) ? __inbc(hp_port) : __inb(hp_port))
& 3] == ioaddr)
463 hp_builtin = hp_port;
464 }
465 /* We also recognize the HP Vectra on-board here, but check below. */
466 hpJ2405A = (inb(ioaddr)((__builtin_constant_p((ioaddr)) && (ioaddr) < 256
) ? __inbc(ioaddr) : __inb(ioaddr))
== 0x08 && inb(ioaddr+1)((__builtin_constant_p((ioaddr+1)) && (ioaddr+1) <
256) ? __inbc(ioaddr+1) : __inb(ioaddr+1))
== 0x00
467 && inb(ioaddr+2)((__builtin_constant_p((ioaddr+2)) && (ioaddr+2) <
256) ? __inbc(ioaddr+2) : __inb(ioaddr+2))
== 0x09);
468
469 /* Reset the LANCE. */
470 reset_val = inw(ioaddr+LANCE_RESET)((__builtin_constant_p((ioaddr+0x14)) && (ioaddr+0x14
) < 256) ? __inwc(ioaddr+0x14) : __inw(ioaddr+0x14))
; /* Reset the LANCE */
471
 472 /* The Un-Reset is only needed for the real NE2100, and will
473 confuse the HP board. */
474 if (!hpJ2405A)
475 outw(reset_val, ioaddr+LANCE_RESET)((__builtin_constant_p((ioaddr+0x14)) && (ioaddr+0x14
) < 256) ? __outwc((reset_val),(ioaddr+0x14)) : __outw((reset_val
),(ioaddr+0x14)))
;
476
477 outw(0x0000, ioaddr+LANCE_ADDR)((__builtin_constant_p((ioaddr+0x12)) && (ioaddr+0x12
) < 256) ? __outwc((0x0000),(ioaddr+0x12)) : __outw((0x0000
),(ioaddr+0x12)))
; /* Switch to window 0 */
478 if (inw(ioaddr+LANCE_DATA)((__builtin_constant_p((ioaddr+0x10)) && (ioaddr+0x10
) < 256) ? __inwc(ioaddr+0x10) : __inw(ioaddr+0x10))
!= 0x0004)
479 return -ENODEV19;
480
481 /* Get the version of the chip. */
482 outw(88, ioaddr+LANCE_ADDR)((__builtin_constant_p((ioaddr+0x12)) && (ioaddr+0x12
) < 256) ? __outwc((88),(ioaddr+0x12)) : __outw((88),(ioaddr
+0x12)))
;
483 if (inw(ioaddr+LANCE_ADDR)((__builtin_constant_p((ioaddr+0x12)) && (ioaddr+0x12
) < 256) ? __inwc(ioaddr+0x12) : __inw(ioaddr+0x12))
!= 88) {
484 lance_version = 0;
485 } else { /* Good, it's a newer chip. */
486 int chip_version = inw(ioaddr+LANCE_DATA)((__builtin_constant_p((ioaddr+0x10)) && (ioaddr+0x10
) < 256) ? __inwc(ioaddr+0x10) : __inw(ioaddr+0x10))
;
487 outw(89, ioaddr+LANCE_ADDR)((__builtin_constant_p((ioaddr+0x12)) && (ioaddr+0x12
) < 256) ? __outwc((89),(ioaddr+0x12)) : __outw((89),(ioaddr
+0x12)))
;
488 chip_version |= inw(ioaddr+LANCE_DATA)((__builtin_constant_p((ioaddr+0x10)) && (ioaddr+0x10
) < 256) ? __inwc(ioaddr+0x10) : __inw(ioaddr+0x10))
<< 16;
489 if (lance_debug > 2)
490 printk(" LANCE chip version is %#x.\n", chip_version);
491 if ((chip_version & 0xfff) != 0x003)
492 return -ENODEV19;
493 chip_version = (chip_version >> 12) & 0xffff;
494 for (lance_version = 1; chip_table[lance_version].id_number; lance_version++) {
495 if (chip_table[lance_version].id_number == chip_version)
496 break;
497 }
498 }
499
 500 /* We can't use init_etherdev() to allocate dev->priv because it must be
 501 an ISA DMA-able region. */
502 dev = init_etherdev(dev, 0);
503 dev->open = lance_open_fail;
504 chipname = chip_table[lance_version].name;
505 printk("%s: %s at %#3x,", dev->name, chipname, ioaddr);
506
507 /* There is a 16 byte station address PROM at the base address.
508 The first six bytes are the station address. */
509 for (i = 0; i < 6; i++)
510 printk(" %2.2x", dev->dev_addr[i] = inb(ioaddr + i)((__builtin_constant_p((ioaddr + i)) && (ioaddr + i) <
256) ? __inbc(ioaddr + i) : __inb(ioaddr + i))
);
511
512 dev->base_addr = ioaddr;
513 request_region(ioaddr, LANCE_TOTAL_SIZE0x18, chip_table[lance_version].name);
514
515 /* Make certain the data structures used by the LANCE are aligned and DMAble. */
516
517 lp = (struct lance_private *)(((unsigned long)kmalloclinux_kmalloc(sizeof(*lp)+7,
518 GFP_DMA0x80 | GFP_KERNEL0x03)+7) & ~7);
519 if (lance_debug > 6) printk(" (#0x%05lx)", (unsigned long)lp);
520 memset(lp, 0, sizeof(*lp))(__builtin_constant_p(0) ? (__builtin_constant_p((sizeof(*lp)
)) ? __constant_c_and_count_memset(((lp)),((0x01010101UL*(unsigned
char)(0))),((sizeof(*lp)))) : __constant_c_memset(((lp)),((0x01010101UL
*(unsigned char)(0))),((sizeof(*lp))))) : (__builtin_constant_p
((sizeof(*lp))) ? __memset_generic((((lp))),(((0))),(((sizeof
(*lp))))) : __memset_generic(((lp)),((0)),((sizeof(*lp))))))
;
521 dev->priv = lp;
522 lp->name = chipname;
523 lp->rx_buffs = (unsigned long)kmalloclinux_kmalloc(PKT_BUF_SZ1544*RX_RING_SIZE(1 << (4)),
524 GFP_DMA0x80 | GFP_KERNEL0x03);
525 if (lance_need_isa_bounce_buffers)
526 lp->tx_bounce_buffs = kmalloclinux_kmalloc(PKT_BUF_SZ1544*TX_RING_SIZE(1 << (4)),
527 GFP_DMA0x80 | GFP_KERNEL0x03);
528 else
529 lp->tx_bounce_buffs = NULL((void *) 0);
530
531 lp->chip_version = lance_version;
532
533 lp->init_block.mode = 0x0003; /* Disable Rx and Tx. */
534 for (i = 0; i < 6; i++)
535 lp->init_block.phys_addr[i] = dev->dev_addr[i];
536 lp->init_block.filter[0] = 0x00000000;
537 lp->init_block.filter[1] = 0x00000000;
538 lp->init_block.rx_ring = ((u32)virt_to_busvirt_to_phys(lp->rx_ring) & 0xffffff) | RX_RING_LEN_BITS((4) << 29);
539 lp->init_block.tx_ring = ((u32)virt_to_busvirt_to_phys(lp->tx_ring) & 0xffffff) | TX_RING_LEN_BITS((4) << 29);
540
541 outw(0x0001, ioaddr+LANCE_ADDR)((__builtin_constant_p((ioaddr+0x12)) && (ioaddr+0x12
) < 256) ? __outwc((0x0001),(ioaddr+0x12)) : __outw((0x0001
),(ioaddr+0x12)))
;
542 inw(ioaddr+LANCE_ADDR)((__builtin_constant_p((ioaddr+0x12)) && (ioaddr+0x12
) < 256) ? __inwc(ioaddr+0x12) : __inw(ioaddr+0x12))
;
543 outw((short) (u32) virt_to_bus(&lp->init_block), ioaddr+LANCE_DATA)((__builtin_constant_p((ioaddr+0x10)) && (ioaddr+0x10
) < 256) ? __outwc(((short) (u32) virt_to_phys(&lp->
init_block)),(ioaddr+0x10)) : __outw(((short) (u32) virt_to_phys
(&lp->init_block)),(ioaddr+0x10)))
;
544 outw(0x0002, ioaddr+LANCE_ADDR)((__builtin_constant_p((ioaddr+0x12)) && (ioaddr+0x12
) < 256) ? __outwc((0x0002),(ioaddr+0x12)) : __outw((0x0002
),(ioaddr+0x12)))
;
545 inw(ioaddr+LANCE_ADDR)((__builtin_constant_p((ioaddr+0x12)) && (ioaddr+0x12
) < 256) ? __inwc(ioaddr+0x12) : __inw(ioaddr+0x12))
;
546 outw(((u32)virt_to_bus(&lp->init_block)) >> 16, ioaddr+LANCE_DATA)((__builtin_constant_p((ioaddr+0x10)) && (ioaddr+0x10
) < 256) ? __outwc((((u32)virt_to_phys(&lp->init_block
)) >> 16),(ioaddr+0x10)) : __outw((((u32)virt_to_phys(&
lp->init_block)) >> 16),(ioaddr+0x10)))
;
547 outw(0x0000, ioaddr+LANCE_ADDR)((__builtin_constant_p((ioaddr+0x12)) && (ioaddr+0x12
) < 256) ? __outwc((0x0000),(ioaddr+0x12)) : __outw((0x0000
),(ioaddr+0x12)))
;
548 inw(ioaddr+LANCE_ADDR)((__builtin_constant_p((ioaddr+0x12)) && (ioaddr+0x12
) < 256) ? __inwc(ioaddr+0x12) : __inw(ioaddr+0x12))
;
549
550 if (irq) { /* Set iff PCI card. */
551 dev->dma = 4; /* Native bus-master, no DMA channel needed. */
552 dev->irq = irq;
553 } else if (hp_builtin) {
554 static const char dma_tbl[4] = {3, 5, 6, 0};
555 static const char irq_tbl[4] = {3, 4, 5, 9};
556 unsigned char port_val = inb(hp_builtin)((__builtin_constant_p((hp_builtin)) && (hp_builtin) <
256) ? __inbc(hp_builtin) : __inb(hp_builtin))
;
557 dev->dma = dma_tbl[(port_val >> 4) & 3];
558 dev->irq = irq_tbl[(port_val >> 2) & 3];
559 printk(" HP Vectra IRQ %d DMA %d.\n", dev->irq, dev->dma);
560 } else if (hpJ2405A) {
561 static const char dma_tbl[4] = {3, 5, 6, 7};
562 static const char irq_tbl[8] = {3, 4, 5, 9, 10, 11, 12, 15};
563 short reset_val = inw(ioaddr+LANCE_RESET)((__builtin_constant_p((ioaddr+0x14)) && (ioaddr+0x14
) < 256) ? __inwc(ioaddr+0x14) : __inw(ioaddr+0x14))
;
564 dev->dma = dma_tbl[(reset_val >> 2) & 3];
565 dev->irq = irq_tbl[(reset_val >> 4) & 7];
566 printk(" HP J2405A IRQ %d DMA %d.\n", dev->irq, dev->dma);
567 } else if (lance_version == PCNET_ISAP) { /* The plug-n-play version. */
568 short bus_info;
569 outw(8, ioaddr+LANCE_ADDR)((__builtin_constant_p((ioaddr+0x12)) && (ioaddr+0x12
) < 256) ? __outwc((8),(ioaddr+0x12)) : __outw((8),(ioaddr
+0x12)))
;
570 bus_info = inw(ioaddr+LANCE_BUS_IF)((__builtin_constant_p((ioaddr+0x16)) && (ioaddr+0x16
) < 256) ? __inwc(ioaddr+0x16) : __inw(ioaddr+0x16))
;
571 dev->dma = bus_info & 0x07;
572 dev->irq = (bus_info >> 4) & 0x0F;
573 } else {
574 /* The DMA channel may be passed in PARAM1. */
575 if (dev->mem_start & 0x07)
576 dev->dma = dev->mem_start & 0x07;
577 }
578
579 if (dev->dma == 0) {
580 /* Read the DMA channel status register, so that we can avoid
581 stuck DMA channels in the DMA detection below. */
582 dma_channels = ((inb(DMA1_STAT_REG)((__builtin_constant_p((0x08)) && (0x08) < 256) ? __inbc
(0x08) : __inb(0x08))
>> 4) & 0x0f) |
583 (inb(DMA2_STAT_REG)((__builtin_constant_p((0xD0)) && (0xD0) < 256) ? __inbc
(0xD0) : __inb(0xD0))
& 0xf0);
584 }
585 if (dev->irq >= 2)
586 printk(" assigned IRQ %d", dev->irq);
587 else if (lance_version != 0) { /* 7990 boards need DMA detection first. */
588 /* To auto-IRQ we enable the initialization-done and DMA error
589 interrupts. For ISA boards we get a DMA error, but VLB and PCI
590 boards will work. */
591 autoirq_setup(0);
592
593 /* Trigger an initialization just for the interrupt. */
594 outw(0x0041, ioaddr+LANCE_DATA)((__builtin_constant_p((ioaddr+0x10)) && (ioaddr+0x10
) < 256) ? __outwc((0x0041),(ioaddr+0x10)) : __outw((0x0041
),(ioaddr+0x10)))
;
595
596 dev->irq = autoirq_report(2);
597 if (dev->irq)
598 printk(", probed IRQ %d", dev->irq);
599 else {
600 printk(", failed to detect IRQ line.\n");
601 return -ENODEV19;
602 }
603
604 /* Check for the initialization done bit, 0x0100, which means
605 that we don't need a DMA channel. */
606 if (inw(ioaddr+LANCE_DATA)((__builtin_constant_p((ioaddr+0x10)) && (ioaddr+0x10
) < 256) ? __inwc(ioaddr+0x10) : __inw(ioaddr+0x10))
& 0x0100)
607 dev->dma = 4;
608 }
609
610 if (dev->dma == 4) {
611 printk(", no DMA needed.\n");
612 } else if (dev->dma) {
613 if (request_dma(dev->dma, chipname)) {
614 printk("DMA %d allocation failed.\n", dev->dma);
615 return -ENODEV19;
616 } else
617 printk(", assigned DMA %d.\n", dev->dma);
618 } else { /* OK, we have to auto-DMA. */
619 for (i = 0; i < 4; i++) {
620 static const char dmas[] = { 5, 6, 7, 3 };
621 int dma = dmas[i];
622 int boguscnt;
623
624 /* Don't enable a permanently busy DMA channel, or the machine
625 will hang. */
626 if (test_bit(dma, &dma_channels))
627 continue;
628 outw(0x7f04, ioaddr+LANCE_DATA)((__builtin_constant_p((ioaddr+0x10)) && (ioaddr+0x10
) < 256) ? __outwc((0x7f04),(ioaddr+0x10)) : __outw((0x7f04
),(ioaddr+0x10)))
; /* Clear the memory error bits. */
629 if (request_dma(dma, chipname))
630 continue;
631 set_dma_mode(dma, DMA_MODE_CASCADE0xC0);
632 enable_dma(dma);
633
634 /* Trigger an initialization. */
635 outw(0x0001, ioaddr+LANCE_DATA)((__builtin_constant_p((ioaddr+0x10)) && (ioaddr+0x10
) < 256) ? __outwc((0x0001),(ioaddr+0x10)) : __outw((0x0001
),(ioaddr+0x10)))
;
636 for (boguscnt = 100; boguscnt > 0; --boguscnt)
637 if (inw(ioaddr+LANCE_DATA)((__builtin_constant_p((ioaddr+0x10)) && (ioaddr+0x10
) < 256) ? __inwc(ioaddr+0x10) : __inw(ioaddr+0x10))
& 0x0900)
638 break;
639 if (inw(ioaddr+LANCE_DATA)((__builtin_constant_p((ioaddr+0x10)) && (ioaddr+0x10
) < 256) ? __inwc(ioaddr+0x10) : __inw(ioaddr+0x10))
& 0x0100) {
640 dev->dma = dma;
641 printk(", DMA %d.\n", dev->dma);
642 break;
643 } else {
644 disable_dma(dma);
645 free_dma(dma);
646 }
647 }
648 if (i == 4) { /* Failure: bail. */
649 printk("DMA detection failed.\n");
650 return -ENODEV19;
651 }
652 }
653
654 if (lance_version == 0 && dev->irq == 0) {
655 /* We may auto-IRQ now that we have a DMA channel. */
656 /* Trigger an initialization just for the interrupt. */
657 autoirq_setup(0);
658 outw(0x0041, ioaddr+LANCE_DATA)((__builtin_constant_p((ioaddr+0x10)) && (ioaddr+0x10
) < 256) ? __outwc((0x0041),(ioaddr+0x10)) : __outw((0x0041
),(ioaddr+0x10)))
;
659
660 dev->irq = autoirq_report(4);
661 if (dev->irq == 0) {
662 printk(" Failed to detect the 7990 IRQ line.\n");
663 return -ENODEV19;
664 }
665 printk(" Auto-IRQ detected IRQ%d.\n", dev->irq);
666 }
667
668 if (chip_table[lp->chip_version].flags & LANCE_ENABLE_AUTOSELECT0x00000002) {
669 /* Turn on auto-select of media (10baseT or BNC) so that the user
670 can watch the LEDs even if the board isn't opened. */
671 outw(0x0002, ioaddr+LANCE_ADDR)((__builtin_constant_p((ioaddr+0x12)) && (ioaddr+0x12
) < 256) ? __outwc((0x0002),(ioaddr+0x12)) : __outw((0x0002
),(ioaddr+0x12)))
;
672 /* Don't touch 10base2 power bit. */
673 outw(inw(ioaddr+LANCE_BUS_IF) | 0x0002, ioaddr+LANCE_BUS_IF)((__builtin_constant_p((ioaddr+0x16)) && (ioaddr+0x16
) < 256) ? __outwc((((__builtin_constant_p((ioaddr+0x16)) &&
(ioaddr+0x16) < 256) ? __inwc(ioaddr+0x16) : __inw(ioaddr
+0x16)) | 0x0002),(ioaddr+0x16)) : __outw((((__builtin_constant_p
((ioaddr+0x16)) && (ioaddr+0x16) < 256) ? __inwc(ioaddr
+0x16) : __inw(ioaddr+0x16)) | 0x0002),(ioaddr+0x16)))
;
674 }
675
676 if (lance_debug > 0 && did_version++ == 0)
677 printk(version);
678
679 /* The LANCE-specific entries in the device structure. */
680 dev->open = lance_open;
681 dev->hard_start_xmit = lance_start_xmit;
682 dev->stop = lance_close;
683 dev->get_stats = lance_get_stats;
684 dev->set_multicast_list = set_multicast_list;
685
686 return 0;
687}
688
689static int
690lance_open_fail(struct devicelinux_device *dev)
691{
692 return -ENODEV19;
693}
694
695
696
697static int
698lance_open(struct devicelinux_device *dev)
699{
700 struct lance_private *lp = (struct lance_private *)dev->priv;
701 int ioaddr = dev->base_addr;
702 int i;
703
704 if (dev->irq == 0 ||
705 request_irq(dev->irq, &lance_interrupt, 0, lp->name, dev)) {
706 return -EAGAIN11;
707 }
708
709 MOD_INC_USE_COUNT;
710
711 /* We used to allocate DMA here, but that was silly.
712 DMA lines can't be shared! We now permanently allocate them. */
713
714 /* Reset the LANCE */
715 inw(ioaddr+LANCE_RESET)((__builtin_constant_p((ioaddr+0x14)) && (ioaddr+0x14
) < 256) ? __inwc(ioaddr+0x14) : __inw(ioaddr+0x14))
;
716
717 /* The DMA controller is used as a no-operation slave, "cascade mode". */
718 if (dev->dma != 4) {
719 enable_dma(dev->dma);
720 set_dma_mode(dev->dma, DMA_MODE_CASCADE0xC0);
721 }
722
723 /* Un-Reset the LANCE, needed only for the NE2100. */
724 if (chip_table[lp->chip_version].flags & LANCE_MUST_UNRESET0x00000008)
725 outw(0, ioaddr+LANCE_RESET)((__builtin_constant_p((ioaddr+0x14)) && (ioaddr+0x14
) < 256) ? __outwc((0),(ioaddr+0x14)) : __outw((0),(ioaddr
+0x14)))
;
726
727 if (chip_table[lp->chip_version].flags & LANCE_ENABLE_AUTOSELECT0x00000002) {
728 /* This is 79C960-specific: Turn on auto-select of media (AUI, BNC). */
729 outw(0x0002, ioaddr+LANCE_ADDR)((__builtin_constant_p((ioaddr+0x12)) && (ioaddr+0x12
) < 256) ? __outwc((0x0002),(ioaddr+0x12)) : __outw((0x0002
),(ioaddr+0x12)))
;
730 /* Only touch autoselect bit. */
731 outw(inw(ioaddr+LANCE_BUS_IF) | 0x0002, ioaddr+LANCE_BUS_IF)((__builtin_constant_p((ioaddr+0x16)) && (ioaddr+0x16
) < 256) ? __outwc((((__builtin_constant_p((ioaddr+0x16)) &&
(ioaddr+0x16) < 256) ? __inwc(ioaddr+0x16) : __inw(ioaddr
+0x16)) | 0x0002),(ioaddr+0x16)) : __outw((((__builtin_constant_p
((ioaddr+0x16)) && (ioaddr+0x16) < 256) ? __inwc(ioaddr
+0x16) : __inw(ioaddr+0x16)) | 0x0002),(ioaddr+0x16)))
;
732 }
733
734 if (lance_debug > 1)
735 printk("%s: lance_open() irq %d dma %d tx/rx rings %#x/%#x init %#x.\n",
736 dev->name, dev->irq, dev->dma,
737 (u32) virt_to_busvirt_to_phys(lp->tx_ring),
738 (u32) virt_to_busvirt_to_phys(lp->rx_ring),
739 (u32) virt_to_busvirt_to_phys(&lp->init_block));
740
741 lance_init_ring(dev, GFP_KERNEL0x03);
742 /* Re-initialize the LANCE, and start it when done. */
743 outw(0x0001, ioaddr+LANCE_ADDR)((__builtin_constant_p((ioaddr+0x12)) && (ioaddr+0x12
) < 256) ? __outwc((0x0001),(ioaddr+0x12)) : __outw((0x0001
),(ioaddr+0x12)))
;
744 outw((short) (u32) virt_to_bus(&lp->init_block), ioaddr+LANCE_DATA)((__builtin_constant_p((ioaddr+0x10)) && (ioaddr+0x10
) < 256) ? __outwc(((short) (u32) virt_to_phys(&lp->
init_block)),(ioaddr+0x10)) : __outw(((short) (u32) virt_to_phys
(&lp->init_block)),(ioaddr+0x10)))
;
745 outw(0x0002, ioaddr+LANCE_ADDR)((__builtin_constant_p((ioaddr+0x12)) && (ioaddr+0x12
) < 256) ? __outwc((0x0002),(ioaddr+0x12)) : __outw((0x0002
),(ioaddr+0x12)))
;
746 outw(((u32)virt_to_bus(&lp->init_block)) >> 16, ioaddr+LANCE_DATA)((__builtin_constant_p((ioaddr+0x10)) && (ioaddr+0x10
) < 256) ? __outwc((((u32)virt_to_phys(&lp->init_block
)) >> 16),(ioaddr+0x10)) : __outw((((u32)virt_to_phys(&
lp->init_block)) >> 16),(ioaddr+0x10)))
;
747
748 outw(0x0004, ioaddr+LANCE_ADDR)((__builtin_constant_p((ioaddr+0x12)) && (ioaddr+0x12
) < 256) ? __outwc((0x0004),(ioaddr+0x12)) : __outw((0x0004
),(ioaddr+0x12)))
;
749 outw(0x0915, ioaddr+LANCE_DATA)((__builtin_constant_p((ioaddr+0x10)) && (ioaddr+0x10
) < 256) ? __outwc((0x0915),(ioaddr+0x10)) : __outw((0x0915
),(ioaddr+0x10)))
;
750
751 outw(0x0000, ioaddr+LANCE_ADDR)((__builtin_constant_p((ioaddr+0x12)) && (ioaddr+0x12
) < 256) ? __outwc((0x0000),(ioaddr+0x12)) : __outw((0x0000
),(ioaddr+0x12)))
;
752 outw(0x0001, ioaddr+LANCE_DATA)((__builtin_constant_p((ioaddr+0x10)) && (ioaddr+0x10
) < 256) ? __outwc((0x0001),(ioaddr+0x10)) : __outw((0x0001
),(ioaddr+0x10)))
;
753
754 dev->tbusy = 0;
755 dev->interrupt = 0;
756 dev->start = 1;
757 i = 0;
758 while (i++ < 100)
759 if (inw(ioaddr+LANCE_DATA)((__builtin_constant_p((ioaddr+0x10)) && (ioaddr+0x10
) < 256) ? __inwc(ioaddr+0x10) : __inw(ioaddr+0x10))
& 0x0100)
760 break;
761 /*
762 * We used to clear the InitDone bit, 0x0100, here but Mark Stockton
763 * reports that doing so triggers a bug in the '974.
764 */
765 outw(0x0042, ioaddr+LANCE_DATA)((__builtin_constant_p((ioaddr+0x10)) && (ioaddr+0x10
) < 256) ? __outwc((0x0042),(ioaddr+0x10)) : __outw((0x0042
),(ioaddr+0x10)))
;
766
767 if (lance_debug > 2)
768 printk("%s: LANCE open after %d ticks, init block %#x csr0 %4.4x.\n",
769 dev->name, i, (u32) virt_to_busvirt_to_phys(&lp->init_block), inw(ioaddr+LANCE_DATA)((__builtin_constant_p((ioaddr+0x10)) && (ioaddr+0x10
) < 256) ? __inwc(ioaddr+0x10) : __inw(ioaddr+0x10))
);
770
771 return 0; /* Always succeed */
772}
773
774/* The LANCE has been halted for one reason or another (busmaster memory
775 arbitration error, Tx FIFO underflow, driver stopped it to reconfigure,
776 etc.). Modern LANCE variants always reload their ring-buffer
777 configuration when restarted, so we must reinitialize our ring
778 context before restarting. As part of this reinitialization,
779 find all packets still on the Tx ring and pretend that they had been
780 sent (in effect, drop the packets on the floor) - the higher-level
781 protocols will time out and retransmit. It'd be better to shuffle
782 these skbs to a temp list and then actually re-Tx them after
783 restarting the chip, but I'm too lazy to do so right now. dplatt@3do.com
784*/
785
786static void
787lance_purge_tx_ring(struct devicelinux_device *dev)
788{
789 struct lance_private *lp = (struct lance_private *)dev->priv;
790 int i;
791
792 for (i = 0; i < TX_RING_SIZE(1 << (4)); i++) {
793 if (lp->tx_skbuff[i]) {
794 dev_kfree_skb(lp->tx_skbuff[i],FREE_WRITE0);
795 lp->tx_skbuff[i] = NULL((void *) 0);
796 }
797 }
798}
799
800
801/* Initialize the LANCE Rx and Tx rings. */
802static void
803lance_init_ring(struct devicelinux_device *dev, int gfp)
804{
805 struct lance_private *lp = (struct lance_private *)dev->priv;
806 int i;
807
808 lp->lock = 0, lp->tx_full = 0;
809 lp->cur_rx = lp->cur_tx = 0;
810 lp->dirty_rx = lp->dirty_tx = 0;
811
812 for (i = 0; i < RX_RING_SIZE(1 << (4)); i++) {
813 struct sk_buff *skb;
814 void *rx_buff;
815
816 skb = alloc_skb(PKT_BUF_SZ1544, GFP_DMA0x80 | gfp);
817 lp->rx_skbuff[i] = skb;
818 if (skb) {
819 skb->dev = dev;
820 rx_buff = skb->tail;
821 } else
822 rx_buff = kmalloclinux_kmalloc(PKT_BUF_SZ1544, GFP_DMA0x80 | gfp);
823 if (rx_buff == NULL((void *) 0))
824 lp->rx_ring[i].base = 0;
825 else
826 lp->rx_ring[i].base = (u32)virt_to_busvirt_to_phys(rx_buff) | 0x80000000;
827 lp->rx_ring[i].buf_length = -PKT_BUF_SZ1544;
828 }
829 /* The Tx buffer address is filled in as needed, but we do need to clear
830 the upper ownership bit. */
831 for (i = 0; i < TX_RING_SIZE(1 << (4)); i++) {
832 lp->tx_skbuff[i] = 0;
833 lp->tx_ring[i].base = 0;
834 }
835
836 lp->init_block.mode = 0x0000;
837 for (i = 0; i < 6; i++)
838 lp->init_block.phys_addr[i] = dev->dev_addr[i];
839 lp->init_block.filter[0] = 0x00000000;
840 lp->init_block.filter[1] = 0x00000000;
841 lp->init_block.rx_ring = ((u32)virt_to_busvirt_to_phys(lp->rx_ring) & 0xffffff) | RX_RING_LEN_BITS((4) << 29);
842 lp->init_block.tx_ring = ((u32)virt_to_busvirt_to_phys(lp->tx_ring) & 0xffffff) | TX_RING_LEN_BITS((4) << 29);
843}
844
845static void
846lance_restart(struct devicelinux_device *dev, unsigned int csr0_bits, int must_reinit)
847{
848 struct lance_private *lp = (struct lance_private *)dev->priv;
849
850 if (must_reinit ||
851 (chip_table[lp->chip_version].flags & LANCE_MUST_REINIT_RING0x00000004)) {
852 lance_purge_tx_ring(dev);
853 lance_init_ring(dev, GFP_ATOMIC0x01);
854 }
855 outw(0x0000, dev->base_addr + LANCE_ADDR)((__builtin_constant_p((dev->base_addr + 0x12)) &&
(dev->base_addr + 0x12) < 256) ? __outwc((0x0000),(dev
->base_addr + 0x12)) : __outw((0x0000),(dev->base_addr +
0x12)))
;
856 outw(csr0_bits, dev->base_addr + LANCE_DATA)((__builtin_constant_p((dev->base_addr + 0x10)) &&
(dev->base_addr + 0x10) < 256) ? __outwc((csr0_bits),(
dev->base_addr + 0x10)) : __outw((csr0_bits),(dev->base_addr
+ 0x10)))
;
857}
858
859static int
860lance_start_xmit(struct sk_buff *skb, struct devicelinux_device *dev)
861{
862 struct lance_private *lp = (struct lance_private *)dev->priv;
863 int ioaddr = dev->base_addr;
864 int entry;
865 unsigned long flags;
866
867 /* Transmitter timeout, serious problems. */
868 if (dev->tbusy) {
869 int tickssofar = jiffies - dev->trans_start;
870 if (tickssofar < 20)
871 return 1;
872 outw(0, ioaddr+LANCE_ADDR)((__builtin_constant_p((ioaddr+0x12)) && (ioaddr+0x12
) < 256) ? __outwc((0),(ioaddr+0x12)) : __outw((0),(ioaddr
+0x12)))
;
873 printk("%s: transmit timed out, status %4.4x, resetting.\n",
874 dev->name, inw(ioaddr+LANCE_DATA)((__builtin_constant_p((ioaddr+0x10)) && (ioaddr+0x10
) < 256) ? __inwc(ioaddr+0x10) : __inw(ioaddr+0x10))
);
875 outw(0x0004, ioaddr+LANCE_DATA)((__builtin_constant_p((ioaddr+0x10)) && (ioaddr+0x10
) < 256) ? __outwc((0x0004),(ioaddr+0x10)) : __outw((0x0004
),(ioaddr+0x10)))
;
876 lp->stats.tx_errors++;
877#ifndef final_version
878 {
879 int i;
880 printk(" Ring data dump: dirty_tx %d cur_tx %d%s cur_rx %d.",
881 lp->dirty_tx, lp->cur_tx, lp->tx_full ? " (full)" : "",
882 lp->cur_rx);
883 for (i = 0 ; i < RX_RING_SIZE(1 << (4)); i++)
884 printk("%s %08x %04x %04x", i & 0x3 ? "" : "\n ",
885 lp->rx_ring[i].base, -lp->rx_ring[i].buf_length,
886 lp->rx_ring[i].msg_length);
887 for (i = 0 ; i < TX_RING_SIZE(1 << (4)); i++)
888 printk("%s %08x %04x %04x", i & 0x3 ? "" : "\n ",
889 lp->tx_ring[i].base, -lp->tx_ring[i].length,
890 lp->tx_ring[i].misc);
891 printk("\n");
892 }
893#endif
894 lance_restart(dev, 0x0043, 1);
895
896 dev->tbusy=0;
897 dev->trans_start = jiffies;
898
899 return 0;
900 }
901
902 if (lance_debug > 3) {
903 outw(0x0000, ioaddr+LANCE_ADDR)((__builtin_constant_p((ioaddr+0x12)) && (ioaddr+0x12
) < 256) ? __outwc((0x0000),(ioaddr+0x12)) : __outw((0x0000
),(ioaddr+0x12)))
;
904 printk("%s: lance_start_xmit() called, csr0 %4.4x.\n", dev->name,
905 inw(ioaddr+LANCE_DATA)((__builtin_constant_p((ioaddr+0x10)) && (ioaddr+0x10
) < 256) ? __inwc(ioaddr+0x10) : __inw(ioaddr+0x10))
);
906 outw(0x0000, ioaddr+LANCE_DATA)((__builtin_constant_p((ioaddr+0x10)) && (ioaddr+0x10
) < 256) ? __outwc((0x0000),(ioaddr+0x10)) : __outw((0x0000
),(ioaddr+0x10)))
;
907 }
908
909 /* Block a timer-based transmit from overlapping. This could better be
910 done with atomic_swap(1, dev->tbusy), but set_bit() works as well. */
911 if (set_bit(0, (void*)&dev->tbusy) != 0) {
912 printk("%s: Transmitter access conflict.\n", dev->name);
913 return 1;
914 }
915
916 if (set_bit(0, (void*)&lp->lock) != 0) {
917 if (lance_debug > 0)
918 printk("%s: tx queue lock!.\n", dev->name);
919 /* don't clear dev->tbusy flag. */
920 return 1;
921 }
922
923 /* Fill in a Tx ring entry */
924
925 /* Mask to ring buffer boundary. */
926 entry = lp->cur_tx & TX_RING_MOD_MASK((1 << (4)) - 1);
927
928 /* Caution: the write order is important here, set the base address
929 with the "ownership" bits last. */
930
 931 /* The old LANCE chips don't automatically pad buffers to min. size. */
932 if (chip_table[lp->chip_version].flags & LANCE_MUST_PAD0x00000001) {
933 lp->tx_ring[entry].length =
934 -(ETH_ZLEN60 < skb->len ? skb->len : ETH_ZLEN60);
935 } else
936 lp->tx_ring[entry].length = -skb->len;
937
938 lp->tx_ring[entry].misc = 0x0000;
939
940 /* If any part of this buffer is >16M we must copy it to a low-memory
941 buffer. */
942 if ((u32)virt_to_busvirt_to_phys(skb->data) + skb->len > 0x01000000) {
943 if (lance_debug > 5)
944 printk("%s: bouncing a high-memory packet (%#x).\n",
945 dev->name, (u32)virt_to_busvirt_to_phys(skb->data));
946 memcpy(&lp->tx_bounce_buffs[entry], skb->data, skb->len)(__builtin_constant_p(skb->len) ? __constant_memcpy((&
lp->tx_bounce_buffs[entry]),(skb->data),(skb->len)) :
__memcpy((&lp->tx_bounce_buffs[entry]),(skb->data)
,(skb->len)))
;
947 lp->tx_ring[entry].base =
948 ((u32)virt_to_busvirt_to_phys((lp->tx_bounce_buffs + entry)) & 0xffffff) | 0x83000000;
949 dev_kfree_skb (skb, FREE_WRITE0);
950 } else {
951 lp->tx_skbuff[entry] = skb;
952 lp->tx_ring[entry].base = ((u32)virt_to_busvirt_to_phys(skb->data) & 0xffffff) | 0x83000000;
953 }
954 lp->cur_tx++;
955
956 /* Trigger an immediate send poll. */
957 outw(0x0000, ioaddr+LANCE_ADDR)((__builtin_constant_p((ioaddr+0x12)) && (ioaddr+0x12
) < 256) ? __outwc((0x0000),(ioaddr+0x12)) : __outw((0x0000
),(ioaddr+0x12)))
;
958 outw(0x0048, ioaddr+LANCE_DATA)((__builtin_constant_p((ioaddr+0x10)) && (ioaddr+0x10
) < 256) ? __outwc((0x0048),(ioaddr+0x10)) : __outw((0x0048
),(ioaddr+0x10)))
;
959
960 dev->trans_start = jiffies;
961
962 save_flags(flags)__asm__ __volatile__("pushf ; pop %0" : "=r" (flags): :"memory"
)
;
963 cli()__asm__ __volatile__ ("cli": : :"memory");
964 lp->lock = 0;
965 if (lp->tx_ring[(entry+1) & TX_RING_MOD_MASK((1 << (4)) - 1)].base == 0)
966 dev->tbusy=0;
967 else
968 lp->tx_full = 1;
969 restore_flags(flags)__asm__ __volatile__("push %0 ; popf": :"g" (flags):"memory");
970
971 return 0;
972}
973
974/* The LANCE interrupt handler. */
975static void
976lance_interrupt(int irq, void *dev_id, struct pt_regs * regs)
977{
978 struct devicelinux_device *dev = (struct devicelinux_device *)dev_id;
979 struct lance_private *lp;
980 int csr0, ioaddr, boguscnt=10;
981 int must_restart;
982
983 if (dev == NULL((void *) 0)) {
984 printk ("lance_interrupt(): irq %d for unknown device.\n", irq);
985 return;
986 }
987
988 ioaddr = dev->base_addr;
989 lp = (struct lance_private *)dev->priv;
990 if (dev->interrupt)
991 printk("%s: Re-entering the interrupt handler.\n", dev->name);
992
993 dev->interrupt = 1;
994
995 outw(0x00, dev->base_addr + LANCE_ADDR)((__builtin_constant_p((dev->base_addr + 0x12)) &&
(dev->base_addr + 0x12) < 256) ? __outwc((0x00),(dev->
base_addr + 0x12)) : __outw((0x00),(dev->base_addr + 0x12)
))
;
996 while ((csr0 = inw(dev->base_addr + LANCE_DATA)((__builtin_constant_p((dev->base_addr + 0x10)) &&
(dev->base_addr + 0x10) < 256) ? __inwc(dev->base_addr
+ 0x10) : __inw(dev->base_addr + 0x10))
) & 0x8600
997 && --boguscnt >= 0) {
998 /* Acknowledge all of the current interrupt sources ASAP. */
999 outw(csr0 & ~0x004f, dev->base_addr + LANCE_DATA)((__builtin_constant_p((dev->base_addr + 0x10)) &&
(dev->base_addr + 0x10) < 256) ? __outwc((csr0 & ~
0x004f),(dev->base_addr + 0x10)) : __outw((csr0 & ~0x004f
),(dev->base_addr + 0x10)))
;
1000
1001 must_restart = 0;
1002
1003 if (lance_debug > 5)
1004 printk("%s: interrupt csr0=%#2.2x new csr=%#2.2x.\n",
1005 dev->name, csr0, inw(dev->base_addr + LANCE_DATA)((__builtin_constant_p((dev->base_addr + 0x10)) &&
(dev->base_addr + 0x10) < 256) ? __inwc(dev->base_addr
+ 0x10) : __inw(dev->base_addr + 0x10))
);
1006
1007 if (csr0 & 0x0400) /* Rx interrupt */
1008 lance_rx(dev);
1009
1010 if (csr0 & 0x0200) { /* Tx-done interrupt */
1011 int dirty_tx = lp->dirty_tx;
1012
1013 while (dirty_tx < lp->cur_tx) {
1014 int entry = dirty_tx & TX_RING_MOD_MASK((1 << (4)) - 1);
1015 int status = lp->tx_ring[entry].base;
1016
1017 if (status < 0)
1018 break; /* It still hasn't been Txed */
1019
1020 lp->tx_ring[entry].base = 0;
1021
1022 if (status & 0x40000000) {
 1023 /* There was a major error, log it. */
1024 int err_status = lp->tx_ring[entry].misc;
1025 lp->stats.tx_errors++;
1026 if (err_status & 0x0400) lp->stats.tx_aborted_errors++;
1027 if (err_status & 0x0800) lp->stats.tx_carrier_errors++;
1028 if (err_status & 0x1000) lp->stats.tx_window_errors++;
1029 if (err_status & 0x4000) {
1030 /* Ackk! On FIFO errors the Tx unit is turned off! */
1031 lp->stats.tx_fifo_errors++;
1032 /* Remove this verbosity later! */
1033 printk("%s: Tx FIFO error! Status %4.4x.\n",
1034 dev->name, csr0);
1035 /* Restart the chip. */
1036 must_restart = 1;
1037 }
1038 } else {
1039 if (status & 0x18000000)
1040 lp->stats.collisions++;
1041 lp->stats.tx_packets++;
1042 }
1043
1044 /* We must free the original skb if it's not a data-only copy
1045 in the bounce buffer. */
1046 if (lp->tx_skbuff[entry]) {
1047 dev_kfree_skb(lp->tx_skbuff[entry],FREE_WRITE0);
1048 lp->tx_skbuff[entry] = 0;
1049 }
1050 dirty_tx++;
1051 }
1052
1053#ifndef final_version
1054 if (lp->cur_tx - dirty_tx >= TX_RING_SIZE(1 << (4))) {
1055 printk("out-of-sync dirty pointer, %d vs. %d, full=%d.\n",
1056 dirty_tx, lp->cur_tx, lp->tx_full);
1057 dirty_tx += TX_RING_SIZE(1 << (4));
1058 }
1059#endif
1060
1061 if (lp->tx_full && dev->tbusy
1062 && dirty_tx > lp->cur_tx - TX_RING_SIZE(1 << (4)) + 2) {
1063 /* The ring is no longer full, clear tbusy. */
1064 lp->tx_full = 0;
1065 dev->tbusy = 0;
1066 mark_bh(NET_BH);
1067 }
1068
1069 lp->dirty_tx = dirty_tx;
1070 }
1071
1072 /* Log misc errors. */
1073 if (csr0 & 0x4000) lp->stats.tx_errors++; /* Tx babble. */
1074 if (csr0 & 0x1000) lp->stats.rx_errors++; /* Missed a Rx frame. */
1075 if (csr0 & 0x0800) {
1076 printk("%s: Bus master arbitration failure, status %4.4x.\n",
1077 dev->name, csr0);
1078 /* Restart the chip. */
1079 must_restart = 1;
1080 }
1081
1082 if (must_restart) {
1083 /* stop the chip to clear the error condition, then restart */
1084 outw(0x0000, dev->base_addr + LANCE_ADDR)((__builtin_constant_p((dev->base_addr + 0x12)) &&
(dev->base_addr + 0x12) < 256) ? __outwc((0x0000),(dev
->base_addr + 0x12)) : __outw((0x0000),(dev->base_addr +
0x12)))
;
1085 outw(0x0004, dev->base_addr + LANCE_DATA)((__builtin_constant_p((dev->base_addr + 0x10)) &&
(dev->base_addr + 0x10) < 256) ? __outwc((0x0004),(dev
->base_addr + 0x10)) : __outw((0x0004),(dev->base_addr +
0x10)))
;
1086 lance_restart(dev, 0x0002, 0);
1087 }
1088 }
1089
1090 /* Clear any other interrupt, and set interrupt enable. */
1091 outw(0x0000, dev->base_addr + LANCE_ADDR)((__builtin_constant_p((dev->base_addr + 0x12)) &&
(dev->base_addr + 0x12) < 256) ? __outwc((0x0000),(dev
->base_addr + 0x12)) : __outw((0x0000),(dev->base_addr +
0x12)))
;
1092 outw(0x7940, dev->base_addr + LANCE_DATA)((__builtin_constant_p((dev->base_addr + 0x10)) &&
(dev->base_addr + 0x10) < 256) ? __outwc((0x7940),(dev
->base_addr + 0x10)) : __outw((0x7940),(dev->base_addr +
0x10)))
;
1093
1094 if (lance_debug > 4)
1095 printk("%s: exiting interrupt, csr%d=%#4.4x.\n",
1096 dev->name, inw(ioaddr + LANCE_ADDR)((__builtin_constant_p((ioaddr + 0x12)) && (ioaddr + 0x12
) < 256) ? __inwc(ioaddr + 0x12) : __inw(ioaddr + 0x12))
,
1097 inw(dev->base_addr + LANCE_DATA)((__builtin_constant_p((dev->base_addr + 0x10)) &&
(dev->base_addr + 0x10) < 256) ? __inwc(dev->base_addr
+ 0x10) : __inw(dev->base_addr + 0x10))
);
1098
1099 dev->interrupt = 0;
1100 return;
1101}
1102
1103static int
1104lance_rx(struct devicelinux_device *dev)
1105{
1106 struct lance_private *lp = (struct lance_private *)dev->priv;
1107 int entry = lp->cur_rx & RX_RING_MOD_MASK((1 << (4)) - 1);
1108 int i;
1109
1110 /* If we own the next entry, it's a new packet. Send it up. */
1111 while (lp->rx_ring[entry].base >= 0) {
1112 int status = lp->rx_ring[entry].base >> 24;
1113
1114 if (status != 0x03) { /* There was an error. */
1115 /* There is a tricky error noted by John Murphy,
1116 <murf@perftech.com> to Russ Nelson: Even with full-sized
1117 buffers it's possible for a jabber packet to use two
1118 buffers, with only the last correctly noting the error. */
1119 if (status & 0x01) /* Only count a general error at the */
1120 lp->stats.rx_errors++; /* end of a packet.*/
1121 if (status & 0x20) lp->stats.rx_frame_errors++;
1122 if (status & 0x10) lp->stats.rx_over_errors++;
1123 if (status & 0x08) lp->stats.rx_crc_errors++;
1124 if (status & 0x04) lp->stats.rx_fifo_errors++;
1125 lp->rx_ring[entry].base &= 0x03ffffff;
1126 }
1127 else
1128 {
1129 /* Malloc up new buffer, compatible with net3. */
1130 short pkt_len = (lp->rx_ring[entry].msg_length & 0xfff)-4;
1131 struct sk_buff *skb;
1132
1133 if(pkt_len<60)
1134 {
1135 printk("%s: Runt packet!\n",dev->name);
1136 lp->stats.rx_errors++;
1137 }
1138 else
1139 {
1140 skb = dev_alloc_skb(pkt_len+2);
1141 if (skb == NULL((void *) 0))
1142 {
1143 printk("%s: Memory squeeze, deferring packet.\n", dev->name);
1144 for (i=0; i < RX_RING_SIZE(1 << (4)); i++)
1145 if (lp->rx_ring[(entry+i) & RX_RING_MOD_MASK((1 << (4)) - 1)].base < 0)
1146 break;
1147
1148 if (i > RX_RING_SIZE(1 << (4)) -2)
1149 {
1150 lp->stats.rx_dropped++;
1151 lp->rx_ring[entry].base |= 0x80000000;
1152 lp->cur_rx++;
1153 }
1154 break;
1155 }
1156 skb->dev = dev;
1157 skb_reserve(skb,2); /* 16 byte align */
1158 skb_put(skb,pkt_len); /* Make room */
1159 eth_copy_and_sum(skb,(__builtin_constant_p(pkt_len) ? __constant_memcpy(((skb)->
data),((unsigned char *)phys_to_virt((lp->rx_ring[entry].base
& 0x00ffffff))),(pkt_len)) : __memcpy(((skb)->data),(
(unsigned char *)phys_to_virt((lp->rx_ring[entry].base &
0x00ffffff))),(pkt_len)))
1160 (unsigned char *)bus_to_virt((lp->rx_ring[entry].base & 0x00ffffff)),(__builtin_constant_p(pkt_len) ? __constant_memcpy(((skb)->
data),((unsigned char *)phys_to_virt((lp->rx_ring[entry].base
& 0x00ffffff))),(pkt_len)) : __memcpy(((skb)->data),(
(unsigned char *)phys_to_virt((lp->rx_ring[entry].base &
0x00ffffff))),(pkt_len)))
1161 pkt_len,0)(__builtin_constant_p(pkt_len) ? __constant_memcpy(((skb)->
data),((unsigned char *)phys_to_virt((lp->rx_ring[entry].base
& 0x00ffffff))),(pkt_len)) : __memcpy(((skb)->data),(
(unsigned char *)phys_to_virt((lp->rx_ring[entry].base &
0x00ffffff))),(pkt_len)))
;
1162 skb->protocol=eth_type_trans(skb,dev)((unsigned short)0);
1163 netif_rx(skb);
1164 lp->stats.rx_packets++;
1165 }
1166 }
1167 /* The docs say that the buffer length isn't touched, but Andrew Boyd
1168 of QNX reports that some revs of the 79C965 clear it. */
1169 lp->rx_ring[entry].buf_length = -PKT_BUF_SZ1544;
1170 lp->rx_ring[entry].base |= 0x80000000;
1171 entry = (++lp->cur_rx) & RX_RING_MOD_MASK((1 << (4)) - 1);
1172 }
1173
1174 /* We should check that at least two ring entries are free. If not,
1175 we should free one and mark stats->rx_dropped++. */
1176
1177 return 0;
1178}
1179
1180static int
1181lance_close(struct devicelinux_device *dev)
1182{
1183 int ioaddr = dev->base_addr;
1184 struct lance_private *lp = (struct lance_private *)dev->priv;
1185 int i;
1186
1187 dev->start = 0;
1188 dev->tbusy = 1;
1189
1190 if (chip_table[lp->chip_version].flags & LANCE_HAS_MISSED_FRAME0x00000010) {
1191 outw(112, ioaddr+LANCE_ADDR)((__builtin_constant_p((ioaddr+0x12)) && (ioaddr+0x12
) < 256) ? __outwc((112),(ioaddr+0x12)) : __outw((112),(ioaddr
+0x12)))
;
1192 lp->stats.rx_missed_errors = inw(ioaddr+LANCE_DATA)((__builtin_constant_p((ioaddr+0x10)) && (ioaddr+0x10
) < 256) ? __inwc(ioaddr+0x10) : __inw(ioaddr+0x10))
;
1193 }
1194 outw(0, ioaddr+LANCE_ADDR)((__builtin_constant_p((ioaddr+0x12)) && (ioaddr+0x12
) < 256) ? __outwc((0),(ioaddr+0x12)) : __outw((0),(ioaddr
+0x12)))
;
1195
1196 if (lance_debug > 1)
1197 printk("%s: Shutting down ethercard, status was %2.2x.\n",
1198 dev->name, inw(ioaddr+LANCE_DATA)((__builtin_constant_p((ioaddr+0x10)) && (ioaddr+0x10
) < 256) ? __inwc(ioaddr+0x10) : __inw(ioaddr+0x10))
);
1199
1200 /* We stop the LANCE here -- it occasionally polls
1201 memory if we don't. */
1202 outw(0x0004, ioaddr+LANCE_DATA)((__builtin_constant_p((ioaddr+0x10)) && (ioaddr+0x10
) < 256) ? __outwc((0x0004),(ioaddr+0x10)) : __outw((0x0004
),(ioaddr+0x10)))
;
1203
1204 if (dev->dma != 4)
1205 disable_dma(dev->dma);
1206
1207 free_irq(dev->irq, dev);
1208
1209 /* Free all the skbuffs in the Rx and Tx queues. */
1210 for (i = 0; i < RX_RING_SIZE(1 << (4)); i++) {
1211 struct sk_buff *skb = lp->rx_skbuff[i];
1212 lp->rx_skbuff[i] = 0;
1213 lp->rx_ring[i].base = 0; /* Not owned by LANCE chip. */
1214 if (skb) {
1215 skb->free = 1;
1216 dev_kfree_skb(skb, FREE_WRITE0);
1217 }
1218 }
1219 for (i = 0; i < TX_RING_SIZE(1 << (4)); i++) {
1220 if (lp->tx_skbuff[i])
1221 dev_kfree_skb(lp->tx_skbuff[i], FREE_WRITE0);
1222 lp->tx_skbuff[i] = 0;
1223 }
1224
1225 MOD_DEC_USE_COUNT;
1226 return 0;
1227}
1228
1229static struct enet_statistics *
1230lance_get_stats(struct devicelinux_device *dev)
1231{
1232 struct lance_private *lp = (struct lance_private *)dev->priv;
1233 short ioaddr = dev->base_addr;
1234 short saved_addr;
1235 unsigned long flags;
1236
1237 if (chip_table[lp->chip_version].flags & LANCE_HAS_MISSED_FRAME0x00000010) {
1238 save_flags(flags)__asm__ __volatile__("pushf ; pop %0" : "=r" (flags): :"memory"
)
;
1239 cli()__asm__ __volatile__ ("cli": : :"memory");
1240 saved_addr = inw(ioaddr+LANCE_ADDR)((__builtin_constant_p((ioaddr+0x12)) && (ioaddr+0x12
) < 256) ? __inwc(ioaddr+0x12) : __inw(ioaddr+0x12))
;
1241 outw(112, ioaddr+LANCE_ADDR)((__builtin_constant_p((ioaddr+0x12)) && (ioaddr+0x12
) < 256) ? __outwc((112),(ioaddr+0x12)) : __outw((112),(ioaddr
+0x12)))
;
1242 lp->stats.rx_missed_errors = inw(ioaddr+LANCE_DATA)((__builtin_constant_p((ioaddr+0x10)) && (ioaddr+0x10
) < 256) ? __inwc(ioaddr+0x10) : __inw(ioaddr+0x10))
;
1243 outw(saved_addr, ioaddr+LANCE_ADDR)((__builtin_constant_p((ioaddr+0x12)) && (ioaddr+0x12
) < 256) ? __outwc((saved_addr),(ioaddr+0x12)) : __outw((saved_addr
),(ioaddr+0x12)))
;
1244 restore_flags(flags)__asm__ __volatile__("push %0 ; popf": :"g" (flags):"memory");
1245 }
1246
1247 return &lp->stats;
1248}
1249
1250/* Set or clear the multicast filter for this adaptor.
1251 */
1252
1253static void set_multicast_list(struct devicelinux_device *dev)
1254{
1255 short ioaddr = dev->base_addr;
1256
1257 outw(0, ioaddr+LANCE_ADDR)((__builtin_constant_p((ioaddr+0x12)) && (ioaddr+0x12
) < 256) ? __outwc((0),(ioaddr+0x12)) : __outw((0),(ioaddr
+0x12)))
;
1258 outw(0x0004, ioaddr+LANCE_DATA)((__builtin_constant_p((ioaddr+0x10)) && (ioaddr+0x10
) < 256) ? __outwc((0x0004),(ioaddr+0x10)) : __outw((0x0004
),(ioaddr+0x10)))
; /* Temporarily stop the lance. */
1259
1260 if (dev->flags&IFF_PROMISC0x100) {
Step 1: Taking false branch
1261 /* Log any net taps. */
1262 printk("%s: Promiscuous mode enabled.\n", dev->name);
1263 outw(15, ioaddr+LANCE_ADDR)((__builtin_constant_p((ioaddr+0x12)) && (ioaddr+0x12
) < 256) ? __outwc((15),(ioaddr+0x12)) : __outw((15),(ioaddr
+0x12)))
;
1264 outw(0x8000, ioaddr+LANCE_DATA)((__builtin_constant_p((ioaddr+0x10)) && (ioaddr+0x10
) < 256) ? __outwc((0x8000),(ioaddr+0x10)) : __outw((0x8000
),(ioaddr+0x10)))
; /* Set promiscuous mode */
1265 } else {
1266 short multicast_table[4];
1267 int i;
1268 int num_addrs=dev->mc_count;
1269 if(dev->flags&IFF_ALLMULTI0x200)
Step 2: Taking false branch
1270 num_addrs=1;
1271 /* FIXIT: We don't use the multicast table, but rely on upper-layer filtering. */
1272 memset(multicast_table, (num_addrs == 0) ? 0 : -1, sizeof(multicast_table))(__builtin_constant_p((num_addrs == 0) ? 0 : -1) ? (__builtin_constant_p
((sizeof(multicast_table))) ? __constant_c_and_count_memset((
(multicast_table)),((0x01010101UL*(unsigned char)((num_addrs ==
0) ? 0 : -1))),((sizeof(multicast_table)))) : __constant_c_memset
(((multicast_table)),((0x01010101UL*(unsigned char)((num_addrs
== 0) ? 0 : -1))),((sizeof(multicast_table))))) : (__builtin_constant_p
((sizeof(multicast_table))) ? __memset_generic((((multicast_table
))),((((num_addrs == 0) ? 0 : -1))),(((sizeof(multicast_table
))))) : __memset_generic(((multicast_table)),(((num_addrs == 0
) ? 0 : -1)),((sizeof(multicast_table))))))
;
1273 for (i = 0; i < 4; i++) {
Step 3: Loop condition is true. Entering loop body
1274 outw(8 + i, ioaddr+LANCE_ADDR)((__builtin_constant_p((ioaddr+0x12)) && (ioaddr+0x12
) < 256) ? __outwc((8 + i),(ioaddr+0x12)) : __outw((8 + i)
,(ioaddr+0x12)))
;
1275 outw(multicast_table[i], ioaddr+LANCE_DATA)((__builtin_constant_p((ioaddr+0x10)) && (ioaddr+0x10
) < 256) ? __outwc((multicast_table[i]),(ioaddr+0x10)) : __outw
((multicast_table[i]),(ioaddr+0x10)))
;
Step 4: Within the expansion of the macro 'outw': Function call argument is an uninitialized value
1276 }
1277 outw(15, ioaddr+LANCE_ADDR)((__builtin_constant_p((ioaddr+0x12)) && (ioaddr+0x12
) < 256) ? __outwc((15),(ioaddr+0x12)) : __outw((15),(ioaddr
+0x12)))
;
1278 outw(0x0000, ioaddr+LANCE_DATA)((__builtin_constant_p((ioaddr+0x10)) && (ioaddr+0x10
) < 256) ? __outwc((0x0000),(ioaddr+0x10)) : __outw((0x0000
),(ioaddr+0x10)))
; /* Unset promiscuous mode */
1279 }
1280
1281 lance_restart(dev, 0x0142, 0); /* Resume normal operation */
1282
1283}
1284
1285
1286/*
1287 * Local variables:
1288 * compile-command: "gcc -DMODULE -D__KERNEL__ -I/usr/src/linux/net/inet -Wall -Wstrict-prototypes -O6 -m486 -c lance.c"
1289 * c-indent-level: 4
1290 * c-basic-offset: 4
1291 * tab-width: 4
1292 * End:
1293 */