File: | obj-scan-build/../linux/src/drivers/scsi/sym53c8xx.c |
Location: | line 8034, column 8 |
Description: | Value stored to 'cp' during its initialization is never read |
1 | /****************************************************************************** |
2 | ** High Performance device driver for the Symbios 53C896 controller. |
3 | ** |
4 | ** Copyright (C) 1998-2000 Gerard Roudier <groudier@club-internet.fr> |
5 | ** |
6 | ** This driver also supports all the Symbios 53C8XX controller family, |
7 | ** except 53C810 revisions < 16, 53C825 revisions < 16 and all |
8 | ** revisions of 53C815 controllers. |
9 | ** |
10 | ** This driver is based on the Linux port of the FreeBSD ncr driver. |
11 | ** |
12 | ** Copyright (C) 1994 Wolfgang Stanglmeier |
13 | ** |
14 | **----------------------------------------------------------------------------- |
15 | ** |
16 | ** This program is free software; you can redistribute it and/or modify |
17 | ** it under the terms of the GNU General Public License as published by |
18 | ** the Free Software Foundation; either version 2 of the License, or |
19 | ** (at your option) any later version. |
20 | ** |
21 | ** This program is distributed in the hope that it will be useful, |
22 | ** but WITHOUT ANY WARRANTY; without even the implied warranty of |
23 | ** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
24 | ** GNU General Public License for more details. |
25 | ** |
26 | ** You should have received a copy of the GNU General Public License |
27 | ** along with this program; if not, write to the Free Software |
28 | ** Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. |
29 | ** |
30 | **----------------------------------------------------------------------------- |
31 | ** |
32 | ** The Linux port of the FreeBSD ncr driver has been achieved in |
33 | ** november 1995 by: |
34 | ** |
35 | ** Gerard Roudier <groudier@club-internet.fr> |
36 | ** |
37 | ** Being given that this driver originates from the FreeBSD version, and |
38 | ** in order to keep synergy on both, any suggested enhancements and corrections |
39 | ** received on Linux are automatically a potential candidate for the FreeBSD |
40 | ** version. |
41 | ** |
42 | ** The original driver has been written for 386bsd and FreeBSD by |
43 | ** Wolfgang Stanglmeier <wolf@cologne.de> |
44 | ** Stefan Esser <se@mi.Uni-Koeln.de> |
45 | ** |
46 | **----------------------------------------------------------------------------- |
47 | ** |
48 | ** Major contributions: |
49 | ** -------------------- |
50 | ** |
51 | ** NVRAM detection and reading. |
52 | ** Copyright (C) 1997 Richard Waltham <dormouse@farsrobt.demon.co.uk> |
53 | ** |
54 | ******************************************************************************* |
55 | */ |
56 | |
57 | /* |
58 | ** Supported SCSI features: |
59 | ** Synchronous data transfers |
60 | ** Wide16 SCSI BUS |
61 | ** Disconnection/Reselection |
62 | ** Tagged command queuing |
63 | ** SCSI Parity checking |
64 | ** |
65 | ** Supported NCR/SYMBIOS chips: |
66 | ** 53C810A (8 bits, Fast 10, no rom BIOS) |
67 | ** 53C825A (Wide, Fast 10, on-board rom BIOS) |
68 | ** 53C860 (8 bits, Fast 20, no rom BIOS) |
69 | ** 53C875 (Wide, Fast 20, on-board rom BIOS) |
70 | ** 53C876 (Wide, Fast 20 Dual, on-board rom BIOS) |
71 | ** 53C895 (Wide, Fast 40, on-board rom BIOS) |
72 | ** 53C895A (Wide, Fast 40, on-board rom BIOS) |
73 | ** 53C896 (Wide, Fast 40 Dual, on-board rom BIOS) |
74 | ** 53C897 (Wide, Fast 40 Dual, on-board rom BIOS) |
75 | ** 53C1510D (Wide, Fast 40 Dual, on-board rom BIOS) |
76 | ** 53C1010 (Wide, Fast 80 Dual, on-board rom BIOS) |
77 | ** 53C1010_66(Wide, Fast 80 Dual, on-board rom BIOS, 33/66MHz PCI) |
78 | ** |
79 | ** Other features: |
80 | ** Memory mapped IO |
81 | ** Module |
82 | ** Shared IRQ |
83 | */ |
84 | |
85 | /* |
86 | ** Name and version of the driver |
87 | */ |
88 | #define SCSI_NCR_DRIVER_NAME "sym53c8xx-1.7.1-20000726"
89 |
90 | #define SCSI_NCR_DEBUG_FLAGS (0)
91 |
92 | #define NAME53C "sym53c"
93 | #define NAME53C8XX "sym53c8xx"
94 | |
95 | /*========================================================== |
96 | ** |
97 | ** Include files |
98 | ** |
99 | **========================================================== |
100 | */ |
101 | |
102 | #define LinuxVersionCode(v, p, s) (((v)<<16)+((p)<<8)+(s))
103 | |
104 | #ifdef MODULE |
105 | #include <linux/module.h> |
106 | #endif |
107 | |
108 | #include <asm/dma.h> |
109 | #include <asm/io.h> |
110 | #include <asm/system.h> |
111 | #if LINUX_VERSION_CODE >= LinuxVersionCode(2,3,17)
112 | #include <linux/spinlock.h>
113 | #elif LINUX_VERSION_CODE >= LinuxVersionCode(2,1,93)
114 | #include <asm/spinlock.h> |
115 | #endif |
116 | #include <linux/delay.h> |
117 | #include <linux/signal.h> |
118 | #include <linux/sched.h> |
119 | #include <linux/errno.h> |
120 | #include <linux/pci.h> |
121 | #include <linux/string.h> |
122 | #include <linux/malloc.h> |
123 | #include <linux/mm.h> |
124 | #include <linux/ioport.h> |
125 | #include <linux/time.h> |
126 | #include <linux/timer.h> |
127 | #include <linux/stat.h> |
128 | |
129 | #include <linux/version.h> |
130 | #include <linux/blk.h> |
131 | |
132 | #ifdef CONFIG_ALL_PPC |
133 | #include <asm/prom.h> |
134 | #endif |
135 | |
136 | #if LINUX_VERSION_CODE >= LinuxVersionCode(2,1,35)
137 | #include <linux/init.h> |
138 | #endif |
139 | |
140 | #ifndef __init |
141 | #define __init |
142 | #endif |
143 | #ifndef __initdata |
144 | #define __initdata |
145 | #endif |
146 | |
147 | #if LINUX_VERSION_CODE <= LinuxVersionCode(2,1,92)
148 | #include <linux/bios32.h> |
149 | #endif |
150 | |
151 | #include "scsi.h" |
152 | #include "hosts.h" |
153 | #include "constants.h" |
154 | #include "sd.h" |
155 | |
156 | #include <linux/types.h> |
157 | |
158 | /* |
159 | ** Define BITS_PER_LONG for earlier linux versions. |
160 | */ |
161 | #ifndef BITS_PER_LONG
162 | #if (~0UL) == 0xffffffffUL
163 | #define BITS_PER_LONG 32
164 | #else
165 | #define BITS_PER_LONG 64
166 | #endif |
167 | #endif |
168 | |
169 | /* |
170 | ** Define the BSD style u_int32 and u_int64 type. |
171 | ** Are in fact u_int32_t and u_int64_t :-) |
172 | */ |
173 | typedef u32 u_int32; |
174 | typedef u64 u_int64; |
175 | |
176 | #include "sym53c8xx.h" |
177 | |
178 | /* |
179 | ** Do not compile integrity checking code for Linux-2.3.0
180 | ** and above since SCSI data structures are not ready yet. |
181 | */ |
182 | /* #if LINUX_VERSION_CODE < LinuxVersionCode(2,3,0) */ |
183 | #if 0 |
184 | #define SCSI_NCR_INTEGRITY_CHECKING |
185 | #endif |
186 | |
187 | #define MIN(a,b) (((a) < (b)) ? (a) : (b))
188 | #define MAX(a,b) (((a) > (b)) ? (a) : (b))
189 | |
190 | /* |
191 | ** Hmmm... What complex some PCI-HOST bridges actually are, |
192 | ** despite the fact that the PCI specifications are looking |
193 | ** so smart and simple! ;-) |
194 | */ |
195 | #if LINUX_VERSION_CODE >= LinuxVersionCode(2,3,47)
196 | #define SCSI_NCR_DYNAMIC_DMA_MAPPING |
197 | #endif |
198 | |
199 | /*========================================================== |
200 | ** |
201 | ** A la VMS/CAM-3 queue management. |
202 | ** Implemented from linux list management. |
203 | ** |
204 | **========================================================== |
205 | */ |
206 | |
207 | typedef struct xpt_quehead { |
208 | struct xpt_quehead *flink; /* Forward pointer */ |
209 | struct xpt_quehead *blink; /* Backward pointer */ |
210 | } XPT_QUEHEAD; |
211 | |
212 | #define xpt_que_init(ptr) do { \
213 | (ptr)->flink = (ptr); (ptr)->blink = (ptr); \ |
214 | } while (0) |
215 | |
216 | static inline void __xpt_que_add(struct xpt_quehead * new,
217 | struct xpt_quehead * blink, |
218 | struct xpt_quehead * flink) |
219 | { |
220 | flink->blink = new; |
221 | new->flink = flink; |
222 | new->blink = blink; |
223 | blink->flink = new; |
224 | } |
225 | |
226 | static inline void __xpt_que_del(struct xpt_quehead * blink,
227 | struct xpt_quehead * flink) |
228 | { |
229 | flink->blink = blink; |
230 | blink->flink = flink; |
231 | } |
232 | |
233 | static inline int xpt_que_empty(struct xpt_quehead *head)
234 | { |
235 | return head->flink == head; |
236 | } |
237 | |
238 | static inline void xpt_que_splice(struct xpt_quehead *list,
239 | struct xpt_quehead *head) |
240 | { |
241 | struct xpt_quehead *first = list->flink; |
242 | |
243 | if (first != list) { |
244 | struct xpt_quehead *last = list->blink; |
245 | struct xpt_quehead *at = head->flink; |
246 | |
247 | first->blink = head; |
248 | head->flink = first; |
249 | |
250 | last->flink = at; |
251 | at->blink = last; |
252 | } |
253 | } |
254 | |
255 | #define xpt_que_entry(ptr, type, member) \
256 | ((type *)((char *)(ptr)-(unsigned long)(&((type *)0)->member))) |
257 | |
258 | |
259 | #define xpt_insque(new, pos)		__xpt_que_add(new, pos, (pos)->flink)
260 |
261 | #define xpt_remque(el)			__xpt_que_del((el)->blink, (el)->flink)
262 |
263 | #define xpt_insque_head(new, head)	__xpt_que_add(new, head, (head)->flink)
264 | |
265 | static inline struct xpt_quehead *xpt_remque_head(struct xpt_quehead *head)
266 | { |
267 | struct xpt_quehead *elem = head->flink; |
268 | |
269 | if (elem != head) |
270 | __xpt_que_del(head, elem->flink); |
271 | else |
272 | elem = 0; |
273 | return elem; |
274 | } |
275 | |
276 | #define xpt_insque_tail(new, head)	__xpt_que_add(new, (head)->blink, head)
277 | |
278 | static inline struct xpt_quehead *xpt_remque_tail(struct xpt_quehead *head)
279 | { |
280 | struct xpt_quehead *elem = head->blink; |
281 | |
282 | if (elem != head) |
283 | __xpt_que_del(elem->blink, head); |
284 | else |
285 | elem = 0; |
286 | return elem; |
287 | } |
288 | |
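These queue primitives are intrusive: the XPT_QUEHEAD link is embedded in the queued object and xpt_que_entry() recovers the enclosing structure from the link pointer. A minimal usage sketch follows (the job structure and its field names are illustrative only, they are not taken from this driver):

    struct job {
        XPT_QUEHEAD link;   /* link embedded in the queued object */
        int id;
    };

    static XPT_QUEHEAD pending;     /* list head */

    static void job_example(struct job *jp)
    {
        XPT_QUEHEAD *qp;

        xpt_que_init(&pending);             /* empty list: head points to itself */
        xpt_insque_tail(&jp->link, &pending);
        qp = xpt_remque_head(&pending);     /* returns 0 when the list is empty */
        if (qp) {
            struct job *j = xpt_que_entry(qp, struct job, link);
            /* j == jp */
        }
    }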
289 | /*========================================================== |
290 | ** |
291 | ** Configuration and Debugging |
292 | ** |
293 | **========================================================== |
294 | */ |
295 | |
296 | /* |
297 | ** SCSI address of this device. |
298 | ** The boot routines should have set it. |
299 | ** If not, use this. |
300 | */ |
301 | |
302 | #ifndef SCSI_NCR_MYADDR
303 | #define SCSI_NCR_MYADDR (7)
304 | #endif |
305 | |
306 | /* |
307 | ** The maximum number of tags per logic unit. |
308 | ** Used only for devices that support tags. |
309 | */ |
310 | |
311 | #ifndef SCSI_NCR_MAX_TAGS
312 | #define SCSI_NCR_MAX_TAGS (8)
313 | #endif |
314 | |
315 | /* |
316 | ** TAGS are actually unlimited (256 tags/lun). |
317 | ** But Linux only supports 255. :) |
318 | */ |
319 | #if SCSI_NCR_MAX_TAGS > 255
320 | #define MAX_TAGS 255
321 | #else
322 | #define MAX_TAGS SCSI_NCR_MAX_TAGS
323 | #endif |
324 | |
325 | /* |
326 | ** Since the ncr chips only have an 8 bit ALU, we try to be clever
327 | ** about offset calculation in the TASK TABLE per LUN that is an |
328 | ** array of DWORDS = 4 bytes. |
329 | */ |
330 | #if MAX_TAGS > (512/4)
331 | #define MAX_TASKS (1024/4)
332 | #elif MAX_TAGS > (256/4)
333 | #define MAX_TASKS (512/4)
334 | #else
335 | #define MAX_TASKS (256/4)
336 | #endif |
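For example, with the default SCSI_NCR_MAX_TAGS of (8):

    MAX_TASKS  = 256/4 = 64 task slots per LUN
    table size = 64 * 4 = 256 bytes
    offset of slot n = n * 4 = n << 2   (a single byte for n < 64)

so the SCRIPTS processor can form a task-table offset with its 8-bit arithmetic and at most a shift, which appears to be the point of keeping the table at 256, 512 or 1024 bytes.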
337 | |
338 | /* |
339 | ** This one means 'NO TAG for this job' |
340 | */ |
341 | #define NO_TAG (256)
342 | |
343 | /* |
344 | ** Number of targets supported by the driver. |
345 | ** n permits target numbers 0..n-1. |
346 | ** Default is 16, meaning targets #0..#15. |
347 | ** #7 .. is myself. |
348 | */ |
349 | |
350 | #ifdef SCSI_NCR_MAX_TARGET
351 | #define MAX_TARGET (SCSI_NCR_MAX_TARGET)
352 | #else
353 | #define MAX_TARGET (16)
354 | #endif |
355 | |
356 | /* |
357 | ** Number of logic units supported by the driver. |
358 | ** n enables logic unit numbers 0..n-1. |
359 | ** The common SCSI devices require only |
360 | ** one lun, so take 1 as the default. |
361 | */ |
362 | |
363 | #ifdef SCSI_NCR_MAX_LUN
364 | #define MAX_LUN 64
365 | #else
366 | #define MAX_LUN (1)
367 | #endif |
368 | |
369 | /* |
370 | ** Asynchronous pre-scaler (ns). Shall be 40 for |
371 | ** the SCSI timings to be compliant. |
372 | */ |
373 | |
374 | #ifndef SCSI_NCR_MIN_ASYNC
375 | #define SCSI_NCR_MIN_ASYNC (40)
376 | #endif |
377 | |
378 | /* |
379 | ** The maximum number of jobs scheduled for starting. |
380 | ** We allocate 4 entries more than the value we announce |
381 | ** to the SCSI upper layer. Guess why ! :-) |
382 | */ |
383 | |
384 | #ifdef SCSI_NCR_CAN_QUEUE
385 | #define MAX_START (SCSI_NCR_CAN_QUEUE + 4)
386 | #else
387 | #define MAX_START (MAX_TARGET + 7 * MAX_TAGS)
388 | #endif |
389 | |
390 | /* |
391 | ** We do not want to allocate more than 1 PAGE for the
392 | ** start queue and the done queue. We hard-code entry
393 | ** size to 8 in order to let cpp do the checking.
394 | ** This allows 512-4=508 pending IOs on i386, but for now Linux
395 | ** does not seem able to provide the driver with this many IOs.
396 | */ |
397 | #if MAX_START > PAGE_SIZE/8
398 | #undef MAX_START
399 | #define MAX_START (PAGE_SIZE/8)
400 | #endif |
401 | |
402 | /* |
403 | ** The maximum number of segments a transfer is split into. |
404 | ** We support up to 127 segments for both read and write. |
405 | */ |
406 | |
407 | #define MAX_SCATTER (SCSI_NCR_MAX_SCATTER)
408 | #define SCR_SG_SIZE (2)
409 | |
410 | /* |
411 | ** other |
412 | */ |
413 | |
414 | #define NCR_SNOOP_TIMEOUT (1000000)
415 | |
416 | /*========================================================== |
417 | ** |
418 | ** Miscellaneous BSDish defines.
419 | ** |
420 | **========================================================== |
421 | */ |
422 | |
423 | #define u_char   unsigned char
424 | #define u_short  unsigned short
425 | #define u_int    unsigned int
426 | #define u_long   unsigned long
427 | |
428 | #ifndef bcopy |
429 | #define bcopy(s, d, n)	memcpy((d), (s), (n))
430 | #endif |
431 | |
432 | #ifndef bzero |
433 | #define bzero(d, n)	memset((d), 0, (n))
434 | #endif |
435 | |
436 | #ifndef offsetof |
437 | #define offsetof(t, m)	((size_t) (&((t *)0)->m))
438 | #endif |
439 | |
440 | /* |
441 | ** Simple Wrapper to kernel PCI bus interface. |
442 | ** |
443 | ** This wrapper lets us get rid of the old kernel PCI interface
444 | ** while still preserving linux-2.0 compatibility.
445 | ** In fact, it is mostly an incomplete emulation of the new
446 | ** PCI code for pre-2.2 kernels. When kernel-2.0 support
447 | ** is dropped, we will just have to remove most of this
448 | ** code.
449 | */ |
450 | |
451 | #if LINUX_VERSION_CODE >= LinuxVersionCode(2,2,0)
452 |
453 | typedef struct pci_dev *pcidev_t;
454 | #define PCIDEV_NULL		(0)
455 | #define PciBusNumber(d)		(d)->bus->number
456 | #define PciDeviceFn(d)		(d)->devfn
457 | #define PciVendorId(d) (d)->vendor |
458 | #define PciDeviceId(d) (d)->device |
459 | #define PciIrqLine(d) (d)->irq |
460 | |
461 | #if LINUX_VERSION_CODE > LinuxVersionCode(2,3,12)
462 | |
463 | static int __init |
464 | pci_get_base_address(struct pci_dev *pdev, int index, u_long *base)
465 | { |
466 | *base = pdev->resource[index].start; |
467 | if ((pdev->resource[index].flags & 0x7) == 0x4) |
468 | ++index; |
469 | return ++index; |
470 | } |
471 | #else |
472 | static int __init |
473 | pci_get_base_address(struct pci_dev *pdev, int index, u_long *base)
474 | { |
475 | *base = pdev->base_address[index++]; |
476 | if ((*base & 0x7) == 0x4) { |
477 | #if BITS_PER_LONG > 32
478 | *base |= (((u_long)pdev->base_address[index]) << 32);
479 | #endif |
480 | ++index; |
481 | } |
482 | return index; |
483 | } |
484 | #endif |
485 | |
486 | #else /* Incomplete emulation of current PCI code for pre-2.2 kernels */ |
487 | |
488 | typedef unsigned int pcidev_t; |
489 | #define PCIDEV_NULL		(~0u)
490 | #define PciBusNumber(d)		((d)>>8)
491 | #define PciDeviceFn(d)		((d)&0xff)
492 | #define __PciDev(busn, devfn)	(((busn)<<8)+(devfn))
493 | |
494 | #define pci_present pcibios_present
495 | |
496 | #define pci_read_config_byte(d, w, v) \
497 | 	pcibios_read_config_byte(PciBusNumber(d), PciDeviceFn(d), w, v)
498 | #define pci_read_config_word(d, w, v) \
499 | 	pcibios_read_config_word(PciBusNumber(d), PciDeviceFn(d), w, v)
500 | #define pci_read_config_dword(d, w, v) \
501 | 	pcibios_read_config_dword(PciBusNumber(d), PciDeviceFn(d), w, v)
502 |
503 | #define pci_write_config_byte(d, w, v) \
504 | 	pcibios_write_config_byte(PciBusNumber(d), PciDeviceFn(d), w, v)
505 | #define pci_write_config_word(d, w, v) \
506 | 	pcibios_write_config_word(PciBusNumber(d), PciDeviceFn(d), w, v)
507 | #define pci_write_config_dword(d, w, v) \
508 | 	pcibios_write_config_dword(PciBusNumber(d), PciDeviceFn(d), w, v)
509 | |
510 | static pcidev_t __init |
511 | pci_find_device(unsigned int vendor, unsigned int device, pcidev_t prev) |
512 | { |
513 | static unsigned short pci_index; |
514 | int retv; |
515 | unsigned char bus_number, device_fn; |
516 | |
517 | if (prev == PCIDEV_NULL)
518 | pci_index = 0; |
519 | else |
520 | ++pci_index; |
521 | retv = pcibios_find_device (vendor, device, pci_index, |
522 | &bus_number, &device_fn); |
523 | return retv ? PCIDEV_NULL : __PciDev(bus_number, device_fn);
524 | } |
525 | |
526 | static u_short __init PciVendorId(pcidev_t dev)
527 | {
528 | u_short vendor_id;
529 | pci_read_config_word(dev, PCI_VENDOR_ID, &vendor_id);
530 | return vendor_id; |
531 | } |
532 | |
533 | static u_short __init PciDeviceId(pcidev_t dev)
534 | {
535 | u_short device_id;
536 | pci_read_config_word(dev, PCI_DEVICE_ID, &device_id);
537 | return device_id; |
538 | } |
539 | |
540 | static u_int __init PciIrqLine(pcidev_t dev)
541 | {
542 | u_char irq;
543 | pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &irq);
544 | return irq; |
545 | } |
546 | |
547 | static int __init |
548 | pci_get_base_address(pcidev_t dev, int offset, u_long *base)
549 | { |
550 | u_int32 tmp; |
551 | |
552 | pci_read_config_dword(dev, PCI_BASE_ADDRESS_0 + offset, &tmp);
553 | *base = tmp; |
554 | offset += sizeof(u_int32); |
555 | if ((tmp & 0x7) == 0x4) { |
556 | #if BITS_PER_LONG > 32
557 | pci_read_config_dword(dev, PCI_BASE_ADDRESS_0 + offset, &tmp);
558 | *base |= (((u_long)tmp) << 32);
559 | #endif |
560 | offset += sizeof(u_int32); |
561 | } |
562 | return offset; |
563 | } |
564 | |
565 | #endif /* LINUX_VERSION_CODE >= LinuxVersionCode(2,2,0) */ |
566 | |
567 | /*========================================================== |
568 | ** |
569 | ** Debugging tags |
570 | ** |
571 | **========================================================== |
572 | */ |
573 | |
574 | #define DEBUG_ALLOC    (0x0001)
575 | #define DEBUG_PHASE    (0x0002)
576 | #define DEBUG_QUEUE    (0x0008)
577 | #define DEBUG_RESULT   (0x0010)
578 | #define DEBUG_POINTER  (0x0020)
579 | #define DEBUG_SCRIPT   (0x0040)
580 | #define DEBUG_TINY     (0x0080)
581 | #define DEBUG_TIMING   (0x0100)
582 | #define DEBUG_NEGO     (0x0200)
583 | #define DEBUG_TAGS     (0x0400)
584 | #define DEBUG_IC       (0x0800)
585 | |
586 | /* |
587 | ** Enable/Disable debug messages. |
588 | ** Can be changed at runtime too. |
589 | */ |
590 | |
591 | #ifdef SCSI_NCR_DEBUG_INFO_SUPPORT |
592 | static int ncr_debug = SCSI_NCR_DEBUG_FLAGS;
593 | #define DEBUG_FLAGS ncr_debug
594 | #else
595 | #define DEBUG_FLAGS SCSI_NCR_DEBUG_FLAGS
596 | #endif |
597 | |
598 | /* |
599 | ** SMP threading. |
600 | ** |
601 | ** Assuming that SMP systems are generally high end systems and may |
602 | ** use several SCSI adapters, we are using one lock per controller |
603 | ** instead of some global one. For the moment (linux-2.1.95), driver's |
604 | ** entry points are called with the 'io_request_lock' lock held, so: |
605 | ** - We are uselessly losing a couple of micro-seconds to lock the
606 | ** controller data structure. |
607 | ** - But the driver is not broken by design for SMP and so can be |
608 | ** more resistant to bugs or bad changes in the IO sub-system code. |
609 | ** - A small advantage could be that the interrupt code is grained as |
610 | ** wished (e.g.: threaded by controller). |
611 | */ |
612 | |
613 | #if LINUX_VERSION_CODE >= LinuxVersionCode(2,1,93)
614 | |
615 | spinlock_t sym53c8xx_lock = SPIN_LOCK_UNLOCKED; |
616 | #define NCR_LOCK_DRIVER(flags)     spin_lock_irqsave(&sym53c8xx_lock, flags)
617 | #define NCR_UNLOCK_DRIVER(flags)   spin_unlock_irqrestore(&sym53c8xx_lock,flags)
618 |
619 | #define NCR_INIT_LOCK_NCB(np)      spin_lock_init(&np->smp_lock);
620 | #define NCR_LOCK_NCB(np, flags)    spin_lock_irqsave(&np->smp_lock, flags)
621 | #define NCR_UNLOCK_NCB(np, flags)  spin_unlock_irqrestore(&np->smp_lock, flags)
622 |
623 | #define NCR_LOCK_SCSI_DONE(np, flags) \
624 | 	spin_lock_irqsave(&io_request_lock, flags)
625 | #define NCR_UNLOCK_SCSI_DONE(np, flags) \
626 | 	spin_unlock_irqrestore(&io_request_lock, flags)
627 | |
628 | #else |
629 | |
630 | #define NCR_LOCK_DRIVER(flags)     do { save_flags(flags); cli(); } while (0)
631 | #define NCR_UNLOCK_DRIVER(flags)   do { restore_flags(flags); } while (0)
632 |
633 | #define NCR_INIT_LOCK_NCB(np)      do { } while (0)
634 | #define NCR_LOCK_NCB(np, flags)    do { save_flags(flags); cli(); } while (0)
635 | #define NCR_UNLOCK_NCB(np, flags)  do { restore_flags(flags); } while (0)
636 |
637 | #define NCR_LOCK_SCSI_DONE(np, flags)    do {;} while (0)
638 | #define NCR_UNLOCK_SCSI_DONE(np, flags)  do {;} while (0)
639 | |
640 | #endif |
641 | |
642 | /* |
643 | ** Memory mapped IO |
644 | ** |
645 | ** Since linux-2.1, we must use ioremap() to map the io memory space. |
646 | ** iounmap() to unmap it. That allows portability. |
647 | ** Linux 1.3.X and 2.0.X allow remapping physical page addresses greater
648 | ** than the highest physical memory address to kernel virtual pages with |
649 | ** vremap() / vfree(). That was not portable but worked with i386 |
650 | ** architecture. |
651 | */ |
652 | |
653 | #if LINUX_VERSION_CODE < LinuxVersionCode(2,1,0)
654 | #define ioremap vremap
655 | #define iounmap vfree
656 | #endif |
657 | |
658 | #ifdef __sparc__ |
659 | # include <asm/irq.h> |
660 | # if LINUX_VERSION_CODE < LinuxVersionCode(2,3,0)
661 | /* ioremap/iounmap broken in 2.2.x on Sparc. -DaveM */
662 | # define ioremap(base, size)	((u_long) __va(base))
663 | # define iounmap(vaddr)
664 | # endif
665 | # define pcivtobus(p)			bus_dvma_to_mem(p)
666 | # define memcpy_to_pci(a, b, c)	memcpy_toio((void *)(a), (const void *)(b), (c))
667 | #elif defined(__alpha__)
668 | # define pcivtobus(p)			((p) & 0xfffffffful)
669 | # define memcpy_to_pci(a, b, c)	memcpy_toio((a), (b), (c))
670 | #else /* others */
671 | # define pcivtobus(p)			(p)
672 | # define memcpy_to_pci(a, b, c)	memcpy_toio((a), (b), (c))
673 | #endif |
674 | |
675 | #ifndef SCSI_NCR_PCI_MEM_NOT_SUPPORTED |
676 | static u_long __init remap_pci_mem(u_long base, u_long size)
677 | {
678 | u_long page_base	= ((u_long) base) & PAGE_MASK;
679 | u_long page_offs	= ((u_long) base) - page_base;
680 | u_long page_remapped	= (u_long) ioremap(page_base, page_offs+size);
681 | |
682 | return page_remapped? (page_remapped + page_offs) : 0UL; |
683 | } |
684 | |
685 | static void __init unmap_pci_mem(u_long vaddr, u_long size)
686 | { |
687 | if (vaddr) |
688 | iounmap((void *) (vaddr & PAGE_MASK));
689 | } |
690 | |
691 | #endif /* not def SCSI_NCR_PCI_MEM_NOT_SUPPORTED */ |
692 | |
693 | /* |
694 | ** Insert a delay in micro-seconds and milli-seconds. |
695 | ** ------------------------------------------------- |
696 | ** Under Linux, udelay() is restricted to delay < 1 milli-second. |
697 | ** In fact, it generally works for up to 1 second delay. |
698 | ** Since 2.1.105, the mdelay() function is provided for delays |
699 | ** in milli-seconds. |
700 | ** Under 2.0 kernels, udelay() is an inline function that is very |
701 | ** inaccurate on Pentium processors. |
702 | */ |
703 | |
704 | #if LINUX_VERSION_CODE >= LinuxVersionCode(2,1,105)
705 | #define UDELAY udelay
706 | #define MDELAY mdelay
707 | #else
708 | static void UDELAY(long us) { udelay(us); }
709 | static void MDELAY(long ms) { while (ms--) UDELAY(1000); } |
710 | #endif |
711 | |
712 | /* |
713 | ** Simple power of two buddy-like allocator |
714 | ** ---------------------------------------- |
715 | ** This simple code is not intended to be fast, but to provide |
716 | ** power of 2 aligned memory allocations. |
717 | ** Since the SCRIPTS processor only supplies 8 bit arithmetic, |
718 | ** this allocator allows simple and fast address calculations |
719 | ** from the SCRIPTS code. In addition, cache line alignment |
720 | ** is guaranteed for power of 2 cache line size. |
721 | ** Enhanced in linux-2.3.44 to provide a memory pool per pcidev |
722 | ** to support dynamic dma mapping. (I would have preferred a |
723 | ** real bus abstraction, btw).
724 | */ |
725 | |
726 | #if LINUX_VERSION_CODE >= LinuxVersionCode(2,1,0)
727 | #define __GetFreePages(flags, order) __get_free_pages(flags, order)
728 | #else
729 | #define __GetFreePages(flags, order) __get_free_pages(flags, order, 0)
730 | #endif |
731 | |
732 | #define MEMO_SHIFT	4	/* 16 bytes minimum memory chunk */
733 | #if PAGE_SIZE >= 8192
734 | #define MEMO_PAGE_ORDER	0	/* 1 PAGE maximum */
735 | #else
736 | #define MEMO_PAGE_ORDER	1	/* 2 PAGES maximum */
737 | #endif
738 | #define MEMO_FREE_UNUSED	/* Free unused pages immediately */
739 | #define MEMO_WARN	1
740 | #define MEMO_GFP_FLAGS	GFP_ATOMIC
741 | #define MEMO_CLUSTER_SHIFT	(PAGE_SHIFT+MEMO_PAGE_ORDER)
742 | #define MEMO_CLUSTER_SIZE	(1UL << MEMO_CLUSTER_SHIFT)
743 | #define MEMO_CLUSTER_MASK	(MEMO_CLUSTER_SIZE-1)
744 | |
745 | typedef u_long m_addr_t;	/* Enough bits to bit-hack addresses */
746 | typedef pcidev_t m_bush_t; /* Something that addresses DMAable */ |
747 | |
748 | typedef struct m_link { /* Link between free memory chunks */ |
749 | struct m_link *next; |
750 | } m_link_s; |
751 | |
752 | #ifdef SCSI_NCR_DYNAMIC_DMA_MAPPING |
753 | typedef struct m_vtob { /* Virtual to Bus address translation */ |
754 | struct m_vtob *next; |
755 | m_addr_t vaddr; |
756 | m_addr_t baddr; |
757 | } m_vtob_s; |
758 | #define VTOB_HASH_SHIFT 5 |
759 | #define VTOB_HASH_SIZE (1UL << VTOB_HASH_SHIFT) |
760 | #define VTOB_HASH_MASK (VTOB_HASH_SIZE-1) |
761 | #define VTOB_HASH_CODE(m) \ |
762 | ((((m_addr_t) (m)) >> MEMO_CLUSTER_SHIFT) & VTOB_HASH_MASK)
763 | #endif |
764 | |
765 | typedef struct m_pool { /* Memory pool of a given kind */ |
766 | #ifdef SCSI_NCR_DYNAMIC_DMA_MAPPING |
767 | m_bush_t bush; |
768 | m_addr_t (*getp)(struct m_pool *); |
769 | void (*freep)(struct m_pool *, m_addr_t); |
770 | #define M_GETP()	mp->getp(mp)
771 | #define M_FREEP(p)	mp->freep(mp, p)
772 | #define GetPages()	__GetFreePages(MEMO_GFP_FLAGS, MEMO_PAGE_ORDER)
773 | #define FreePages(p)	free_pages(p, MEMO_PAGE_ORDER)
774 | int nump; |
775 | m_vtob_s *(vtob[VTOB_HASH_SIZE]); |
776 | struct m_pool *next; |
777 | #else |
778 | #define M_GETP()	__GetFreePages(MEMO_GFP_FLAGS, MEMO_PAGE_ORDER)
779 | #define M_FREEP(p)	free_pages(p, MEMO_PAGE_ORDER)
780 | #endif /* SCSI_NCR_DYNAMIC_DMA_MAPPING */
781 | struct m_link h[PAGE_SHIFT-MEMO_SHIFT+MEMO_PAGE_ORDER+1];
782 | } m_pool_s; |
783 | |
784 | static void *___m_alloc(m_pool_s *mp, int size) |
785 | { |
786 | int i = 0; |
787 | int s = (1 << MEMO_SHIFT);
788 | int j; |
789 | m_addr_t a; |
790 | m_link_s *h = mp->h; |
791 | |
792 | if (size > (PAGE_SIZE << MEMO_PAGE_ORDER))
793 | return 0; |
794 | |
795 | while (size > s) { |
796 | s <<= 1; |
797 | ++i; |
798 | } |
799 | |
800 | j = i; |
801 | while (!h[j].next) { |
802 | if (s == (PAGE_SIZE << MEMO_PAGE_ORDER)) {
803 | h[j].next = (m_link_s *) M_GETP();
804 | if (h[j].next) |
805 | h[j].next->next = 0; |
806 | break; |
807 | } |
808 | ++j; |
809 | s <<= 1; |
810 | } |
811 | a = (m_addr_t) h[j].next; |
812 | if (a) { |
813 | h[j].next = h[j].next->next; |
814 | while (j > i) { |
815 | j -= 1; |
816 | s >>= 1; |
817 | h[j].next = (m_link_s *) (a+s); |
818 | h[j].next->next = 0; |
819 | } |
820 | } |
821 | #ifdef DEBUG |
822 | printk("___m_alloc(%d) = %p\n", size, (void *) a); |
823 | #endif |
824 | return (void *) a; |
825 | } |
826 | |
827 | static void ___m_free(m_pool_s *mp, void *ptr, int size) |
828 | { |
829 | int i = 0; |
830 | int s = (1 << MEMO_SHIFT);
831 | m_link_s *q; |
832 | m_addr_t a, b; |
833 | m_link_s *h = mp->h; |
834 | |
835 | #ifdef DEBUG |
836 | printk("___m_free(%p, %d)\n", ptr, size); |
837 | #endif |
838 | |
839 | if (size > (PAGE_SIZE << MEMO_PAGE_ORDER))
840 | return; |
841 | |
842 | while (size > s) { |
843 | s <<= 1; |
844 | ++i; |
845 | } |
846 | |
847 | a = (m_addr_t) ptr; |
848 | |
849 | while (1) { |
850 | #ifdef MEMO_FREE_UNUSED |
851 | if (s == (PAGE_SIZE << MEMO_PAGE_ORDER)) {
852 | M_FREEP(a);
853 | break; |
854 | } |
855 | #endif |
856 | b = a ^ s; |
857 | q = &h[i]; |
858 | while (q->next && q->next != (m_link_s *) b) { |
859 | q = q->next; |
860 | } |
861 | if (!q->next) { |
862 | ((m_link_s *) a)->next = h[i].next; |
863 | h[i].next = (m_link_s *) a; |
864 | break; |
865 | } |
866 | q->next = q->next->next; |
867 | a = a & b; |
868 | s <<= 1; |
869 | ++i; |
870 | } |
871 | } |
872 | |
873 | static void *__m_calloc2(m_pool_s *mp, int size, char *name, int uflags) |
874 | { |
875 | void *p; |
876 | |
877 | p = ___m_alloc(mp, size); |
878 | |
879 | if (DEBUG_FLAGS & DEBUG_ALLOC)
880 | printk ("new %-10s[%4d] @%p.\n", name, size, p); |
881 | |
882 | if (p) |
883 | bzero(p, size);
884 | else if (uflags & MEMO_WARN)
885 | printk (NAME53C8XX ": failed to allocate %s[%d]\n", name, size);
886 | |
887 | return p; |
888 | } |
889 | |
890 | #define __m_calloc(mp, s, n)	__m_calloc2(mp, s, n, MEMO_WARN)
891 | |
892 | static void __m_free(m_pool_s *mp, void *ptr, int size, char *name) |
893 | { |
894 | if (DEBUG_FLAGS & DEBUG_ALLOC)
895 | printk ("freeing %-10s[%4d] @%p.\n", name, size, ptr); |
896 | |
897 | ___m_free(mp, ptr, size); |
898 | |
899 | } |
900 | |
901 | /* |
902 | * With pci bus iommu support, we use a default pool of unmapped memory |
903 | * for memory we do not need to DMA from/to and one pool per pcidev for
904 | * memory accessed by the PCI chip. `mp0' is the default not DMAable pool. |
905 | */ |
906 | |
907 | #ifndef SCSI_NCR_DYNAMIC_DMA_MAPPING |
908 | |
909 | static m_pool_s mp0; |
910 | |
911 | #else |
912 | |
913 | static m_addr_t ___mp0_getp(m_pool_s *mp) |
914 | { |
915 | m_addr_t m = GetPages(); |
916 | if (m) |
917 | ++mp->nump; |
918 | return m; |
919 | } |
920 | |
921 | static void ___mp0_freep(m_pool_s *mp, m_addr_t m) |
922 | { |
923 | FreePages(m); |
924 | --mp->nump; |
925 | } |
926 | |
927 | static m_pool_s mp0 = {0, ___mp0_getp, ___mp0_freep}; |
928 | |
929 | #endif /* SCSI_NCR_DYNAMIC_DMA_MAPPING */ |
930 | |
931 | static void *m_calloc(int size, char *name) |
932 | { |
933 | u_long flags;
934 | void *m;
935 | NCR_LOCK_DRIVER(flags);
936 | m = __m_calloc(&mp0, size, name);
937 | NCR_UNLOCK_DRIVER(flags);
938 | return m; |
939 | } |
940 | |
941 | static void m_free(void *ptr, int size, char *name) |
942 | { |
943 | u_long flags;
944 | NCR_LOCK_DRIVER(flags);
945 | __m_free(&mp0, ptr, size, name);
946 | NCR_UNLOCK_DRIVER(flags);
947 | } |
948 | |
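Note that the allocator keeps no per-block header: the caller must hand the original size back to m_free(), and each allocation is aligned to its power-of-two rounded size and returned zero-filled. A purely illustrative use (not code from this driver):

    void *p = m_calloc(200, "EXAMPLE"); /* rounded up to a 256-byte, 256-byte aligned chunk */
    if (p) {
        /* ... use the zero-filled buffer ... */
        m_free(p, 200, "EXAMPLE");      /* the same size must be passed back */
    }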
949 | /* |
950 | * DMAable pools. |
951 | */ |
952 | |
953 | #ifndef SCSI_NCR_DYNAMIC_DMA_MAPPING |
954 | |
955 | /* Without pci bus iommu support, all the memory is assumed DMAable */ |
956 | |
957 | #define __m_calloc_dma(b, s, n)		m_calloc(s, n)
958 | #define __m_free_dma(b, p, s, n)	m_free(p, s, n)
959 | #define __vtobus(b, p)			virt_to_bus(p)
960 | |
961 | #else |
962 | |
963 | /* |
964 | * With pci bus iommu support, we maintain one pool per pcidev and a |
965 | * hashed reverse table for virtual to bus physical address translations. |
966 | */ |
967 | static m_addr_t ___dma_getp(m_pool_s *mp) |
968 | { |
969 | m_addr_t vp; |
970 | m_vtob_s *vbp; |
971 | |
972 | vbp = __m_calloc(&mp0, sizeof(*vbp), "VTOB");
973 | if (vbp) { |
974 | dma_addr_t daddr; |
975 | vp = (m_addr_t) pci_alloc_consistent(mp->bush, |
976 | PAGE_SIZE<<MEMO_PAGE_ORDER,
977 | &daddr); |
978 | if (vp) { |
979 | int hc = VTOB_HASH_CODE(vp); |
980 | vbp->vaddr = vp; |
981 | vbp->baddr = daddr; |
982 | vbp->next = mp->vtob[hc]; |
983 | mp->vtob[hc] = vbp; |
984 | ++mp->nump; |
985 | return vp; |
986 | } |
987 | else |
988 | __m_free(&mp0, vbp, sizeof(*vbp), "VTOB"); |
989 | } |
990 | return 0; |
991 | } |
992 | |
993 | static void ___dma_freep(m_pool_s *mp, m_addr_t m) |
994 | { |
995 | m_vtob_s **vbpp, *vbp; |
996 | int hc = VTOB_HASH_CODE(m); |
997 | |
998 | vbpp = &mp->vtob[hc]; |
999 | while (*vbpp && (*vbpp)->vaddr != m) |
1000 | vbpp = &(*vbpp)->next; |
1001 | if (*vbpp) { |
1002 | vbp = *vbpp; |
1003 | *vbpp = (*vbpp)->next; |
1004 | pci_free_consistent(mp->bush, PAGE_SIZE<<MEMO_PAGE_ORDER,
1005 | (void *)vbp->vaddr, (dma_addr_t)vbp->baddr); |
1006 | __m_free(&mp0, vbp, sizeof(*vbp), "VTOB"); |
1007 | --mp->nump; |
1008 | } |
1009 | } |
1010 | |
1011 | static inline m_pool_s *___get_dma_pool(m_bush_t bush)
1012 | { |
1013 | m_pool_s *mp; |
1014 | for (mp = mp0.next; mp && mp->bush != bush; mp = mp->next); |
1015 | return mp; |
1016 | } |
1017 | |
1018 | static m_pool_s *___cre_dma_pool(m_bush_t bush) |
1019 | { |
1020 | m_pool_s *mp; |
1021 | mp = __m_calloc(&mp0, sizeof(*mp), "MPOOL");
1022 | if (mp) { |
1023 | bzero(mp, sizeof(*mp));
1024 | mp->bush = bush; |
1025 | mp->getp = ___dma_getp; |
1026 | mp->freep = ___dma_freep; |
1027 | mp->next = mp0.next; |
1028 | mp0.next = mp; |
1029 | } |
1030 | return mp; |
1031 | } |
1032 | |
1033 | static void ___del_dma_pool(m_pool_s *p) |
1034 | { |
1035 | struct m_pool **pp = &mp0.next; |
1036 | |
1037 | while (*pp && *pp != p) |
1038 | pp = &(*pp)->next; |
1039 | if (*pp) { |
1040 | *pp = (*pp)->next; |
1041 | __m_free(&mp0, p, sizeof(*p), "MPOOL"); |
1042 | } |
1043 | } |
1044 | |
1045 | static void *__m_calloc_dma(m_bush_t bush, int size, char *name)
1046 | {
1047 | u_long flags;
1048 | struct m_pool *mp;
1049 | void *m = 0;
1050 |
1051 | NCR_LOCK_DRIVER(flags);
1052 | mp = ___get_dma_pool(bush); |
1053 | if (!mp) |
1054 | mp = ___cre_dma_pool(bush); |
1055 | if (mp) |
1056 | m = __m_calloc(mp, size, name);
1057 | if (mp && !mp->nump) |
1058 | ___del_dma_pool(mp); |
1059 | NCR_UNLOCK_DRIVER(flags);
1060 | |
1061 | return m; |
1062 | } |
1063 | |
1064 | static void __m_free_dma(m_bush_t bush, void *m, int size, char *name)
1065 | {
1066 | u_long flags;
1067 | struct m_pool *mp; |
1068 | |
1069 | NCR_LOCK_DRIVER(flags);
1070 | mp = ___get_dma_pool(bush); |
1071 | if (mp) |
1072 | __m_free(mp, m, size, name); |
1073 | if (mp && !mp->nump) |
1074 | ___del_dma_pool(mp); |
1075 | NCR_UNLOCK_DRIVER(flags);
1076 | } |
1077 | |
1078 | static m_addr_t __vtobus(m_bush_t bush, void *m)
1079 | {
1080 | u_long flags;
1081 | m_pool_s *mp;
1082 | int hc = VTOB_HASH_CODE(m);
1083 | m_vtob_s *vp = 0;
1084 | m_addr_t a = ((m_addr_t) m) & ~MEMO_CLUSTER_MASK;
1085 | |
1086 | NCR_LOCK_DRIVER(flags);
1087 | mp = ___get_dma_pool(bush); |
1088 | if (mp) { |
1089 | vp = mp->vtob[hc]; |
1090 | while (vp && (m_addr_t) vp->vaddr != a) |
1091 | vp = vp->next; |
1092 | } |
1093 | NCR_UNLOCK_DRIVER(flags);
1094 | return vp ? vp->baddr + (((m_addr_t) m) - a) : 0; |
1095 | } |
1096 | |
1097 | #endif /* SCSI_NCR_DYNAMIC_DMA_MAPPING */ |
1098 | |
1099 | #define _m_calloc_dma(np, s, n)		__m_calloc_dma(np->pdev, s, n)
1100 | #define _m_free_dma(np, p, s, n)	__m_free_dma(np->pdev, p, s, n)
1101 | #define m_calloc_dma(s, n)		_m_calloc_dma(np, s, n)
1102 | #define m_free_dma(p, s, n)		_m_free_dma(np, p, s, n)
1103 | #define _vtobus(np, p)			__vtobus(np->pdev, p)
1104 | #define vtobus(p)			_vtobus(np, p)
1105 | |
1106 | /* |
1107 | * Deal with DMA mapping/unmapping. |
1108 | */ |
1109 | |
1110 | #ifndef SCSI_NCR_DYNAMIC_DMA_MAPPING |
1111 | |
1112 | /* Linux versions prior to pci bus iommu kernel interface */ |
1113 | |
1114 | #define __unmap_scsi_data(pdev, cmd)	do {; } while (0)
1115 | #define __map_scsi_single_data(pdev, cmd) (__vtobus(pdev,(cmd)->request_buffer))
1116 | #define __map_scsi_sg_data(pdev, cmd)	((cmd)->use_sg)
1117 | #define __sync_scsi_data(pdev, cmd)	do {; } while (0)
1118 |
1119 | #define scsi_sg_dma_address(sc)		vtobus((sc)->address)
1120 | #define scsi_sg_dma_len(sc)		((sc)->length)
1121 | |
1122 | #else |
1123 | |
1124 | /* Linux version with pci bus iommu kernel interface */ |
1125 | |
1126 | /* To keep track of the dma mapping (sg/single) that has been set */ |
1127 | #define __data_mapped SCp.phase |
1128 | #define __data_mapping SCp.have_data_in |
1129 | |
1130 | static void __unmap_scsi_data(pcidev_t pdev, Scsi_Cmnd *cmd)
1131 | { |
1132 | int dma_dir = scsi_to_pci_dma_dir(cmd->sc_data_direction); |
1133 | |
1134 | switch(cmd->__data_mapped) { |
1135 | case 2: |
1136 | pci_unmap_sg(pdev, cmd->buffer, cmd->use_sg, dma_dir); |
1137 | break; |
1138 | case 1: |
1139 | pci_unmap_single(pdev, cmd->__data_mapping, |
1140 | cmd->request_bufflen, dma_dir); |
1141 | break; |
1142 | } |
1143 | cmd->__data_mapped = 0; |
1144 | } |
1145 | |
1146 | static u_long __map_scsi_single_data(pcidev_t pdev, Scsi_Cmnd *cmd)
1147 | { |
1148 | dma_addr_t mapping; |
1149 | int dma_dir = scsi_to_pci_dma_dir(cmd->sc_data_direction); |
1150 | |
1151 | if (cmd->request_bufflen == 0) |
1152 | return 0; |
1153 | |
1154 | mapping = pci_map_single(pdev, cmd->request_buffer, |
1155 | cmd->request_bufflen, dma_dir); |
1156 | cmd->__data_mapped = 1; |
1157 | cmd->__data_mapping = mapping; |
1158 | |
1159 | return mapping; |
1160 | } |
1161 | |
1162 | static int __map_scsi_sg_data(pcidev_t pdev, Scsi_Cmnd *cmd)
1163 | { |
1164 | int use_sg; |
1165 | int dma_dir = scsi_to_pci_dma_dir(cmd->sc_data_direction); |
1166 | |
1167 | if (cmd->use_sg == 0) |
1168 | return 0; |
1169 | |
1170 | use_sg = pci_map_sg(pdev, cmd->buffer, cmd->use_sg, dma_dir); |
1171 | cmd->__data_mapped = 2; |
1172 | cmd->__data_mapping = use_sg; |
1173 | |
1174 | return use_sg; |
1175 | } |
1176 | |
1177 | static void __sync_scsi_data(pcidev_t pdev, Scsi_Cmnd *cmd)
1178 | { |
1179 | int dma_dir = scsi_to_pci_dma_dir(cmd->sc_data_direction); |
1180 | |
1181 | switch(cmd->__data_mapped) { |
1182 | case 2: |
1183 | pci_dma_sync_sg(pdev, cmd->buffer, cmd->use_sg, dma_dir); |
1184 | break; |
1185 | case 1: |
1186 | pci_dma_sync_single(pdev, cmd->__data_mapping, |
1187 | cmd->request_bufflen, dma_dir); |
1188 | break; |
1189 | } |
1190 | } |
1191 | |
1192 | #define scsi_sg_dma_address(sc)		sg_dma_address(sc)
1193 | #define scsi_sg_dma_len(sc)		sg_dma_len(sc)
1194 | |
1195 | #endif /* SCSI_NCR_DYNAMIC_DMA_MAPPING */ |
1196 | |
1197 | #define unmap_scsi_data(np, cmd)	__unmap_scsi_data(np->pdev, cmd)
1198 | #define map_scsi_single_data(np, cmd)	__map_scsi_single_data(np->pdev, cmd)
1199 | #define map_scsi_sg_data(np, cmd)	__map_scsi_sg_data(np->pdev, cmd)
1200 | #define sync_scsi_data(np, cmd)		__sync_scsi_data(np->pdev, cmd)
1201 | |
1202 | |
1203 | /* |
1204 | * Print out some buffer. |
1205 | */ |
1206 | static void ncr_print_hex(u_char *p, int n)
1207 | { |
1208 | while (n-- > 0) |
1209 | printk (" %x", *p++); |
1210 | } |
1211 | |
1212 | static void ncr_printl_hex(char *label, u_char *p, int n)
1213 | { |
1214 | printk("%s", label); |
1215 | ncr_print_hex(p, n); |
1216 | printk (".\n"); |
1217 | } |
1218 | |
1219 | /* |
1220 | ** Transfer direction |
1221 | ** |
1222 | ** Until some linux kernel version near 2.3.40, low-level scsi |
1223 | ** drivers were not told about data transfer direction. |
1224 | ** We check the existence of this feature that has been expected |
1225 | ** for a _long_ time by all SCSI driver developers by just |
1226 | ** testing against the definition of SCSI_DATA_UNKNOWN. Indeed |
1227 | ** this is a hack, but testing against a kernel version would |
1228 | ** have been a shame. ;-) |
1229 | */ |
1230 | #ifdef SCSI_DATA_UNKNOWN
1231 | |
1232 | #define scsi_data_direction(cmd) (cmd->sc_data_direction) |
1233 | |
1234 | #else |
1235 | |
1236 | #define SCSI_DATA_UNKNOWN	0
1237 | #define SCSI_DATA_WRITE		1
1238 | #define SCSI_DATA_READ		2
1239 | #define SCSI_DATA_NONE		3
1240 |
1241 | static __inline__ int scsi_data_direction(Scsi_Cmnd *cmd)
1242 | { |
1243 | int direction; |
1244 | |
1245 | switch((int) cmd->cmnd[0]) { |
1246 | case 0x08: /* READ(6) 08 */ |
1247 | case 0x28: /* READ(10) 28 */ |
1248 | case 0xA8: /* READ(12) A8 */ |
1249 | direction = SCSI_DATA_READ;
1250 | break; |
1251 | case 0x0A: /* WRITE(6) 0A */ |
1252 | case 0x2A: /* WRITE(10) 2A */ |
1253 | case 0xAA: /* WRITE(12) AA */ |
1254 | direction = SCSI_DATA_WRITE;
1255 | break; |
1256 | default: |
1257 | direction = SCSI_DATA_UNKNOWN;
1258 | break; |
1259 | } |
1260 | |
1261 | return direction; |
1262 | } |
1263 | |
1264 | #endif /* SCSI_DATA_UNKNOWN */ |
1265 | |
1266 | /* |
1267 | ** Head of list of NCR boards |
1268 | ** |
1269 | ** For kernel version < 1.3.70, host is retrieved by its irq level. |
1270 | ** For later kernels, the internal host control block address |
1271 | ** (struct ncb) is used as device id parameter of the irq stuff. |
1272 | */ |
1273 | |
1274 | static struct Scsi_Host *first_host = NULL;
1275 | |
1276 | |
1277 | /* |
1278 | ** /proc directory entry and proc_info function |
1279 | */ |
1280 | #ifdef SCSI_NCR_PROC_INFO_SUPPORT |
1281 | #if LINUX_VERSION_CODE < LinuxVersionCode(2,3,27)
1282 | static struct proc_dir_entry proc_scsi_sym53c8xx = {
1283 |     PROC_SCSI_SYM53C8XX, 9, NAME53C8XX,
1284 |     S_IFDIR | S_IRUGO | S_IXUGO, 2
1285 | }; |
1286 | #endif |
1287 | static int sym53c8xx_proc_info(char *buffer, char **start, off_t offset, |
1288 | int length, int hostno, int func); |
1289 | #endif |
1290 | |
1291 | /* |
1292 | ** Driver setup. |
1293 | ** |
1294 | ** This structure is initialized from linux config options. |
1295 | ** It can be overridden at boot-up by the boot command line. |
1296 | */ |
1297 | static struct ncr_driver_setup |
1298 | driver_setup			= SCSI_NCR_DRIVER_SETUP;
1299 | |
1300 | #ifdef SCSI_NCR_BOOT_COMMAND_LINE_SUPPORT |
1301 | static struct ncr_driver_setup |
1302 | driver_safe_setup __initdata	= SCSI_NCR_DRIVER_SAFE_SETUP;
1303 | # ifdef MODULE |
1304 | char *sym53c8xx = 0; /* command line passed by insmod */ |
1305 | # if LINUX_VERSION_CODE >= LinuxVersionCode(2,1,30)
1306 | MODULE_PARM(sym53c8xx, "s"); |
1307 | # endif |
1308 | # endif |
1309 | #endif |
1310 | |
1311 | /* |
1312 | ** Other Linux definitions |
1313 | */ |
1314 | #define SetScsiResult(cmd, h_sts, s_sts) \
1315 | cmd->result = (((h_sts) << 16) + ((s_sts) & 0x7f)) |
1316 | |
1317 | /* We may have to remind our amnesiac SCSI layer of the reason of the abort */ |
1318 | #if 0 |
1319 | #define SetScsiAbortResult(cmd)	\
1320 | 		SetScsiResult( \
1321 | 			cmd, \
1322 | 			(cmd)->abort_reason == DID_TIME_OUT ? DID_TIME_OUT : DID_ABORT, \
1323 | 			0xff)
1324 | #else
1325 | #define SetScsiAbortResult(cmd)		SetScsiResult(cmd, DID_ABORT, 0xff)
1326 | #endif |
1327 | |
1328 | static void sym53c8xx_select_queue_depths( |
1329 | struct Scsi_Host *host, struct scsi_device *devlist); |
1330 | static void sym53c8xx_intr(int irq, void *dev_id, struct pt_regs * regs); |
1331 | static void sym53c8xx_timeout(unsigned long np); |
1332 | |
1333 | #define initverbose (driver_setup.verbose)
1334 | #define bootverbose (np->verbose)
1335 | |
1336 | #ifdef SCSI_NCR_NVRAM_SUPPORT |
1337 | static u_char Tekram_sync[16] __initdata =
1338 | {25,31,37,43, 50,62,75,125, 12,15,18,21, 6,7,9,10}; |
1339 | #endif /* SCSI_NCR_NVRAM_SUPPORT */ |
1340 | |
1341 | /* |
1342 | ** Structures used by sym53c8xx_detect/sym53c8xx_pci_init to |
1343 | ** transmit device configuration to the ncr_attach() function. |
1344 | */ |
1345 | typedef struct { |
1346 | int bus; |
1347 | u_char	device_fn;
1348 | u_long	base;
1349 | u_long	base_2;
1350 | u_long	io_port;
1351 | int	irq;
1352 | /* port and reg fields to use INB, OUTB macros */
1353 | u_long	base_io;
1354 | volatile struct ncr_reg *reg; |
1355 | } ncr_slot; |
1356 | |
1357 | typedef struct { |
1358 | int type; |
1359 | #define SCSI_NCR_SYMBIOS_NVRAM	(1)
1360 | #define SCSI_NCR_TEKRAM_NVRAM	(2)
1361 | #ifdef SCSI_NCR_NVRAM_SUPPORT |
1362 | union { |
1363 | Symbios_nvram Symbios; |
1364 | Tekram_nvram Tekram; |
1365 | } data; |
1366 | #endif |
1367 | } ncr_nvram; |
1368 | |
1369 | /* |
1370 | ** Structure used by sym53c8xx_detect/sym53c8xx_pci_init |
1371 | ** to save data on each detected board for ncr_attach(). |
1372 | */ |
1373 | typedef struct { |
1374 | pcidev_t pdev; |
1375 | ncr_slot slot; |
1376 | ncr_chip chip; |
1377 | ncr_nvram *nvram; |
1378 | u_char host_id;
1379 | #ifdef SCSI_NCR_PQS_PDS_SUPPORT |
1380 | u_char pqs_pds;
1381 | #endif |
1382 | int attach_done; |
1383 | } ncr_device; |
1384 | |
1385 | /*========================================================== |
1386 | ** |
1387 | ** assert () |
1388 | ** |
1389 | **========================================================== |
1390 | ** |
1391 | ** modified copy from 386bsd:/usr/include/sys/assert.h |
1392 | ** |
1393 | **---------------------------------------------------------- |
1394 | */ |
1395 | |
1396 | #define assert(expression) { \
1397 | if (!(expression)) { \ |
1398 | (void)panic( \ |
1399 | "assertion \"%s\" failed: file \"%s\", line %d\n", \ |
1400 | #expression, \ |
1401 | __FILE__, __LINE__); \
1402 | } \ |
1403 | } |
1404 | |
1405 | /*========================================================== |
1406 | ** |
1407 | ** Command control block states. |
1408 | ** |
1409 | **========================================================== |
1410 | */ |
1411 | |
1412 | #define HS_IDLE (0)
1413 | #define HS_BUSY (1)
1414 | #define HS_NEGOTIATE (2) /* sync/wide data transfer*/
1415 | #define HS_DISCONNECT (3) /* Disconnected by target */
1416 | 
1417 | #define HS_DONEMASK (0x80)
1418 | #define HS_COMPLETE (4|HS_DONEMASK)
1419 | #define HS_SEL_TIMEOUT (5|HS_DONEMASK) /* Selection timeout */
1420 | #define HS_RESET (6|HS_DONEMASK) /* SCSI reset */
1421 | #define HS_ABORTED (7|HS_DONEMASK) /* Transfer aborted */
1422 | #define HS_TIMEOUT (8|HS_DONEMASK) /* Software timeout */
1423 | #define HS_FAIL (9|HS_DONEMASK) /* SCSI or PCI bus errors */
1424 | #define HS_UNEXPECTED (10|HS_DONEMASK)/* Unexpected disconnect */
1425 | |
1426 | #define DSA_INVALID 0xffffffff
1427 | |
1428 | /*========================================================== |
1429 | ** |
1430 | ** Software Interrupt Codes |
1431 | ** |
1432 | **========================================================== |
1433 | */ |
1434 | |
1435 | #define SIR_BAD_STATUS (1)
1436 | #define SIR_SEL_ATN_NO_MSG_OUT (2)
1437 | #define SIR_MSG_RECEIVED (3)
1438 | #define SIR_MSG_WEIRD (4)
1439 | #define SIR_NEGO_FAILED (5)
1440 | #define SIR_NEGO_PROTO (6)
1441 | #define SIR_SCRIPT_STOPPED (7)
1442 | #define SIR_REJECT_TO_SEND (8)
1443 | #define SIR_SWIDE_OVERRUN (9)
1444 | #define SIR_SODL_UNDERRUN (10)
1445 | #define SIR_RESEL_NO_MSG_IN (11)
1446 | #define SIR_RESEL_NO_IDENTIFY (12)
1447 | #define SIR_RESEL_BAD_LUN (13)
1448 | #define SIR_TARGET_SELECTED (14)
1449 | #define SIR_RESEL_BAD_I_T_L (15)
1450 | #define SIR_RESEL_BAD_I_T_L_Q (16)
1451 | #define SIR_ABORT_SENT (17)
1452 | #define SIR_RESEL_ABORTED (18)
1453 | #define SIR_MSG_OUT_DONE (19)
1454 | #define SIR_AUTO_SENSE_DONE (20)
1455 | #define SIR_DUMMY_INTERRUPT (21)
1456 | #define SIR_DATA_OVERRUN (22)
1457 | #define SIR_BAD_PHASE (23)
1458 | #define SIR_MAX (23)
1459 | |
1460 | /*========================================================== |
1461 | ** |
1462 | ** Extended error bits. |
1463 | ** xerr_status field of struct ccb. |
1464 | ** |
1465 | **========================================================== |
1466 | */ |
1467 | |
1468 | #define XE_EXTRA_DATA (1) /* unexpected data phase */
1469 | #define XE_BAD_PHASE (2) /* illegal phase (4/5) */
1470 | #define XE_PARITY_ERR (4) /* unrecovered SCSI parity error */
1471 | #define XE_SODL_UNRUN (1<<3)
1472 | #define XE_SWIDE_OVRUN (1<<4)
1473 | |
1474 | /*========================================================== |
1475 | ** |
1476 | ** Negotiation status. |
1477 | ** nego_status field of struct ccb. |
1478 | ** |
1479 | **========================================================== |
1480 | */ |
1481 | |
1482 | #define NS_NOCHANGE (0)
1483 | #define NS_SYNC (1)
1484 | #define NS_WIDE (2)
1485 | #define NS_PPR (4)
1486 | |
1487 | /*========================================================== |
1488 | ** |
1489 | ** "Special features" of targets. |
1490 | ** quirks field of struct tcb. |
1491 | ** actualquirks field of struct ccb. |
1492 | ** |
1493 | **========================================================== |
1494 | */ |
1495 | |
1496 | #define QUIRK_AUTOSAVE (0x01)
1497 | |
1498 | /*========================================================== |
1499 | ** |
1500 | ** Capability bits in Inquire response byte 7. |
1501 | ** |
1502 | **========================================================== |
1503 | */ |
1504 | |
1505 | #define INQ7_QUEUE (0x02)
1506 | #define INQ7_SYNC (0x10)
1507 | #define INQ7_WIDE16 (0x20)
1508 | |
1509 | /*========================================================== |
1510 | ** |
1511 | ** A CCB hashed table is used to retrieve CCB address |
1512 | ** from DSA value. |
1513 | ** |
1514 | **========================================================== |
1515 | */ |
1516 | |
1517 | #define CCB_HASH_SHIFT 8
1518 | #define CCB_HASH_SIZE (1UL << CCB_HASH_SHIFT)
1519 | #define CCB_HASH_MASK (CCB_HASH_SIZE-1)
1520 | #define CCB_HASH_CODE(dsa) (((dsa) >> 11) & CCB_HASH_MASK)
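     | /*
     | ** Rough sketch of the intended lookup (the driver's actual code is in
     | ** ncr_ccb_from_dsa() and may differ in detail): the DSA bus address
     | ** selects one of the 256 buckets and the bucket's chain is walked
     | ** until the CCB owning that DSA is found, e.g.:
     | **
     | **	cp = np->ccbh[CCB_HASH_CODE(dsa)];
     | **	while (cp && CCB_PHYS(cp, phys) != dsa)
     | **		cp = cp->link_ccbh;
     | */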
1521 | |
1522 | /*========================================================== |
1523 | ** |
1524 | ** Declaration of structs. |
1525 | ** |
1526 | **========================================================== |
1527 | */ |
1528 | |
1529 | struct tcb; |
1530 | struct lcb; |
1531 | struct ccb; |
1532 | struct ncb; |
1533 | struct script; |
1534 | |
1535 | typedef struct ncb * ncb_p; |
1536 | typedef struct tcb * tcb_p; |
1537 | typedef struct lcb * lcb_p; |
1538 | typedef struct ccb * ccb_p; |
1539 | |
1540 | struct link { |
1541 | ncrcmd l_cmd; |
1542 | ncrcmd l_paddr; |
1543 | }; |
1544 | |
1545 | struct usrcmd { |
1546 | u_long target;
1547 | u_long lun;
1548 | u_long data;
1549 | u_long cmd;
1550 | }; |
1551 | |
1552 | #define UC_SETSYNC 10
1553 | #define UC_SETTAGS 11
1554 | #define UC_SETDEBUG 12
1555 | #define UC_SETORDER 13
1556 | #define UC_SETWIDE 14
1557 | #define UC_SETFLAG 15
1558 | #define UC_SETVERBOSE 17
1559 | #define UC_RESETDEV 18
1560 | #define UC_CLEARDEV 19
1561 | |
1562 | #define UF_TRACE (0x01)
1563 | #define UF_NODISC (0x02)
1564 | #define UF_NOSCAN (0x04)
1565 | |
1566 | /*======================================================================== |
1567 | ** |
1568 | ** Declaration of structs: target control block |
1569 | ** |
1570 | **======================================================================== |
1571 | */ |
1572 | struct tcb { |
1573 | /*---------------------------------------------------------------- |
1574 | ** LUN tables. |
1575 | ** An array of bus addresses is used on reselection by |
1576 | ** the SCRIPT. |
1577 | **---------------------------------------------------------------- |
1578 | */ |
1579 | u_int32 *luntbl; /* lcbs bus address table */ |
1580 | u_int32 b_luntbl; /* bus address of this table */ |
1581 | u_int32 b_lun0; /* bus address of lun0 */ |
1582 | lcb_p l0p; /* lcb of LUN #0 (normal case) */ |
1583 | #if MAX_LUN > 1
1584 | lcb_p *lmp; /* Other lcb's [1..MAX_LUN] */ |
1585 | #endif |
1586 | /*---------------------------------------------------------------- |
1587 | ** Target capabilities. |
1588 | **---------------------------------------------------------------- |
1589 | */ |
1590 | u_char inq_done; /* Target capabilities received */
1591 | u_char inq_byte7; /* Contains these capabilities */
1592 | |
1593 | /*---------------------------------------------------------------- |
1594 | ** Some flags. |
1595 | **---------------------------------------------------------------- |
1596 | */ |
1597 | u_char to_reset; /* This target is to be reset */
1598 | |
1599 | /*---------------------------------------------------------------- |
1600 | ** Pointer to the ccb used for negotiation. |
1601 | ** Prevents starting a negotiation for every queued command
1602 | ** when tagged command queuing is enabled. |
1603 | **---------------------------------------------------------------- |
1604 | */ |
1605 | ccb_p nego_cp; |
1606 | |
1607 | /*---------------------------------------------------------------- |
1608 | ** negotiation of wide and synch transfer and device quirks. |
1609 | ** sval, wval and uval are read from SCRIPTS and so have alignment |
1610 | ** constraints. |
1611 | **---------------------------------------------------------------- |
1612 | */ |
1613 | /*0*/ u_char minsync;
1614 | /*1*/ u_char sval;
1615 | /*2*/ u_short period;
1616 | /*0*/ u_char maxoffs;
1617 | /*1*/ u_char quirks;
1618 | /*2*/ u_char widedone;
1619 | /*3*/ u_char wval;
1620 | /*0*/ u_char uval;
1621 | |
1622 | #ifdef SCSI_NCR_INTEGRITY_CHECKING |
1623 | u_char ic_min_sync;
1624 | u_char ic_max_width;
1625 | u_char ic_done;
1626 | #endif |
1627 | u_char ic_maximums_set;
1628 | u_char ppr_negotiation;
1629 | |
1630 | /*---------------------------------------------------------------- |
1631 | ** User settable limits and options. |
1632 | ** These limits are read from the NVRAM if present. |
1633 | **---------------------------------------------------------------- |
1634 | */ |
1635 | u_char usrsync;
1636 | u_char usrwide;
1637 | u_short usrtags;
1638 | u_char usrflag;
1639 | }; |
1640 | |
1641 | /*======================================================================== |
1642 | ** |
1643 | ** Declaration of structs: lun control block |
1644 | ** |
1645 | **======================================================================== |
1646 | */ |
1647 | struct lcb { |
1648 | /*---------------------------------------------------------------- |
1649 | ** On reselection, SCRIPTS use this value as a JUMP address |
1650 | ** after the IDENTIFY has been successfully received. |
1651 | ** This field is set to 'resel_tag' if TCQ is enabled and |
1652 | ** to 'resel_notag' if TCQ is disabled. |
1653 | ** (Must be at zero due to bad lun handling on reselection) |
1654 | **---------------------------------------------------------------- |
1655 | */ |
1656 | /*0*/ u_int32 resel_task; |
1657 | |
1658 | /*---------------------------------------------------------------- |
1659 | ** Task table used by the script processor to retrieve the |
1660 | ** task corresponding to a reselected nexus. The TAG is used |
1661 | ** as offset to determine the corresponding entry. |
1662 | ** Each entry contains the associated CCB bus address. |
1663 | **---------------------------------------------------------------- |
1664 | */ |
1665 | u_int32 tasktbl_0; /* Used if TCQ not enabled */ |
1666 | u_int32 *tasktbl; |
1667 | u_int32 b_tasktbl; |
1668 | |
1669 | /*---------------------------------------------------------------- |
1670 | ** CCB queue management. |
1671 | **---------------------------------------------------------------- |
1672 | */ |
1673 | XPT_QUEHEAD busy_ccbq; /* Queue of busy CCBs */ |
1674 | XPT_QUEHEAD wait_ccbq; /* Queue of waiting for IO CCBs */ |
1675 | u_short busyccbs; /* CCBs busy for this lun */
1676 | u_short queuedccbs; /* CCBs queued to the controller*/
1677 | u_short queuedepth; /* Queue depth for this lun */
1678 | u_short scdev_depth; /* SCSI device queue depth */
1679 | u_short maxnxs; /* Max possible nexuses */
1680 | |
1681 | /*---------------------------------------------------------------- |
1682 | ** Control of tagged command queuing. |
1683 | ** Tags allocation is performed using a circular buffer. |
1684 | ** This avoids using a loop for tag allocation. |
1685 | **---------------------------------------------------------------- |
1686 | */ |
1687 | u_short ia_tag; /* Tag allocation index */
1688 | u_short if_tag; /* Tag release index */
1689 | u_char *cb_tags; /* Circular tags buffer */
1690 | u_char inq_byte7; /* Store unit CmdQ capability */
1691 | u_char usetags; /* Command queuing is active */
1692 | u_char to_clear; /* User wants to clear all tasks*/
1693 | u_short maxtags; /* Max NR of tags asked by user */
1694 | u_short numtags; /* Current number of tags */
1695 | |
1696 | /*---------------------------------------------------------------- |
1697 | ** QUEUE FULL and ORDERED tag control. |
1698 | **---------------------------------------------------------------- |
1699 | */ |
1700 | u_short num_good; /* Nr of GOOD since QUEUE FULL */
1701 | u_short tags_sum[2]; /* Tags sum counters */
1702 | u_char tags_si; /* Current index to tags sum */
1703 | u_long tags_stime; /* Last time we switch tags_sum */
1704 | }; |
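     | /*
     | ** Tag allocation sketch (illustrative only; the real code lives in
     | ** ncr_get_ccb()/ncr_free_ccb() and may differ in detail):
     | **
     | **	tag = lp->cb_tags[lp->ia_tag];		(take the next free tag)
     | **	if (++lp->ia_tag == MAX_TASKS) lp->ia_tag = 0;
     | **	...
     | **	lp->cb_tags[lp->if_tag] = tag;		(give the tag back)
     | **	if (++lp->if_tag == MAX_TASKS) lp->if_tag = 0;
     | **
     | ** Tags return to the buffer in completion order, so no search for a
     | ** free tag is ever needed.
     | */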
1705 | |
1706 | /*======================================================================== |
1707 | ** |
1708 | ** Declaration of structs: actions for a task. |
1709 | ** |
1710 | **======================================================================== |
1711 | ** |
1712 | ** It is part of the CCB and is called by the scripts processor to |
1713 | ** start or restart the data structure (nexus). |
1714 | ** |
1715 | **------------------------------------------------------------------------ |
1716 | */ |
1717 | struct action { |
1718 | u_int32 start; |
1719 | u_int32 restart; |
1720 | }; |
1721 | |
1722 | /*======================================================================== |
1723 | ** |
1724 | ** Declaration of structs: Phase mismatch context. |
1725 | ** |
1726 | **======================================================================== |
1727 | ** |
1728 | ** It is part of the CCB and is used as parameters for the DATA |
1729 | ** pointer. We need two contexts to handle correctly the SAVED |
1730 | ** DATA POINTER. |
1731 | ** |
1732 | **------------------------------------------------------------------------ |
1733 | */ |
1734 | struct pm_ctx { |
1735 | struct scr_tblmove sg; /* Updated interrupted SG block */ |
1736 | u_int32 ret; /* SCRIPT return address */ |
1737 | }; |
1738 | |
1739 | /*======================================================================== |
1740 | ** |
1741 | ** Declaration of structs: global HEADER. |
1742 | ** |
1743 | **======================================================================== |
1744 | ** |
1745 | ** In earlier driver versions, this substructure was copied from the |
1746 | ** ccb to a global address after selection (or reselection) and copied |
1747 | ** back before disconnect. Since we are now using LOAD/STORE DSA |
1748 | ** RELATIVE instructions, the script is able to access directly these |
1749 | ** fields, and so this header is no longer copied.
1750 | ** |
1751 | **------------------------------------------------------------------------ |
1752 | */ |
1753 | |
1754 | struct head { |
1755 | /*---------------------------------------------------------------- |
1756 | ** Start and restart SCRIPTS addresses (must be at 0). |
1757 | **---------------------------------------------------------------- |
1758 | */ |
1759 | struct action go; |
1760 | |
1761 | /*---------------------------------------------------------------- |
1762 | ** Saved data pointer. |
1763 | ** Points to the position in the script responsible for the |
1764 | ** actual transfer of data. |
1765 | ** It's written after reception of a SAVE_DATA_POINTER message. |
1766 | ** The goalpointer points after the last transfer command. |
1767 | **---------------------------------------------------------------- |
1768 | */ |
1769 | u_int32 savep; |
1770 | u_int32 lastp; |
1771 | u_int32 goalp; |
1772 | |
1773 | /*---------------------------------------------------------------- |
1774 | ** Alternate data pointer. |
1775 | ** They are copied back to savep/lastp/goalp by the SCRIPTS |
1776 | ** when the direction is unknown and the device claims data out. |
1777 | **---------------------------------------------------------------- |
1778 | */ |
1779 | u_int32 wlastp; |
1780 | u_int32 wgoalp; |
1781 | |
1782 | /*---------------------------------------------------------------- |
1783 | ** Status fields. |
1784 | **---------------------------------------------------------------- |
1785 | */ |
1786 | u_char status[4]; /* host status */
1787 | }; |
1788 | |
1789 | /* |
1790 | ** LUN control block lookup. |
1791 | ** We use a direct pointer for LUN #0, and a table of pointers |
1792 | ** which is only allocated for devices that support LUN(s) > 0. |
1793 | */ |
1794 | #if MAX_LUN <= 1
1795 | #define ncr_lp(np, tp, lun) (!lun) ? (tp)->l0p : 0
1796 | #else
1797 | #define ncr_lp(np, tp, lun) \
1798 | (!lun) ? (tp)->l0p : (tp)->lmp ? (tp)->lmp[(lun)] : 0
1799 | #endif |
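     | /*
     | ** Example (illustrative): ncr_lp(np, tp, lun) yields tp->l0p for
     | ** LUN 0 and otherwise indexes the lmp[] table, evaluating to 0 when
     | ** no lcb has been allocated for that LUN yet.
     | */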
1800 | |
1801 | /* |
1802 | ** The status bytes are used by the host and the script processor. |
1803 | ** |
1804 | ** The four bytes (status[4]) are copied to the scratchb register |
1805 | ** (declared as scr0..scr3 in ncr_reg.h) just after the select/reselect, |
1806 | ** and copied back just after disconnecting. |
1807 | ** Inside the script the XX_REG are used. |
1808 | */ |
1809 | |
1810 | /* |
1811 | ** Last four bytes (script) |
1812 | */ |
1813 | #define QU_REG scr0
1814 | #define HS_REG scr1
1815 | #define HS_PRT nc_scr1
1816 | #define SS_REG scr2
1817 | #define SS_PRT nc_scr2
1818 | #define HF_REG scr3
1819 | #define HF_PRT nc_scr3
1820 | |
1821 | /* |
1822 | ** Last four bytes (host) |
1823 | */ |
1824 | #define actualquirks phys.header.status[0]
1825 | #define host_status phys.header.status[1]
1826 | #define scsi_status phys.header.status[2]
1827 | #define host_flags phys.header.status[3]
1828 | |
1829 | /* |
1830 | ** Host flags |
1831 | */ |
1832 | #define HF_IN_PM0 1u
1833 | #define HF_IN_PM1 (1u<<1)
1834 | #define HF_ACT_PM (1u<<2)
1835 | #define HF_DP_SAVED (1u<<3)
1836 | #define HF_AUTO_SENSE (1u<<4)
1837 | #define HF_DATA_IN (1u<<5)
1838 | #define HF_PM_TO_C (1u<<6)
1839 | #define HF_EXT_ERR (1u<<7)
1840 | |
1841 | #ifdef SCSI_NCR_IARB_SUPPORT |
1842 | #define HF_HINT_IARB (1u<<7) |
1843 | #endif |
1844 | |
1845 | /* |
1846 | ** This one is stolen from QU_REG.:) |
1847 | */ |
1848 | #define HF_DATA_ST (1u<<7)
1849 | |
1850 | /*========================================================== |
1851 | ** |
1852 | ** Declaration of structs: Data structure block |
1853 | ** |
1854 | **========================================================== |
1855 | ** |
1856 | ** During execution of a ccb by the script processor, |
1857 | ** the DSA (data structure address) register points |
1858 | ** to this substructure of the ccb. |
1859 | ** This substructure contains the header with |
1860 | ** the script-processor-changeable data and
1861 | ** data blocks for the indirect move commands. |
1862 | ** |
1863 | **---------------------------------------------------------- |
1864 | */ |
1865 | |
1866 | struct dsb { |
1867 | |
1868 | /* |
1869 | ** Header. |
1870 | */ |
1871 | |
1872 | struct head header; |
1873 | |
1874 | /* |
1875 | ** Table data for Script |
1876 | */ |
1877 | |
1878 | struct scr_tblsel select; |
1879 | struct scr_tblmove smsg ; |
1880 | struct scr_tblmove smsg_ext ; |
1881 | struct scr_tblmove cmd ; |
1882 | struct scr_tblmove sense ; |
1883 | struct scr_tblmove wresid; |
1884 | struct scr_tblmove data [MAX_SCATTER];
1885 | |
1886 | /* |
1887 | ** Phase mismatch contexts. |
1888 | ** We need two to handle correctly the |
1889 | ** SAVED DATA POINTER. |
1890 | */ |
1891 | |
1892 | struct pm_ctx pm0; |
1893 | struct pm_ctx pm1; |
1894 | }; |
1895 | |
1896 | |
1897 | /*======================================================================== |
1898 | ** |
1899 | ** Declaration of structs: Command control block. |
1900 | ** |
1901 | **======================================================================== |
1902 | */ |
1903 | struct ccb { |
1904 | /*---------------------------------------------------------------- |
1905 | ** This is the data structure which is pointed by the DSA |
1906 | ** register when it is executed by the script processor. |
1907 | ** It must be the first entry. |
1908 | **---------------------------------------------------------------- |
1909 | */ |
1910 | struct dsb phys; |
1911 | |
1912 | /*---------------------------------------------------------------- |
1913 | ** The general SCSI driver provides a |
1914 | ** pointer to a control block. |
1915 | **---------------------------------------------------------------- |
1916 | */ |
1917 | Scsi_Cmnd *cmd; /* SCSI command */ |
1918 | u_char cdb_buf[16]; /* Copy of CDB */
1919 | u_char sense_buf[64];
1920 | int data_len; /* Total data length */ |
1921 | int segments; /* Number of SG segments */ |
1922 | |
1923 | /*---------------------------------------------------------------- |
1924 | ** Message areas. |
1925 | ** We prepare a message to be sent after selection. |
1926 | ** We may use a second one if the command is rescheduled |
1927 | ** due to CHECK_CONDITION or QUEUE FULL status. |
1928 | ** Contents are IDENTIFY and SIMPLE_TAG. |
1929 | ** While negotiating sync or wide transfer, |
1930 | ** a SDTR or WDTR message is appended. |
1931 | **---------------------------------------------------------------- |
1932 | */ |
1933 | u_char scsi_smsg [12];
1934 | u_char scsi_smsg2[12];
1935 | |
1936 | /*---------------------------------------------------------------- |
1937 | ** Miscellaneous status fields.
1938 | **---------------------------------------------------------------- |
1939 | */ |
1940 | u_char nego_status; /* Negotiation status */
1941 | u_char xerr_status; /* Extended error flags */
1942 | u_int32 extra_bytes; /* Extraneous bytes transferred */ |
1943 | |
1944 | /*---------------------------------------------------------------- |
1945 | ** Saved info for auto-sense |
1946 | **---------------------------------------------------------------- |
1947 | */ |
1948 | u_char sv_scsi_status;
1949 | u_char sv_xerr_status;
1950 | |
1951 | /*---------------------------------------------------------------- |
1952 | ** Other fields. |
1953 | **---------------------------------------------------------------- |
1954 | */ |
1955 | u_long p_ccb; /* BUS address of this CCB */
1956 | u_char sensecmd[6]; /* Sense command */
1957 | u_char to_abort; /* This CCB is to be aborted */
1958 | u_short tag; /* Tag for this transfer */
1959 | /* NO_TAG means no tag */
1960 | u_char tags_si; /* Lun tags sum index (0,1) */
1961 | |
1962 | u_char target;
1963 | u_char lun;
1964 | u_short queued;
1965 | ccb_p link_ccb; /* Host adapter CCB chain */ |
1966 | ccb_p link_ccbh; /* Host adapter CCB hash chain */ |
1967 | XPT_QUEHEAD link_ccbq; /* Link to unit CCB queue */ |
1968 | u_int32 startp; /* Initial data pointer */ |
1969 | u_int32 lastp0; /* Initial 'lastp' */ |
1970 | int ext_sg; /* Extreme data pointer, used */ |
1971 | int ext_ofs; /* to calculate the residual. */ |
1972 | int resid; |
1973 | }; |
1974 | |
1975 | #define CCB_PHYS(cp,lbl) (cp->p_ccb + offsetof(struct ccb, lbl))
1976 | |
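     | /*
     | ** Example (illustrative): CCB_PHYS (cp, phys.header.status) is the bus
     | ** address of this CCB's status bytes, i.e. cp->p_ccb plus the byte
     | ** offset of the field within struct ccb.
     | */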
1977 | |
1978 | /*======================================================================== |
1979 | ** |
1980 | ** Declaration of structs: NCR device descriptor |
1981 | ** |
1982 | **======================================================================== |
1983 | */ |
1984 | struct ncb { |
1985 | /*---------------------------------------------------------------- |
1986 | ** Idle task and invalid task actions and their bus |
1987 | ** addresses. |
1988 | **---------------------------------------------------------------- |
1989 | */ |
1990 | struct action idletask; |
1991 | struct action notask; |
1992 | struct action bad_i_t_l; |
1993 | struct action bad_i_t_l_q; |
1994 | u_long p_idletask;
1995 | u_long p_notask;
1996 | u_long p_bad_i_t_l;
1997 | u_long p_bad_i_t_l_q;
1998 | |
1999 | /*---------------------------------------------------------------- |
2000 | ** Dummy lun table to protect us against target returning bad |
2001 | ** lun number on reselection. |
2002 | **---------------------------------------------------------------- |
2003 | */ |
2004 | u_int32 *badluntbl; /* Table physical address */ |
2005 | u_int32 resel_badlun; /* SCRIPT handler BUS address */ |
2006 | |
2007 | /*---------------------------------------------------------------- |
2008 | ** Bit 32-63 of the on-chip RAM bus address in LE format. |
2009 | ** The START_RAM64 script loads the MMRS and MMWS from this |
2010 | ** field. |
2011 | **---------------------------------------------------------------- |
2012 | */ |
2013 | u_int32 scr_ram_seg; |
2014 | |
2015 | /*---------------------------------------------------------------- |
2016 | ** CCBs management queues. |
2017 | **---------------------------------------------------------------- |
2018 | */ |
2019 | Scsi_Cmnd *waiting_list; /* Commands waiting for a CCB */ |
2020 | /* when lcb is not allocated. */ |
2021 | Scsi_Cmnd *done_list; /* Commands waiting for done() */ |
2022 | /* callback to be invoked. */ |
2023 | #if LINUX_VERSION_CODE >= LinuxVersionCode(2,1,93)
2024 | spinlock_t smp_lock; /* Lock for SMP threading */ |
2025 | #endif |
2026 | |
2027 | /*---------------------------------------------------------------- |
2028 | ** Chip and controller identification.
2029 | **---------------------------------------------------------------- |
2030 | */ |
2031 | int unit; /* Unit number */ |
2032 | char chip_name[8]; /* Chip name */ |
2033 | char inst_name[16]; /* ncb instance name */ |
2034 | |
2035 | /*---------------------------------------------------------------- |
2036 | ** Initial value of some IO register bits. |
2037 | ** These values are assumed to have been set by BIOS, and may |
2038 | ** be used for probing adapter implementation differences. |
2039 | **---------------------------------------------------------------- |
2040 | */ |
2041 | u_char sv_scntl0, sv_scntl3, sv_dmode, sv_dcntl, sv_ctest3, sv_ctest4,
2042 | sv_ctest5, sv_gpcntl, sv_stest2, sv_stest4, sv_stest1, sv_scntl4; |
2043 | |
2044 | /*---------------------------------------------------------------- |
2045 | ** Actual initial value of IO register bits used by the |
2046 | ** driver. They are loaded at initialisation according to |
2047 | ** features that are to be enabled. |
2048 | **---------------------------------------------------------------- |
2049 | */ |
2050 | u_char rv_scntl0, rv_scntl3, rv_dmode, rv_dcntl, rv_ctest3, rv_ctest4,
2051 | rv_ctest5, rv_stest2, rv_ccntl0, rv_ccntl1, rv_scntl4; |
2052 | |
2053 | /*---------------------------------------------------------------- |
2054 | ** Target data. |
2055 | ** Target control block bus address array used by the SCRIPT |
2056 | ** on reselection. |
2057 | **---------------------------------------------------------------- |
2058 | */ |
2059 | struct tcb target[MAX_TARGET];
2060 | u_int32 *targtbl; |
2061 | |
2062 | /*---------------------------------------------------------------- |
2063 | ** Virtual and physical bus addresses of the chip. |
2064 | **---------------------------------------------------------------- |
2065 | */ |
2066 | #ifndef SCSI_NCR_PCI_MEM_NOT_SUPPORTED |
2067 | u_long base_va; /* MMIO base virtual address */
2068 | u_long base2_va; /* On-chip RAM virtual address */
2069 | #endif
2070 | u_long base_ba; /* MMIO base bus address */
2071 | u_long base_io; /* IO space base address */
2072 | u_long base_ws; /* (MM)IO window size */
2073 | u_long base2_ba; /* On-chip RAM bus address */
2074 | u_long base2_ws; /* On-chip RAM window size */
2075 | u_int irq; /* IRQ number */
2076 | volatile /* Pointer to volatile for */ |
2077 | struct ncr_reg *reg; /* memory mapped IO. */ |
2078 | |
2079 | /*---------------------------------------------------------------- |
2080 | ** SCRIPTS virtual and physical bus addresses. |
2081 | ** 'script' is loaded in the on-chip RAM if present. |
2082 | ** 'scripth' stays in main memory for all chips except the |
2083 | ** 53C895A and 53C896 that provide 8K on-chip RAM. |
2084 | **---------------------------------------------------------------- |
2085 | */ |
2086 | struct script *script0; /* Copies of script and scripth */ |
2087 | struct scripth *scripth0; /* relocated for this ncb. */ |
2088 | u_long p_script; /* Actual script and scripth */
2089 | u_long p_scripth; /* bus addresses. */
2090 | u_long p_scripth0;
2091 | |
2092 | /*---------------------------------------------------------------- |
2093 | ** General controller parameters and configuration. |
2094 | **---------------------------------------------------------------- |
2095 | */ |
2096 | pcidev_t pdev; |
2097 | u_short device_id; /* PCI device id */
2098 | u_char revision_id; /* PCI device revision id */
2099 | u_char bus; /* PCI BUS number */
2100 | u_char device_fn; /* PCI BUS device and function */
2101 | u_char myaddr; /* SCSI id of the adapter */
2102 | u_char maxburst; /* log base 2 of dwords burst */
2103 | u_char maxwide; /* Maximum transfer width */
2104 | u_char minsync; /* Minimum sync period factor */
2105 | u_char maxsync; /* Maximum sync period factor */
2106 | u_char maxoffs; /* Max scsi offset */
2107 | u_char multiplier; /* Clock multiplier (1,2,4) */
2108 | u_char clock_divn; /* Number of clock divisors */
2109 | u_long clock_khz; /* SCSI clock frequency in KHz */
2110 | u_int features; /* Chip features map */
2111 | |
2112 | /*---------------------------------------------------------------- |
2113 | ** Range for the PCI clock frequency measurement result |
2114 | ** that ensures the algorithm used by the driver can be |
2115 | ** trusted for the SCSI clock frequency measurement. |
2116 | ** (Assuming a PCI clock frequency of 33 MHz). |
2117 | **---------------------------------------------------------------- |
2118 | */ |
2119 | u_int pciclock_min;
2120 | u_int pciclock_max;
2121 | |
2122 | /*---------------------------------------------------------------- |
2123 | ** Start queue management. |
2124 | ** It is filled up by the host processor and accessed by the |
2125 | ** SCRIPTS processor in order to start SCSI commands. |
2126 | **---------------------------------------------------------------- |
2127 | */ |
2128 | u_long p_squeue; /* Start queue BUS address */
2129 | u_int32 *squeue; /* Start queue virtual address */
2130 | u_short squeueput; /* Next free slot of the queue */
2131 | u_short actccbs; /* Number of allocated CCBs */
2132 | u_short queuedepth; /* Start queue depth */
2133 | |
2134 | /*---------------------------------------------------------------- |
2135 | ** Command completion queue. |
2136 | ** It is the same size as the start queue to avoid overflow. |
2137 | **---------------------------------------------------------------- |
2138 | */ |
2139 | u_short dqueueget; /* Next position to scan */
2140 | u_int32 *dqueue; /* Completion (done) queue */ |
2141 | |
2142 | /*---------------------------------------------------------------- |
2143 | ** Timeout handler. |
2144 | **---------------------------------------------------------------- |
2145 | */ |
2146 | struct timer_list timer; /* Timer handler link header */ |
2147 | u_long lasttime;
2148 | u_long settle_time; /* Resetting the SCSI BUS */
2149 | |
2150 | /*---------------------------------------------------------------- |
2151 | ** Debugging and profiling. |
2152 | **---------------------------------------------------------------- |
2153 | */ |
2154 | struct ncr_reg regdump; /* Register dump */ |
2155 | u_long regtime; /* Time it has been done */
2156 | |
2157 | /*---------------------------------------------------------------- |
2158 | ** Miscellaneous buffers accessed by the scripts-processor. |
2159 | ** They shall be DWORD aligned, because they may be read or |
2160 | ** written with a script command. |
2161 | **---------------------------------------------------------------- |
2162 | */ |
2163 | u_char msgout[12]; /* Buffer for MESSAGE OUT */
2164 | u_char msgin [12]; /* Buffer for MESSAGE IN */
2165 | u_int32 lastmsg; /* Last SCSI message sent */ |
2166 | u_char scratch; /* Scratch for SCSI receive */
2167 | |
2168 | /*---------------------------------------------------------------- |
2169 | ** Miscellaneous configuration and status parameters. |
2170 | **---------------------------------------------------------------- |
2171 | */ |
2172 | u_char scsi_mode; /* Current SCSI BUS mode */
2173 | u_char order; /* Tag order to use */
2174 | u_char verbose; /* Verbosity for this controller*/
2175 | u_int32 ncr_cache; /* Used for cache test at init. */ |
2176 | u_long p_ncb; /* BUS address of this NCB */
2177 | |
2178 | /*---------------------------------------------------------------- |
2179 | ** CCB lists and queue. |
2180 | **---------------------------------------------------------------- |
2181 | */ |
2182 | ccb_p ccbh[CCB_HASH_SIZE]; /* CCB hashed by DSA value */
2183 | struct ccb *ccbc; /* CCB chain */ |
2184 | XPT_QUEHEAD free_ccbq; /* Queue of available CCBs */ |
2185 | |
2186 | /*---------------------------------------------------------------- |
2187 | ** IMMEDIATE ARBITRATION (IARB) control. |
2188 | ** We keep track in 'last_cp' of the last CCB that has been |
2189 | ** queued to the SCRIPTS processor and clear 'last_cp' when |
2190 | ** this CCB completes. If last_cp is not zero at the moment |
2191 | ** we queue a new CCB, we set a flag in 'last_cp' that is |
2192 | ** used by the SCRIPTS as a hint for setting IARB. |
2193 | ** We do not set more than 'iarb_max' consecutive hints for
2194 | ** IARB in order to leave devices a chance to reselect. |
2195 | ** By the way, any non zero value of 'iarb_max' is unfair. :) |
2196 | **---------------------------------------------------------------- |
2197 | */ |
2198 | #ifdef SCSI_NCR_IARB_SUPPORT |
2199 | struct ccb *last_cp; /* Last queued CCB used for IARB */
2200 | u_short iarb_max; /* Max. # consecutive IARB hints*/
2201 | u_short iarb_count; /* Actual # of these hints */
2202 | #endif |
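     | /*
     | ** Sketch of how the hint is set when queuing a new CCB (illustrative;
     | ** the actual logic is in ncr_put_start_queue() and may differ):
     | **
     | **	if (np->last_cp && np->iarb_count < np->iarb_max) {
     | **		np->last_cp->host_flags |= HF_HINT_IARB;
     | **		++np->iarb_count;
     | **	} else
     | **		np->iarb_count = 0;
     | **	np->last_cp = cp;
     | */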
2203 | |
2204 | /*---------------------------------------------------------------- |
2205 | ** We need the LCB in order to handle disconnections and |
2206 | ** to count active CCBs for task management. So, we use |
2207 | ** a unique CCB for LUNs for which we do not have the LCB yet.
2208 | ** This queue normally should have at most 1 element. |
2209 | **---------------------------------------------------------------- |
2210 | */ |
2211 | XPT_QUEHEAD b0_ccbq; |
2212 | |
2213 | /*---------------------------------------------------------------- |
2214 | ** We use a different scatter function for 896 rev 1. |
2215 | **---------------------------------------------------------------- |
2216 | */ |
2217 | int (*scatter) (ncb_p, ccb_p, Scsi_Cmnd *); |
2218 | |
2219 | /*---------------------------------------------------------------- |
2220 | ** Command abort handling. |
2221 | ** We need to synchronize tightly with the SCRIPTS |
2222 | ** processor in order to handle things correctly. |
2223 | **---------------------------------------------------------------- |
2224 | */ |
2225 | u_char abrt_msg[4]; /* Message to send buffer */
2226 | struct scr_tblmove abrt_tbl; /* Table for the MOV of it */ |
2227 | struct scr_tblsel abrt_sel; /* Sync params for selection */ |
2228 | u_char istat_sem; /* Tells the chip to stop (SEM) */
2229 | |
2230 | /*---------------------------------------------------------------- |
2231 | ** Fields that should be removed or changed. |
2232 | **---------------------------------------------------------------- |
2233 | */ |
2234 | struct usrcmd user; /* Command from user */ |
2235 | volatile u_char release_stage; /* Synchronisation stage on release */
2236 | |
2237 | /*---------------------------------------------------------------- |
2238 | ** Fields that are used (primarily) for integrity check |
2239 | **---------------------------------------------------------------- |
2240 | */ |
2241 | unsigned char check_integrity; /* Enable midlayer integ. check on |
2242 | * bus scan. */ |
2243 | #ifdef SCSI_NCR_INTEGRITY_CHECKING |
2244 | unsigned char check_integ_par; /* Set if par or Init. Det. error |
2245 | * used only during integ check */ |
2246 | #endif |
2247 | }; |
2248 | |
2249 | #define NCB_PHYS(np, lbl) (np->p_ncb + offsetof(struct ncb, lbl))
2250 | #define NCB_SCRIPT_PHYS(np,lbl) (np->p_script + offsetof (struct script, lbl))
2251 | #define NCB_SCRIPTH_PHYS(np,lbl) (np->p_scripth + offsetof (struct scripth,lbl))
2252 | #define NCB_SCRIPTH0_PHYS(np,lbl) (np->p_scripth0 + offsetof (struct scripth,lbl))
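     | /*
     | ** Example (illustrative): NCB_SCRIPT_PHYS (np, dispatch) is the bus
     | ** address of the 'dispatch' fragment of this controller's copied
     | ** SCRIPTS, computed the same way as CCB_PHYS: base bus address plus
     | ** the field offset within the structure.
     | */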
2253 | |
2254 | /*========================================================== |
2255 | ** |
2256 | ** |
2257 | ** Script for NCR-Processor. |
2258 | ** |
2259 | ** Use ncr_script_fill() to create the variable parts. |
2260 | ** Use ncr_script_copy_and_bind() to make a copy and |
2261 | ** bind to physical addresses. |
2262 | ** |
2263 | ** |
2264 | **========================================================== |
2265 | ** |
2266 | ** We have to know the offsets of all labels before |
2267 | ** we reach them (for forward jumps). |
2268 | ** Therefore we declare a struct here. |
2269 | ** If you make changes inside the script, |
2270 | ** DONT FORGET TO CHANGE THE LENGTHS HERE! |
2271 | ** |
2272 | **---------------------------------------------------------- |
2273 | */ |
2274 | |
2275 | /* |
2276 | ** Script fragments which are loaded into the on-chip RAM |
2277 | ** of 825A, 875, 876, 895, 895A and 896 chips. |
2278 | */ |
2279 | struct script { |
2280 | ncrcmd start [ 14]; |
2281 | ncrcmd getjob_begin [ 4]; |
2282 | ncrcmd getjob_end [ 4]; |
2283 | ncrcmd select [ 8]; |
2284 | ncrcmd wf_sel_done [ 2]; |
2285 | ncrcmd send_ident [ 2]; |
2286 | #ifdef SCSI_NCR_IARB_SUPPORT |
2287 | ncrcmd select2 [ 8]; |
2288 | #else |
2289 | ncrcmd select2 [ 2]; |
2290 | #endif |
2291 | ncrcmd command [ 2]; |
2292 | ncrcmd dispatch [ 28]; |
2293 | ncrcmd sel_no_cmd [ 10]; |
2294 | ncrcmd init [ 6]; |
2295 | ncrcmd clrack [ 4]; |
2296 | ncrcmd disp_status [ 4]; |
2297 | ncrcmd datai_done [ 26]; |
2298 | ncrcmd datao_done [ 12]; |
2299 | ncrcmd ign_i_w_r_msg [ 4]; |
2300 | ncrcmd datai_phase [ 2]; |
2301 | ncrcmd datao_phase [ 4]; |
2302 | ncrcmd msg_in [ 2]; |
2303 | ncrcmd msg_in2 [ 10]; |
2304 | #ifdef SCSI_NCR_IARB_SUPPORT |
2305 | ncrcmd status [ 14]; |
2306 | #else |
2307 | ncrcmd status [ 10]; |
2308 | #endif |
2309 | ncrcmd complete [ 8]; |
2310 | #ifdef SCSI_NCR_PCIQ_MAY_REORDER_WRITES |
2311 | ncrcmd complete2 [ 12]; |
2312 | #else |
2313 | ncrcmd complete2 [ 10]; |
2314 | #endif |
2315 | #ifdef SCSI_NCR_PCIQ_SYNC_ON_INTR |
2316 | ncrcmd done [ 18]; |
2317 | #else |
2318 | ncrcmd done [ 14]; |
2319 | #endif |
2320 | ncrcmd done_end [ 2]; |
2321 | ncrcmd save_dp [ 8]; |
2322 | ncrcmd restore_dp [ 4]; |
2323 | ncrcmd disconnect [ 20]; |
2324 | #ifdef SCSI_NCR_IARB_SUPPORT |
2325 | ncrcmd idle [ 4]; |
2326 | #else |
2327 | ncrcmd idle [ 2]; |
2328 | #endif |
2329 | #ifdef SCSI_NCR_IARB_SUPPORT |
2330 | ncrcmd ungetjob [ 6]; |
2331 | #else |
2332 | ncrcmd ungetjob [ 4]; |
2333 | #endif |
2334 | ncrcmd reselect [ 4]; |
2335 | ncrcmd reselected [ 20]; |
2336 | ncrcmd resel_scntl4 [ 30]; |
2337 | #if MAX_TASKS*4 > 512
2338 | ncrcmd resel_tag [ 18];
2339 | #elif MAX_TASKS*4 > 256
2340 | ncrcmd resel_tag [ 12]; |
2341 | #else |
2342 | ncrcmd resel_tag [ 8]; |
2343 | #endif |
2344 | ncrcmd resel_go [ 6]; |
2345 | ncrcmd resel_notag [ 2]; |
2346 | ncrcmd resel_dsa [ 8]; |
2347 | ncrcmd data_in [MAX_SCATTER * SCR_SG_SIZE];
2348 | ncrcmd data_in2 [ 4];
2349 | ncrcmd data_out [MAX_SCATTER * SCR_SG_SIZE];
2350 | ncrcmd data_out2 [ 4]; |
2351 | ncrcmd pm0_data [ 12]; |
2352 | ncrcmd pm0_data_out [ 6]; |
2353 | ncrcmd pm0_data_end [ 6]; |
2354 | ncrcmd pm1_data [ 12]; |
2355 | ncrcmd pm1_data_out [ 6]; |
2356 | ncrcmd pm1_data_end [ 6]; |
2357 | }; |
2358 | |
2359 | /* |
2360 | ** Script fragments which stay in main memory for all chips |
2361 | ** except for the 895A and 896 that support 8K on-chip RAM. |
2362 | */ |
2363 | struct scripth { |
2364 | ncrcmd start64 [ 2]; |
2365 | ncrcmd no_data [ 2]; |
2366 | ncrcmd sel_for_abort [ 18]; |
2367 | ncrcmd sel_for_abort_1 [ 2]; |
2368 | ncrcmd select_no_atn [ 8]; |
2369 | ncrcmd wf_sel_done_no_atn [ 4]; |
2370 | |
2371 | ncrcmd msg_in_etc [ 14]; |
2372 | ncrcmd msg_received [ 4]; |
2373 | ncrcmd msg_weird_seen [ 4]; |
2374 | ncrcmd msg_extended [ 20]; |
2375 | ncrcmd msg_bad [ 6]; |
2376 | ncrcmd msg_weird [ 4]; |
2377 | ncrcmd msg_weird1 [ 8]; |
2378 | |
2379 | ncrcmd wdtr_resp [ 6]; |
2380 | ncrcmd send_wdtr [ 4]; |
2381 | ncrcmd sdtr_resp [ 6]; |
2382 | ncrcmd send_sdtr [ 4]; |
2383 | ncrcmd ppr_resp [ 6]; |
2384 | ncrcmd send_ppr [ 4]; |
2385 | ncrcmd nego_bad_phase [ 4]; |
2386 | ncrcmd msg_out [ 4]; |
2387 | ncrcmd msg_out_done [ 4]; |
2388 | ncrcmd data_ovrun [ 2]; |
2389 | ncrcmd data_ovrun1 [ 22]; |
2390 | ncrcmd data_ovrun2 [ 8]; |
2391 | ncrcmd abort_resel [ 16]; |
2392 | ncrcmd resend_ident [ 4]; |
2393 | ncrcmd ident_break [ 4]; |
2394 | ncrcmd ident_break_atn [ 4]; |
2395 | ncrcmd sdata_in [ 6]; |
2396 | ncrcmd data_io [ 2]; |
2397 | ncrcmd data_io_com [ 8]; |
2398 | ncrcmd data_io_out [ 12]; |
2399 | ncrcmd resel_bad_lun [ 4]; |
2400 | ncrcmd bad_i_t_l [ 4]; |
2401 | ncrcmd bad_i_t_l_q [ 4]; |
2402 | ncrcmd bad_status [ 6]; |
2403 | ncrcmd tweak_pmj [ 12]; |
2404 | ncrcmd pm_handle [ 20]; |
2405 | ncrcmd pm_handle1 [ 4]; |
2406 | ncrcmd pm_save [ 4]; |
2407 | ncrcmd pm0_save [ 14]; |
2408 | ncrcmd pm1_save [ 14]; |
2409 | |
2410 | /* WSR handling */ |
2411 | #ifdef SYM_DEBUG_PM_WITH_WSR |
2412 | ncrcmd pm_wsr_handle [ 44]; |
2413 | #else |
2414 | ncrcmd pm_wsr_handle [ 42]; |
2415 | #endif |
2416 | ncrcmd wsr_ma_helper [ 4]; |
2417 | |
2418 | /* Data area */ |
2419 | ncrcmd zero [ 1]; |
2420 | ncrcmd scratch [ 1]; |
2421 | ncrcmd scratch1 [ 1]; |
2422 | ncrcmd pm0_data_addr [ 1]; |
2423 | ncrcmd pm1_data_addr [ 1]; |
2424 | ncrcmd saved_dsa [ 1]; |
2425 | ncrcmd saved_drs [ 1]; |
2426 | ncrcmd done_pos [ 1]; |
2427 | ncrcmd startpos [ 1]; |
2428 | ncrcmd targtbl [ 1]; |
2429 | /* End of data area */ |
2430 | |
2431 | #ifdef SCSI_NCR_PCI_MEM_NOT_SUPPORTED |
2432 | ncrcmd start_ram [ 1]; |
2433 | ncrcmd script0_ba [ 4]; |
2434 | ncrcmd start_ram64 [ 3]; |
2435 | ncrcmd script0_ba64 [ 3]; |
2436 | ncrcmd scripth0_ba64 [ 6]; |
2437 | ncrcmd ram_seg64 [ 1]; |
2438 | #endif |
2439 | ncrcmd snooptest [ 6]; |
2440 | ncrcmd snoopend [ 2]; |
2441 | }; |
2442 | |
2443 | /*========================================================== |
2444 | ** |
2445 | ** |
2446 | ** Function headers. |
2447 | ** |
2448 | ** |
2449 | **========================================================== |
2450 | */ |
2451 | |
2452 | static ccb_p ncr_alloc_ccb (ncb_p np); |
2453 | static void ncr_complete (ncb_p np, ccb_p cp); |
2454 | static void ncr_exception (ncb_p np); |
2455 | static void ncr_free_ccb (ncb_p np, ccb_p cp); |
2456 | static ccb_p ncr_ccb_from_dsa(ncb_p np, u_long dsa);
2457 | static void ncr_init_tcb (ncb_p np, u_char tn);
2458 | static lcb_p ncr_alloc_lcb (ncb_p np, u_char tn, u_char ln);
2459 | static lcb_p ncr_setup_lcb (ncb_p np, u_char tn, u_char ln,
2460 | u_char *inq_data);
2461 | static void ncr_getclock (ncb_p np, int mult); |
2462 | static u_int ncr_getpciclock (ncb_p np);
2463 | static void ncr_selectclock (ncb_p np, u_char scntl3);
2464 | static ccb_p ncr_get_ccb (ncb_p np, u_char tn, u_char ln);
2465 | static void ncr_init (ncb_p np, int reset, char * msg, u_long code);
2466 | static void ncr_int_sbmc (ncb_p np);
2467 | static void ncr_int_par (ncb_p np, u_short sist);
2468 | static void ncr_int_ma (ncb_p np); |
2469 | static void ncr_int_sir (ncb_p np); |
2470 | static void ncr_int_sto (ncb_p np); |
2471 | static void ncr_int_udc (ncb_p np); |
2472 | static void ncr_negotiate (ncb_p np, tcb_p tp); |
2473 | static int ncr_prepare_nego(ncb_p np, ccb_p cp, u_char *msgptr);
2474 | #ifdef SCSI_NCR_INTEGRITY_CHECKING
2475 | static int ncr_ic_nego(ncb_p np, ccb_p cp, Scsi_Cmnd *cmd, u_char *msgptr);
2476 | #endif |
2477 | static void ncr_script_copy_and_bind |
2478 | (ncb_p np, ncrcmd *src, ncrcmd *dst, int len); |
2479 | static void ncr_script_fill (struct script * scr, struct scripth * scripth); |
2480 | static int ncr_scatter_896R1 (ncb_p np, ccb_p cp, Scsi_Cmnd *cmd); |
2481 | static int ncr_scatter (ncb_p np, ccb_p cp, Scsi_Cmnd *cmd); |
2482 | static void ncr_getsync (ncb_p np, u_char sfac, u_char *fakp, u_char *scntl3p);
2483 | static void ncr_get_xfer_info(ncb_p np, tcb_p tp, u_char *factor, u_char *offset, u_char *width);
2484 | static void ncr_setsync (ncb_p np, ccb_p cp, u_char scntl3, u_char sxfer, u_char scntl4);
2485 | static void ncr_set_sync_wide_status (ncb_p np, u_char target);
2486 | static void ncr_setup_tags (ncb_p np, u_char tn, u_char ln);
2487 | static void ncr_setwide (ncb_p np, ccb_p cp, u_char wide, u_char ack);
2488 | static void ncr_setsyncwide (ncb_p np, ccb_p cp, u_char scntl3, u_char sxfer, u_char scntl4, u_char wide);
2489 | static int ncr_show_msg (u_char * msg);
2490 | static void ncr_print_msg (ccb_p cp, char *label, u_char * msg);
2491 | static int ncr_snooptest (ncb_p np); |
2492 | static void ncr_timeout (ncb_p np); |
2493 | static void ncr_wakeup (ncb_p np, u_long code);
2494 | static int ncr_wakeup_done (ncb_p np); |
2495 | static void ncr_start_next_ccb (ncb_p np, lcb_p lp, int maxn); |
2496 | static void ncr_put_start_queue(ncb_p np, ccb_p cp); |
2497 | static void ncr_chip_reset (ncb_p np); |
2498 | static void ncr_soft_reset (ncb_p np); |
2499 | static void ncr_start_reset (ncb_p np); |
2500 | static int ncr_reset_scsi_bus (ncb_p np, int enab_int, int settle_delay); |
2501 | static int ncr_compute_residual (ncb_p np, ccb_p cp); |
2502 | |
2503 | #ifdef SCSI_NCR_USER_COMMAND_SUPPORT |
2504 | static void ncr_usercmd (ncb_p np); |
2505 | #endif |
2506 | |
2507 | static int ncr_attach (Scsi_Host_Template *tpnt, int unit, ncr_device *device); |
2508 | static void ncr_free_resources(ncb_p np); |
2509 | |
2510 | static void insert_into_waiting_list(ncb_p np, Scsi_Cmnd *cmd); |
2511 | static Scsi_Cmnd *retrieve_from_waiting_list(int to_remove, ncb_p np, Scsi_Cmnd *cmd); |
2512 | static void process_waiting_list(ncb_p np, int sts); |
2513 | |
2514 | #define remove_from_waiting_list(np, cmd) \
2515 | retrieve_from_waiting_list(1, (np), (cmd))
2516 | #define requeue_waiting_list(np) process_waiting_list((np), DID_OK)
2517 | #define reset_waiting_list(np) process_waiting_list((np), DID_RESET)
2518 | |
2519 | #ifdef SCSI_NCR_NVRAM_SUPPORT |
2520 | static void ncr_get_nvram (ncr_device *devp, ncr_nvram *nvp); |
2521 | static int sym_read_Tekram_nvram (ncr_slot *np, u_short device_id,
2522 | Tekram_nvram *nvram); |
2523 | static int sym_read_Symbios_nvram (ncr_slot *np, Symbios_nvram *nvram); |
2524 | #endif |
2525 | |
2526 | /*========================================================== |
2527 | ** |
2528 | ** |
2529 | ** Global static data. |
2530 | ** |
2531 | ** |
2532 | **========================================================== |
2533 | */ |
2534 | |
2535 | static inline char *ncr_name (ncb_p np)
2536 | { |
2537 | return np->inst_name; |
2538 | } |
2539 | |
2540 | |
2541 | /*========================================================== |
2542 | ** |
2543 | ** |
2544 | ** Scripts for NCR-Processor. |
2545 | ** |
2546 | ** Use ncr_script_bind for binding to physical addresses. |
2547 | ** |
2548 | ** |
2549 | **========================================================== |
2550 | ** |
2551 | ** NADDR generates a reference to a field of the controller data. |
2552 | ** PADDR generates a reference to another part of the script. |
2553 | ** RADDR generates a reference to a script processor register. |
2554 | ** FADDR generates a reference to a script processor register |
2555 | ** with offset. |
2556 | ** |
2557 | **---------------------------------------------------------- |
2558 | */ |
2559 | |
2560 | #define RELOC_SOFTC 0x40000000
2561 | #define RELOC_LABEL 0x50000000
2562 | #define RELOC_REGISTER 0x60000000
2563 | #if 0
2564 | #define RELOC_KVAR 0x70000000
2565 | #endif
2566 | #define RELOC_LABELH 0x80000000
2567 | #define RELOC_MASK 0xf0000000
2568 | |
2569 | #define NADDR(label) (RELOC_SOFTC | offsetof(struct ncb, label))
2570 | #define PADDR(label) (RELOC_LABEL | offsetof(struct script, label))
2571 | #define PADDRH(label) (RELOC_LABELH | offsetof(struct scripth, label))
2572 | #define RADDR(label) (RELOC_REGISTER | REG(label))
2573 | #define FADDR(label,ofs) (RELOC_REGISTER | ((REG(label))+(ofs)))
2574 | #define KVAR(which) (RELOC_KVAR | (which))
2575 | |
2576 | #define SCR_DATA_ZERO 0xf00ff00f
2577 | |
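     | /*
     | ** Example (illustrative): NADDR (msgin) encodes RELOC_SOFTC plus the
     | ** offset of the 'msgin' buffer inside struct ncb.  When the script is
     | ** copied by ncr_script_copy_and_bind(), such relocation words are
     | ** resolved to real bus addresses for this particular ncb.
     | */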
2578 | #ifdef RELOC_KVAR |
2579 | #define SCRIPT_KVAR_JIFFIES (0) |
2580 | #define SCRIPT_KVAR_FIRST SCRIPT_KVAR_JIFFIES |
2581 | #define SCRIPT_KVAR_LAST SCRIPT_KVAR_JIFFIES |
2582 | /* |
2583 | * Kernel variables referenced in the scripts. |
2584 | * THESE MUST ALL BE ALIGNED TO A 4-BYTE BOUNDARY. |
2585 | */ |
2586 | static void *script_kvars[] __initdata = |
2587 | { (void *)&jiffies }; |
2588 | #endif |
2589 | |
2590 | static struct script script0 __initdata = { |
2591 | /*--------------------------< START >-----------------------*/ { |
2592 | /* |
2593 | ** This NOP will be patched with LED ON |
2594 | ** SCR_REG_REG (gpreg, SCR_AND, 0xfe) |
2595 | */ |
2596 | SCR_NO_OP,
2597 | 0, |
2598 | /* |
2599 | ** Clear SIGP. |
2600 | */ |
2601 | SCR_FROM_REG (ctest2),
2602 | 0, |
2603 | |
2604 | /* |
2605 | ** Stop here if the C code wants to perform |
2606 | ** some error recovery procedure manually. |
2607 | ** (Indicate this by setting SEM in ISTAT) |
2608 | */ |
2609 | SCR_FROM_REG (istat),
2610 | 0, |
2611 | /* |
2612 | ** Report to the C code the next position in |
2613 | ** the start queue the SCRIPTS will schedule. |
2614 | ** The C code must not change SCRATCHA. |
2615 | */ |
2616 | SCR_LOAD_ABS (scratcha, 4),
2617 | PADDRH (startpos),
2618 | SCR_INT ^ IFTRUE (MASK (SEM, SEM)),
2619 | SIR_SCRIPT_STOPPED,
2620 | |
2621 | /* |
2622 | ** Start the next job. |
2623 | ** |
2624 | ** @DSA = start point for this job. |
2625 | ** SCRATCHA = address of this job in the start queue. |
2626 | ** |
2627 | ** We will restore startpos with SCRATCHA if we fail the
2628 | ** arbitration or if it is the idle job. |
2629 | ** |
2630 | ** The below GETJOB_BEGIN to GETJOB_END section of SCRIPTS |
2631 | ** is a critical path. If it is partially executed, it then |
2632 | ** may happen that the job address is not yet in the DSA |
2633 | ** and the next queue position points to the next JOB.
2634 | */ |
2635 | SCR_LOAD_ABS (dsa, 4),
2636 | PADDRH (startpos),
2637 | SCR_LOAD_REL (temp, 4),
2638 | 4, |
2639 | }/*-------------------------< GETJOB_BEGIN >------------------*/,{ |
2640 | SCR_STORE_ABS (temp, 4),
2641 | PADDRH (startpos),
2642 | SCR_LOAD_REL (dsa, 4),
2643 | 0, |
2644 | }/*-------------------------< GETJOB_END >--------------------*/,{ |
2645 | SCR_LOAD_REL (temp, 4),
2646 | 0,
2647 | SCR_RETURN,
2648 | 0, |
2649 | |
2650 | }/*-------------------------< SELECT >----------------------*/,{ |
2651 | /* |
2652 | ** DSA contains the address of a scheduled |
2653 | ** data structure. |
2654 | ** |
2655 | ** SCRATCHA contains the address of the start queue |
2656 | ** entry which points to the next job. |
2657 | ** |
2658 | ** Set Initiator mode. |
2659 | ** |
2660 | ** (Target mode is left as an exercise for the reader) |
2661 | */ |
2662 | |
2663 | SCR_CLR (SCR_TRG),
2664 | 0, |
2665 | /* |
2666 | ** And try to select this target. |
2667 | */ |
2668 | SCR_SEL_TBL_ATN ^ offsetof (struct dsb, select),
2669 | PADDR (ungetjob),
2670 | /* |
2671 | ** Now there are 4 possibilities: |
2672 | ** |
2673 | ** (1) The ncr loses arbitration.
2674 | ** This is ok, because it will try again, |
2675 | ** when the bus becomes idle. |
2676 | ** (But beware of the timeout function!) |
2677 | ** |
2678 | ** (2) The ncr is reselected. |
2679 | ** Then the script processor takes the jump |
2680 | ** to the RESELECT label. |
2681 | ** |
2682 | ** (3) The ncr wins arbitration. |
2683 | ** Then it will execute SCRIPTS instructions until
2684 | ** the next instruction that checks SCSI phase. |
2685 | ** Then will stop and wait for selection to be |
2686 | ** complete or selection time-out to occur. |
2687 | ** |
2688 | ** After having won arbitration, the ncr SCRIPTS |
2689 | ** processor is able to execute instructions while |
2690 | ** the SCSI core is performing SCSI selection. But |
2691 | ** some script instruction that is not waiting for |
2692 | ** a valid phase (or selection timeout) to occur |
2693 | ** breaks the selection procedure, by probably |
2694 | ** affecting timing requirements. |
2695 | ** So we have to wait immediately for the next phase |
2696 | ** or the selection to complete or time-out. |
2697 | */ |
2698 | |
2699 | /* |
2700 | ** load the savep (saved pointer) into |
2701 | ** the actual data pointer. |
2702 | */ |
2703 | SCR_LOAD_REL (temp, 4),
2704 | offsetof (struct ccb, phys.header.savep),
2705 | /* |
2706 | ** Initialize the status registers |
2707 | */ |
2708 | SCR_LOAD_REL (scr0, 4)(0xe1000000 | 0x02000000|0x10000000 | (((((((size_t) (&(( struct ncr_reg *)0)->nc_scr0)))) & 0xff) << 16ul )) | (4)), |
2709 | offsetof (struct ccb, phys.header.status)((size_t) (&((struct ccb *)0)->phys.header.status)), |
2710 | |
2711 | }/*-------------------------< WF_SEL_DONE >----------------------*/,{ |
2712 | SCR_INT0x98080000 ^ IFFALSE (WHEN (SCR_MSG_OUT))(0x00080000 | ((0x00030000 | (0x06000000)))), |
2713 | SIR_SEL_ATN_NO_MSG_OUT(2), |
2714 | }/*-------------------------< SEND_IDENT >----------------------*/,{ |
2715 | /* |
2716 | ** Selection complete. |
2717 | ** Send the IDENTIFY and SIMPLE_TAG messages |
2718 | ** (and the M_X_SYNC_REQ / M_X_WIDE_REQ message) |
2719 | */ |
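     | /*
     | ** For a typical tagged command to LUN 0 with disconnection
     | ** allowed, the table-indirect move below would send something
     | ** like: 0xc0 (IDENTIFY), 0x20 (SIMPLE QUEUE TAG), <tag value>.
     | */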
2720 | SCR_MOVE_TBL(0x10000000 | 0x08000000) ^ SCR_MSG_OUT0x06000000, |
2721 | offsetof (struct dsb, smsg)((size_t) (&((struct dsb *)0)->smsg)), |
2722 | }/*-------------------------< SELECT2 >----------------------*/,{ |
2723 | #ifdef SCSI_NCR_IARB_SUPPORT |
2724 | /* |
2725 | ** Set IMMEDIATE ARBITRATION if we have been given |
2726 | ** a hint to do so. (Some job to do after this one). |
2727 | */ |
2728 | SCR_FROM_REG (HF_REG)(0x70000000 | ((((((((size_t) (&((struct ncr_reg *)0)-> nc_scr3)))) & 0x7f) << 16ul) + (((((size_t) (&( (struct ncr_reg *)0)->nc_scr3)))) & 0x80))) | (0x02000000 ) | (((0)&0xff)<<8ul)), |
2729 | 0, |
2730 | SCR_JUMPR0x80880000 ^ IFFALSE (MASK (HF_HINT_IARB, HF_HINT_IARB))(0x00080000 | ((0x00040000 | (((HF_HINT_IARB ^ 0xff) & 0xff ) << 8ul)|((HF_HINT_IARB) & 0xff)))), |
2731 | 8, |
2732 | SCR_REG_REG (scntl1, SCR_OR, IARB)(0x78000000 | ((((((((size_t) (&((struct ncr_reg *)0)-> nc_scntl1)))) & 0x7f) << 16ul) + (((((size_t) (& ((struct ncr_reg *)0)->nc_scntl1)))) & 0x80))) | (0x02000000 ) | (((0x02)&0xff)<<8ul)), |
2733 | 0, |
2734 | #endif |
2735 | /* |
2736 | ** Anticipate the COMMAND phase. |
2737 | ** This is the PHASE we expect at this point. |
2738 | */ |
2739 | SCR_JUMP0x80080000 ^ IFFALSE (WHEN (SCR_COMMAND))(0x00080000 | ((0x00030000 | (0x02000000)))), |
2740 | PADDR (sel_no_cmd)(0x50000000 | ((size_t) (&((struct script *)0)->sel_no_cmd ))), |
2741 | |
2742 | }/*-------------------------< COMMAND >--------------------*/,{ |
2743 | /* |
2744 | ** ... and send the command |
2745 | */ |
2746 | SCR_MOVE_TBL(0x10000000 | 0x08000000) ^ SCR_COMMAND0x02000000, |
2747 | offsetof (struct dsb, cmd)((size_t) (&((struct dsb *)0)->cmd)), |
2748 | |
2749 | }/*-----------------------< DISPATCH >----------------------*/,{ |
2750 | /* |
2751 | ** MSG_IN is the only phase that shall be |
2752 | ** entered at least once for each (re)selection. |
2753 | ** So we test it first. |
2754 | */ |
2755 | SCR_JUMP0x80080000 ^ IFTRUE (WHEN (SCR_MSG_IN))(0x00000000 | ((0x00030000 | (0x07000000)))), |
2756 | PADDR (msg_in)(0x50000000 | ((size_t) (&((struct script *)0)->msg_in ))), |
2757 | SCR_JUMP0x80080000 ^ IFTRUE (IF (SCR_DATA_OUT))(0x00000000 | ((0x00020000 | (0x00000000)))), |
2758 | PADDR (datao_phase)(0x50000000 | ((size_t) (&((struct script *)0)->datao_phase ))), |
2759 | SCR_JUMP0x80080000 ^ IFTRUE (IF (SCR_DATA_IN))(0x00000000 | ((0x00020000 | (0x01000000)))), |
2760 | PADDR (datai_phase)(0x50000000 | ((size_t) (&((struct script *)0)->datai_phase ))), |
2761 | SCR_JUMP0x80080000 ^ IFTRUE (IF (SCR_STATUS))(0x00000000 | ((0x00020000 | (0x03000000)))), |
2762 | PADDR (status)(0x50000000 | ((size_t) (&((struct script *)0)->status ))), |
2763 | SCR_JUMP0x80080000 ^ IFTRUE (IF (SCR_COMMAND))(0x00000000 | ((0x00020000 | (0x02000000)))), |
2764 | PADDR (command)(0x50000000 | ((size_t) (&((struct script *)0)->command ))), |
2765 | SCR_JUMP0x80080000 ^ IFTRUE (IF (SCR_MSG_OUT))(0x00000000 | ((0x00020000 | (0x06000000)))), |
2766 | PADDRH (msg_out)(0x80000000 | ((size_t) (&((struct scripth *)0)->msg_out ))), |
2767 | /* |
2768 | * Discard as many illegal phases as |
2769 | * required and tell the C code about it.
2770 | */ |
2771 | SCR_JUMPR0x80880000 ^ IFFALSE (WHEN (SCR_ILG_OUT))(0x00080000 | ((0x00030000 | (0x04000000)))), |
2772 | 16, |
2773 | SCR_MOVE_ABS (1)((0x00000000 | 0x08000000) | (1)) ^ SCR_ILG_OUT0x04000000, |
2774 | NADDR (scratch)(0x40000000 | ((size_t) (&((struct ncb *)0)->scratch)) ), |
2775 | SCR_JUMPR0x80880000 ^ IFTRUE (WHEN (SCR_ILG_OUT))(0x00000000 | ((0x00030000 | (0x04000000)))), |
2776 | -16, |
2777 | SCR_JUMPR0x80880000 ^ IFFALSE (WHEN (SCR_ILG_IN))(0x00080000 | ((0x00030000 | (0x05000000)))), |
2778 | 16, |
2779 | SCR_MOVE_ABS (1)((0x00000000 | 0x08000000) | (1)) ^ SCR_ILG_IN0x05000000, |
2780 | NADDR (scratch)(0x40000000 | ((size_t) (&((struct ncb *)0)->scratch)) ), |
2781 | SCR_JUMPR0x80880000 ^ IFTRUE (WHEN (SCR_ILG_IN))(0x00000000 | ((0x00030000 | (0x05000000)))), |
2782 | -16, |
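     | /*
     | ** The two small loops above drain the spurious bytes into
     | ** 'scratch' one at a time: the backward JUMPR re-executes the
     | ** one-byte MOVE while the illegal phase persists. Once the
     | ** phase changes, the C code is told about it and SCRIPTS
     | ** returns to the dispatcher.
     | */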
2783 | SCR_INT0x98080000, |
2784 | SIR_BAD_PHASE(23), |
2785 | SCR_JUMP0x80080000, |
2786 | PADDR (dispatch)(0x50000000 | ((size_t) (&((struct script *)0)->dispatch ))), |
2787 | }/*---------------------< SEL_NO_CMD >----------------------*/,{ |
2788 | /* |
2789 | ** The target does not switch to command |
2790 | ** phase after IDENTIFY has been sent. |
2791 | ** |
2792 | ** If it stays in MSG OUT phase send it |
2793 | ** the IDENTIFY again. |
2794 | */ |
2795 | SCR_JUMP0x80080000 ^ IFTRUE (WHEN (SCR_MSG_OUT))(0x00000000 | ((0x00030000 | (0x06000000)))), |
2796 | PADDRH (resend_ident)(0x80000000 | ((size_t) (&((struct scripth *)0)->resend_ident ))), |
2797 | /* |
2798 | ** If target does not switch to MSG IN phase |
2799 | ** and we sent a negotiation, assert the |
2800 | ** failure immediately. |
2801 | */ |
2802 | SCR_JUMP0x80080000 ^ IFTRUE (WHEN (SCR_MSG_IN))(0x00000000 | ((0x00030000 | (0x07000000)))), |
2803 | PADDR (dispatch)(0x50000000 | ((size_t) (&((struct script *)0)->dispatch ))), |
2804 | SCR_FROM_REG (HS_REG)(0x70000000 | ((((((((size_t) (&((struct ncr_reg *)0)-> nc_scr1)))) & 0x7f) << 16ul) + (((((size_t) (&( (struct ncr_reg *)0)->nc_scr1)))) & 0x80))) | (0x02000000 ) | (((0)&0xff)<<8ul)), |
2805 | 0, |
2806 | SCR_INT0x98080000 ^ IFTRUE (DATA (HS_NEGOTIATE))(0x00000000 | ((0x00040000 | (((2)) & 0xff)))), |
2807 | SIR_NEGO_FAILED(5), |
2808 | /* |
2809 | ** Jump to dispatcher. |
2810 | */ |
2811 | SCR_JUMP0x80080000, |
2812 | PADDR (dispatch)(0x50000000 | ((size_t) (&((struct script *)0)->dispatch ))), |
2813 | |
2814 | }/*-------------------------< INIT >------------------------*/,{ |
2815 | /* |
2816 | ** Wait for the SCSI RESET signal to be |
2817 | ** inactive before restarting operations, |
2818 | ** since the chip may hang on SEL_ATN |
2819 | ** if SCSI RESET is active. |
2820 | */ |
2821 | SCR_FROM_REG (sstat0)(0x70000000 | ((((((((size_t) (&((struct ncr_reg *)0)-> nc_sstat0)))) & 0x7f) << 16ul) + (((((size_t) (& ((struct ncr_reg *)0)->nc_sstat0)))) & 0x80))) | (0x02000000 ) | (((0)&0xff)<<8ul)), |
2822 | 0, |
2823 | SCR_JUMPR0x80880000 ^ IFTRUE (MASK (IRST, IRST))(0x00000000 | ((0x00040000 | (((0x02 ^ 0xff) & 0xff) << 8ul)|((0x02) & 0xff)))), |
2824 | -16, |
2825 | SCR_JUMP0x80080000, |
2826 | PADDR (start)(0x50000000 | ((size_t) (&((struct script *)0)->start) )), |
2827 | }/*-------------------------< CLRACK >----------------------*/,{ |
2828 | /* |
2829 | ** Terminate possible pending message phase. |
2830 | */ |
2831 | SCR_CLR (SCR_ACK)(0x60000000 | (0x00000040)), |
2832 | 0, |
2833 | SCR_JUMP0x80080000, |
2834 | PADDR (dispatch)(0x50000000 | ((size_t) (&((struct script *)0)->dispatch ))), |
2835 | |
2836 | }/*-------------------------< DISP_STATUS >----------------------*/,{ |
2837 | /* |
2838 | ** Anticipate STATUS phase. |
2839 | ** |
2840 | ** This spares 3 SCRIPTS instructions when we have
2841 | ** completed the INPUT of the data. |
2842 | */ |
2843 | SCR_JUMP0x80080000 ^ IFTRUE (WHEN (SCR_STATUS))(0x00000000 | ((0x00030000 | (0x03000000)))), |
2844 | PADDR (status)(0x50000000 | ((size_t) (&((struct script *)0)->status ))), |
2845 | SCR_JUMP0x80080000, |
2846 | PADDR (dispatch)(0x50000000 | ((size_t) (&((struct script *)0)->dispatch ))), |
2847 | |
2848 | }/*-------------------------< DATAI_DONE >-------------------*/,{ |
2849 | /* |
2850 | * If the device wants us to send more data, |
2851 | * we must count the extra bytes. |
2852 | */ |
2853 | SCR_JUMP0x80080000 ^ IFTRUE (WHEN (SCR_DATA_IN))(0x00000000 | ((0x00030000 | (0x01000000)))), |
2854 | PADDRH (data_ovrun)(0x80000000 | ((size_t) (&((struct scripth *)0)->data_ovrun ))), |
2855 | /* |
2856 | ** If the SWIDE is not full, jump to dispatcher. |
2857 | ** We anticipate a STATUS phase. |
2858 | ** If we later get an IGNORE WIDE RESIDUE, we
2859 | ** will alias it as a MODIFY DP (-1). |
2860 | */ |
2861 | SCR_FROM_REG (scntl2)(0x70000000 | ((((((((size_t) (&((struct ncr_reg *)0)-> nc_scntl2)))) & 0x7f) << 16ul) + (((((size_t) (& ((struct ncr_reg *)0)->nc_scntl2)))) & 0x80))) | (0x02000000 ) | (((0)&0xff)<<8ul)), |
2862 | 0, |
2863 | SCR_JUMP0x80080000 ^ IFFALSE (MASK (WSR, WSR))(0x00080000 | ((0x00040000 | (((0x01 ^ 0xff) & 0xff) << 8ul)|((0x01) & 0xff)))), |
2864 | PADDR (disp_status)(0x50000000 | ((size_t) (&((struct script *)0)->disp_status ))), |
2865 | /* |
2866 | ** The SWIDE is full. |
2867 | ** Clear this condition. |
2868 | */ |
2869 | SCR_REG_REG (scntl2, SCR_OR, WSR)(0x78000000 | ((((((((size_t) (&((struct ncr_reg *)0)-> nc_scntl2)))) & 0x7f) << 16ul) + (((((size_t) (& ((struct ncr_reg *)0)->nc_scntl2)))) & 0x80))) | (0x02000000 ) | (((0x01)&0xff)<<8ul)), |
2870 | 0, |
2871 | /* |
2872 | * We are expecting an IGNORE RESIDUE message |
2873 | * from the device, otherwise we are in data |
2874 | * overrun condition. Check against MSG_IN phase. |
2875 | */ |
2876 | SCR_INT0x98080000 ^ IFFALSE (WHEN (SCR_MSG_IN))(0x00080000 | ((0x00030000 | (0x07000000)))), |
2877 | SIR_SWIDE_OVERRUN(9), |
2878 | SCR_JUMP0x80080000 ^ IFFALSE (WHEN (SCR_MSG_IN))(0x00080000 | ((0x00030000 | (0x07000000)))), |
2879 | PADDR (disp_status)(0x50000000 | ((size_t) (&((struct script *)0)->disp_status ))), |
2880 | /* |
2881 | * We are in MSG_IN phase.
2882 | * Read the first byte of the message. |
2883 | * If it is not an IGNORE RESIDUE message, |
2884 | * signal overrun and jump to message |
2885 | * processing. |
2886 | */ |
2887 | SCR_MOVE_ABS (1)((0x00000000 | 0x08000000) | (1)) ^ SCR_MSG_IN0x07000000, |
2888 | NADDR (msgin[0])(0x40000000 | ((size_t) (&((struct ncb *)0)->msgin[0]) )), |
2889 | SCR_INT0x98080000 ^ IFFALSE (DATA (M_IGN_RESIDUE))(0x00080000 | ((0x00040000 | (((0x23)) & 0xff)))), |
2890 | SIR_SWIDE_OVERRUN(9), |
2891 | SCR_JUMP0x80080000 ^ IFFALSE (DATA (M_IGN_RESIDUE))(0x00080000 | ((0x00040000 | (((0x23)) & 0xff)))), |
2892 | PADDR (msg_in2)(0x50000000 | ((size_t) (&((struct script *)0)->msg_in2 ))), |
2893 | |
2894 | /* |
2895 | * We got the message we expected. |
2896 | * Read the 2nd byte, and jump to dispatcher. |
2897 | */ |
2898 | SCR_CLR (SCR_ACK)(0x60000000 | (0x00000040)), |
2899 | 0, |
2900 | SCR_MOVE_ABS (1)((0x00000000 | 0x08000000) | (1)) ^ SCR_MSG_IN0x07000000, |
2901 | NADDR (msgin[1])(0x40000000 | ((size_t) (&((struct ncb *)0)->msgin[1]) )), |
2902 | SCR_CLR (SCR_ACK)(0x60000000 | (0x00000040)), |
2903 | 0, |
2904 | SCR_JUMP0x80080000, |
2905 | PADDR (disp_status)(0x50000000 | ((size_t) (&((struct script *)0)->disp_status ))), |
2906 | |
2907 | }/*-------------------------< DATAO_DONE >-------------------*/,{ |
2908 | /* |
2909 | * If the device wants us to send more data, |
2910 | * we must count the extra bytes. |
2911 | */ |
2912 | SCR_JUMP0x80080000 ^ IFTRUE (WHEN (SCR_DATA_OUT))(0x00000000 | ((0x00030000 | (0x00000000)))), |
2913 | PADDRH (data_ovrun)(0x80000000 | ((size_t) (&((struct scripth *)0)->data_ovrun ))), |
2914 | /* |
2915 | ** If the SODL is not full, jump to dispatcher.
2916 | ** We anticipate a MSG IN phase or a STATUS phase. |
2917 | */ |
2918 | SCR_FROM_REG (scntl2)(0x70000000 | ((((((((size_t) (&((struct ncr_reg *)0)-> nc_scntl2)))) & 0x7f) << 16ul) + (((((size_t) (& ((struct ncr_reg *)0)->nc_scntl2)))) & 0x80))) | (0x02000000 ) | (((0)&0xff)<<8ul)), |
2919 | 0, |
2920 | SCR_JUMP0x80080000 ^ IFFALSE (MASK (WSS, WSS))(0x00080000 | ((0x00040000 | (((0x08 ^ 0xff) & 0xff) << 8ul)|((0x08) & 0xff)))), |
2921 | PADDR (disp_status)(0x50000000 | ((size_t) (&((struct script *)0)->disp_status ))), |
2922 | /* |
2923 | ** The SODL is full, clear this condition. |
2924 | */ |
2925 | SCR_REG_REG (scntl2, SCR_OR, WSS)(0x78000000 | ((((((((size_t) (&((struct ncr_reg *)0)-> nc_scntl2)))) & 0x7f) << 16ul) + (((((size_t) (& ((struct ncr_reg *)0)->nc_scntl2)))) & 0x80))) | (0x02000000 ) | (((0x08)&0xff)<<8ul)), |
2926 | 0, |
2927 | /* |
2928 | ** And signal a DATA UNDERRUN condition |
2929 | ** to the C code. |
2930 | */ |
2931 | SCR_INT0x98080000, |
2932 | SIR_SODL_UNDERRUN(10), |
2933 | SCR_JUMP0x80080000, |
2934 | PADDR (dispatch)(0x50000000 | ((size_t) (&((struct script *)0)->dispatch ))), |
2935 | |
2936 | }/*-------------------------< IGN_I_W_R_MSG >--------------*/,{ |
2937 | /* |
2938 | ** We jump here from the phase mismatch interrupt, |
2939 | ** when we have a SWIDE and the device has presented
2940 | ** an IGNORE WIDE RESIDUE message on the BUS.
2941 | ** We just have to throw away this message and then
2942 | ** jump to the dispatcher.
2943 | */ |
2944 | SCR_MOVE_ABS (2)((0x00000000 | 0x08000000) | (2)) ^ SCR_MSG_IN0x07000000, |
2945 | NADDR (scratch)(0x40000000 | ((size_t) (&((struct ncb *)0)->scratch)) ), |
2946 | /* |
2947 | ** Clear ACK and jump to dispatcher. |
2948 | */ |
2949 | SCR_JUMP0x80080000, |
2950 | PADDR (clrack)(0x50000000 | ((size_t) (&((struct script *)0)->clrack ))), |
2951 | |
2952 | }/*-------------------------< DATAI_PHASE >------------------*/,{ |
2953 | SCR_RETURN0x90080000, |
2954 | 0, |
2955 | }/*-------------------------< DATAO_PHASE >------------------*/,{ |
2956 | /* |
2957 | ** Patch for 53c1010_66 only - to allow A0 part |
2958 | ** to operate properly in a 33MHz PCI bus. |
2959 | ** |
2960 | ** SCR_REG_REG(scntl4, SCR_OR, 0x0c), |
2961 | ** 0, |
2962 | */ |
2963 | SCR_NO_OP0x80000000, |
2964 | 0, |
2965 | SCR_RETURN0x90080000, |
2966 | 0, |
2967 | }/*-------------------------< MSG_IN >--------------------*/,{ |
2968 | /* |
2969 | ** Get the first byte of the message. |
2970 | ** |
2971 | ** The script processor doesn't negate the |
2972 | ** ACK signal after this transfer. |
2973 | */ |
2974 | SCR_MOVE_ABS (1)((0x00000000 | 0x08000000) | (1)) ^ SCR_MSG_IN0x07000000, |
2975 | NADDR (msgin[0])(0x40000000 | ((size_t) (&((struct ncb *)0)->msgin[0]) )), |
2976 | }/*-------------------------< MSG_IN2 >--------------------*/,{ |
2977 | /* |
2978 | ** Check first against 1 byte messages |
2979 | ** that we handle from SCRIPTS. |
2980 | */ |
2981 | SCR_JUMP0x80080000 ^ IFTRUE (DATA (M_COMPLETE))(0x00000000 | ((0x00040000 | (((0x00)) & 0xff)))), |
2982 | PADDR (complete)(0x50000000 | ((size_t) (&((struct script *)0)->complete ))), |
2983 | SCR_JUMP0x80080000 ^ IFTRUE (DATA (M_DISCONNECT))(0x00000000 | ((0x00040000 | (((0x04)) & 0xff)))), |
2984 | PADDR (disconnect)(0x50000000 | ((size_t) (&((struct script *)0)->disconnect ))), |
2985 | SCR_JUMP0x80080000 ^ IFTRUE (DATA (M_SAVE_DP))(0x00000000 | ((0x00040000 | (((0x02)) & 0xff)))), |
2986 | PADDR (save_dp)(0x50000000 | ((size_t) (&((struct script *)0)->save_dp ))), |
2987 | SCR_JUMP0x80080000 ^ IFTRUE (DATA (M_RESTORE_DP))(0x00000000 | ((0x00040000 | (((0x03)) & 0xff)))), |
2988 | PADDR (restore_dp)(0x50000000 | ((size_t) (&((struct script *)0)->restore_dp ))), |
2989 | /* |
2990 | ** We handle all other messages from the |
2991 | ** C code, so no need to waste on-chip RAM |
2992 | ** for them.
2993 | */ |
2994 | SCR_JUMP0x80080000, |
2995 | PADDRH (msg_in_etc)(0x80000000 | ((size_t) (&((struct scripth *)0)->msg_in_etc ))), |
2996 | |
2997 | }/*-------------------------< STATUS >--------------------*/,{ |
2998 | /* |
2999 | ** get the status |
3000 | */ |
3001 | SCR_MOVE_ABS (1)((0x00000000 | 0x08000000) | (1)) ^ SCR_STATUS0x03000000, |
3002 | NADDR (scratch)(0x40000000 | ((size_t) (&((struct ncb *)0)->scratch)) ), |
3003 | #ifdef SCSI_NCR_IARB_SUPPORT |
3004 | /* |
3005 | ** If STATUS is not GOOD, clear IMMEDIATE ARBITRATION, |
3006 | ** since we may have to tamper with the start queue from
3007 | ** the C code. |
3008 | */ |
3009 | SCR_JUMPR0x80880000 ^ IFTRUE (DATA (S_GOOD))(0x00000000 | ((0x00040000 | (((0x00)) & 0xff)))), |
3010 | 8, |
3011 | SCR_REG_REG (scntl1, SCR_AND, ~IARB)(0x78000000 | ((((((((size_t) (&((struct ncr_reg *)0)-> nc_scntl1)))) & 0x7f) << 16ul) + (((((size_t) (& ((struct ncr_reg *)0)->nc_scntl1)))) & 0x80))) | (0x04000000 ) | (((~0x02)&0xff)<<8ul)), |
3012 | 0, |
3013 | #endif |
3014 | /* |
3015 | ** save status to scsi_status. |
3016 | ** mark as complete. |
3017 | */ |
3018 | SCR_TO_REG (SS_REG)(0x68000000 | ((((((((size_t) (&((struct ncr_reg *)0)-> nc_scr2)))) & 0x7f) << 16ul) + (((((size_t) (&( (struct ncr_reg *)0)->nc_scr2)))) & 0x80))) | (0x02000000 ) | (((0)&0xff)<<8ul)), |
3019 | 0, |
3020 | SCR_LOAD_REG (HS_REG, HS_COMPLETE)(0x78000000 | ((((((((size_t) (&((struct ncr_reg *)0)-> nc_scr1)))) & 0x7f) << 16ul) + (((((size_t) (&( (struct ncr_reg *)0)->nc_scr1)))) & 0x80))) | (0x00000000 ) | ((((4|(0x80)))&0xff)<<8ul)), |
3021 | 0, |
3022 | /* |
3023 | ** Anticipate the MESSAGE PHASE for |
3024 | ** the TASK COMPLETE message. |
3025 | */ |
3026 | SCR_JUMP0x80080000 ^ IFTRUE (WHEN (SCR_MSG_IN))(0x00000000 | ((0x00030000 | (0x07000000)))), |
3027 | PADDR (msg_in)(0x50000000 | ((size_t) (&((struct script *)0)->msg_in ))), |
3028 | SCR_JUMP0x80080000, |
3029 | PADDR (dispatch)(0x50000000 | ((size_t) (&((struct script *)0)->dispatch ))), |
3030 | |
3031 | }/*-------------------------< COMPLETE >-----------------*/,{ |
3032 | /* |
3033 | ** Complete message. |
3034 | ** |
3035 | ** Copy the data pointer to LASTP in header. |
3036 | */ |
3037 | SCR_STORE_REL (temp, 4)(0xe0000000 | 0x02000000|0x10000000 | (((((((size_t) (&(( struct ncr_reg *)0)->nc_temp)))) & 0xff) << 16ul )) | (4)), |
3038 | offsetof (struct ccb, phys.header.lastp)((size_t) (&((struct ccb *)0)->phys.header.lastp)), |
3039 | /* |
3040 | ** When we terminate the cycle by clearing ACK, |
3041 | ** the target may disconnect immediately. |
3042 | ** |
3043 | ** We don't want to be told of an |
3044 | ** "unexpected disconnect", |
3045 | ** so we disable this feature. |
3046 | */ |
3047 | SCR_REG_REG (scntl2, SCR_AND, 0x7f)(0x78000000 | ((((((((size_t) (&((struct ncr_reg *)0)-> nc_scntl2)))) & 0x7f) << 16ul) + (((((size_t) (& ((struct ncr_reg *)0)->nc_scntl2)))) & 0x80))) | (0x04000000 ) | (((0x7f)&0xff)<<8ul)), |
3048 | 0, |
3049 | /* |
3050 | ** Terminate cycle ... |
3051 | */ |
3052 | SCR_CLR (SCR_ACK|SCR_ATN)(0x60000000 | (0x00000040|0x00000008)), |
3053 | 0, |
3054 | /* |
3055 | ** ... and wait for the disconnect. |
3056 | */ |
3057 | SCR_WAIT_DISC0x48000000, |
3058 | 0, |
3059 | }/*-------------------------< COMPLETE2 >-----------------*/,{ |
3060 | /* |
3061 | ** Save host status to header. |
3062 | */ |
3063 | SCR_STORE_REL (scr0, 4)(0xe0000000 | 0x02000000|0x10000000 | (((((((size_t) (&(( struct ncr_reg *)0)->nc_scr0)))) & 0xff) << 16ul )) | (4)), |
3064 | offsetof (struct ccb, phys.header.status)((size_t) (&((struct ccb *)0)->phys.header.status)), |
3065 | |
3066 | #ifdef SCSI_NCR_PCIQ_MAY_REORDER_WRITES |
3067 | /* |
3068 | ** Some bridges may reorder DMA writes to memory. |
3069 | ** We do not want the CPU to deal with completions
3070 | ** before all the posted writes have been flushed
3071 | ** to memory. This DUMMY READ should flush posted |
3072 | ** buffers prior to the CPU having to deal with |
3073 | ** completions. |
3074 | */ |
3075 | SCR_LOAD_REL (scr0, 4)(0xe1000000 | 0x02000000|0x10000000 | (((((((size_t) (&(( struct ncr_reg *)0)->nc_scr0)))) & 0xff) << 16ul )) | (4)), /* DUMMY READ */ |
3076 | offsetof (struct ccb, phys.header.status)((size_t) (&((struct ccb *)0)->phys.header.status)), |
3077 | #endif |
3078 | /* |
3079 | ** If command resulted in not GOOD status, |
3080 | ** call the C code if needed. |
3081 | */ |
3082 | SCR_FROM_REG (SS_REG)(0x70000000 | ((((((((size_t) (&((struct ncr_reg *)0)-> nc_scr2)))) & 0x7f) << 16ul) + (((((size_t) (&( (struct ncr_reg *)0)->nc_scr2)))) & 0x80))) | (0x02000000 ) | (((0)&0xff)<<8ul)), |
3083 | 0, |
3084 | SCR_CALL0x88080000 ^ IFFALSE (DATA (S_GOOD))(0x00080000 | ((0x00040000 | (((0x00)) & 0xff)))), |
3085 | PADDRH (bad_status)(0x80000000 | ((size_t) (&((struct scripth *)0)->bad_status ))), |
3086 | |
3087 | /* |
3088 | ** If we performed an auto-sense, call |
3089 | ** the C code to synchronize task aborts
3090 | ** with UNIT ATTENTION conditions. |
3091 | */ |
3092 | SCR_FROM_REG (HF_REG)(0x70000000 | ((((((((size_t) (&((struct ncr_reg *)0)-> nc_scr3)))) & 0x7f) << 16ul) + (((((size_t) (&( (struct ncr_reg *)0)->nc_scr3)))) & 0x80))) | (0x02000000 ) | (((0)&0xff)<<8ul)), |
3093 | 0, |
3094 | SCR_INT0x98080000 ^ IFTRUE (MASK (HF_AUTO_SENSE, HF_AUTO_SENSE))(0x00000000 | ((0x00040000 | ((((1u<<4) ^ 0xff) & 0xff ) << 8ul)|(((1u<<4)) & 0xff)))), |
3095 | SIR_AUTO_SENSE_DONE(20), |
3096 | |
3097 | }/*------------------------< DONE >-----------------*/,{ |
3098 | #ifdef SCSI_NCR_PCIQ_SYNC_ON_INTR |
3099 | /* |
3100 | ** It seems that some bridges flush everything |
3101 | ** when the INTR line is raised. For these,
3102 | ** we can just ensure that the INTR line will be |
3103 | ** raised before each completion. So, if it happens |
3104 | ** that we have been faster than the CPU, we just
3105 | ** have to synchronize with it. A dummy programmed |
3106 | ** interrupt will do the trick. |
3107 | ** Note that we overlap at most 1 IO with the CPU |
3108 | ** in this situation and that the IRQ line must not |
3109 | ** be shared. |
3110 | */ |
3111 | SCR_FROM_REG (istat)(0x70000000 | ((((((((size_t) (&((struct ncr_reg *)0)-> nc_istat)))) & 0x7f) << 16ul) + (((((size_t) (& ((struct ncr_reg *)0)->nc_istat)))) & 0x80))) | (0x02000000 ) | (((0)&0xff)<<8ul)), |
3112 | 0, |
3113 | SCR_INT0x98080000 ^ IFTRUE (MASK (INTF, INTF))(0x00000000 | ((0x00040000 | (((0x04 ^ 0xff) & 0xff) << 8ul)|((0x04) & 0xff)))), |
3114 | SIR_DUMMY_INTERRUPT(21), |
3115 | #endif |
3116 | /* |
3117 | ** Copy the DSA to the DONE QUEUE and |
3118 | ** signal completion to the host. |
3119 | ** If we are interrupted between DONE |
3120 | ** and DONE_END, we must reset, otherwise |
3121 | ** the completed CCB will be lost. |
3122 | */ |
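     | /*
     | ** In short: park the CCB's DSA in 'saved_dsa', point DSA at the
     | ** DONE queue free slot, fetch the parked value back through
     | ** SCRATCHA and store it into that slot, so the CPU can find the
     | ** completed CCB from the DONE queue.
     | */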
3123 | SCR_STORE_ABS (dsa, 4)(0xe0000000 | 0x02000000 | (((((((size_t) (&((struct ncr_reg *)0)->nc_dsa)))) & 0xff) << 16ul)) | (4)), |
3124 | PADDRH (saved_dsa)(0x80000000 | ((size_t) (&((struct scripth *)0)->saved_dsa ))), |
3125 | SCR_LOAD_ABS (dsa, 4)(0xe1000000 | 0x02000000 | (((((((size_t) (&((struct ncr_reg *)0)->nc_dsa)))) & 0xff) << 16ul)) | (4)), |
3126 | PADDRH (done_pos)(0x80000000 | ((size_t) (&((struct scripth *)0)->done_pos ))), |
3127 | SCR_LOAD_ABS (scratcha, 4)(0xe1000000 | 0x02000000 | (((((((size_t) (&((struct ncr_reg *)0)->nc_scratcha)))) & 0xff) << 16ul)) | (4)), |
3128 | PADDRH (saved_dsa)(0x80000000 | ((size_t) (&((struct scripth *)0)->saved_dsa ))), |
3129 | SCR_STORE_REL (scratcha, 4)(0xe0000000 | 0x02000000|0x10000000 | (((((((size_t) (&(( struct ncr_reg *)0)->nc_scratcha)))) & 0xff) << 16ul )) | (4)), |
3130 | 0, |
3131 | /* |
3132 | ** The instruction below reads the DONE QUEUE next |
3133 | ** free position from memory. |
3134 | ** In addition it ensures that all PCI posted writes |
3135 | ** are flushed and so the DSA value of the done |
3136 | ** CCB is visible by the CPU before INTFLY is raised. |
3137 | */ |
3138 | SCR_LOAD_REL (temp, 4)(0xe1000000 | 0x02000000|0x10000000 | (((((((size_t) (&(( struct ncr_reg *)0)->nc_temp)))) & 0xff) << 16ul )) | (4)), |
3139 | 4, |
3140 | SCR_INT_FLY0x98180000, |
3141 | 0, |
3142 | SCR_STORE_ABS (temp, 4)(0xe0000000 | 0x02000000 | (((((((size_t) (&((struct ncr_reg *)0)->nc_temp)))) & 0xff) << 16ul)) | (4)), |
3143 | PADDRH (done_pos)(0x80000000 | ((size_t) (&((struct scripth *)0)->done_pos ))), |
3144 | }/*------------------------< DONE_END >-----------------*/,{ |
3145 | SCR_JUMP0x80080000, |
3146 | PADDR (start)(0x50000000 | ((size_t) (&((struct script *)0)->start) )), |
3147 | |
3148 | }/*-------------------------< SAVE_DP >------------------*/,{ |
3149 | /* |
3150 | ** Clear ACK immediately. |
3151 | ** No need to delay it. |
3152 | */ |
3153 | SCR_CLR (SCR_ACK)(0x60000000 | (0x00000040)), |
3154 | 0, |
3155 | /* |
3156 | ** Keep track that we received a SAVE DP, so
3157 | ** we will switch to the other PM context |
3158 | ** on the next PM since the DP may point |
3159 | ** to the current PM context. |
3160 | */ |
3161 | SCR_REG_REG (HF_REG, SCR_OR, HF_DP_SAVED)(0x78000000 | ((((((((size_t) (&((struct ncr_reg *)0)-> nc_scr3)))) & 0x7f) << 16ul) + (((((size_t) (&( (struct ncr_reg *)0)->nc_scr3)))) & 0x80))) | (0x02000000 ) | ((((1u<<3))&0xff)<<8ul)), |
3162 | 0, |
3163 | /* |
3164 | ** SAVE_DP message: |
3165 | ** Copy the data pointer to SAVEP in header. |
3166 | */ |
3167 | SCR_STORE_REL (temp, 4)(0xe0000000 | 0x02000000|0x10000000 | (((((((size_t) (&(( struct ncr_reg *)0)->nc_temp)))) & 0xff) << 16ul )) | (4)), |
3168 | offsetof (struct ccb, phys.header.savep)((size_t) (&((struct ccb *)0)->phys.header.savep)), |
3169 | SCR_JUMP0x80080000, |
3170 | PADDR (dispatch)(0x50000000 | ((size_t) (&((struct script *)0)->dispatch ))), |
3171 | }/*-------------------------< RESTORE_DP >---------------*/,{ |
3172 | /* |
3173 | ** RESTORE_DP message: |
3174 | ** Copy SAVEP in header to actual data pointer. |
3175 | */ |
3176 | SCR_LOAD_REL (temp, 4)(0xe1000000 | 0x02000000|0x10000000 | (((((((size_t) (&(( struct ncr_reg *)0)->nc_temp)))) & 0xff) << 16ul )) | (4)), |
3177 | offsetof (struct ccb, phys.header.savep)((size_t) (&((struct ccb *)0)->phys.header.savep)), |
3178 | SCR_JUMP0x80080000, |
3179 | PADDR (clrack)(0x50000000 | ((size_t) (&((struct script *)0)->clrack ))), |
3180 | |
3181 | }/*-------------------------< DISCONNECT >---------------*/,{ |
3182 | /* |
3183 | ** DISCONNECTing ... |
3184 | ** |
3185 | ** disable the "unexpected disconnect" feature, |
3186 | ** and remove the ACK signal. |
3187 | */ |
3188 | SCR_REG_REG (scntl2, SCR_AND, 0x7f)(0x78000000 | ((((((((size_t) (&((struct ncr_reg *)0)-> nc_scntl2)))) & 0x7f) << 16ul) + (((((size_t) (& ((struct ncr_reg *)0)->nc_scntl2)))) & 0x80))) | (0x04000000 ) | (((0x7f)&0xff)<<8ul)), |
3189 | 0, |
3190 | SCR_CLR (SCR_ACK|SCR_ATN)(0x60000000 | (0x00000040|0x00000008)), |
3191 | 0, |
3192 | /* |
3193 | ** Wait for the disconnect. |
3194 | */ |
3195 | SCR_WAIT_DISC0x48000000, |
3196 | 0, |
3197 | /* |
3198 | ** Status is: DISCONNECTED. |
3199 | */ |
3200 | SCR_LOAD_REG (HS_REG, HS_DISCONNECT)(0x78000000 | ((((((((size_t) (&((struct ncr_reg *)0)-> nc_scr1)))) & 0x7f) << 16ul) + (((((size_t) (&( (struct ncr_reg *)0)->nc_scr1)))) & 0x80))) | (0x00000000 ) | ((((3))&0xff)<<8ul)), |
3201 | 0, |
3202 | /* |
3203 | ** Save host status to header. |
3204 | */ |
3205 | SCR_STORE_REL (scr0, 4)(0xe0000000 | 0x02000000|0x10000000 | (((((((size_t) (&(( struct ncr_reg *)0)->nc_scr0)))) & 0xff) << 16ul )) | (4)), |
3206 | offsetof (struct ccb, phys.header.status)((size_t) (&((struct ccb *)0)->phys.header.status)), |
3207 | /* |
3208 | ** If QUIRK_AUTOSAVE is set, |
3209 | ** do an "save pointer" operation. |
3210 | */ |
3211 | SCR_FROM_REG (QU_REG)(0x70000000 | ((((((((size_t) (&((struct ncr_reg *)0)-> nc_scr0)))) & 0x7f) << 16ul) + (((((size_t) (&( (struct ncr_reg *)0)->nc_scr0)))) & 0x80))) | (0x02000000 ) | (((0)&0xff)<<8ul)), |
3212 | 0, |
3213 | SCR_JUMP0x80080000 ^ IFFALSE (MASK (QUIRK_AUTOSAVE, QUIRK_AUTOSAVE))(0x00080000 | ((0x00040000 | ((((0x01) ^ 0xff) & 0xff) << 8ul)|(((0x01)) & 0xff)))), |
3214 | PADDR (start)(0x50000000 | ((size_t) (&((struct script *)0)->start) )), |
3215 | /* |
3216 | ** like SAVE_DP message: |
3217 | ** Remember we saved the data pointer. |
3218 | ** Copy data pointer to SAVEP in header. |
3219 | */ |
3220 | SCR_REG_REG (HF_REG, SCR_OR, HF_DP_SAVED)(0x78000000 | ((((((((size_t) (&((struct ncr_reg *)0)-> nc_scr3)))) & 0x7f) << 16ul) + (((((size_t) (&( (struct ncr_reg *)0)->nc_scr3)))) & 0x80))) | (0x02000000 ) | ((((1u<<3))&0xff)<<8ul)), |
3221 | 0, |
3222 | SCR_STORE_REL (temp, 4)(0xe0000000 | 0x02000000|0x10000000 | (((((((size_t) (&(( struct ncr_reg *)0)->nc_temp)))) & 0xff) << 16ul )) | (4)), |
3223 | offsetof (struct ccb, phys.header.savep)((size_t) (&((struct ccb *)0)->phys.header.savep)), |
3224 | SCR_JUMP0x80080000, |
3225 | PADDR (start)(0x50000000 | ((size_t) (&((struct script *)0)->start) )), |
3226 | |
3227 | }/*-------------------------< IDLE >------------------------*/,{ |
3228 | /* |
3229 | ** Nothing to do? |
3230 | ** Wait for reselect. |
3231 | ** This NOP will be patched with LED OFF |
3232 | ** SCR_REG_REG (gpreg, SCR_OR, 0x01) |
3233 | */ |
3234 | SCR_NO_OP0x80000000, |
3235 | 0, |
3236 | #ifdef SCSI_NCR_IARB_SUPPORT |
3237 | SCR_JUMPR0x80880000, |
3238 | 8, |
3239 | #endif |
3240 | }/*-------------------------< UNGETJOB >-----------------*/,{ |
3241 | #ifdef SCSI_NCR_IARB_SUPPORT |
3242 | /* |
3243 | ** Set IMMEDIATE ARBITRATION, for the next time. |
3244 | ** This will give us better chance to win arbitration |
3245 | ** for the job we just wanted to do. |
3246 | */ |
3247 | SCR_REG_REG (scntl1, SCR_OR, IARB)(0x78000000 | ((((((((size_t) (&((struct ncr_reg *)0)-> nc_scntl1)))) & 0x7f) << 16ul) + (((((size_t) (& ((struct ncr_reg *)0)->nc_scntl1)))) & 0x80))) | (0x02000000 ) | (((0x02)&0xff)<<8ul)), |
3248 | 0, |
3249 | #endif |
3250 | /* |
3251 | ** We are not able to restart the SCRIPTS if we are |
3252 | ** interrupted and these instructions haven't been
3253 | ** all executed. BTW, this is very unlikely to |
3254 | ** happen, but we check that from the C code. |
3255 | */ |
3256 | SCR_LOAD_REG (dsa, 0xff)(0x78000000 | ((((((((size_t) (&((struct ncr_reg *)0)-> nc_dsa)))) & 0x7f) << 16ul) + (((((size_t) (&(( struct ncr_reg *)0)->nc_dsa)))) & 0x80))) | (0x00000000 ) | (((0xff)&0xff)<<8ul)), |
3257 | 0, |
3258 | SCR_STORE_ABS (scratcha, 4)(0xe0000000 | 0x02000000 | (((((((size_t) (&((struct ncr_reg *)0)->nc_scratcha)))) & 0xff) << 16ul)) | (4)), |
3259 | PADDRH (startpos)(0x80000000 | ((size_t) (&((struct scripth *)0)->startpos ))), |
3260 | }/*-------------------------< RESELECT >--------------------*/,{ |
3261 | /* |
3262 | ** make the host status invalid. |
3263 | */ |
3264 | SCR_CLR (SCR_TRG)(0x60000000 | (0x00000200)), |
3265 | 0, |
3266 | /* |
3267 | ** Sleep waiting for a reselection. |
3268 | ** If SIGP is set, special treatment. |
3269 | ** |
3270 | ** Zu allem bereit .. |
3271 | */ |
3272 | SCR_WAIT_RESEL0x50000000, |
3273 | PADDR(start)(0x50000000 | ((size_t) (&((struct script *)0)->start) )), |
3274 | }/*-------------------------< RESELECTED >------------------*/,{ |
3275 | /* |
3276 | ** This NOP will be patched with LED ON |
3277 | ** SCR_REG_REG (gpreg, SCR_AND, 0xfe) |
3278 | */ |
3279 | SCR_NO_OP0x80000000, |
3280 | 0, |
3281 | /* |
3282 | ** load the target id into the sdid |
3283 | */ |
3284 | SCR_REG_SFBR (ssid, SCR_AND, 0x8F)(0x70000000 | ((((((((size_t) (&((struct ncr_reg *)0)-> nc_ssid)))) & 0x7f) << 16ul) + (((((size_t) (&( (struct ncr_reg *)0)->nc_ssid)))) & 0x80))) | (0x04000000 ) | (((0x8F)&0xff)<<8ul)), |
3285 | 0, |
3286 | SCR_TO_REG (sdid)(0x68000000 | ((((((((size_t) (&((struct ncr_reg *)0)-> nc_sdid)))) & 0x7f) << 16ul) + (((((size_t) (&( (struct ncr_reg *)0)->nc_sdid)))) & 0x80))) | (0x02000000 ) | (((0)&0xff)<<8ul)), |
3287 | 0, |
3288 | /* |
3289 | ** load the target control block address |
3290 | */ |
3291 | SCR_LOAD_ABS (dsa, 4)(0xe1000000 | 0x02000000 | (((((((size_t) (&((struct ncr_reg *)0)->nc_dsa)))) & 0xff) << 16ul)) | (4)), |
3292 | PADDRH (targtbl)(0x80000000 | ((size_t) (&((struct scripth *)0)->targtbl ))), |
3293 | SCR_SFBR_REG (dsa, SCR_SHL, 0)(0x68000000 | ((((((((size_t) (&((struct ncr_reg *)0)-> nc_dsa)))) & 0x7f) << 16ul) + (((((size_t) (&(( struct ncr_reg *)0)->nc_dsa)))) & 0x80))) | (0x01000000 ) | (((0)&0xff)<<8ul)), |
3294 | 0, |
3295 | SCR_REG_REG (dsa, SCR_SHL, 0)(0x78000000 | ((((((((size_t) (&((struct ncr_reg *)0)-> nc_dsa)))) & 0x7f) << 16ul) + (((((size_t) (&(( struct ncr_reg *)0)->nc_dsa)))) & 0x80))) | (0x01000000 ) | (((0)&0xff)<<8ul)), |
3296 | 0, |
3297 | SCR_REG_REG (dsa, SCR_AND, 0x3c)(0x78000000 | ((((((((size_t) (&((struct ncr_reg *)0)-> nc_dsa)))) & 0x7f) << 16ul) + (((((size_t) (&(( struct ncr_reg *)0)->nc_dsa)))) & 0x80))) | (0x04000000 ) | (((0x3c)&0xff)<<8ul)), |
3298 | 0, |
3299 | SCR_LOAD_REL (dsa, 4)(0xe1000000 | 0x02000000|0x10000000 | (((((((size_t) (&(( struct ncr_reg *)0)->nc_dsa)))) & 0xff) << 16ul) ) | (4)), |
3300 | 0, |
3301 | /* |
3302 | ** Load the synchronous transfer registers. |
3303 | */ |
3304 | SCR_LOAD_REL (scntl3, 1)(0xe1000000 | 0x02000000|0x10000000 | (((((((size_t) (&(( struct ncr_reg *)0)->nc_scntl3)))) & 0xff) << 16ul )) | (1)), |
3305 | offsetof(struct tcb, wval)((size_t) (&((struct tcb *)0)->wval)), |
3306 | SCR_LOAD_REL (sxfer, 1)(0xe1000000 | 0x02000000|0x10000000 | (((((((size_t) (&(( struct ncr_reg *)0)->nc_sxfer)))) & 0xff) << 16ul )) | (1)), |
3307 | offsetof(struct tcb, sval)((size_t) (&((struct tcb *)0)->sval)), |
3308 | }/*-------------------------< RESEL_SCNTL4 >------------------*/,{ |
3309 | /* |
3310 | ** Write with uval value. Patch if device |
3311 | ** does not support Ultra3. |
3312 | ** |
3313 | ** SCR_LOAD_REL (scntl4, 1), |
3314 | ** offsetof(struct tcb, uval), |
3315 | */ |
3316 | |
3317 | SCR_NO_OP0x80000000, |
3318 | 0, |
3319 | /* |
3320 | * We expect MESSAGE IN phase. |
3321 | * If not, get help from the C code. |
3322 | */ |
3323 | SCR_INT0x98080000 ^ IFFALSE (WHEN (SCR_MSG_IN))(0x00080000 | ((0x00030000 | (0x07000000)))), |
3324 | SIR_RESEL_NO_MSG_IN(11), |
3325 | SCR_MOVE_ABS (1)((0x00000000 | 0x08000000) | (1)) ^ SCR_MSG_IN0x07000000, |
3326 | NADDR (msgin)(0x40000000 | ((size_t) (&((struct ncb *)0)->msgin))), |
3327 | |
3328 | /* |
3329 | * If IDENTIFY LUN #0, use a faster path |
3330 | * to find the LCB structure. |
3331 | */ |
3332 | SCR_JUMPR0x80880000 ^ IFTRUE (MASK (0x80, 0xbf))(0x00000000 | ((0x00040000 | (((0xbf ^ 0xff) & 0xff) << 8ul)|((0x80) & 0xff)))), |
3333 | 56, |
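     | /*
     | ** (Each SCRIPTS instruction here is 8 bytes, so this 56 byte
     | ** forward jump skips the seven-instruction generic LUN lookup
     | ** and lands on the b_lun0 load below.)
     | */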
3334 | /* |
3335 | * If message isn't an IDENTIFY, |
3336 | * tell the C code about it.
3337 | */ |
3338 | SCR_INT0x98080000 ^ IFFALSE (MASK (0x80, 0x80))(0x00080000 | ((0x00040000 | (((0x80 ^ 0xff) & 0xff) << 8ul)|((0x80) & 0xff)))), |
3339 | SIR_RESEL_NO_IDENTIFY(12), |
3340 | /* |
3341 | * It is an IDENTIFY message, |
3342 | * Load the LUN control block address. |
3343 | */ |
3344 | SCR_LOAD_REL (dsa, 4)(0xe1000000 | 0x02000000|0x10000000 | (((((((size_t) (&(( struct ncr_reg *)0)->nc_dsa)))) & 0xff) << 16ul) ) | (4)), |
3345 | offsetof(struct tcb, b_luntbl)((size_t) (&((struct tcb *)0)->b_luntbl)), |
3346 | SCR_SFBR_REG (dsa, SCR_SHL, 0)(0x68000000 | ((((((((size_t) (&((struct ncr_reg *)0)-> nc_dsa)))) & 0x7f) << 16ul) + (((((size_t) (&(( struct ncr_reg *)0)->nc_dsa)))) & 0x80))) | (0x01000000 ) | (((0)&0xff)<<8ul)), |
3347 | 0, |
3348 | SCR_REG_REG (dsa, SCR_SHL, 0)(0x78000000 | ((((((((size_t) (&((struct ncr_reg *)0)-> nc_dsa)))) & 0x7f) << 16ul) + (((((size_t) (&(( struct ncr_reg *)0)->nc_dsa)))) & 0x80))) | (0x01000000 ) | (((0)&0xff)<<8ul)), |
3349 | 0, |
3350 | SCR_REG_REG (dsa, SCR_AND, 0xfc)(0x78000000 | ((((((((size_t) (&((struct ncr_reg *)0)-> nc_dsa)))) & 0x7f) << 16ul) + (((((size_t) (&(( struct ncr_reg *)0)->nc_dsa)))) & 0x80))) | (0x04000000 ) | (((0xfc)&0xff)<<8ul)), |
3351 | 0, |
3352 | SCR_LOAD_REL (dsa, 4)(0xe1000000 | 0x02000000|0x10000000 | (((((((size_t) (&(( struct ncr_reg *)0)->nc_dsa)))) & 0xff) << 16ul) ) | (4)), |
3353 | 0, |
3354 | SCR_JUMPR0x80880000, |
3355 | 8, |
3356 | /* |
3357 | ** LUN 0 special case (but the usual one :))
3358 | */ |
3359 | SCR_LOAD_REL (dsa, 4)(0xe1000000 | 0x02000000|0x10000000 | (((((((size_t) (&(( struct ncr_reg *)0)->nc_dsa)))) & 0xff) << 16ul) ) | (4)), |
3360 | offsetof(struct tcb, b_lun0)((size_t) (&((struct tcb *)0)->b_lun0)), |
3361 | |
3362 | /* |
3363 | ** Load the reselect task action for this LUN. |
3364 | ** Load the tasks DSA array for this LUN. |
3365 | ** Call the action. |
3366 | */ |
3367 | SCR_LOAD_REL (temp, 4)(0xe1000000 | 0x02000000|0x10000000 | (((((((size_t) (&(( struct ncr_reg *)0)->nc_temp)))) & 0xff) << 16ul )) | (4)), |
3368 | offsetof(struct lcb, resel_task)((size_t) (&((struct lcb *)0)->resel_task)), |
3369 | SCR_LOAD_REL (dsa, 4)(0xe1000000 | 0x02000000|0x10000000 | (((((((size_t) (&(( struct ncr_reg *)0)->nc_dsa)))) & 0xff) << 16ul) ) | (4)), |
3370 | offsetof(struct lcb, b_tasktbl)((size_t) (&((struct lcb *)0)->b_tasktbl)), |
3371 | SCR_RETURN0x90080000, |
3372 | 0, |
3373 | }/*-------------------------< RESEL_TAG >-------------------*/,{ |
3374 | /* |
3375 | ** ACK the IDENTIFY or TAG previously received |
3376 | */ |
3377 | |
3378 | SCR_CLR (SCR_ACK)(0x60000000 | (0x00000040)), |
3379 | 0, |
3380 | /* |
3381 | ** Read IDENTIFY + SIMPLE + TAG using a single MOVE. |
3382 | ** Aggressive optimization, isn't it?
3383 | ** No need to test the SIMPLE TAG message, since the |
3384 | ** driver only supports conformant devices for tags. ;-) |
3385 | */ |
3386 | SCR_MOVE_ABS (2)((0x00000000 | 0x08000000) | (2)) ^ SCR_MSG_IN0x07000000, |
3387 | NADDR (msgin)(0x40000000 | ((size_t) (&((struct ncb *)0)->msgin))), |
3388 | /* |
3389 | ** Read the TAG from the SIDL. |
3390 | ** Still an aggressive optimization. ;-) |
3391 | ** Compute the CCB indirect jump address which |
3392 | ** is (#TAG*2 & 0xfc) due to tag numbering using |
3393 | ** 1,3,5..MAXTAGS*2+1 actual values. |
3394 | */ |
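     | /*
     | ** A worked example: tag value 5 read from SIDL, shifted left,
     | ** gives 0x0a; masked with 0xfc further down, this yields offset
     | ** 0x08, i.e. the third 4-byte entry of the per-LUN task table
     | ** (tags 1, 3, 5, ... map to entries 0, 1, 2, ...).
     | */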
3395 | SCR_REG_SFBR (sidl, SCR_SHL, 0)(0x70000000 | ((((((((size_t) (&((struct ncr_reg *)0)-> nc_sidl)))) & 0x7f) << 16ul) + (((((size_t) (&( (struct ncr_reg *)0)->nc_sidl)))) & 0x80))) | (0x01000000 ) | (((0)&0xff)<<8ul)), |
3396 | 0, |
3397 | #if MAX_TASKS(256/4)*4 > 512 |
3398 | SCR_JUMPR0x80880000 ^ IFFALSE (CARRYSET)(0x00080000 | ((0x00200000))), |
3399 | 8, |
3400 | SCR_REG_REG (dsa1, SCR_OR, 2)(0x78000000 | ((((((((size_t) (&((struct ncr_reg *)0)-> nc_dsa1)))) & 0x7f) << 16ul) + (((((size_t) (&( (struct ncr_reg *)0)->nc_dsa1)))) & 0x80))) | (0x02000000 ) | (((2)&0xff)<<8ul)), |
3401 | 0, |
3402 | SCR_REG_REG (sfbr, SCR_SHL, 0)(0x78000000 | ((((((((size_t) (&((struct ncr_reg *)0)-> nc_sfbr)))) & 0x7f) << 16ul) + (((((size_t) (&( (struct ncr_reg *)0)->nc_sfbr)))) & 0x80))) | (0x01000000 ) | (((0)&0xff)<<8ul)), |
3403 | 0, |
3404 | SCR_JUMPR0x80880000 ^ IFFALSE (CARRYSET)(0x00080000 | ((0x00200000))), |
3405 | 8, |
3406 | SCR_REG_REG (dsa1, SCR_OR, 1)(0x78000000 | ((((((((size_t) (&((struct ncr_reg *)0)-> nc_dsa1)))) & 0x7f) << 16ul) + (((((size_t) (&( (struct ncr_reg *)0)->nc_dsa1)))) & 0x80))) | (0x02000000 ) | (((1)&0xff)<<8ul)), |
3407 | 0, |
3408 | #elif MAX_TASKS(256/4)*4 > 256 |
3409 | SCR_JUMPR0x80880000 ^ IFFALSE (CARRYSET)(0x00080000 | ((0x00200000))), |
3410 | 8, |
3411 | SCR_REG_REG (dsa1, SCR_OR, 1)(0x78000000 | ((((((((size_t) (&((struct ncr_reg *)0)-> nc_dsa1)))) & 0x7f) << 16ul) + (((((size_t) (&( (struct ncr_reg *)0)->nc_dsa1)))) & 0x80))) | (0x02000000 ) | (((1)&0xff)<<8ul)), |
3412 | 0, |
3413 | #endif |
3414 | /* |
3415 | ** Retrieve the DSA of this task. |
3416 | ** JUMP indirectly to the restart point of the CCB. |
3417 | */ |
3418 | SCR_SFBR_REG (dsa, SCR_AND, 0xfc)(0x68000000 | ((((((((size_t) (&((struct ncr_reg *)0)-> nc_dsa)))) & 0x7f) << 16ul) + (((((size_t) (&(( struct ncr_reg *)0)->nc_dsa)))) & 0x80))) | (0x04000000 ) | (((0xfc)&0xff)<<8ul)), |
3419 | 0, |
3420 | }/*-------------------------< RESEL_GO >-------------------*/,{ |
3421 | SCR_LOAD_REL (dsa, 4)(0xe1000000 | 0x02000000|0x10000000 | (((((((size_t) (&(( struct ncr_reg *)0)->nc_dsa)))) & 0xff) << 16ul) ) | (4)), |
3422 | 0, |
3423 | SCR_LOAD_REL (temp, 4)(0xe1000000 | 0x02000000|0x10000000 | (((((((size_t) (&(( struct ncr_reg *)0)->nc_temp)))) & 0xff) << 16ul )) | (4)), |
3424 | offsetof(struct ccb, phys.header.go.restart)((size_t) (&((struct ccb *)0)->phys.header.go.restart) ), |
3425 | SCR_RETURN0x90080000, |
3426 | 0, |
3427 | /* In normal situations we branch to RESEL_DSA */ |
3428 | }/*-------------------------< RESEL_NOTAG >-------------------*/,{ |
3429 | /* |
3430 | ** JUMP indirectly to the restart point of the CCB. |
3431 | */ |
3432 | SCR_JUMP0x80080000, |
3433 | PADDR (resel_go)(0x50000000 | ((size_t) (&((struct script *)0)->resel_go ))), |
3434 | |
3435 | }/*-------------------------< RESEL_DSA >-------------------*/,{ |
3436 | /* |
3437 | ** Ack the IDENTIFY or TAG previously received. |
3438 | */ |
3439 | SCR_CLR (SCR_ACK)(0x60000000 | (0x00000040)), |
3440 | 0, |
3441 | /* |
3442 | ** load the savep (saved pointer) into |
3443 | ** the actual data pointer. |
3444 | */ |
3445 | SCR_LOAD_REL (temp, 4)(0xe1000000 | 0x02000000|0x10000000 | (((((((size_t) (&(( struct ncr_reg *)0)->nc_temp)))) & 0xff) << 16ul )) | (4)), |
3446 | offsetof (struct ccb, phys.header.savep)((size_t) (&((struct ccb *)0)->phys.header.savep)), |
3447 | /* |
3448 | ** Initialize the status registers |
3449 | */ |
3450 | SCR_LOAD_REL (scr0, 4)(0xe1000000 | 0x02000000|0x10000000 | (((((((size_t) (&(( struct ncr_reg *)0)->nc_scr0)))) & 0xff) << 16ul )) | (4)), |
3451 | offsetof (struct ccb, phys.header.status)((size_t) (&((struct ccb *)0)->phys.header.status)), |
3452 | /* |
3453 | ** Jump to dispatcher. |
3454 | */ |
3455 | SCR_JUMP0x80080000, |
3456 | PADDR (dispatch)(0x50000000 | ((size_t) (&((struct script *)0)->dispatch ))), |
3457 | |
3458 | }/*-------------------------< DATA_IN >--------------------*/,{ |
3459 | /* |
3460 | ** Because the size depends on the |
3461 | ** #define MAX_SCATTER parameter, |
3462 | ** it is filled in at runtime. |
3463 | ** |
3464 | ** ##===========< i=0; i<MAX_SCATTER >========= |
3465 | ** || SCR_CHMOV_TBL ^ SCR_DATA_IN, |
3466 | ** || offsetof (struct dsb, data[ i]), |
3467 | ** ##========================================== |
3468 | ** |
3469 | **--------------------------------------------------------- |
3470 | */ |
3471 | 0 |
3472 | }/*-------------------------< DATA_IN2 >-------------------*/,{ |
3473 | SCR_CALL0x88080000, |
3474 | PADDR (datai_done)(0x50000000 | ((size_t) (&((struct script *)0)->datai_done ))), |
3475 | SCR_JUMP0x80080000, |
3476 | PADDRH (data_ovrun)(0x80000000 | ((size_t) (&((struct scripth *)0)->data_ovrun ))), |
3477 | }/*-------------------------< DATA_OUT >--------------------*/,{ |
3478 | /* |
3479 | ** Because the size depends on the |
3480 | ** #define MAX_SCATTER parameter, |
3481 | ** it is filled in at runtime. |
3482 | ** |
3483 | ** ##===========< i=0; i<MAX_SCATTER >========= |
3484 | ** || SCR_CHMOV_TBL ^ SCR_DATA_OUT, |
3485 | ** || offsetof (struct dsb, data[ i]), |
3486 | ** ##========================================== |
3487 | ** |
3488 | **--------------------------------------------------------- |
3489 | */ |
3490 | 0 |
3491 | }/*-------------------------< DATA_OUT2 >-------------------*/,{ |
3492 | SCR_CALL0x88080000, |
3493 | PADDR (datao_done)(0x50000000 | ((size_t) (&((struct script *)0)->datao_done ))), |
3494 | SCR_JUMP0x80080000, |
3495 | PADDRH (data_ovrun)(0x80000000 | ((size_t) (&((struct scripth *)0)->data_ovrun ))), |
3496 | |
3497 | }/*-------------------------< PM0_DATA >--------------------*/,{ |
3498 | /* |
3499 | ** Read our host flags to SFBR, so we will be able |
3500 | ** to check against the data direction we expect. |
3501 | */ |
3502 | SCR_FROM_REG (HF_REG)(0x70000000 | ((((((((size_t) (&((struct ncr_reg *)0)-> nc_scr3)))) & 0x7f) << 16ul) + (((((size_t) (&( (struct ncr_reg *)0)->nc_scr3)))) & 0x80))) | (0x02000000 ) | (((0)&0xff)<<8ul)), |
3503 | 0, |
3504 | /* |
3505 | ** Check against actual DATA PHASE. |
3506 | */ |
3507 | SCR_JUMP0x80080000 ^ IFFALSE (WHEN (SCR_DATA_IN))(0x00080000 | ((0x00030000 | (0x01000000)))), |
3508 | PADDR (pm0_data_out)(0x50000000 | ((size_t) (&((struct script *)0)->pm0_data_out ))), |
3509 | /* |
3510 | ** Actual phase is DATA IN. |
3511 | ** Check against expected direction. |
3512 | */ |
3513 | SCR_JUMP0x80080000 ^ IFFALSE (MASK (HF_DATA_IN, HF_DATA_IN))(0x00080000 | ((0x00040000 | ((((1u<<5) ^ 0xff) & 0xff ) << 8ul)|(((1u<<5)) & 0xff)))), |
3514 | PADDRH (data_ovrun)(0x80000000 | ((size_t) (&((struct scripth *)0)->data_ovrun ))), |
3515 | /* |
3516 | ** Keep track that we are moving data from the
3517 | ** PM0 DATA mini-script. |
3518 | */ |
3519 | SCR_REG_REG (HF_REG, SCR_OR, HF_IN_PM0)(0x78000000 | ((((((((size_t) (&((struct ncr_reg *)0)-> nc_scr3)))) & 0x7f) << 16ul) + (((((size_t) (&( (struct ncr_reg *)0)->nc_scr3)))) & 0x80))) | (0x02000000 ) | (((1u)&0xff)<<8ul)), |
3520 | 0, |
3521 | /* |
3522 | ** Move the data to memory. |
3523 | */ |
3524 | SCR_CHMOV_TBL(0x10000000) ^ SCR_DATA_IN0x01000000, |
3525 | offsetof (struct ccb, phys.pm0.sg)((size_t) (&((struct ccb *)0)->phys.pm0.sg)), |
3526 | SCR_JUMP0x80080000, |
3527 | PADDR (pm0_data_end)(0x50000000 | ((size_t) (&((struct script *)0)->pm0_data_end ))), |
3528 | }/*-------------------------< PM0_DATA_OUT >----------------*/,{ |
3529 | /* |
3530 | ** Actual phase is DATA OUT. |
3531 | ** Check against expected direction. |
3532 | */ |
3533 | SCR_JUMP0x80080000 ^ IFTRUE (MASK (HF_DATA_IN, HF_DATA_IN))(0x00000000 | ((0x00040000 | ((((1u<<5) ^ 0xff) & 0xff ) << 8ul)|(((1u<<5)) & 0xff)))), |
3534 | PADDRH (data_ovrun)(0x80000000 | ((size_t) (&((struct scripth *)0)->data_ovrun ))), |
3535 | /* |
3536 | ** Keep track that we are moving data from the
3537 | ** PM0 DATA mini-script. |
3538 | */ |
3539 | SCR_REG_REG (HF_REG, SCR_OR, HF_IN_PM0)(0x78000000 | ((((((((size_t) (&((struct ncr_reg *)0)-> nc_scr3)))) & 0x7f) << 16ul) + (((((size_t) (&( (struct ncr_reg *)0)->nc_scr3)))) & 0x80))) | (0x02000000 ) | (((1u)&0xff)<<8ul)), |
3540 | 0, |
3541 | /* |
3542 | ** Move the data from memory. |
3543 | */ |
3544 | SCR_CHMOV_TBL(0x10000000) ^ SCR_DATA_OUT0x00000000, |
3545 | offsetof (struct ccb, phys.pm0.sg)((size_t) (&((struct ccb *)0)->phys.pm0.sg)), |
3546 | }/*-------------------------< PM0_DATA_END >----------------*/,{ |
3547 | /* |
3548 | ** Clear the flag that told us we were moving
3549 | ** data from the PM0 DATA mini-script. |
3550 | */ |
3551 | SCR_REG_REG (HF_REG, SCR_AND, (~HF_IN_PM0))(0x78000000 | ((((((((size_t) (&((struct ncr_reg *)0)-> nc_scr3)))) & 0x7f) << 16ul) + (((((size_t) (&( (struct ncr_reg *)0)->nc_scr3)))) & 0x80))) | (0x04000000 ) | ((((~1u))&0xff)<<8ul)), |
3552 | 0, |
3553 | /* |
3554 | ** Return to the previous DATA script which |
3555 | ** is guaranteed by design (if no bug) to be |
3556 | ** the main DATA script for this transfer. |
3557 | */ |
3558 | SCR_LOAD_REL (temp, 4)(0xe1000000 | 0x02000000|0x10000000 | (((((((size_t) (&(( struct ncr_reg *)0)->nc_temp)))) & 0xff) << 16ul )) | (4)), |
3559 | offsetof (struct ccb, phys.pm0.ret)((size_t) (&((struct ccb *)0)->phys.pm0.ret)), |
3560 | SCR_RETURN0x90080000, |
3561 | 0, |
3562 | }/*-------------------------< PM1_DATA >--------------------*/,{ |
3563 | /* |
3564 | ** Read our host flags to SFBR, so we will be able |
3565 | ** to check against the data direction we expect. |
3566 | */ |
3567 | SCR_FROM_REG (HF_REG)(0x70000000 | ((((((((size_t) (&((struct ncr_reg *)0)-> nc_scr3)))) & 0x7f) << 16ul) + (((((size_t) (&( (struct ncr_reg *)0)->nc_scr3)))) & 0x80))) | (0x02000000 ) | (((0)&0xff)<<8ul)), |
3568 | 0, |
3569 | /* |
3570 | ** Check against actual DATA PHASE. |
3571 | */ |
3572 | SCR_JUMP0x80080000 ^ IFFALSE (WHEN (SCR_DATA_IN))(0x00080000 | ((0x00030000 | (0x01000000)))), |
3573 | PADDR (pm1_data_out)(0x50000000 | ((size_t) (&((struct script *)0)->pm1_data_out ))), |
3574 | /* |
3575 | ** Actual phase is DATA IN. |
3576 | ** Check against expected direction. |
3577 | */ |
3578 | SCR_JUMP0x80080000 ^ IFFALSE (MASK (HF_DATA_IN, HF_DATA_IN))(0x00080000 | ((0x00040000 | ((((1u<<5) ^ 0xff) & 0xff ) << 8ul)|(((1u<<5)) & 0xff)))), |
3579 | PADDRH (data_ovrun)(0x80000000 | ((size_t) (&((struct scripth *)0)->data_ovrun ))), |
3580 | /* |
3581 | ** Keep track that we are moving data from the
3582 | ** PM1 DATA mini-script. |
3583 | */ |
3584 | SCR_REG_REG (HF_REG, SCR_OR, HF_IN_PM1)(0x78000000 | ((((((((size_t) (&((struct ncr_reg *)0)-> nc_scr3)))) & 0x7f) << 16ul) + (((((size_t) (&( (struct ncr_reg *)0)->nc_scr3)))) & 0x80))) | (0x02000000 ) | ((((1u<<1))&0xff)<<8ul)), |
3585 | 0, |
3586 | /* |
3587 | ** Move the data to memory. |
3588 | */ |
3589 | SCR_CHMOV_TBL(0x10000000) ^ SCR_DATA_IN0x01000000, |
3590 | offsetof (struct ccb, phys.pm1.sg)((size_t) (&((struct ccb *)0)->phys.pm1.sg)), |
3591 | SCR_JUMP0x80080000, |
3592 | PADDR (pm1_data_end)(0x50000000 | ((size_t) (&((struct script *)0)->pm1_data_end ))), |
3593 | }/*-------------------------< PM1_DATA_OUT >----------------*/,{ |
3594 | /* |
3595 | ** Actual phase is DATA OUT. |
3596 | ** Check against expected direction. |
3597 | */ |
3598 | SCR_JUMP0x80080000 ^ IFTRUE (MASK (HF_DATA_IN, HF_DATA_IN))(0x00000000 | ((0x00040000 | ((((1u<<5) ^ 0xff) & 0xff ) << 8ul)|(((1u<<5)) & 0xff)))), |
3599 | PADDRH (data_ovrun)(0x80000000 | ((size_t) (&((struct scripth *)0)->data_ovrun ))), |
3600 | /* |
3601 | ** Keep track that we are moving data from the
3602 | ** PM1 DATA mini-script. |
3603 | */ |
3604 | SCR_REG_REG (HF_REG, SCR_OR, HF_IN_PM1)(0x78000000 | ((((((((size_t) (&((struct ncr_reg *)0)-> nc_scr3)))) & 0x7f) << 16ul) + (((((size_t) (&( (struct ncr_reg *)0)->nc_scr3)))) & 0x80))) | (0x02000000 ) | ((((1u<<1))&0xff)<<8ul)), |
3605 | 0, |
3606 | /* |
3607 | ** Move the data from memory. |
3608 | */ |
3609 | SCR_CHMOV_TBL(0x10000000) ^ SCR_DATA_OUT0x00000000, |
3610 | offsetof (struct ccb, phys.pm1.sg)((size_t) (&((struct ccb *)0)->phys.pm1.sg)), |
3611 | }/*-------------------------< PM1_DATA_END >----------------*/,{ |
3612 | /* |
3613 | ** Clear the flag that told us we were moving
3614 | ** data from the PM1 DATA mini-script. |
3615 | */ |
3616 | SCR_REG_REG (HF_REG, SCR_AND, (~HF_IN_PM1))(0x78000000 | ((((((((size_t) (&((struct ncr_reg *)0)-> nc_scr3)))) & 0x7f) << 16ul) + (((((size_t) (&( (struct ncr_reg *)0)->nc_scr3)))) & 0x80))) | (0x04000000 ) | ((((~(1u<<1)))&0xff)<<8ul)), |
3617 | 0, |
3618 | /* |
3619 | ** Return to the previous DATA script which |
3620 | ** is guaranteed by design (if no bug) to be |
3621 | ** the main DATA script for this transfer. |
3622 | */ |
3623 | SCR_LOAD_REL (temp, 4)(0xe1000000 | 0x02000000|0x10000000 | (((((((size_t) (&(( struct ncr_reg *)0)->nc_temp)))) & 0xff) << 16ul )) | (4)), |
3624 | offsetof (struct ccb, phys.pm1.ret)((size_t) (&((struct ccb *)0)->phys.pm1.ret)), |
3625 | SCR_RETURN0x90080000, |
3626 | 0, |
3627 | }/*---------------------------------------------------------*/ |
3628 | }; |
3629 | |
3630 | |
3631 | static struct scripth scripth0 __initdata = { |
3632 | /*------------------------< START64 >-----------------------*/{ |
3633 | /* |
3634 | ** SCRIPT entry point for the 895A and the 896. |
3635 | ** For now, there is no specific stuff for that |
3636 | ** chip at this point, but this may come. |
3637 | */ |
3638 | SCR_JUMP0x80080000, |
3639 | PADDR (init)(0x50000000 | ((size_t) (&((struct script *)0)->init)) ), |
3640 | }/*-------------------------< NO_DATA >-------------------*/,{ |
3641 | SCR_JUMP0x80080000, |
3642 | PADDRH (data_ovrun)(0x80000000 | ((size_t) (&((struct scripth *)0)->data_ovrun ))), |
3643 | }/*-----------------------< SEL_FOR_ABORT >------------------*/,{ |
3644 | /* |
3645 | ** The C code jumps here when we have
3646 | ** some target to reset or some disconnected
3647 | ** job to abort. Since error recovery is serious
3648 | ** business, we will really reset the SCSI BUS in
3649 | ** case of a SCSI interrupt occurring in this path.
3650 | */ |
3651 | |
3652 | /* |
3653 | ** Set initiator mode. |
3654 | */ |
3655 | SCR_CLR (SCR_TRG)(0x60000000 | (0x00000200)), |
3656 | 0, |
3657 | /* |
3658 | ** And try to select this target. |
3659 | */ |
3660 | SCR_SEL_TBL_ATN0x43000000 ^ offsetof (struct ncb, abrt_sel)((size_t) (&((struct ncb *)0)->abrt_sel)), |
3661 | PADDR (reselect)(0x50000000 | ((size_t) (&((struct script *)0)->reselect ))), |
3662 | |
3663 | /* |
3664 | ** Wait for the selection to complete or |
3665 | ** the selection to time out. |
3666 | */ |
3667 | SCR_JUMPR0x80880000 ^ IFFALSE (WHEN (SCR_MSG_OUT))(0x00080000 | ((0x00030000 | (0x06000000)))), |
3668 | -8, |
3669 | /* |
3670 | ** Call the C code. |
3671 | */ |
3672 | SCR_INT0x98080000, |
3673 | SIR_TARGET_SELECTED(14), |
3674 | /* |
3675 | ** The C code should let us continue here. |
3676 | ** Send the 'kiss of death' message. |
3677 | ** We expect an immediate disconnect once |
3678 | ** the target has eaten the message. |
3679 | */ |
3680 | SCR_REG_REG (scntl2, SCR_AND, 0x7f)(0x78000000 | ((((((((size_t) (&((struct ncr_reg *)0)-> nc_scntl2)))) & 0x7f) << 16ul) + (((((size_t) (& ((struct ncr_reg *)0)->nc_scntl2)))) & 0x80))) | (0x04000000 ) | (((0x7f)&0xff)<<8ul)), |
3681 | 0, |
3682 | SCR_MOVE_TBL(0x10000000 | 0x08000000) ^ SCR_MSG_OUT0x06000000, |
3683 | offsetof (struct ncb, abrt_tbl)((size_t) (&((struct ncb *)0)->abrt_tbl)), |
3684 | SCR_CLR (SCR_ACK|SCR_ATN)(0x60000000 | (0x00000040|0x00000008)), |
3685 | 0, |
3686 | SCR_WAIT_DISC0x48000000, |
3687 | 0, |
3688 | /* |
3689 | ** Tell the C code that we are done. |
3690 | */ |
3691 | SCR_INT0x98080000, |
3692 | SIR_ABORT_SENT(17), |
3693 | }/*-----------------------< SEL_FOR_ABORT_1 >--------------*/,{ |
3694 | /* |
3695 | ** Jump at scheduler. |
3696 | */ |
3697 | SCR_JUMP0x80080000, |
3698 | PADDR (start)(0x50000000 | ((size_t) (&((struct script *)0)->start) )), |
3699 | |
3700 | }/*------------------------< SELECT_NO_ATN >-----------------*/,{ |
3701 | /* |
3702 | ** Set Initiator mode. |
3703 | ** And try to select this target without ATN. |
3704 | */ |
3705 | |
3706 | SCR_CLR (SCR_TRG)(0x60000000 | (0x00000200)), |
3707 | 0, |
3708 | SCR_SEL_TBL0x42000000 ^ offsetof (struct dsb, select)((size_t) (&((struct dsb *)0)->select)), |
3709 | PADDR (ungetjob)(0x50000000 | ((size_t) (&((struct script *)0)->ungetjob ))), |
3710 | /* |
3711 | ** load the savep (saved pointer) into |
3712 | ** the actual data pointer. |
3713 | */ |
3714 | SCR_LOAD_REL (temp, 4)(0xe1000000 | 0x02000000|0x10000000 | (((((((size_t) (&(( struct ncr_reg *)0)->nc_temp)))) & 0xff) << 16ul )) | (4)), |
3715 | offsetof (struct ccb, phys.header.savep)((size_t) (&((struct ccb *)0)->phys.header.savep)), |
3716 | /* |
3717 | ** Initialize the status registers |
3718 | */ |
3719 | SCR_LOAD_REL (scr0, 4)(0xe1000000 | 0x02000000|0x10000000 | (((((((size_t) (&(( struct ncr_reg *)0)->nc_scr0)))) & 0xff) << 16ul )) | (4)), |
3720 | offsetof (struct ccb, phys.header.status)((size_t) (&((struct ccb *)0)->phys.header.status)), |
3721 | |
3722 | }/*------------------------< WF_SEL_DONE_NO_ATN >-----------------*/,{ |
3723 | /* |
3724 | ** Wait immediately for the next phase or |
3725 | ** the selection to complete or time-out. |
3726 | */ |
3727 | SCR_JUMPR0x80880000 ^ IFFALSE (WHEN (SCR_MSG_OUT))(0x00080000 | ((0x00030000 | (0x06000000)))), |
3728 | 0, |
3729 | SCR_JUMP0x80080000, |
3730 | PADDR (select2)(0x50000000 | ((size_t) (&((struct script *)0)->select2 ))), |
3731 | |
3732 | }/*-------------------------< MSG_IN_ETC >--------------------*/,{ |
3733 | /* |
3734 | ** If it is an EXTENDED (variable size message) |
3735 | ** Handle it. |
3736 | */ |
3737 | SCR_JUMP0x80080000 ^ IFTRUE (DATA (M_EXTENDED))(0x00000000 | ((0x00040000 | (((0x01)) & 0xff)))), |
3738 | PADDRH (msg_extended)(0x80000000 | ((size_t) (&((struct scripth *)0)->msg_extended ))), |
3739 | /* |
3740 | ** Let the C code handle any other |
3741 | ** 1 byte message. |
3742 | */ |
3743 | SCR_JUMP0x80080000 ^ IFTRUE (MASK (0x00, 0xf0))(0x00000000 | ((0x00040000 | (((0xf0 ^ 0xff) & 0xff) << 8ul)|((0x00) & 0xff)))), |
3744 | PADDRH (msg_received)(0x80000000 | ((size_t) (&((struct scripth *)0)->msg_received ))), |
3745 | SCR_JUMP0x80080000 ^ IFTRUE (MASK (0x10, 0xf0))(0x00000000 | ((0x00040000 | (((0xf0 ^ 0xff) & 0xff) << 8ul)|((0x10) & 0xff)))), |
3746 | PADDRH (msg_received)(0x80000000 | ((size_t) (&((struct scripth *)0)->msg_received ))), |
3747 | /* |
3748 | ** We do not handle 2-byte messages from SCRIPTS.
3749 | ** So, let the C code deal with these ones too. |
3750 | */ |
3751 | SCR_JUMP0x80080000 ^ IFFALSE (MASK (0x20, 0xf0))(0x00080000 | ((0x00040000 | (((0xf0 ^ 0xff) & 0xff) << 8ul)|((0x20) & 0xff)))), |
3752 | PADDRH (msg_weird_seen)(0x80000000 | ((size_t) (&((struct scripth *)0)->msg_weird_seen ))), |
3753 | SCR_CLR (SCR_ACK)(0x60000000 | (0x00000040)), |
3754 | 0, |
3755 | SCR_MOVE_ABS (1)((0x00000000 | 0x08000000) | (1)) ^ SCR_MSG_IN0x07000000, |
3756 | NADDR (msgin[1])(0x40000000 | ((size_t) (&((struct ncb *)0)->msgin[1]) )), |
3757 | SCR_JUMP0x80080000, |
3758 | PADDRH (msg_received)(0x80000000 | ((size_t) (&((struct scripth *)0)->msg_received ))), |
3759 | |
3760 | }/*-------------------------< MSG_RECEIVED >--------------------*/,{ |
3761 | SCR_LOAD_REL (scratcha, 4)(0xe1000000 | 0x02000000|0x10000000 | (((((((size_t) (&(( struct ncr_reg *)0)->nc_scratcha)))) & 0xff) << 16ul )) | (4)), /* DUMMY READ */ |
3762 | 0, |
3763 | SCR_INT0x98080000, |
3764 | SIR_MSG_RECEIVED(3), |
3765 | |
3766 | }/*-------------------------< MSG_WEIRD_SEEN >------------------*/,{ |
3767 | SCR_LOAD_REL (scratcha1, 4)(0xe1000000 | 0x02000000|0x10000000 | (((((((size_t) (&(( struct ncr_reg *)0)->nc_scratcha1)))) & 0xff) << 16ul)) | (4)), /* DUMMY READ */ |
3768 | 0, |
3769 | SCR_INT0x98080000, |
3770 | SIR_MSG_WEIRD(4), |
3771 | |
3772 | }/*-------------------------< MSG_EXTENDED >--------------------*/,{ |
3773 | /* |
3774 | ** Clear ACK and get the next byte |
3775 | ** assumed to be the message length. |
3776 | */ |
3777 | SCR_CLR (SCR_ACK)(0x60000000 | (0x00000040)), |
3778 | 0, |
3779 | SCR_MOVE_ABS (1)((0x00000000 | 0x08000000) | (1)) ^ SCR_MSG_IN0x07000000, |
3780 | NADDR (msgin[1])(0x40000000 | ((size_t) (&((struct ncb *)0)->msgin[1]) )), |
3781 | /* |
3782 | ** Try to catch some unlikely situations such as a 0 length
3783 | ** or a length that is too large.
3784 | */ |
3785 | SCR_JUMP0x80080000 ^ IFTRUE (DATA (0))(0x00000000 | ((0x00040000 | ((0) & 0xff)))), |
3786 | PADDRH (msg_weird_seen)(0x80000000 | ((size_t) (&((struct scripth *)0)->msg_weird_seen ))), |
3787 | SCR_TO_REG (scratcha)(0x68000000 | ((((((((size_t) (&((struct ncr_reg *)0)-> nc_scratcha)))) & 0x7f) << 16ul) + (((((size_t) (& ((struct ncr_reg *)0)->nc_scratcha)))) & 0x80))) | (0x02000000 ) | (((0)&0xff)<<8ul)), |
3788 | 0, |
3789 | SCR_REG_REG (sfbr, SCR_ADD, (256-8))(0x78000000 | ((((((((size_t) (&((struct ncr_reg *)0)-> nc_sfbr)))) & 0x7f) << 16ul) + (((((size_t) (&( (struct ncr_reg *)0)->nc_sfbr)))) & 0x80))) | (0x06000000 ) | ((((256 -8))&0xff)<<8ul)), |
3790 | 0, |
3791 | SCR_JUMP0x80080000 ^ IFTRUE (CARRYSET)(0x00000000 | ((0x00200000))), |
3792 | PADDRH (msg_weird_seen)(0x80000000 | ((size_t) (&((struct scripth *)0)->msg_weird_seen ))), |
3793 | /* |
3794 | ** We do not handle extended messages from SCRIPTS.
3795 | ** Read the amount of data corresponding to the
3796 | ** message length and call the C code. |
3797 | */ |
3798 | SCR_STORE_REL (scratcha, 1)(0xe0000000 | 0x02000000|0x10000000 | (((((((size_t) (&(( struct ncr_reg *)0)->nc_scratcha)))) & 0xff) << 16ul )) | (1)), |
3799 | offsetof (struct dsb, smsg_ext.size)((size_t) (&((struct dsb *)0)->smsg_ext.size)), |
3800 | SCR_CLR (SCR_ACK)(0x60000000 | (0x00000040)), |
3801 | 0, |
3802 | SCR_MOVE_TBL(0x10000000 | 0x08000000) ^ SCR_MSG_IN0x07000000, |
3803 | offsetof (struct dsb, smsg_ext)((size_t) (&((struct dsb *)0)->smsg_ext)), |
3804 | SCR_JUMP0x80080000, |
3805 | PADDRH (msg_received)(0x80000000 | ((size_t) (&((struct scripth *)0)->msg_received ))), |
3806 | |
3807 | }/*-------------------------< MSG_BAD >------------------*/,{ |
3808 | /* |
3809 | ** unimplemented message - reject it. |
3810 | */ |
3811 | SCR_INT0x98080000, |
3812 | SIR_REJECT_TO_SEND(8), |
3813 | SCR_SET (SCR_ATN)(0x58000000 | (0x00000008)), |
3814 | 0, |
3815 | SCR_JUMP0x80080000, |
3816 | PADDR (clrack)(0x50000000 | ((size_t) (&((struct script *)0)->clrack ))), |
3817 | |
3818 | }/*-------------------------< MSG_WEIRD >--------------------*/,{ |
3819 | /* |
3820 | ** Weird message received;
3821 | ** ignore all MSG IN phases and reject it.
3822 | */ |
3823 | SCR_INT0x98080000, |
3824 | SIR_REJECT_TO_SEND(8), |
3825 | SCR_SET (SCR_ATN)(0x58000000 | (0x00000008)), |
3826 | 0, |
3827 | }/*-------------------------< MSG_WEIRD1 >--------------------*/,{ |
3828 | SCR_CLR (SCR_ACK)(0x60000000 | (0x00000040)), |
3829 | 0, |
3830 | SCR_JUMP0x80080000 ^ IFFALSE (WHEN (SCR_MSG_IN))(0x00080000 | ((0x00030000 | (0x07000000)))), |
3831 | PADDR (dispatch)(0x50000000 | ((size_t) (&((struct script *)0)->dispatch ))), |
3832 | SCR_MOVE_ABS (1)((0x00000000 | 0x08000000) | (1)) ^ SCR_MSG_IN0x07000000, |
3833 | NADDR (scratch)(0x40000000 | ((size_t) (&((struct ncb *)0)->scratch)) ), |
3834 | SCR_JUMP0x80080000, |
3835 | PADDRH (msg_weird1)(0x80000000 | ((size_t) (&((struct scripth *)0)->msg_weird1 ))), |
3836 | }/*-------------------------< WDTR_RESP >----------------*/,{ |
3837 | /* |
3838 | ** let the target fetch our answer. |
3839 | */ |
3840 | SCR_SET (SCR_ATN)(0x58000000 | (0x00000008)), |
3841 | 0, |
3842 | SCR_CLR (SCR_ACK)(0x60000000 | (0x00000040)), |
3843 | 0, |
3844 | SCR_JUMP0x80080000 ^ IFFALSE (WHEN (SCR_MSG_OUT))(0x00080000 | ((0x00030000 | (0x06000000)))), |
3845 | PADDRH (nego_bad_phase)(0x80000000 | ((size_t) (&((struct scripth *)0)->nego_bad_phase ))), |
3846 | |
3847 | }/*-------------------------< SEND_WDTR >----------------*/,{ |
3848 | /* |
3849 | ** Send the M_X_WIDE_REQ |
3850 | */ |
3851 | SCR_MOVE_ABS (4)((0x00000000 | 0x08000000) | (4)) ^ SCR_MSG_OUT0x06000000, |
3852 | NADDR (msgout)(0x40000000 | ((size_t) (&((struct ncb *)0)->msgout))), |
3853 | SCR_JUMP0x80080000, |
3854 | PADDRH (msg_out_done)(0x80000000 | ((size_t) (&((struct scripth *)0)->msg_out_done ))), |
3855 | |
3856 | }/*-------------------------< SDTR_RESP >-------------*/,{ |
3857 | /* |
3858 | ** let the target fetch our answer. |
3859 | */ |
3860 | SCR_SET (SCR_ATN)(0x58000000 | (0x00000008)), |
3861 | 0, |
3862 | SCR_CLR (SCR_ACK)(0x60000000 | (0x00000040)), |
3863 | 0, |
3864 | SCR_JUMP0x80080000 ^ IFFALSE (WHEN (SCR_MSG_OUT))(0x00080000 | ((0x00030000 | (0x06000000)))), |
3865 | PADDRH (nego_bad_phase)(0x80000000 | ((size_t) (&((struct scripth *)0)->nego_bad_phase ))), |
3866 | |
3867 | }/*-------------------------< SEND_SDTR >-------------*/,{ |
3868 | /* |
3869 | ** Send the M_X_SYNC_REQ |
3870 | */ |
3871 | SCR_MOVE_ABS (5)((0x00000000 | 0x08000000) | (5)) ^ SCR_MSG_OUT0x06000000, |
3872 | NADDR (msgout)(0x40000000 | ((size_t) (&((struct ncb *)0)->msgout))), |
3873 | SCR_JUMP0x80080000, |
3874 | PADDRH (msg_out_done)(0x80000000 | ((size_t) (&((struct scripth *)0)->msg_out_done ))), |
3875 | |
3876 | }/*-------------------------< PPR_RESP >-------------*/,{ |
3877 | /* |
3878 | ** let the target fetch our answer. |
3879 | */ |
3880 | SCR_SET (SCR_ATN)(0x58000000 | (0x00000008)), |
3881 | 0, |
3882 | SCR_CLR (SCR_ACK)(0x60000000 | (0x00000040)), |
3883 | 0, |
3884 | SCR_JUMP0x80080000 ^ IFFALSE (WHEN (SCR_MSG_OUT))(0x00080000 | ((0x00030000 | (0x06000000)))), |
3885 | PADDRH (nego_bad_phase)(0x80000000 | ((size_t) (&((struct scripth *)0)->nego_bad_phase ))), |
3886 | |
3887 | }/*-------------------------< SEND_PPR >-------------*/,{ |
3888 | /* |
3889 | ** Send the M_X_PPR_REQ |
3890 | */ |
3891 | SCR_MOVE_ABS (8)((0x00000000 | 0x08000000) | (8)) ^ SCR_MSG_OUT0x06000000, |
3892 | NADDR (msgout)(0x40000000 | ((size_t) (&((struct ncb *)0)->msgout))), |
3893 | SCR_JUMP0x80080000, |
3894 | PADDRH (msg_out_done)(0x80000000 | ((size_t) (&((struct scripth *)0)->msg_out_done ))), |
3895 | |
3896 | }/*-------------------------< NEGO_BAD_PHASE >------------*/,{ |
3897 | SCR_INT0x98080000, |
3898 | SIR_NEGO_PROTO(6), |
3899 | SCR_JUMP0x80080000, |
3900 | PADDR (dispatch)(0x50000000 | ((size_t) (&((struct script *)0)->dispatch ))), |
3901 | |
3902 | }/*-------------------------< MSG_OUT >-------------------*/,{ |
3903 | /* |
3904 | ** The target requests a message. |
3905 | */ |
3906 | SCR_MOVE_ABS (1)((0x00000000 | 0x08000000) | (1)) ^ SCR_MSG_OUT0x06000000, |
3907 | NADDR (msgout)(0x40000000 | ((size_t) (&((struct ncb *)0)->msgout))), |
3908 | /* |
3909 | ** ... wait for the next phase |
3910 | ** if it's a message out, send it again, ... |
3911 | */ |
3912 | SCR_JUMP0x80080000 ^ IFTRUE (WHEN (SCR_MSG_OUT))(0x00000000 | ((0x00030000 | (0x06000000)))), |
3913 | PADDRH (msg_out)(0x80000000 | ((size_t) (&((struct scripth *)0)->msg_out ))), |
3914 | }/*-------------------------< MSG_OUT_DONE >--------------*/,{ |
3915 | /* |
3916 | ** ... else clear the message ... |
3917 | */ |
3918 | SCR_INT0x98080000, |
3919 | SIR_MSG_OUT_DONE(19), |
3920 | /* |
3921 | ** ... and process the next phase |
3922 | */ |
3923 | SCR_JUMP0x80080000, |
3924 | PADDR (dispatch)(0x50000000 | ((size_t) (&((struct script *)0)->dispatch ))), |
3925 | |
3926 | }/*-------------------------< DATA_OVRUN >-----------------------*/,{ |
3927 | /* |
3928 | * Use scratcha to count the extra bytes. |
3929 | */ |
3930 | SCR_LOAD_ABS (scratcha, 4)(0xe1000000 | 0x02000000 | (((((((size_t) (&((struct ncr_reg *)0)->nc_scratcha)))) & 0xff) << 16ul)) | (4)), |
3931 | PADDRH (zero)(0x80000000 | ((size_t) (&((struct scripth *)0)->zero) )), |
3932 | }/*-------------------------< DATA_OVRUN1 >----------------------*/,{ |
3933 | /* |
3934 | * The target may want to transfer too much data. |
3935 | * |
3936 | * If phase is DATA OUT write 1 byte and count it. |
3937 | */ |
3938 | SCR_JUMPR0x80880000 ^ IFFALSE (WHEN (SCR_DATA_OUT))(0x00080000 | ((0x00030000 | (0x00000000)))), |
3939 | 16, |
3940 | SCR_CHMOV_ABS (1)((0x00000000) | (1)) ^ SCR_DATA_OUT0x00000000, |
3941 | NADDR (scratch)(0x40000000 | ((size_t) (&((struct ncb *)0)->scratch)) ), |
3942 | SCR_JUMP0x80080000, |
3943 | PADDRH (data_ovrun2)(0x80000000 | ((size_t) (&((struct scripth *)0)->data_ovrun2 ))), |
3944 | /* |
3945 | * If WSR is set, clear this condition, and |
3946 | * count this byte. |
3947 | */ |
3948 | SCR_FROM_REG (scntl2)(0x70000000 | ((((((((size_t) (&((struct ncr_reg *)0)-> nc_scntl2)))) & 0x7f) << 16ul) + (((((size_t) (& ((struct ncr_reg *)0)->nc_scntl2)))) & 0x80))) | (0x02000000 ) | (((0)&0xff)<<8ul)), |
3949 | 0, |
3950 | SCR_JUMPR0x80880000 ^ IFFALSE (MASK (WSR, WSR))(0x00080000 | ((0x00040000 | (((0x01 ^ 0xff) & 0xff) << 8ul)|((0x01) & 0xff)))), |
3951 | 16, |
3952 | SCR_REG_REG (scntl2, SCR_OR, WSR)(0x78000000 | ((((((((size_t) (&((struct ncr_reg *)0)-> nc_scntl2)))) & 0x7f) << 16ul) + (((((size_t) (& ((struct ncr_reg *)0)->nc_scntl2)))) & 0x80))) | (0x02000000 ) | (((0x01)&0xff)<<8ul)), |
3953 | 0, |
3954 | SCR_JUMP0x80080000, |
3955 | PADDRH (data_ovrun2)(0x80000000 | ((size_t) (&((struct scripth *)0)->data_ovrun2 ))), |
3956 | /* |
3957 | * Finally check against DATA IN phase. |
3958 | * Signal data overrun to the C code |
3959 | * and jump to dispatcher if not so. |
3960 | * Read 1 byte otherwise and count it. |
3961 | */ |
3962 | SCR_JUMPR0x80880000 ^ IFTRUE (WHEN (SCR_DATA_IN))(0x00000000 | ((0x00030000 | (0x01000000)))), |
3963 | 16, |
3964 | SCR_INT0x98080000, |
3965 | SIR_DATA_OVERRUN(22), |
3966 | SCR_JUMP0x80080000, |
3967 | PADDR (dispatch)(0x50000000 | ((size_t) (&((struct script *)0)->dispatch ))), |
3968 | SCR_CHMOV_ABS (1)((0x00000000) | (1)) ^ SCR_DATA_IN0x01000000, |
3969 | NADDR (scratch)(0x40000000 | ((size_t) (&((struct ncb *)0)->scratch)) ), |
3970 | }/*-------------------------< DATA_OVRUN2 >----------------------*/,{ |
3971 | /* |
3972 | * Count this byte. |
3973 | * This allows returning a negative
3974 | * residual to the user.
3975 | */ |
3976 | SCR_REG_REG (scratcha, SCR_ADD, 0x01)(0x78000000 | ((((((((size_t) (&((struct ncr_reg *)0)-> nc_scratcha)))) & 0x7f) << 16ul) + (((((size_t) (& ((struct ncr_reg *)0)->nc_scratcha)))) & 0x80))) | (0x06000000 ) | (((0x01)&0xff)<<8ul)), |
3977 | 0, |
3978 | SCR_REG_REG (scratcha1, SCR_ADDC, 0)(0x78000000 | ((((((((size_t) (&((struct ncr_reg *)0)-> nc_scratcha1)))) & 0x7f) << 16ul) + (((((size_t) (& ((struct ncr_reg *)0)->nc_scratcha1)))) & 0x80))) | (0x07000000 ) | (((0)&0xff)<<8ul)), |
3979 | 0, |
3980 | SCR_REG_REG (scratcha2, SCR_ADDC, 0)(0x78000000 | ((((((((size_t) (&((struct ncr_reg *)0)-> nc_scratcha2)))) & 0x7f) << 16ul) + (((((size_t) (& ((struct ncr_reg *)0)->nc_scratcha2)))) & 0x80))) | (0x07000000 ) | (((0)&0xff)<<8ul)), |
3981 | 0, |
3982 | /* |
3983 | * .. and repeat as required. |
3984 | */ |
3985 | SCR_JUMP0x80080000, |
3986 | PADDRH (data_ovrun1)(0x80000000 | ((size_t) (&((struct scripth *)0)->data_ovrun1 ))), |
3987 | |
3988 | }/*-------------------------< ABORT_RESEL >----------------*/,{ |
3989 | SCR_SET (SCR_ATN)(0x58000000 | (0x00000008)), |
3990 | 0, |
3991 | SCR_CLR (SCR_ACK)(0x60000000 | (0x00000040)), |
3992 | 0, |
3993 | /* |
3994 | ** send the abort/abortag/reset message |
3995 | ** we expect an immediate disconnect |
3996 | */ |
3997 | SCR_REG_REG (scntl2, SCR_AND, 0x7f)(0x78000000 | ((((((((size_t) (&((struct ncr_reg *)0)-> nc_scntl2)))) & 0x7f) << 16ul) + (((((size_t) (& ((struct ncr_reg *)0)->nc_scntl2)))) & 0x80))) | (0x04000000 ) | (((0x7f)&0xff)<<8ul)), |
3998 | 0, |
3999 | SCR_MOVE_ABS (1)((0x00000000 | 0x08000000) | (1)) ^ SCR_MSG_OUT0x06000000, |
4000 | NADDR (msgout)(0x40000000 | ((size_t) (&((struct ncb *)0)->msgout))), |
4001 | SCR_CLR (SCR_ACK|SCR_ATN)(0x60000000 | (0x00000040|0x00000008)), |
4002 | 0, |
4003 | SCR_WAIT_DISC0x48000000, |
4004 | 0, |
4005 | SCR_INT0x98080000, |
4006 | SIR_RESEL_ABORTED(18), |
4007 | SCR_JUMP0x80080000, |
4008 | PADDR (start)(0x50000000 | ((size_t) (&((struct script *)0)->start) )), |
4009 | }/*-------------------------< RESEND_IDENT >-------------------*/,{ |
4010 | /* |
4011 | ** The target stays in MSG OUT phase after having acked |
4012 | ** Identify [+ Tag [+ Extended message ]]. Targets shall |
4013 | ** behave this way on parity error. |
4014 | ** We must send all the messages again.
4015 | */ |
4016 | SCR_SET (SCR_ATN)(0x58000000 | (0x00000008)), /* Shall be asserted 2 deskew delays before the */ |
4017 | 0, /* first ACK = 90 ns. Hope the NCR isn't too fast */
4018 | SCR_JUMP0x80080000, |
4019 | PADDR (send_ident)(0x50000000 | ((size_t) (&((struct script *)0)->send_ident ))), |
4020 | }/*-------------------------< IDENT_BREAK >-------------------*/,{ |
4021 | SCR_CLR (SCR_ATN)(0x60000000 | (0x00000008)), |
4022 | 0, |
4023 | SCR_JUMP0x80080000, |
4024 | PADDR (select2)(0x50000000 | ((size_t) (&((struct script *)0)->select2 ))), |
4025 | }/*-------------------------< IDENT_BREAK_ATN >----------------*/,{ |
4026 | SCR_SET (SCR_ATN)(0x58000000 | (0x00000008)), |
4027 | 0, |
4028 | SCR_JUMP0x80080000, |
4029 | PADDR (select2)(0x50000000 | ((size_t) (&((struct script *)0)->select2 ))), |
4030 | }/*-------------------------< SDATA_IN >-------------------*/,{ |
4031 | SCR_CHMOV_TBL(0x10000000) ^ SCR_DATA_IN0x01000000, |
4032 | offsetof (struct dsb, sense)((size_t) (&((struct dsb *)0)->sense)), |
4033 | SCR_CALL0x88080000, |
4034 | PADDR (datai_done)(0x50000000 | ((size_t) (&((struct script *)0)->datai_done ))), |
4035 | SCR_JUMP0x80080000, |
4036 | PADDRH (data_ovrun)(0x80000000 | ((size_t) (&((struct scripth *)0)->data_ovrun ))), |
4037 | }/*-------------------------< DATA_IO >--------------------*/,{ |
4038 | /* |
4039 | ** We jump here if the data direction was unknown at the |
4040 | ** time we had to queue the command to the scripts processor. |
4041 | ** Pointers had been set as follows in this situation:
4042 | ** savep --> DATA_IO |
4043 | ** lastp --> start pointer when DATA_IN |
4044 | ** goalp --> goal pointer when DATA_IN |
4045 | ** wlastp --> start pointer when DATA_OUT |
4046 | ** wgoalp --> goal pointer when DATA_OUT |
4047 | ** This script sets savep/lastp/goalp according to the |
4048 | ** direction chosen by the target. |
4049 | */ |
4050 | SCR_JUMP0x80080000 ^ IFTRUE (WHEN (SCR_DATA_OUT))(0x00000000 | ((0x00030000 | (0x00000000)))), |
4051 | PADDRH(data_io_out)(0x80000000 | ((size_t) (&((struct scripth *)0)->data_io_out ))), |
4052 | }/*-------------------------< DATA_IO_COM >-----------------*/,{ |
4053 | /* |
4054 | ** Direction is DATA IN. |
4055 | ** Warning: we jump here, even when phase is DATA OUT. |
4056 | */ |
4057 | SCR_LOAD_REL (scratcha, 4)(0xe1000000 | 0x02000000|0x10000000 | (((((((size_t) (&(( struct ncr_reg *)0)->nc_scratcha)))) & 0xff) << 16ul )) | (4)), |
4058 | offsetof (struct ccb, phys.header.lastp)((size_t) (&((struct ccb *)0)->phys.header.lastp)), |
4059 | SCR_STORE_REL (scratcha, 4)(0xe0000000 | 0x02000000|0x10000000 | (((((((size_t) (&(( struct ncr_reg *)0)->nc_scratcha)))) & 0xff) << 16ul )) | (4)), |
4060 | offsetof (struct ccb, phys.header.savep)((size_t) (&((struct ccb *)0)->phys.header.savep)), |
4061 | |
4062 | /* |
4063 | ** Jump to the SCRIPTS according to actual direction. |
4064 | */ |
4065 | SCR_LOAD_REL (temp, 4)(0xe1000000 | 0x02000000|0x10000000 | (((((((size_t) (&(( struct ncr_reg *)0)->nc_temp)))) & 0xff) << 16ul )) | (4)), |
4066 | offsetof (struct ccb, phys.header.savep)((size_t) (&((struct ccb *)0)->phys.header.savep)), |
4067 | SCR_RETURN0x90080000, |
4068 | 0, |
4069 | }/*-------------------------< DATA_IO_OUT >-----------------*/,{ |
4070 | /* |
4071 | ** Direction is DATA OUT. |
4072 | */ |
4073 | SCR_REG_REG (HF_REG, SCR_AND, (~HF_DATA_IN))(0x78000000 | ((((((((size_t) (&((struct ncr_reg *)0)-> nc_scr3)))) & 0x7f) << 16ul) + (((((size_t) (&( (struct ncr_reg *)0)->nc_scr3)))) & 0x80))) | (0x04000000 ) | ((((~(1u<<5)))&0xff)<<8ul)), |
4074 | 0, |
4075 | SCR_LOAD_REL (scratcha, 4)(0xe1000000 | 0x02000000|0x10000000 | (((((((size_t) (&(( struct ncr_reg *)0)->nc_scratcha)))) & 0xff) << 16ul )) | (4)), |
4076 | offsetof (struct ccb, phys.header.wlastp)((size_t) (&((struct ccb *)0)->phys.header.wlastp)), |
4077 | SCR_STORE_REL (scratcha, 4)(0xe0000000 | 0x02000000|0x10000000 | (((((((size_t) (&(( struct ncr_reg *)0)->nc_scratcha)))) & 0xff) << 16ul )) | (4)), |
4078 | offsetof (struct ccb, phys.header.lastp)((size_t) (&((struct ccb *)0)->phys.header.lastp)), |
4079 | SCR_LOAD_REL (scratcha, 4)(0xe1000000 | 0x02000000|0x10000000 | (((((((size_t) (&(( struct ncr_reg *)0)->nc_scratcha)))) & 0xff) << 16ul )) | (4)), |
4080 | offsetof (struct ccb, phys.header.wgoalp)((size_t) (&((struct ccb *)0)->phys.header.wgoalp)), |
4081 | SCR_STORE_REL (scratcha, 4)(0xe0000000 | 0x02000000|0x10000000 | (((((((size_t) (&(( struct ncr_reg *)0)->nc_scratcha)))) & 0xff) << 16ul )) | (4)), |
4082 | offsetof (struct ccb, phys.header.goalp)((size_t) (&((struct ccb *)0)->phys.header.goalp)), |
4083 | SCR_JUMP0x80080000, |
4084 | PADDRH(data_io_com)(0x80000000 | ((size_t) (&((struct scripth *)0)->data_io_com ))), |
4085 | |
4086 | }/*-------------------------< RESEL_BAD_LUN >---------------*/,{ |
4087 | /* |
4088 | ** Message is an IDENTIFY, but lun is unknown. |
4089 | ** Signal problem to C code for logging the event. |
4090 | ** Send a M_ABORT to clear all pending tasks. |
4091 | */ |
4092 | SCR_INT0x98080000, |
4093 | SIR_RESEL_BAD_LUN(13), |
4094 | SCR_JUMP0x80080000, |
4095 | PADDRH (abort_resel)(0x80000000 | ((size_t) (&((struct scripth *)0)->abort_resel ))), |
4096 | }/*-------------------------< BAD_I_T_L >------------------*/,{ |
4097 | /* |
4098 | ** We do not have a task for that I_T_L.
4099 | ** Signal problem to C code for logging the event. |
4100 | ** Send a M_ABORT message. |
4101 | */ |
4102 | SCR_INT0x98080000, |
4103 | SIR_RESEL_BAD_I_T_L(15), |
4104 | SCR_JUMP0x80080000, |
4105 | PADDRH (abort_resel)(0x80000000 | ((size_t) (&((struct scripth *)0)->abort_resel ))), |
4106 | }/*-------------------------< BAD_I_T_L_Q >----------------*/,{ |
4107 | /* |
4108 | ** We do not have a task that matches the tag.
4109 | ** Signal problem to C code for logging the event. |
4110 | ** Send a M_ABORTTAG message. |
4111 | */ |
4112 | SCR_INT0x98080000, |
4113 | SIR_RESEL_BAD_I_T_L_Q(16), |
4114 | SCR_JUMP0x80080000, |
4115 | PADDRH (abort_resel)(0x80000000 | ((size_t) (&((struct scripth *)0)->abort_resel ))), |
4116 | }/*-------------------------< BAD_STATUS >-----------------*/,{ |
4117 | /* |
4118 | ** Anything different from INTERMEDIATE |
4119 | ** CONDITION MET should be a bad SCSI status, |
4120 | ** given that GOOD status has already been tested. |
4121 | ** Call the C code. |
4122 | */ |
4123 | SCR_LOAD_ABS (scratcha, 4)(0xe1000000 | 0x02000000 | (((((((size_t) (&((struct ncr_reg *)0)->nc_scratcha)))) & 0xff) << 16ul)) | (4)), |
4124 | PADDRH (startpos)(0x80000000 | ((size_t) (&((struct scripth *)0)->startpos ))), |
4125 | SCR_INT0x98080000 ^ IFFALSE (DATA (S_COND_MET))(0x00080000 | ((0x00040000 | (((0x04)) & 0xff)))), |
4126 | SIR_BAD_STATUS(1), |
4127 | SCR_RETURN0x90080000, |
4128 | 0, |
4129 | |
4130 | }/*-------------------------< TWEAK_PMJ >------------------*/,{ |
4131 | /* |
4132 | ** Disable PM handling from SCRIPTS for the data phase |
4133 | ** and thus force PM to be handled by the C code if the
4134 | ** HF_PM_TO_C flag is set.
4135 | */ |
4136 | SCR_FROM_REG(HF_REG)(0x70000000 | ((((((((size_t) (&((struct ncr_reg *)0)-> nc_scr3)))) & 0x7f) << 16ul) + (((((size_t) (&( (struct ncr_reg *)0)->nc_scr3)))) & 0x80))) | (0x02000000 ) | (((0)&0xff)<<8ul)), |
4137 | 0, |
4138 | SCR_JUMPR0x80880000 ^ IFTRUE (MASK (HF_PM_TO_C, HF_PM_TO_C))(0x00000000 | ((0x00040000 | ((((1u<<6) ^ 0xff) & 0xff ) << 8ul)|(((1u<<6)) & 0xff)))), |
4139 | 16, |
4140 | SCR_REG_REG (ccntl0, SCR_OR, ENPMJ)(0x78000000 | ((((((((size_t) (&((struct ncr_reg *)0)-> nc_ccntl0)))) & 0x7f) << 16ul) + (((((size_t) (& ((struct ncr_reg *)0)->nc_ccntl0)))) & 0x80))) | (0x02000000 ) | (((0x80)&0xff)<<8ul)), |
4141 | 0, |
4142 | SCR_RETURN0x90080000, |
4143 | 0, |
4144 | SCR_REG_REG (ccntl0, SCR_AND, (~ENPMJ))(0x78000000 | ((((((((size_t) (&((struct ncr_reg *)0)-> nc_ccntl0)))) & 0x7f) << 16ul) + (((((size_t) (& ((struct ncr_reg *)0)->nc_ccntl0)))) & 0x80))) | (0x04000000 ) | ((((~0x80))&0xff)<<8ul)), |
4145 | 0, |
4146 | SCR_RETURN0x90080000, |
4147 | 0, |
4148 | |
4149 | }/*-------------------------< PM_HANDLE >------------------*/,{ |
4150 | /* |
4151 | ** Phase mismatch handling. |
4152 | ** |
4153 | ** Since we have to deal with 2 SCSI data pointers |
4154 | ** (current and saved), we need at least 2 contexts. |
4155 | ** Each context (pm0 and pm1) has a saved area, a |
4156 | ** SAVE mini-script and a DATA phase mini-script. |
4157 | */ |
4158 | /* |
4159 | ** Get the PM handling flags. |
4160 | */ |
4161 | SCR_FROM_REG (HF_REG)(0x70000000 | ((((((((size_t) (&((struct ncr_reg *)0)-> nc_scr3)))) & 0x7f) << 16ul) + (((((size_t) (&( (struct ncr_reg *)0)->nc_scr3)))) & 0x80))) | (0x02000000 ) | (((0)&0xff)<<8ul)), |
4162 | 0, |
4163 | /* |
4164 | ** If no flags are set (the first PM for example), avoid
4165 | ** all the heavy flag testing below.
4166 | ** This makes the normal case a bit faster. |
4167 | */ |
4168 | SCR_JUMP0x80080000 ^ IFTRUE (MASK (0, (HF_IN_PM0 | HF_IN_PM1 | HF_DP_SAVED)))(0x00000000 | ((0x00040000 | ((((1u | (1u<<1) | (1u<< 3)) ^ 0xff) & 0xff) << 8ul)|((0) & 0xff)))), |
4169 | PADDRH (pm_handle1)(0x80000000 | ((size_t) (&((struct scripth *)0)->pm_handle1 ))), |
4170 | /* |
4171 | ** If we received a SAVE DP, switch to the |
4172 | ** other PM context since the savep may point |
4173 | ** to the current PM context. |
4174 | */ |
4175 | SCR_JUMPR0x80880000 ^ IFFALSE (MASK (HF_DP_SAVED, HF_DP_SAVED))(0x00080000 | ((0x00040000 | ((((1u<<3) ^ 0xff) & 0xff ) << 8ul)|(((1u<<3)) & 0xff)))), |
4176 | 8, |
4177 | SCR_REG_REG (sfbr, SCR_XOR, HF_ACT_PM)(0x78000000 | ((((((((size_t) (&((struct ncr_reg *)0)-> nc_sfbr)))) & 0x7f) << 16ul) + (((((size_t) (&( (struct ncr_reg *)0)->nc_sfbr)))) & 0x80))) | (0x03000000 ) | ((((1u<<2))&0xff)<<8ul)), |
4178 | 0, |
4179 | /* |
4180 | ** If we have been interrupted in a PM DATA mini-script,
4181 | ** we take the return address from the corresponding |
4182 | ** saved area. |
4183 | ** This ensures the return address always points to the
4184 | ** main DATA script for this transfer. |
4185 | */ |
4186 | SCR_JUMP0x80080000 ^ IFTRUE (MASK (0, (HF_IN_PM0 | HF_IN_PM1)))(0x00000000 | ((0x00040000 | ((((1u | (1u<<1)) ^ 0xff) & 0xff) << 8ul)|((0) & 0xff)))), |
4187 | PADDRH (pm_handle1)(0x80000000 | ((size_t) (&((struct scripth *)0)->pm_handle1 ))), |
4188 | SCR_JUMPR0x80880000 ^ IFFALSE (MASK (HF_IN_PM0, HF_IN_PM0))(0x00080000 | ((0x00040000 | (((1u ^ 0xff) & 0xff) << 8ul)|((1u) & 0xff)))), |
4189 | 16, |
4190 | SCR_LOAD_REL (ia, 4)(0xe1000000 | 0x02000000|0x10000000 | (((((((size_t) (&(( struct ncr_reg *)0)->nc_ia)))) & 0xff) << 16ul)) | (4)), |
4191 | offsetof(struct ccb, phys.pm0.ret)((size_t) (&((struct ccb *)0)->phys.pm0.ret)), |
4192 | SCR_JUMP0x80080000, |
4193 | PADDRH (pm_save)(0x80000000 | ((size_t) (&((struct scripth *)0)->pm_save ))), |
4194 | SCR_LOAD_REL (ia, 4)(0xe1000000 | 0x02000000|0x10000000 | (((((((size_t) (&(( struct ncr_reg *)0)->nc_ia)))) & 0xff) << 16ul)) | (4)), |
4195 | offsetof(struct ccb, phys.pm1.ret)((size_t) (&((struct ccb *)0)->phys.pm1.ret)), |
4196 | SCR_JUMP0x80080000, |
4197 | PADDRH (pm_save)(0x80000000 | ((size_t) (&((struct scripth *)0)->pm_save ))), |
4198 | }/*-------------------------< PM_HANDLE1 >-----------------*/,{ |
4199 | /* |
4200 | ** Normal case. |
4201 | ** Update the return address so that it |
4202 | ** will point after the interrupted MOVE. |
4203 | */ |
4204 | SCR_REG_REG (ia, SCR_ADD, 8)(0x78000000 | ((((((((size_t) (&((struct ncr_reg *)0)-> nc_ia)))) & 0x7f) << 16ul) + (((((size_t) (&((struct ncr_reg *)0)->nc_ia)))) & 0x80))) | (0x06000000) | (( (8)&0xff)<<8ul)), |
4205 | 0, |
4206 | SCR_REG_REG (ia1, SCR_ADDC, 0)(0x78000000 | ((((((((size_t) (&((struct ncr_reg *)0)-> nc_ia1)))) & 0x7f) << 16ul) + (((((size_t) (&(( struct ncr_reg *)0)->nc_ia1)))) & 0x80))) | (0x07000000 ) | (((0)&0xff)<<8ul)), |
4207 | 0, |
4208 | }/*-------------------------< PM_SAVE >--------------------*/,{ |
4209 | /* |
4210 | ** Clear all the flags that told us if we were |
4211 | ** interrupted in a PM DATA mini-script and/or |
4212 | ** we received a SAVE DP. |
4213 | */ |
4214 | SCR_SFBR_REG (HF_REG, SCR_AND, (~(HF_IN_PM0|HF_IN_PM1|HF_DP_SAVED)))(0x68000000 | ((((((((size_t) (&((struct ncr_reg *)0)-> nc_scr3)))) & 0x7f) << 16ul) + (((((size_t) (&( (struct ncr_reg *)0)->nc_scr3)))) & 0x80))) | (0x04000000 ) | ((((~(1u|(1u<<1)|(1u<<3))))&0xff)<< 8ul)), |
4215 | 0, |
4216 | /* |
4217 | ** Choose the current PM context. |
4218 | */ |
4219 | SCR_JUMP0x80080000 ^ IFTRUE (MASK (HF_ACT_PM, HF_ACT_PM))(0x00000000 | ((0x00040000 | ((((1u<<2) ^ 0xff) & 0xff ) << 8ul)|(((1u<<2)) & 0xff)))), |
4220 | PADDRH (pm1_save)(0x80000000 | ((size_t) (&((struct scripth *)0)->pm1_save ))), |
4221 | }/*-------------------------< PM0_SAVE >-------------------*/,{ |
4222 | SCR_STORE_REL (ia, 4)(0xe0000000 | 0x02000000|0x10000000 | (((((((size_t) (&(( struct ncr_reg *)0)->nc_ia)))) & 0xff) << 16ul)) | (4)), |
4223 | offsetof(struct ccb, phys.pm0.ret)((size_t) (&((struct ccb *)0)->phys.pm0.ret)), |
4224 | /* |
4225 | ** If the WSR bit is set, UA and RBC may
4226 | ** have to be changed, whether the device wants
4227 | ** to ignore this residue or not.
4228 | */ |
4229 | SCR_FROM_REG (scntl2)(0x70000000 | ((((((((size_t) (&((struct ncr_reg *)0)-> nc_scntl2)))) & 0x7f) << 16ul) + (((((size_t) (& ((struct ncr_reg *)0)->nc_scntl2)))) & 0x80))) | (0x02000000 ) | (((0)&0xff)<<8ul)), |
4230 | 0, |
4231 | SCR_CALL0x88080000 ^ IFTRUE (MASK (WSR, WSR))(0x00000000 | ((0x00040000 | (((0x01 ^ 0xff) & 0xff) << 8ul)|((0x01) & 0xff)))), |
4232 | PADDRH (pm_wsr_handle)(0x80000000 | ((size_t) (&((struct scripth *)0)->pm_wsr_handle ))), |
4233 | /* |
4234 | ** Save the remaining byte count, the updated |
4235 | ** address and the return address. |
4236 | */ |
4237 | SCR_STORE_REL (rbc, 4)(0xe0000000 | 0x02000000|0x10000000 | (((((((size_t) (&(( struct ncr_reg *)0)->nc_rbc)))) & 0xff) << 16ul) ) | (4)), |
4238 | offsetof(struct ccb, phys.pm0.sg.size)((size_t) (&((struct ccb *)0)->phys.pm0.sg.size)), |
4239 | SCR_STORE_REL (ua, 4)(0xe0000000 | 0x02000000|0x10000000 | (((((((size_t) (&(( struct ncr_reg *)0)->nc_ua)))) & 0xff) << 16ul)) | (4)), |
4240 | offsetof(struct ccb, phys.pm0.sg.addr)((size_t) (&((struct ccb *)0)->phys.pm0.sg.addr)), |
4241 | /* |
4242 | ** Set the current pointer at the PM0 DATA mini-script. |
4243 | */ |
4244 | SCR_LOAD_ABS (temp, 4)(0xe1000000 | 0x02000000 | (((((((size_t) (&((struct ncr_reg *)0)->nc_temp)))) & 0xff) << 16ul)) | (4)), |
4245 | PADDRH (pm0_data_addr)(0x80000000 | ((size_t) (&((struct scripth *)0)->pm0_data_addr ))), |
4246 | SCR_JUMP0x80080000, |
4247 | PADDR (dispatch)(0x50000000 | ((size_t) (&((struct script *)0)->dispatch ))), |
4248 | }/*-------------------------< PM1_SAVE >-------------------*/,{ |
4249 | SCR_STORE_REL (ia, 4)(0xe0000000 | 0x02000000|0x10000000 | (((((((size_t) (&(( struct ncr_reg *)0)->nc_ia)))) & 0xff) << 16ul)) | (4)), |
4250 | offsetof(struct ccb, phys.pm1.ret)((size_t) (&((struct ccb *)0)->phys.pm1.ret)), |
4251 | /* |
4252 | ** If the WSR bit is set, UA and RBC may
4253 | ** have been changed, whether the device wants
4254 | ** to ignore this residue or not. |
4255 | */ |
4256 | SCR_FROM_REG (scntl2)(0x70000000 | ((((((((size_t) (&((struct ncr_reg *)0)-> nc_scntl2)))) & 0x7f) << 16ul) + (((((size_t) (& ((struct ncr_reg *)0)->nc_scntl2)))) & 0x80))) | (0x02000000 ) | (((0)&0xff)<<8ul)), |
4257 | 0, |
4258 | SCR_CALL0x88080000 ^ IFTRUE (MASK (WSR, WSR))(0x00000000 | ((0x00040000 | (((0x01 ^ 0xff) & 0xff) << 8ul)|((0x01) & 0xff)))), |
4259 | PADDRH (pm_wsr_handle)(0x80000000 | ((size_t) (&((struct scripth *)0)->pm_wsr_handle ))), |
4260 | /* |
4261 | ** Save the remaining byte count, the updated |
4262 | ** address and the return address. |
4263 | */ |
4264 | SCR_STORE_REL (rbc, 4)(0xe0000000 | 0x02000000|0x10000000 | (((((((size_t) (&(( struct ncr_reg *)0)->nc_rbc)))) & 0xff) << 16ul) ) | (4)), |
4265 | offsetof(struct ccb, phys.pm1.sg.size)((size_t) (&((struct ccb *)0)->phys.pm1.sg.size)), |
4266 | SCR_STORE_REL (ua, 4)(0xe0000000 | 0x02000000|0x10000000 | (((((((size_t) (&(( struct ncr_reg *)0)->nc_ua)))) & 0xff) << 16ul)) | (4)), |
4267 | offsetof(struct ccb, phys.pm1.sg.addr)((size_t) (&((struct ccb *)0)->phys.pm1.sg.addr)), |
4268 | /* |
4269 | ** Set the current pointer at the PM1 DATA mini-script. |
4270 | */ |
4271 | SCR_LOAD_ABS (temp, 4)(0xe1000000 | 0x02000000 | (((((((size_t) (&((struct ncr_reg *)0)->nc_temp)))) & 0xff) << 16ul)) | (4)), |
4272 | PADDRH (pm1_data_addr)(0x80000000 | ((size_t) (&((struct scripth *)0)->pm1_data_addr ))), |
4273 | SCR_JUMP0x80080000, |
4274 | PADDR (dispatch)(0x50000000 | ((size_t) (&((struct script *)0)->dispatch ))), |
4275 | }/*--------------------------< PM_WSR_HANDLE >-----------------------*/,{ |
4276 | /* |
4277 | * Phase mismatch handling from SCRIPT with WSR set. |
4278 | * Such a condition can occur if the chip wants to |
4279 | * execute a CHMOV(size > 1) when the WSR bit is |
4280 | * set and the target changes PHASE. |
4281 | */ |
4282 | #ifdef SYM_DEBUG_PM_WITH_WSR |
4283 | /* |
4284 | * Some debugging may still be needed.:) |
4285 | */ |
4286 | SCR_INT0x98080000, |
4287 | SIR_PM_WITH_WSR, |
4288 | #endif |
4289 | /* |
4290 | * We must move the residual byte to memory. |
4291 | * |
4292 | * UA contains bit 0..31 of the address to |
4293 | * move the residual byte. |
4294 | * Move it to the table indirect. |
4295 | */ |
4296 | SCR_STORE_REL (ua, 4)(0xe0000000 | 0x02000000|0x10000000 | (((((((size_t) (&(( struct ncr_reg *)0)->nc_ua)))) & 0xff) << 16ul)) | (4)), |
4297 | offsetof (struct ccb, phys.wresid.addr)((size_t) (&((struct ccb *)0)->phys.wresid.addr)), |
4298 | /* |
4299 | * Increment UA (move address to next position). |
4300 | */ |
4301 | SCR_REG_REG (ua, SCR_ADD, 1)(0x78000000 | ((((((((size_t) (&((struct ncr_reg *)0)-> nc_ua)))) & 0x7f) << 16ul) + (((((size_t) (&((struct ncr_reg *)0)->nc_ua)))) & 0x80))) | (0x06000000) | (( (1)&0xff)<<8ul)), |
4302 | 0, |
4303 | SCR_REG_REG (ua1, SCR_ADDC, 0)(0x78000000 | ((((((((size_t) (&((struct ncr_reg *)0)-> nc_ua1)))) & 0x7f) << 16ul) + (((((size_t) (&(( struct ncr_reg *)0)->nc_ua1)))) & 0x80))) | (0x07000000 ) | (((0)&0xff)<<8ul)), |
4304 | 0, |
4305 | SCR_REG_REG (ua2, SCR_ADDC, 0)(0x78000000 | ((((((((size_t) (&((struct ncr_reg *)0)-> nc_ua2)))) & 0x7f) << 16ul) + (((((size_t) (&(( struct ncr_reg *)0)->nc_ua2)))) & 0x80))) | (0x07000000 ) | (((0)&0xff)<<8ul)), |
4306 | 0, |
4307 | SCR_REG_REG (ua3, SCR_ADDC, 0)(0x78000000 | ((((((((size_t) (&((struct ncr_reg *)0)-> nc_ua3)))) & 0x7f) << 16ul) + (((((size_t) (&(( struct ncr_reg *)0)->nc_ua3)))) & 0x80))) | (0x07000000 ) | (((0)&0xff)<<8ul)), |
4308 | 0, |
4309 | /* |
4310 | * Compute SCRATCHA as: |
4311 | * - size to transfer = 1 byte. |
4312 | * - bits 24..31 = high address bits [32..39].
4313 | */ |
4314 | SCR_LOAD_ABS (scratcha, 4)(0xe1000000 | 0x02000000 | (((((((size_t) (&((struct ncr_reg *)0)->nc_scratcha)))) & 0xff) << 16ul)) | (4)), |
4315 | PADDRH (zero)(0x80000000 | ((size_t) (&((struct scripth *)0)->zero) )), |
4316 | SCR_REG_REG (scratcha, SCR_OR, 1)(0x78000000 | ((((((((size_t) (&((struct ncr_reg *)0)-> nc_scratcha)))) & 0x7f) << 16ul) + (((((size_t) (& ((struct ncr_reg *)0)->nc_scratcha)))) & 0x80))) | (0x02000000 ) | (((1)&0xff)<<8ul)), |
4317 | 0, |
4318 | SCR_FROM_REG (rbc3)(0x70000000 | ((((((((size_t) (&((struct ncr_reg *)0)-> nc_rbc3)))) & 0x7f) << 16ul) + (((((size_t) (&( (struct ncr_reg *)0)->nc_rbc3)))) & 0x80))) | (0x02000000 ) | (((0)&0xff)<<8ul)), |
4319 | 0, |
4320 | SCR_TO_REG (scratcha3)(0x68000000 | ((((((((size_t) (&((struct ncr_reg *)0)-> nc_scratcha3)))) & 0x7f) << 16ul) + (((((size_t) (& ((struct ncr_reg *)0)->nc_scratcha3)))) & 0x80))) | (0x02000000 ) | (((0)&0xff)<<8ul)), |
4321 | 0, |
4322 | /* |
4323 | * Move this value to the table indirect. |
4324 | */ |
4325 | SCR_STORE_REL (scratcha, 4)(0xe0000000 | 0x02000000|0x10000000 | (((((((size_t) (&(( struct ncr_reg *)0)->nc_scratcha)))) & 0xff) << 16ul )) | (4)), |
4326 | offsetof (struct ccb, phys.wresid.size)((size_t) (&((struct ccb *)0)->phys.wresid.size)), |
4327 | /* |
4328 | * Wait for a valid phase. |
4329 | * While testing with bogus QUANTUM drives, the C1010 |
4330 | * sometimes raised a spurious phase mismatch with |
4331 | * WSR and the CHMOV(1) triggered another PM. |
4332 | * Waiting explicitly for the PHASE seemed to avoid
4333 | * the nested phase mismatch. Btw, this didn't happen |
4334 | * using my IBM drives. |
4335 | */ |
4336 | SCR_JUMPR0x80880000 ^ IFFALSE (WHEN (SCR_DATA_IN))(0x00080000 | ((0x00030000 | (0x01000000)))), |
4337 | 0, |
4338 | /* |
4339 | * Perform the move of the residual byte. |
4340 | */ |
4341 | SCR_CHMOV_TBL(0x10000000) ^ SCR_DATA_IN0x01000000, |
4342 | offsetof (struct ccb, phys.wresid)((size_t) (&((struct ccb *)0)->phys.wresid)), |
4343 | /* |
4344 | * We can now handle the phase mismatch with UA fixed. |
4345 | * RBC[0..23]=0 is a special case that does not require |
4346 | * a PM context. The C code also checks against this. |
4347 | */ |
4348 | SCR_FROM_REG (rbc)(0x70000000 | ((((((((size_t) (&((struct ncr_reg *)0)-> nc_rbc)))) & 0x7f) << 16ul) + (((((size_t) (&(( struct ncr_reg *)0)->nc_rbc)))) & 0x80))) | (0x02000000 ) | (((0)&0xff)<<8ul)), |
4349 | 0, |
4350 | SCR_RETURN0x90080000 ^ IFFALSE (DATA (0))(0x00080000 | ((0x00040000 | ((0) & 0xff)))), |
4351 | 0, |
4352 | SCR_FROM_REG (rbc1)(0x70000000 | ((((((((size_t) (&((struct ncr_reg *)0)-> nc_rbc1)))) & 0x7f) << 16ul) + (((((size_t) (&( (struct ncr_reg *)0)->nc_rbc1)))) & 0x80))) | (0x02000000 ) | (((0)&0xff)<<8ul)), |
4353 | 0, |
4354 | SCR_RETURN0x90080000 ^ IFFALSE (DATA (0))(0x00080000 | ((0x00040000 | ((0) & 0xff)))), |
4355 | 0, |
4356 | SCR_FROM_REG (rbc2)(0x70000000 | ((((((((size_t) (&((struct ncr_reg *)0)-> nc_rbc2)))) & 0x7f) << 16ul) + (((((size_t) (&( (struct ncr_reg *)0)->nc_rbc2)))) & 0x80))) | (0x02000000 ) | (((0)&0xff)<<8ul)), |
4357 | 0, |
4358 | SCR_RETURN0x90080000 ^ IFFALSE (DATA (0))(0x00080000 | ((0x00040000 | ((0) & 0xff)))), |
4359 | 0, |
4360 | /* |
4361 | * RBC[0..23]=0. |
4362 | * Not only do we not need a PM context, but this would
4363 | * lead to a bogus CHMOV(0). This condition means that |
4364 | * the residual was the last byte to move from this CHMOV. |
4365 | * So, we just have to move the current data script pointer |
4366 | * (i.e. TEMP) to the SCRIPTS address following the |
4367 | * interrupted CHMOV and jump to dispatcher. |
4368 | */ |
4369 | SCR_STORE_ABS (ia, 4)(0xe0000000 | 0x02000000 | (((((((size_t) (&((struct ncr_reg *)0)->nc_ia)))) & 0xff) << 16ul)) | (4)), |
4370 | PADDRH (scratch)(0x80000000 | ((size_t) (&((struct scripth *)0)->scratch ))), |
4371 | SCR_LOAD_ABS (temp, 4)(0xe1000000 | 0x02000000 | (((((((size_t) (&((struct ncr_reg *)0)->nc_temp)))) & 0xff) << 16ul)) | (4)), |
4372 | PADDRH (scratch)(0x80000000 | ((size_t) (&((struct scripth *)0)->scratch ))), |
4373 | SCR_JUMP0x80080000, |
4374 | PADDR (dispatch)(0x50000000 | ((size_t) (&((struct script *)0)->dispatch ))), |
4375 | }/*--------------------------< WSR_MA_HELPER >-----------------------*/,{ |
4376 | /* |
4377 | * Helper for the C code when WSR bit is set. |
4378 | * Perform the move of the residual byte. |
4379 | */ |
4380 | SCR_CHMOV_TBL(0x10000000) ^ SCR_DATA_IN0x01000000, |
4381 | offsetof (struct ccb, phys.wresid)((size_t) (&((struct ccb *)0)->phys.wresid)), |
4382 | SCR_JUMP0x80080000, |
4383 | PADDR (dispatch)(0x50000000 | ((size_t) (&((struct script *)0)->dispatch ))), |
4384 | }/*-------------------------< ZERO >------------------------*/,{ |
4385 | SCR_DATA_ZERO0xf00ff00f, |
4386 | }/*-------------------------< SCRATCH >---------------------*/,{ |
4387 | SCR_DATA_ZERO0xf00ff00f, |
4388 | }/*-------------------------< SCRATCH1 >--------------------*/,{ |
4389 | SCR_DATA_ZERO0xf00ff00f, |
4390 | }/*-------------------------< PM0_DATA_ADDR >---------------*/,{ |
4391 | SCR_DATA_ZERO0xf00ff00f, |
4392 | }/*-------------------------< PM1_DATA_ADDR >---------------*/,{ |
4393 | SCR_DATA_ZERO0xf00ff00f, |
4394 | }/*-------------------------< SAVED_DSA >-------------------*/,{ |
4395 | SCR_DATA_ZERO0xf00ff00f, |
4396 | }/*-------------------------< SAVED_DRS >-------------------*/,{ |
4397 | SCR_DATA_ZERO0xf00ff00f, |
4398 | }/*-------------------------< DONE_POS >--------------------*/,{ |
4399 | SCR_DATA_ZERO0xf00ff00f, |
4400 | }/*-------------------------< STARTPOS >--------------------*/,{ |
4401 | SCR_DATA_ZERO0xf00ff00f, |
4402 | }/*-------------------------< TARGTBL >---------------------*/,{ |
4403 | SCR_DATA_ZERO0xf00ff00f, |
4404 | |
4405 | |
4406 | /* |
4407 | ** We may use MEMORY MOVE instructions to load the on-chip RAM,
4408 | ** if it happens that mapping PCI memory is not possible. |
4409 | ** But writing the RAM from the CPU is the preferred method, |
4410 | ** since PCI 2.2 seems to disallow PCI self-mastering. |
4411 | */ |
4412 | |
4413 | #ifdef SCSI_NCR_PCI_MEM_NOT_SUPPORTED |
4414 | |
4415 | }/*-------------------------< START_RAM >-------------------*/,{ |
4416 | /* |
4417 | ** Load the script into on-chip RAM, |
4418 | ** and jump to start point. |
4419 | */ |
4420 | SCR_COPY (sizeof (struct script))(0xc0000000 | 0x01000000 | (sizeof (struct script))), |
4421 | }/*-------------------------< SCRIPT0_BA >--------------------*/,{ |
4422 | 0, |
4423 | PADDR (start)(0x50000000 | ((size_t) (&((struct script *)0)->start) )), |
4424 | SCR_JUMP0x80080000, |
4425 | PADDR (init)(0x50000000 | ((size_t) (&((struct script *)0)->init)) ), |
4426 | |
4427 | }/*-------------------------< START_RAM64 >--------------------*/,{ |
4428 | /* |
4429 | ** Load the RAM and start for 64 bit PCI (895A,896). |
4430 | ** Both scripts (script and scripth) are loaded into |
4431 | ** the RAM which is 8K (4K for 825A/875/895). |
4432 | ** We also need to load some 32-63 bit segment
4433 | ** addresses into the SCRIPTS processor.
4434 | ** LOAD/STORE ABSOLUTE always refers to on-chip RAM |
4435 | ** in our implementation. The main memory is |
4436 | ** accessed using LOAD/STORE DSA RELATIVE. |
4437 | */ |
4438 | SCR_LOAD_REL (mmws, 4)(0xe1000000 | 0x02000000|0x10000000 | (((((((size_t) (&(( struct ncr_reg *)0)->nc_mmws)))) & 0xff) << 16ul )) | (4)), |
4439 | offsetof (struct ncb, scr_ram_seg)((size_t) (&((struct ncb *)0)->scr_ram_seg)), |
4440 | SCR_COPY (sizeof(struct script))(0xc0000000 | 0x01000000 | (sizeof(struct script))), |
4441 | }/*-------------------------< SCRIPT0_BA64 >--------------------*/,{ |
4442 | 0, |
4443 | PADDR (start)(0x50000000 | ((size_t) (&((struct script *)0)->start) )), |
4444 | SCR_COPY (sizeof(struct scripth))(0xc0000000 | 0x01000000 | (sizeof(struct scripth))), |
4445 | }/*-------------------------< SCRIPTH0_BA64 >--------------------*/,{ |
4446 | 0, |
4447 | PADDRH (start64)(0x80000000 | ((size_t) (&((struct scripth *)0)->start64 ))), |
4448 | SCR_LOAD_REL (mmrs, 4)(0xe1000000 | 0x02000000|0x10000000 | (((((((size_t) (&(( struct ncr_reg *)0)->nc_mmrs)))) & 0xff) << 16ul )) | (4)), |
4449 | offsetof (struct ncb, scr_ram_seg)((size_t) (&((struct ncb *)0)->scr_ram_seg)), |
4450 | SCR_JUMP640x80480000, |
4451 | PADDRH (start64)(0x80000000 | ((size_t) (&((struct scripth *)0)->start64 ))), |
4452 | }/*-------------------------< RAM_SEG64 >--------------------*/,{ |
4453 | 0, |
4454 | |
4455 | #endif /* SCSI_NCR_PCI_MEM_NOT_SUPPORTED */ |
4456 | |
4457 | }/*-------------------------< SNOOPTEST >-------------------*/,{ |
4458 | /* |
4459 | ** Read the variable. |
4460 | */ |
4461 | SCR_LOAD_REL (scratcha, 4)(0xe1000000 | 0x02000000|0x10000000 | (((((((size_t) (&(( struct ncr_reg *)0)->nc_scratcha)))) & 0xff) << 16ul )) | (4)), |
4462 | offsetof(struct ncb, ncr_cache)((size_t) (&((struct ncb *)0)->ncr_cache)), |
4463 | SCR_STORE_REL (temp, 4)(0xe0000000 | 0x02000000|0x10000000 | (((((((size_t) (&(( struct ncr_reg *)0)->nc_temp)))) & 0xff) << 16ul )) | (4)), |
4464 | offsetof(struct ncb, ncr_cache)((size_t) (&((struct ncb *)0)->ncr_cache)), |
4465 | SCR_LOAD_REL (temp, 4)(0xe1000000 | 0x02000000|0x10000000 | (((((((size_t) (&(( struct ncr_reg *)0)->nc_temp)))) & 0xff) << 16ul )) | (4)), |
4466 | offsetof(struct ncb, ncr_cache)((size_t) (&((struct ncb *)0)->ncr_cache)), |
4467 | }/*-------------------------< SNOOPEND >-------------------*/,{ |
4468 | /* |
4469 | ** And stop. |
4470 | */ |
4471 | SCR_INT0x98080000, |
4472 | 99, |
4473 | }/*--------------------------------------------------------*/ |
4474 | }; |
4475 | |
4476 | /*========================================================== |
4477 | ** |
4478 | ** |
4479 | ** Fill in #define dependent parts of the script |
4480 | ** |
4481 | ** |
4482 | **========================================================== |
4483 | */ |
4484 | |
4485 | void __init ncr_script_fill (struct script * scr, struct scripth * scrh) |
4486 | { |
4487 | int i; |
4488 | ncrcmd *p; |
4489 | |
4490 | p = scr->data_in; |
4491 | for (i=0; i<MAX_SCATTER((127)); i++) { |
4492 | *p++ =SCR_CHMOV_TBL(0x10000000) ^ SCR_DATA_IN0x01000000; |
4493 | *p++ =offsetof (struct dsb, data[i])((size_t) (&((struct dsb *)0)->data[i])); |
4494 | }; |
4495 | |
4496 | assert ((u_long)p == (u_long)&scr->data_in + sizeof (scr->data_in)){ if (!((unsigned long)p == (unsigned long)&scr->data_in + sizeof (scr->data_in))) { (void)panic( "assertion \"%s\" failed: file \"%s\", line %d\n" , "(u_long)p == (u_long)&scr->data_in + sizeof (scr->data_in)" , "../linux/src/drivers/scsi/sym53c8xx.c", 4496); } }; |
4497 | |
4498 | p = scr->data_out; |
4499 | |
4500 | for (i=0; i<MAX_SCATTER((127)); i++) { |
4501 | *p++ =SCR_CHMOV_TBL(0x10000000) ^ SCR_DATA_OUT0x00000000; |
4502 | *p++ =offsetof (struct dsb, data[i])((size_t) (&((struct dsb *)0)->data[i])); |
4503 | }; |
4504 | |
4505 | assert ((u_long)p == (u_long)&scr->data_out + sizeof (scr->data_out)){ if (!((unsigned long)p == (unsigned long)&scr->data_out + sizeof (scr->data_out))) { (void)panic( "assertion \"%s\" failed: file \"%s\", line %d\n" , "(u_long)p == (u_long)&scr->data_out + sizeof (scr->data_out)" , "../linux/src/drivers/scsi/sym53c8xx.c", 4505); } }; |
4506 | } |
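/*
** A minimal sketch (illustrative, not part of the original source) of
** the pattern the loops above generate.  The first pair written into
** scr->data_in is
**
**	scr->data_in[0] = SCR_CHMOV_TBL ^ SCR_DATA_IN;	 i.e. 0x10000000 ^ 0x01000000
**	scr->data_in[1] = offsetof (struct dsb, data[0]);
**
** that is, a table indirect CHMOV in the DATA IN phase whose byte count
** and address are fetched from the first scatter entry of the DSB.
*/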
4507 | |
4508 | /*========================================================== |
4509 | ** |
4510 | ** |
4511 | ** Copy and rebind a script. |
4512 | ** |
4513 | ** |
4514 | **========================================================== |
4515 | */ |
4516 | |
4517 | static void __init |
4518 | ncr_script_copy_and_bind (ncb_p np,ncrcmd *src,ncrcmd *dst,int len) |
4519 | { |
4520 | ncrcmd opcode, new, old, tmp1, tmp2; |
4521 | ncrcmd *start, *end; |
4522 | int relocs; |
4523 | int opchanged = 0; |
4524 | |
4525 | start = src; |
4526 | end = src + len/4; |
4527 | |
4528 | while (src < end) { |
4529 | |
4530 | opcode = *src++; |
4531 | *dst++ = cpu_to_scr(opcode)(opcode); |
4532 | |
4533 | /* |
4534 | ** If we forget to change the length |
4535 | ** in struct script, a field will be |
4536 | ** padded with 0. This is an illegal |
4537 | ** command. |
4538 | */ |
4539 | |
4540 | if (opcode == 0) { |
4541 | printk (KERN_INFO"<6>" "%s: ERROR0 IN SCRIPT at %d.\n", |
4542 | ncr_name(np), (int) (src-start-1)); |
4543 | MDELAY (10000); |
4544 | continue; |
4545 | }; |
4546 | |
4547 | /* |
4548 | ** We use the bogus value 0xf00ff00f ;-) |
4549 | ** to reserve data area in SCRIPTS. |
4550 | */ |
4551 | if (opcode == SCR_DATA_ZERO0xf00ff00f) { |
4552 | dst[-1] = 0; |
4553 | continue; |
4554 | } |
4555 | |
4556 | if (DEBUG_FLAGSncr_debug & DEBUG_SCRIPT(0x0040)) |
4557 | printk (KERN_INFO"<6>" "%p: <%x>\n", |
4558 | (src-1), (unsigned)opcode); |
4559 | |
4560 | /* |
4561 | ** We don't have to decode ALL commands |
4562 | */ |
4563 | switch (opcode >> 28) { |
4564 | |
4565 | case 0xf: |
4566 | /* |
4567 | ** LOAD / STORE DSA relative, don't relocate. |
4568 | */ |
4569 | relocs = 0; |
4570 | break; |
4571 | case 0xe: |
4572 | /* |
4573 | ** LOAD / STORE absolute. |
4574 | */ |
4575 | relocs = 1; |
4576 | break; |
4577 | case 0xc: |
4578 | /* |
4579 | ** COPY has TWO arguments. |
4580 | */ |
4581 | relocs = 2; |
4582 | tmp1 = src[0]; |
4583 | tmp2 = src[1]; |
4584 | #ifdef RELOC_KVAR |
4585 | if ((tmp1 & RELOC_MASK0xf0000000) == RELOC_KVAR) |
4586 | tmp1 = 0; |
4587 | if ((tmp2 & RELOC_MASK0xf0000000) == RELOC_KVAR) |
4588 | tmp2 = 0; |
4589 | #endif |
4590 | if ((tmp1 ^ tmp2) & 3) { |
4591 | printk (KERN_ERR"<3>""%s: ERROR1 IN SCRIPT at %d.\n", |
4592 | ncr_name(np), (int) (src-start-1)); |
4593 | MDELAY (1000); |
4594 | } |
4595 | /* |
4596 | ** If the PREFETCH feature is not enabled, remove
4597 | ** the NO FLUSH bit if present. |
4598 | */ |
4599 | if ((opcode & SCR_NO_FLUSH0x01000000) && |
4600 | !(np->features & FE_PFEN(1<<12))) { |
4601 | dst[-1] = cpu_to_scr(opcode & ~SCR_NO_FLUSH)(opcode & ~0x01000000); |
4602 | ++opchanged; |
4603 | } |
4604 | break; |
4605 | |
4606 | case 0x0: |
4607 | /* |
4608 | ** MOVE/CHMOV (absolute address) |
4609 | */ |
4610 | if (!(np->features & FE_WIDE(1<<1))) |
4611 | dst[-1] = cpu_to_scr(opcode | OPC_MOVE)(opcode | 0x08000000); |
4612 | relocs = 1; |
4613 | break; |
4614 | |
4615 | case 0x1: |
4616 | /* |
4617 | ** MOVE/CHMOV (table indirect) |
4618 | */ |
4619 | if (!(np->features & FE_WIDE(1<<1))) |
4620 | dst[-1] = cpu_to_scr(opcode | OPC_MOVE)(opcode | 0x08000000); |
4621 | relocs = 0; |
4622 | break; |
4623 | |
4624 | case 0x8: |
4625 | /* |
4626 | ** JUMP / CALL |
4627 | ** don't relocate if relative :-)
4628 | */ |
4629 | if (opcode & 0x00800000) |
4630 | relocs = 0; |
4631 | else if ((opcode & 0xf8400000) == 0x80400000)/*JUMP64*/ |
4632 | relocs = 2; |
4633 | else |
4634 | relocs = 1; |
4635 | break; |
4636 | |
4637 | case 0x4: |
4638 | case 0x5: |
4639 | case 0x6: |
4640 | case 0x7: |
4641 | relocs = 1; |
4642 | break; |
4643 | |
4644 | default: |
4645 | relocs = 0; |
4646 | break; |
4647 | }; |
4648 | |
4649 | if (!relocs) { |
4650 | *dst++ = cpu_to_scr(*src++)(*src++); |
4651 | continue; |
4652 | } |
4653 | while (relocs--) { |
4654 | old = *src++; |
4655 | |
4656 | switch (old & RELOC_MASK0xf0000000) { |
4657 | case RELOC_REGISTER0x60000000: |
4658 | new = (old & ~RELOC_MASK0xf0000000) + pcivtobus(np->base_ba)(np->base_ba); |
4659 | break; |
4660 | case RELOC_LABEL0x50000000: |
4661 | new = (old & ~RELOC_MASK0xf0000000) + np->p_script; |
4662 | break; |
4663 | case RELOC_LABELH0x80000000: |
4664 | new = (old & ~RELOC_MASK0xf0000000) + np->p_scripth; |
4665 | break; |
4666 | case RELOC_SOFTC0x40000000: |
4667 | new = (old & ~RELOC_MASK0xf0000000) + np->p_ncb; |
4668 | break; |
4669 | #ifdef RELOC_KVAR |
4670 | case RELOC_KVAR: |
4671 | new=0; |
4672 | if (((old & ~RELOC_MASK0xf0000000) < SCRIPT_KVAR_FIRST) || |
4673 | ((old & ~RELOC_MASK0xf0000000) > SCRIPT_KVAR_LAST)) |
4674 | panic("ncr KVAR out of range"); |
4675 | new = vtobus(script_kvars[old & ~RELOC_MASK])virt_to_phys(script_kvars[old & ~0xf0000000]); |
4676 | #endif |
4677 | break; |
4678 | case 0: |
4679 | /* Don't relocate a 0 address. */ |
4680 | if (old == 0) { |
4681 | new = old; |
4682 | break; |
4683 | } |
4684 | /* fall through */ |
4685 | default: |
4686 | new = 0; /* For 'cc' not to complain */ |
4687 | panic("ncr_script_copy_and_bind: " |
4688 | "weird relocation %x\n", old); |
4689 | break; |
4690 | } |
4691 | |
4692 | *dst++ = cpu_to_scr(new)(new); |
4693 | } |
4694 | }; |
4695 | } |
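/*
** A minimal sketch (illustrative, derived from the switch above) of how
** a single relocation token is rebound.  A PADDR (dispatch) argument is
** emitted in the script templates as
**
**	old = RELOC_LABEL | offsetof (struct script, dispatch);
**
** The high nibble selects the base and the low 28 bits carry the
** structure offset, so the bound value becomes
**
**	new = (old & ~RELOC_MASK) + np->p_script;
**
** PADDRH tokens are rebound against np->p_scripth (RELOC_LABELH) and
** NADDR tokens against np->p_ncb (RELOC_SOFTC) in the same way.
*/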
4696 | |
4697 | /*========================================================== |
4698 | ** |
4699 | ** |
4700 | ** Auto configuration: attach and init a host adapter. |
4701 | ** |
4702 | ** |
4703 | **========================================================== |
4704 | */ |
4705 | |
4706 | /* |
4707 | ** Linux host data structure. |
4708 | */ |
4709 | |
4710 | struct host_data { |
4711 | struct ncb *ncb; |
4712 | }; |
4713 | |
4714 | /* |
4715 | ** Print something which allows retrieving the controller type, unit,
4716 | ** target and lun concerned by a kernel message.
4717 | */ |
4718 | |
4719 | static void PRINT_TARGET(ncb_p np, int target) |
4720 | { |
4721 | printk(KERN_INFO"<6>" "%s-<%d,*>: ", ncr_name(np), target); |
4722 | } |
4723 | |
4724 | static void PRINT_LUN(ncb_p np, int target, int lun) |
4725 | { |
4726 | printk(KERN_INFO"<6>" "%s-<%d,%d>: ", ncr_name(np), target, lun); |
4727 | } |
4728 | |
4729 | static void PRINT_ADDR(Scsi_Cmnd *cmd) |
4730 | { |
4731 | struct host_data *host_data = (struct host_data *) cmd->host->hostdata; |
4732 | PRINT_LUN(host_data->ncb, cmd->target, cmd->lun); |
4733 | } |
4734 | |
4735 | /*========================================================== |
4736 | ** |
4737 | ** NCR chip clock divisor table. |
4738 | ** Divisors are multiplied by 10,000,000 in order to make |
4739 | ** calculations simpler.
4740 | ** |
4741 | **========================================================== |
4742 | */ |
4743 | |
4744 | #define _5M5000000 5000000 |
4745 | static u_longunsigned long div_10M[] = |
4746 | {2*_5M5000000, 3*_5M5000000, 4*_5M5000000, 6*_5M5000000, 8*_5M5000000, 12*_5M5000000, 16*_5M5000000}; |
4747 | |
4748 | |
4749 | /*=============================================================== |
4750 | ** |
4751 | ** Prepare io register values used by ncr_init() according |
4752 | ** to selected and supported features. |
4753 | ** |
4754 | ** NCR/SYMBIOS chips allow burst lengths of 2, 4, 8, 16, 32, 64, |
4755 | ** 128 transfers. All chips support bursts of at least 16 transfers.
4756 | ** The 825A, 875 and 895 chips support bursts of up to 128 |
4757 | ** transfers and the 895A and 896 support bursts of up to 64 |
4758 | ** transfers. All other chips support bursts of up to 16 transfers.
4759 | ** |
4760 | ** For PCI 32 bit data transfers each transfer is a DWORD (4 bytes). |
4761 | ** It is a QUADWORD (8 bytes) for PCI 64 bit data transfers. |
4762 | ** Only the 896 is able to perform 64 bit data transfers. |
4763 | ** |
4764 | ** We use log base 2 of the burst length as the internal code, with
4765 | ** value 0 meaning "burst disabled". |
4766 | ** |
4767 | **=============================================================== |
4768 | */ |
4769 | |
4770 | /* |
4771 | * Burst length from burst code. |
4772 | */ |
4773 | #define burst_length(bc)(!(bc))? 0 : 1 << (bc) (!(bc))? 0 : 1 << (bc) |
4774 | |
4775 | /* |
4776 | * Burst code from io register bits. |
4777 | */ |
4778 | #define burst_code(dmode, ctest4, ctest5)(ctest4) & 0x80? 0 : (((dmode) & 0xc0) >> 6) + ( (ctest5) & 0x04) + 1 \ |
4779 | (ctest4) & 0x80? 0 : (((dmode) & 0xc0) >> 6) + ((ctest5) & 0x04) + 1 |
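/*
** A small worked example of the two macros above, using hypothetical
** register values (not taken from the original source):
**
**	dmode  = 0x80	bits 7:6 = 2
**	ctest4 = 0x00	burst not disabled
**	ctest5 = 0x04	large burst bit set
**
** burst_code() then yields 2 + 4 + 1 = 7 and burst_length(7) is
** 1 << 7 = 128 transfers.  With bit 7 of ctest4 set the code is 0,
** i.e. burst disabled, which is also what ncr_init_burst() below
** writes back when bc == 0.
*/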
4780 | |
4781 | /* |
4782 | * Set initial io register bits from burst code. |
4783 | */ |
4784 | static inlineinline __attribute__((always_inline)) void ncr_init_burst(ncb_p np, u_charunsigned char bc) |
4785 | { |
4786 | np->rv_ctest4 &= ~0x80; |
4787 | np->rv_dmode &= ~(0x3 << 6); |
4788 | np->rv_ctest5 &= ~0x4; |
4789 | |
4790 | if (!bc) { |
4791 | np->rv_ctest4 |= 0x80; |
4792 | } |
4793 | else { |
4794 | --bc; |
4795 | np->rv_dmode |= ((bc & 0x3) << 6); |
4796 | np->rv_ctest5 |= (bc & 0x4); |
4797 | } |
4798 | } |
4799 | |
4800 | #ifdef SCSI_NCR_NVRAM_SUPPORT |
4801 | |
4802 | /* |
4803 | ** Get target set-up from Symbios format NVRAM. |
4804 | */ |
4805 | |
4806 | static void __init |
4807 | ncr_Symbios_setup_target(ncb_p np, int target, Symbios_nvram *nvram) |
4808 | { |
4809 | tcb_p tp = &np->target[target]; |
4810 | Symbios_target *tn = &nvram->target[target]; |
4811 | |
4812 | tp->usrsync = tn->sync_period ? (tn->sync_period + 3) / 4 : 255; |
4813 | tp->usrwide = tn->bus_width == 0x10 ? 1 : 0; |
4814 | tp->usrtags = |
4815 | (tn->flags & SYMBIOS_QUEUE_TAGS_ENABLED(1<<3))? MAX_TAGS(8) : 0; |
4816 | |
4817 | if (!(tn->flags & SYMBIOS_DISCONNECT_ENABLE(1))) |
4818 | tp->usrflag |= UF_NODISC(0x02); |
4819 | if (!(tn->flags & SYMBIOS_SCAN_AT_BOOT_TIME(1<<1))) |
4820 | tp->usrflag |= UF_NOSCAN(0x04); |
4821 | } |
4822 | |
4823 | /* |
4824 | ** Get target set-up from Tekram format NVRAM. |
4825 | */ |
4826 | |
4827 | static void __init |
4828 | ncr_Tekram_setup_target(ncb_p np, int target, Tekram_nvram *nvram) |
4829 | { |
4830 | tcb_p tp = &np->target[target]; |
4831 | struct Tekram_target *tn = &nvram->target[target]; |
4832 | int i; |
4833 | |
4834 | if (tn->flags & TEKRAM_SYNC_NEGO(1<<1)) { |
4835 | i = tn->sync_index & 0xf; |
4836 | tp->usrsync = Tekram_sync[i]; |
4837 | } |
4838 | |
4839 | tp->usrwide = (tn->flags & TEKRAM_WIDE_NEGO(1<<5)) ? 1 : 0; |
4840 | |
4841 | if (tn->flags & TEKRAM_TAGGED_COMMANDS(1<<4)) { |
4842 | tp->usrtags = 2 << nvram->max_tags_index; |
4843 | } |
4844 | |
4845 | if (!(tn->flags & TEKRAM_DISCONNECT_ENABLE(1<<2))) |
4846 | tp->usrflag = UF_NODISC(0x02); |
4847 | |
4848 | /* If any device does not support parity, we will not use this option */ |
4849 | if (!(tn->flags & TEKRAM_PARITY_CHECK(1))) |
4850 | np->rv_scntl0 &= ~0x0a; /* SCSI parity checking disabled */ |
4851 | } |
4852 | #endif /* SCSI_NCR_NVRAM_SUPPORT */ |
4853 | |
4854 | /* |
4855 | ** Save initial settings of some IO registers. |
4856 | ** Assumed to have been set by BIOS. |
4857 | */ |
4858 | static void __init ncr_save_initial_setting(ncb_p np) |
4859 | { |
4860 | np->sv_scntl0 = INB(nc_scntl0)(*(volatile unsigned char *) ((char *)np->reg + (((size_t) (&((struct ncr_reg *)0)->nc_scntl0))))) & 0x0a; |
4861 | np->sv_dmode = INB(nc_dmode)(*(volatile unsigned char *) ((char *)np->reg + (((size_t) (&((struct ncr_reg *)0)->nc_dmode))))) & 0xce; |
4862 | np->sv_dcntl = INB(nc_dcntl)(*(volatile unsigned char *) ((char *)np->reg + (((size_t) (&((struct ncr_reg *)0)->nc_dcntl))))) & 0xa8; |
4863 | np->sv_ctest3 = INB(nc_ctest3)(*(volatile unsigned char *) ((char *)np->reg + (((size_t) (&((struct ncr_reg *)0)->nc_ctest3))))) & 0x01; |
4864 | np->sv_ctest4 = INB(nc_ctest4)(*(volatile unsigned char *) ((char *)np->reg + (((size_t) (&((struct ncr_reg *)0)->nc_ctest4))))) & 0x80; |
4865 | np->sv_gpcntl = INB(nc_gpcntl)(*(volatile unsigned char *) ((char *)np->reg + (((size_t) (&((struct ncr_reg *)0)->nc_gpcntl))))); |
4866 | np->sv_stest2 = INB(nc_stest2)(*(volatile unsigned char *) ((char *)np->reg + (((size_t) (&((struct ncr_reg *)0)->nc_stest2))))) & 0x20; |
4867 | np->sv_stest4 = INB(nc_stest4)(*(volatile unsigned char *) ((char *)np->reg + (((size_t) (&((struct ncr_reg *)0)->nc_stest4))))); |
4868 | np->sv_stest1 = INB(nc_stest1)(*(volatile unsigned char *) ((char *)np->reg + (((size_t) (&((struct ncr_reg *)0)->nc_stest1))))); |
4869 | |
4870 | np->sv_scntl3 = INB(nc_scntl3)(*(volatile unsigned char *) ((char *)np->reg + (((size_t) (&((struct ncr_reg *)0)->nc_scntl3))))) & 0x07; |
4871 | |
4872 | if ((np->device_id == PCI_DEVICE_ID_LSI_53C10100x20) || |
4873 | (np->device_id == PCI_DEVICE_ID_LSI_53C1010_660x21) ){ |
4874 | /* |
4875 | ** C1010 always uses large fifo, bit 5 rsvd |
4876 | ** scntl4 used ONLY with C1010 |
4877 | */ |
4878 | np->sv_ctest5 = INB(nc_ctest5)(*(volatile unsigned char *) ((char *)np->reg + (((size_t) (&((struct ncr_reg *)0)->nc_ctest5))))) & 0x04 ; |
4879 | np->sv_scntl4 = INB(nc_scntl4)(*(volatile unsigned char *) ((char *)np->reg + (((size_t) (&((struct ncr_reg *)0)->nc_scntl4))))); |
4880 | } |
4881 | else { |
4882 | np->sv_ctest5 = INB(nc_ctest5)(*(volatile unsigned char *) ((char *)np->reg + (((size_t) (&((struct ncr_reg *)0)->nc_ctest5))))) & 0x24 ; |
4883 | np->sv_scntl4 = 0; |
4884 | } |
4885 | } |
4886 | |
4887 | /* |
4888 | ** Prepare io register values used by ncr_init() |
4889 | ** according to selected and supported features. |
4890 | */ |
4891 | static int __init ncr_prepare_setting(ncb_p np, ncr_nvram *nvram) |
4892 | { |
4893 | u_charunsigned char burst_max; |
4894 | u_longunsigned long period; |
4895 | int i; |
4896 | |
4897 | /* |
4898 | ** Wide ? |
4899 | */ |
4900 | |
4901 | np->maxwide = (np->features & FE_WIDE(1<<1))? 1 : 0; |
4902 | |
4903 | /* |
4904 | ** Get the frequency of the chip's clock. |
4905 | ** Find the right value for scntl3. |
4906 | */ |
4907 | |
4908 | if (np->features & FE_QUAD(1<<5)) |
4909 | np->multiplier = 4; |
4910 | else if (np->features & FE_DBLR(1<<4)) |
4911 | np->multiplier = 2; |
4912 | else |
4913 | np->multiplier = 1; |
4914 | |
4915 | np->clock_khz = (np->features & FE_CLK80(1<<15))? 80000 : 40000; |
4916 | np->clock_khz *= np->multiplier; |
4917 | |
4918 | if (np->clock_khz != 40000) |
4919 | ncr_getclock(np, np->multiplier); |
4920 | |
4921 | /* |
4922 | * Divisor to be used for async (timer pre-scaler). |
4923 | * |
4924 | 	 * Note: For the C1010 the async divisor is 2 (8) if the
4925 | * quadrupler is disabled (enabled). |
4926 | */ |
4927 | |
4928 | if ( (np->device_id == PCI_DEVICE_ID_LSI_53C10100x20) || |
4929 | (np->device_id == PCI_DEVICE_ID_LSI_53C1010_660x21)) { |
4930 | |
4931 | np->rv_scntl3 = 0; |
4932 | } |
4933 | else |
4934 | { |
4935 | i = np->clock_divn - 1; |
4936 | while (--i >= 0) { |
4937 | if (10ul * SCSI_NCR_MIN_ASYNC(40) * np->clock_khz |
4938 | > div_10M[i]) { |
4939 | ++i; |
4940 | break; |
4941 | } |
4942 | } |
4943 | np->rv_scntl3 = i+1; |
4944 | } |
4945 | |
4946 | |
4947 | /* |
4948 | * Save the ultra3 register for the C1010/C1010_66 |
4949 | */ |
4950 | |
4951 | np->rv_scntl4 = np->sv_scntl4; |
4952 | |
4953 | /* |
4954 | * Minimum synchronous period factor supported by the chip. |
4955 | * Btw, 'period' is in tenths of nanoseconds. |
4956 | */ |
4957 | |
4958 | period = (4 * div_10M[0] + np->clock_khz - 1) / np->clock_khz; |
4959 | if (period <= 250) np->minsync = 10; |
4960 | else if (period <= 303) np->minsync = 11; |
4961 | else if (period <= 500) np->minsync = 12; |
4962 | else np->minsync = (period + 40 - 1) / 40; |
4963 | |
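	/*
	 * Rough worked example of the mapping above (a sketch, assuming
	 * div_10M[0] is the divide-by-2 entry, i.e. 10000000):
	 *
	 *   clock_khz =  40000 -> period = 1000 (100 ns) -> minsync 25 (Fast-10)
	 *   clock_khz =  80000 -> period =  500 ( 50 ns) -> minsync 12 (Fast-20)
	 *   clock_khz = 160000 -> period =  250 ( 25 ns) -> minsync 10 (Ultra2)
	 */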
4964 | /* |
4965 | 	 * Fix up. If the sync. factor is 10 (160000 KHz clock) and the chip
4966 | 	 * supports Ultra3, then the min. sync. period is 12.5 ns and the factor is 9.
4967 | */ |
4968 | |
4969 | if ((np->minsync == 10) && (np->features & FE_ULTRA3(1<<22))) |
4970 | np->minsync = 9; |
4971 | |
4972 | /* |
4973 | * Check against chip SCSI standard support (SCSI-2,ULTRA,ULTRA2). |
4974 | * |
4975 | 	 * Transfer period minimums (ns, factor): SCSI-1 200 (50); Fast 100 (25);
4976 | 	 * Ultra 50 (12); Ultra2 25 (6); Ultra3 12.5 (3)
4977 | */ |
4978 | |
4979 | if (np->minsync < 25 && !(np->features & (FE_ULTRA(1<<2)|FE_ULTRA2(1<<3)|FE_ULTRA3(1<<22)))) |
4980 | np->minsync = 25; |
4981 | else if (np->minsync < 12 && (np->features & FE_ULTRA(1<<2))) |
4982 | np->minsync = 12; |
4983 | else if (np->minsync < 10 && (np->features & FE_ULTRA2(1<<3))) |
4984 | np->minsync = 10; |
4985 | else if (np->minsync < 9 && (np->features & FE_ULTRA3(1<<22))) |
4986 | np->minsync = 9; |
4987 | |
4988 | /* |
4989 | * Maximum synchronous period factor supported by the chip. |
4990 | */ |
4991 | |
4992 | period = (11 * div_10M[np->clock_divn - 1]) / (4 * np->clock_khz); |
4993 | np->maxsync = period > 2540 ? 254 : period / 10; |
4994 | |
4995 | /* |
4996 | ** 64 bit (53C895A or 53C896) ? |
4997 | */ |
4998 | if (np->features & FE_64BIT(1<<17)) |
4999 | #ifdef SCSI_NCR_USE_64BIT_DAC |
5000 | np->rv_ccntl1 |= (XTIMOD0x04 | EXTIBMV0x02); |
5001 | #else |
5002 | np->rv_ccntl1 |= (DDAC0x08); |
5003 | #endif |
5004 | |
5005 | /* |
5006 | ** Phase mismatch handled by SCRIPTS (53C895A, 53C896 or C1010) ? |
5007 | */ |
5008 | if (np->features & FE_NOPM(1<<19)) |
5009 | np->rv_ccntl0 |= (ENPMJ0x80); |
5010 | |
5011 | /* |
5012 | ** Prepare initial value of other IO registers |
5013 | */ |
5014 | #if defined SCSI_NCR_TRUST_BIOS_SETTING |
5015 | np->rv_scntl0 = np->sv_scntl0; |
5016 | np->rv_dmode = np->sv_dmode; |
5017 | np->rv_dcntl = np->sv_dcntl; |
5018 | np->rv_ctest3 = np->sv_ctest3; |
5019 | np->rv_ctest4 = np->sv_ctest4; |
5020 | np->rv_ctest5 = np->sv_ctest5; |
5021 | burst_max = burst_code(np->sv_dmode, np->sv_ctest4, np->sv_ctest5)(np->sv_ctest4) & 0x80? 0 : (((np->sv_dmode) & 0xc0 ) >> 6) + ((np->sv_ctest5) & 0x04) + 1; |
5022 | #else |
5023 | |
5024 | /* |
5025 | ** Select burst length (dwords) |
5026 | */ |
5027 | burst_max = driver_setup.burst_max; |
5028 | if (burst_max == 255) |
5029 | burst_max = burst_code(np->sv_dmode, np->sv_ctest4, np->sv_ctest5)(np->sv_ctest4) & 0x80? 0 : (((np->sv_dmode) & 0xc0 ) >> 6) + ((np->sv_ctest5) & 0x04) + 1; |
5030 | if (burst_max > 7) |
5031 | burst_max = 7; |
5032 | if (burst_max > np->maxburst) |
5033 | burst_max = np->maxburst; |
5034 | |
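	/*
	 * Note on the burst code (a reading of the burst_code() expansion
	 * above, not a statement from the data book): CTEST4 bit 7 set
	 * means bursts are disabled (code 0); otherwise the code is the
	 * DMODE BL field (bits 7:6) plus the CTEST5 extension bit (0x04)
	 * plus 1, clamped below to 7 and to the per-chip maxburst limit.
	 */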
5035 | /* |
5036 | ** DEL 352 - 53C810 Rev x11 - Part Number 609-0392140 - ITEM 2. |
5037 | ** This chip and the 860 Rev 1 may wrongly use PCI cache line |
5038 | ** based transactions on LOAD/STORE instructions. So we have |
5039 | ** to prevent these chips from using such PCI transactions in |
5040 | ** this driver. The generic sym53c8xx driver that does not use |
5041 | ** LOAD/STORE instructions does not need this work-around. |
5042 | */ |
5043 | if ((np->device_id == PCI_DEVICE_ID_NCR_53C8100x0001 && |
5044 | np->revision_id >= 0x10 && np->revision_id <= 0x11) || |
5045 | (np->device_id == PCI_DEVICE_ID_NCR_53C8600x0006 && |
5046 | np->revision_id <= 0x1)) |
5047 | np->features &= ~(FE_WRIE(1<<8)|FE_ERL(1<<6)|FE_ERMP(1<<9)); |
5048 | |
5049 | /* |
5050 | ** DEL ? - 53C1010 Rev 1 - Part Number 609-0393638 |
5051 | ** 64-bit Slave Cycles must be disabled. |
5052 | */ |
5053 | if ( ((np->device_id == PCI_DEVICE_ID_LSI_53C10100x20) && (np->revision_id < 0x02) ) |
5054 | || (np->device_id == PCI_DEVICE_ID_LSI_53C1010_660x21 ) ) |
5055 | np->rv_ccntl1 |= 0x10; |
5056 | |
5057 | /* |
5058 | ** Select all supported special features. |
5059 | ** If we are using on-board RAM for scripts, prefetch (PFEN) |
5060 | ** does not help, but burst op fetch (BOF) does. |
5061 | ** Disabling PFEN makes sure BOF will be used. |
5062 | */ |
5063 | if (np->features & FE_ERL(1<<6)) |
5064 | np->rv_dmode |= ERL0x08; /* Enable Read Line */ |
5065 | if (np->features & FE_BOF(1<<10)) |
5066 | np->rv_dmode |= BOF0x02; /* Burst Opcode Fetch */ |
5067 | if (np->features & FE_ERMP(1<<9)) |
5068 | np->rv_dmode |= ERMP0x04; /* Enable Read Multiple */ |
5069 | #if 1 |
5070 | if ((np->features & FE_PFEN(1<<12)) && !np->base2_ba) |
5071 | #else |
5072 | if (np->features & FE_PFEN(1<<12)) |
5073 | #endif |
5074 | np->rv_dcntl |= PFEN0x20; /* Prefetch Enable */ |
5075 | if (np->features & FE_CLSE(1<<7)) |
5076 | np->rv_dcntl |= CLSE0x80; /* Cache Line Size Enable */ |
5077 | if (np->features & FE_WRIE(1<<8)) |
5078 | np->rv_ctest3 |= WRIE0x01; /* Write and Invalidate */ |
5079 | |
5080 | |
5081 | if ( (np->device_id != PCI_DEVICE_ID_LSI_53C10100x20) && |
5082 | (np->device_id != PCI_DEVICE_ID_LSI_53C1010_660x21) && |
5083 | (np->features & FE_DFS(1<<11))) |
5084 | np->rv_ctest5 |= DFS0x20; /* Dma Fifo Size */ |
5085 | /* C1010/C1010_66 always large fifo */ |
5086 | |
5087 | /* |
5088 | 	** Select some other options.
5089 | */ |
5090 | if (driver_setup.master_parity) |
5091 | np->rv_ctest4 |= MPEE0x08; /* Master parity checking */ |
5092 | if (driver_setup.scsi_parity) |
5093 | np->rv_scntl0 |= 0x0a; /* full arb., ena parity, par->ATN */ |
5094 | |
5095 | #ifdef SCSI_NCR_NVRAM_SUPPORT |
5096 | /* |
5097 | ** Get parity checking, host ID and verbose mode from NVRAM |
5098 | **/ |
5099 | if (nvram) { |
5100 | switch(nvram->type) { |
5101 | case SCSI_NCR_TEKRAM_NVRAM(2): |
5102 | np->myaddr = nvram->data.Tekram.host_id & 0x0f; |
5103 | break; |
5104 | case SCSI_NCR_SYMBIOS_NVRAM(1): |
5105 | if (!(nvram->data.Symbios.flags & SYMBIOS_PARITY_ENABLE(1<<1))) |
5106 | np->rv_scntl0 &= ~0x0a; |
5107 | np->myaddr = nvram->data.Symbios.host_id & 0x0f; |
5108 | if (nvram->data.Symbios.flags & SYMBIOS_VERBOSE_MSGS(1<<2)) |
5109 | np->verbose += 1; |
5110 | break; |
5111 | } |
5112 | } |
5113 | #endif |
5114 | /* |
5115 | ** Get SCSI addr of host adapter (set by bios?). |
5116 | */ |
5117 | if (np->myaddr == 255) { |
5118 | np->myaddr = INB(nc_scid)(*(volatile unsigned char *) ((char *)np->reg + (((size_t) (&((struct ncr_reg *)0)->nc_scid))))) & 0x07; |
5119 | if (!np->myaddr) |
5120 | np->myaddr = SCSI_NCR_MYADDR(7); |
5121 | } |
5122 | |
5123 | #endif /* SCSI_NCR_TRUST_BIOS_SETTING */ |
5124 | |
5125 | /* |
5126 | * Prepare initial io register bits for burst length |
5127 | */ |
5128 | ncr_init_burst(np, burst_max); |
5129 | |
5130 | /* |
5131 | ** Set SCSI BUS mode. |
5132 | ** |
5133 | ** - ULTRA2 chips (895/895A/896) |
5134 | ** and ULTRA 3 chips (1010) report the current |
5135 | ** BUS mode through the STEST4 IO register. |
5136 | ** - For previous generation chips (825/825A/875), |
5137 | ** user has to tell us how to check against HVD, |
5138 | ** since a 100% safe algorithm is not possible. |
5139 | */ |
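	/*
	** Note on the diff_support switch below: the missing breaks are
	** deliberate.  Mode 4 falls through to 3 when no previous setting
	** is found, 3 falls through to 2 when GPIO3 reads low (HVD
	** sensed), and 2 falls through to 1, so each mode also applies
	** the weaker checks.
	*/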
5140 | np->scsi_mode = SMODE_SE0x80; |
5141 | if (np->features & (FE_ULTRA2(1<<3) | FE_ULTRA3(1<<22))) |
5142 | np->scsi_mode = (np->sv_stest4 & SMODE0xc0); |
5143 | else if (np->features & FE_DIFF(1<<21)) { |
5144 | switch(driver_setup.diff_support) { |
5145 | case 4: /* Trust previous settings if present, then GPIO3 */ |
5146 | if (np->sv_scntl3) { |
5147 | if (np->sv_stest2 & 0x20) |
5148 | np->scsi_mode = SMODE_HVD0x40; |
5149 | break; |
5150 | } |
5151 | case 3: /* SYMBIOS controllers report HVD through GPIO3 */ |
5152 | if (nvram && nvram->type != SCSI_NCR_SYMBIOS_NVRAM(1)) |
5153 | break; |
5154 | if (INB(nc_gpreg)(*(volatile unsigned char *) ((char *)np->reg + (((size_t) (&((struct ncr_reg *)0)->nc_gpreg))))) & 0x08) |
5155 | break; |
5156 | case 2: /* Set HVD unconditionally */ |
5157 | np->scsi_mode = SMODE_HVD0x40; |
5158 | case 1: /* Trust previous settings for HVD */ |
5159 | if (np->sv_stest2 & 0x20) |
5160 | np->scsi_mode = SMODE_HVD0x40; |
5161 | break; |
5162 | default:/* Don't care about HVD */ |
5163 | break; |
5164 | } |
5165 | } |
5166 | if (np->scsi_mode == SMODE_HVD0x40) |
5167 | np->rv_stest2 |= 0x20; |
5168 | |
5169 | /* |
5170 | ** Set LED support from SCRIPTS. |
5171 | ** Ignore this feature for boards known to use a |
5172 | ** specific GPIO wiring and for the 895A or 896 |
5173 | ** that drive the LED directly. |
5174 | ** Also probe initial setting of GPIO0 as output. |
5175 | */ |
5176 | if ((driver_setup.led_pin || |
5177 | (nvram && nvram->type == SCSI_NCR_SYMBIOS_NVRAM(1))) && |
5178 | !(np->features & FE_LEDC(1<<20)) && !(np->sv_gpcntl & 0x01)) |
5179 | np->features |= FE_LED0(1<<0); |
5180 | |
5181 | /* |
5182 | ** Set irq mode. |
5183 | */ |
5184 | switch(driver_setup.irqm & 3) { |
5185 | case 2: |
5186 | np->rv_dcntl |= IRQM0x08; |
5187 | break; |
5188 | case 1: |
5189 | np->rv_dcntl |= (np->sv_dcntl & IRQM0x08); |
5190 | break; |
5191 | default: |
5192 | break; |
5193 | } |
5194 | |
5195 | /* |
5196 | ** Configure targets according to driver setup. |
5197 | ** If NVRAM present get targets setup from NVRAM. |
5198 | 	** Allow sync, wide and NOSCAN to be overridden from
5199 | 	** the boot command line.
5200 | */ |
5201 | for (i = 0 ; i < MAX_TARGET((16)) ; i++) { |
5202 | tcb_p tp = &np->target[i]; |
5203 | |
5204 | tp->usrsync = 255; |
5205 | #ifdef SCSI_NCR_NVRAM_SUPPORT |
5206 | if (nvram) { |
5207 | switch(nvram->type) { |
5208 | case SCSI_NCR_TEKRAM_NVRAM(2): |
5209 | ncr_Tekram_setup_target(np, i, &nvram->data.Tekram); |
5210 | break; |
5211 | case SCSI_NCR_SYMBIOS_NVRAM(1): |
5212 | ncr_Symbios_setup_target(np, i, &nvram->data.Symbios); |
5213 | break; |
5214 | } |
5215 | if (driver_setup.use_nvram & 0x2) |
5216 | tp->usrsync = driver_setup.default_sync; |
5217 | if (driver_setup.use_nvram & 0x4) |
5218 | tp->usrwide = driver_setup.max_wide; |
5219 | if (driver_setup.use_nvram & 0x8) |
5220 | tp->usrflag &= ~UF_NOSCAN(0x04); |
5221 | } |
5222 | else { |
5223 | #else |
5224 | if (1) { |
5225 | #endif |
5226 | tp->usrsync = driver_setup.default_sync; |
5227 | tp->usrwide = driver_setup.max_wide; |
5228 | tp->usrtags = MAX_TAGS(8); |
5229 | if (!driver_setup.disconnection) |
5230 | np->target[i].usrflag = UF_NODISC(0x02); |
5231 | } |
5232 | } |
5233 | |
5234 | /* |
5235 | ** Announce all that stuff to user. |
5236 | */ |
5237 | |
5238 | i = nvram ? nvram->type : 0; |
5239 | printk(KERN_INFO"<6>" "%s: %sID %d, Fast-%d%s%s\n", ncr_name(np), |
5240 | i == SCSI_NCR_SYMBIOS_NVRAM(1) ? "Symbios format NVRAM, " : |
5241 | (i == SCSI_NCR_TEKRAM_NVRAM(2) ? "Tekram format NVRAM, " : ""), |
5242 | np->myaddr, |
5243 | np->minsync < 10 ? 80 : |
5244 | (np->minsync < 12 ? 40 : (np->minsync < 25 ? 20 : 10) ), |
5245 | (np->rv_scntl0 & 0xa) ? ", Parity Checking" : ", NO Parity", |
5246 | (np->rv_stest2 & 0x20) ? ", Differential" : ""); |
5247 | |
5248 | if (bootverbose(np->verbose) > 1) { |
5249 | printk (KERN_INFO"<6>" "%s: initial SCNTL3/DMODE/DCNTL/CTEST3/4/5 = " |
5250 | "(hex) %02x/%02x/%02x/%02x/%02x/%02x\n", |
5251 | ncr_name(np), np->sv_scntl3, np->sv_dmode, np->sv_dcntl, |
5252 | np->sv_ctest3, np->sv_ctest4, np->sv_ctest5); |
5253 | |
5254 | printk (KERN_INFO"<6>" "%s: final SCNTL3/DMODE/DCNTL/CTEST3/4/5 = " |
5255 | "(hex) %02x/%02x/%02x/%02x/%02x/%02x\n", |
5256 | ncr_name(np), np->rv_scntl3, np->rv_dmode, np->rv_dcntl, |
5257 | np->rv_ctest3, np->rv_ctest4, np->rv_ctest5); |
5258 | } |
5259 | |
5260 | if (bootverbose(np->verbose) && np->base2_ba) |
5261 | printk (KERN_INFO"<6>" "%s: on-chip RAM at 0x%lx\n", |
5262 | ncr_name(np), np->base2_ba); |
5263 | |
5264 | return 0; |
5265 | } |
5266 | |
5267 | |
5268 | #ifdef SCSI_NCR_DEBUG_NVRAM |
5269 | |
5270 | void __init ncr_display_Symbios_nvram(ncb_p np, Symbios_nvram *nvram) |
5271 | { |
5272 | int i; |
5273 | |
5274 | /* display Symbios nvram host data */ |
5275 | printk(KERN_DEBUG"<7>" "%s: HOST ID=%d%s%s%s%s%s\n", |
5276 | ncr_name(np), nvram->host_id & 0x0f, |
5277 | (nvram->flags & SYMBIOS_SCAM_ENABLE(1)) ? " SCAM" :"", |
5278 | (nvram->flags & SYMBIOS_PARITY_ENABLE(1<<1)) ? " PARITY" :"", |
5279 | (nvram->flags & SYMBIOS_VERBOSE_MSGS(1<<2)) ? " VERBOSE" :"", |
5280 | (nvram->flags & SYMBIOS_CHS_MAPPING(1<<3)) ? " CHS_ALT" :"", |
5281 | (nvram->flags1 & SYMBIOS_SCAN_HI_LO(1)) ? " HI_LO" :""); |
5282 | |
5283 | /* display Symbios nvram drive data */ |
5284 | for (i = 0 ; i < 15 ; i++) { |
5285 | struct Symbios_target *tn = &nvram->target[i]; |
5286 | printk(KERN_DEBUG"<7>" "%s-%d:%s%s%s%s WIDTH=%d SYNC=%d TMO=%d\n", |
5287 | ncr_name(np), i, |
5288 | (tn->flags & SYMBIOS_DISCONNECT_ENABLE(1)) ? " DISC" : "", |
5289 | (tn->flags & SYMBIOS_SCAN_AT_BOOT_TIME(1<<1)) ? " SCAN_BOOT" : "", |
5290 | (tn->flags & SYMBIOS_SCAN_LUNS(1<<2)) ? " SCAN_LUNS" : "", |
5291 | (tn->flags & SYMBIOS_QUEUE_TAGS_ENABLED(1<<3))? " TCQ" : "", |
5292 | tn->bus_width, |
5293 | tn->sync_period / 4, |
5294 | tn->timeout); |
5295 | } |
5296 | } |
5297 | |
5298 | static u_charunsigned char Tekram_boot_delay[7] __initdata = {3, 5, 10, 20, 30, 60, 120}; |
5299 | |
5300 | void __init ncr_display_Tekram_nvram(ncb_p np, Tekram_nvram *nvram) |
5301 | { |
5302 | int i, tags, boot_delay; |
5303 | char *rem; |
5304 | |
5305 | /* display Tekram nvram host data */ |
5306 | tags = 2 << nvram->max_tags_index; |
5307 | boot_delay = 0; |
5308 | if (nvram->boot_delay_index < 6) |
5309 | boot_delay = Tekram_boot_delay[nvram->boot_delay_index]; |
5310 | switch((nvram->flags & TEKRAM_REMOVABLE_FLAGS(3<<6)) >> 6) { |
5311 | default: |
5312 | case 0: rem = ""; break; |
5313 | case 1: rem = " REMOVABLE=boot device"; break; |
5314 | case 2: rem = " REMOVABLE=all"; break; |
5315 | } |
5316 | |
5317 | printk(KERN_DEBUG"<7>" |
5318 | "%s: HOST ID=%d%s%s%s%s%s%s%s%s%s BOOT DELAY=%d tags=%d\n", |
5319 | ncr_name(np), nvram->host_id & 0x0f, |
5320 | (nvram->flags1 & SYMBIOS_SCAM_ENABLE(1)) ? " SCAM" :"", |
5321 | (nvram->flags & TEKRAM_MORE_THAN_2_DRIVES(1)) ? " >2DRIVES" :"", |
5322 | (nvram->flags & TEKRAM_DRIVES_SUP_1GB(1<<1)) ? " >1GB" :"", |
5323 | (nvram->flags & TEKRAM_RESET_ON_POWER_ON(1<<2)) ? " RESET" :"", |
5324 | (nvram->flags & TEKRAM_ACTIVE_NEGATION(1<<3)) ? " ACT_NEG" :"", |
5325 | (nvram->flags & TEKRAM_IMMEDIATE_SEEK(1<<4)) ? " IMM_SEEK" :"", |
5326 | (nvram->flags & TEKRAM_SCAN_LUNS(1<<5)) ? " SCAN_LUNS" :"", |
5327 | (nvram->flags1 & TEKRAM_F2_F6_ENABLED(1)) ? " F2_F6" :"", |
5328 | rem, boot_delay, tags); |
5329 | |
5330 | /* display Tekram nvram drive data */ |
5331 | for (i = 0; i <= 15; i++) { |
5332 | int sync, j; |
5333 | struct Tekram_target *tn = &nvram->target[i]; |
5334 | j = tn->sync_index & 0xf; |
5335 | sync = Tekram_sync[j]; |
5336 | printk(KERN_DEBUG"<7>" "%s-%d:%s%s%s%s%s%s PERIOD=%d\n", |
5337 | ncr_name(np), i, |
5338 | (tn->flags & TEKRAM_PARITY_CHECK(1)) ? " PARITY" : "", |
5339 | (tn->flags & TEKRAM_SYNC_NEGO(1<<1)) ? " SYNC" : "", |
5340 | (tn->flags & TEKRAM_DISCONNECT_ENABLE(1<<2)) ? " DISC" : "", |
5341 | (tn->flags & TEKRAM_START_CMD(1<<3)) ? " START" : "", |
5342 | (tn->flags & TEKRAM_TAGGED_COMMANDS(1<<4)) ? " TCQ" : "", |
5343 | (tn->flags & TEKRAM_WIDE_NEGO(1<<5)) ? " WIDE" : "", |
5344 | sync); |
5345 | } |
5346 | } |
5347 | #endif /* SCSI_NCR_DEBUG_NVRAM */ |
5348 | |
5349 | /* |
5350 | ** Host attach and initialisations. |
5351 | ** |
5352 | ** Allocate host data and ncb structure. |
5353 | ** Request IO region and remap MMIO region. |
5354 | ** Do chip initialization. |
5355 | ** If all is OK, install interrupt handling and |
5356 | ** start the timer daemon. |
5357 | */ |
5358 | |
5359 | static int __init |
5360 | ncr_attach (Scsi_Host_Template *tpnt, int unit, ncr_device *device) |
5361 | { |
5362 | struct host_data *host_data; |
5363 | ncb_p np = 0; |
5364 | struct Scsi_Host *instance = 0; |
5365 | u_longunsigned long flags = 0; |
5366 | ncr_nvram *nvram = device->nvram; |
5367 | int i; |
5368 | |
5369 | printk(KERN_INFO"<6>" NAME53C"sym53c" "%s-%d: rev 0x%x on pci bus %d device %d function %d " |
5370 | #ifdef __sparc__ |
5371 | "irq %s\n", |
5372 | #else |
5373 | "irq %d\n", |
5374 | #endif |
5375 | device->chip.name, unit, device->chip.revision_id, |
5376 | device->slot.bus, (device->slot.device_fn & 0xf8) >> 3, |
5377 | device->slot.device_fn & 7, |
5378 | #ifdef __sparc__ |
5379 | __irq_itoa(device->slot.irq)); |
5380 | #else |
5381 | device->slot.irq); |
5382 | #endif |
5383 | |
5384 | /* |
5385 | ** Allocate host_data structure |
5386 | */ |
5387 | if (!(instance = scsi_register(tpnt, sizeof(*host_data)))) |
5388 | goto attach_error; |
5389 | host_data = (struct host_data *) instance->hostdata; |
5390 | |
5391 | /* |
5392 | ** Allocate the host control block. |
5393 | */ |
5394 | np = __m_calloc_dma(device->pdev, sizeof(struct ncb), "NCB")m_calloc(sizeof(struct ncb), "NCB"); |
5395 | if (!np) |
5396 | goto attach_error; |
5397 | NCR_INIT_LOCK_NCB(np)do { } while (0); |
5398 | np->pdev = device->pdev; |
5399 | np->p_ncb = vtobus(np)virt_to_phys(np); |
5400 | host_data->ncb = np; |
5401 | |
5402 | /* |
5403 | 	** Store input information in the host data structure.
5404 | */ |
5405 | strncpy(np->chip_name, device->chip.name, sizeof(np->chip_name) - 1); |
5406 | np->unit = unit; |
5407 | np->verbose = driver_setup.verbose; |
5408 | sprintflinux_sprintf(np->inst_name, NAME53C"sym53c" "%s-%d", np->chip_name, np->unit); |
5409 | np->device_id = device->chip.device_id; |
5410 | np->revision_id = device->chip.revision_id; |
5411 | np->bus = device->slot.bus; |
5412 | np->device_fn = device->slot.device_fn; |
5413 | np->features = device->chip.features; |
5414 | np->clock_divn = device->chip.nr_divisor; |
5415 | np->maxoffs = device->chip.offset_max; |
5416 | np->maxburst = device->chip.burst_max; |
5417 | np->myaddr = device->host_id; |
5418 | |
5419 | /* |
5420 | ** Allocate the start queue. |
5421 | */ |
5422 | np->squeue = (ncrcmd *) |
5423 | m_calloc_dma(sizeof(ncrcmd)*(MAX_START*2), "SQUEUE")m_calloc(sizeof(ncrcmd)*(((8*(8) + 2*(16)) + 4)*2), "SQUEUE"); |
5424 | if (!np->squeue) |
5425 | goto attach_error; |
5426 | np->p_squeue = vtobus(np->squeue)virt_to_phys(np->squeue); |
5427 | |
5428 | /* |
5429 | ** Allocate the done queue. |
5430 | */ |
5431 | np->dqueue = (ncrcmd *) |
5432 | m_calloc_dma(sizeof(ncrcmd)*(MAX_START*2), "DQUEUE")m_calloc(sizeof(ncrcmd)*(((8*(8) + 2*(16)) + 4)*2), "DQUEUE"); |
5433 | if (!np->dqueue) |
5434 | goto attach_error; |
5435 | |
5436 | /* |
5437 | ** Allocate the target bus address array. |
5438 | */ |
5439 | np->targtbl = (u_int32 *) m_calloc_dma(256, "TARGTBL")m_calloc(256, "TARGTBL"); |
5440 | if (!np->targtbl) |
5441 | goto attach_error; |
5442 | |
5443 | /* |
5444 | ** Allocate SCRIPTS areas |
5445 | */ |
5446 | np->script0 = (struct script *) |
5447 | m_calloc_dma(sizeof(struct script), "SCRIPT")m_calloc(sizeof(struct script), "SCRIPT"); |
5448 | if (!np->script0) |
5449 | goto attach_error; |
5450 | np->scripth0 = (struct scripth *) |
5451 | m_calloc_dma(sizeof(struct scripth), "SCRIPTH")m_calloc(sizeof(struct scripth), "SCRIPTH"); |
5452 | if (!np->scripth0) |
5453 | goto attach_error; |
5454 | |
5455 | /* |
5456 | 	** Initialize the CCB free queue and
5457 | 	** allocate some CCBs. We need at least ONE.
5458 | */ |
5459 | xpt_que_init(&np->free_ccbq)do { (&np->free_ccbq)->flink = (&np->free_ccbq ); (&np->free_ccbq)->blink = (&np->free_ccbq ); } while (0); |
5460 | xpt_que_init(&np->b0_ccbq)do { (&np->b0_ccbq)->flink = (&np->b0_ccbq); (&np->b0_ccbq)->blink = (&np->b0_ccbq); } while (0); |
5461 | if (!ncr_alloc_ccb(np)) |
5462 | goto attach_error; |
5463 | |
5464 | /* |
5465 | ** Initialize timer structure |
5466 | ** |
5467 | */ |
5468 | init_timer(&np->timer); |
5469 | np->timer.data = (unsigned long) np; |
5470 | np->timer.function = sym53c8xx_timeout; |
5471 | |
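#if 0
	/*
	 * Sketch of how a timer initialized this way is typically
	 * (re)armed under the 2.2/2.4 timer API (an illustration only;
	 * the actual arming is done elsewhere in this driver):
	 */
	np->timer.expires = jiffies + HZ;	/* fire again in about 1 s */
	add_timer(&np->timer);
#endif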
5472 | /* |
5473 | ** Try to map the controller chip to |
5474 | ** virtual and physical memory. |
5475 | */ |
5476 | |
5477 | np->base_ba = device->slot.base; |
5478 | np->base_ws = (np->features & FE_IO256(1<<18))? 256 : 128; |
5479 | np->base2_ba = (np->features & FE_RAM(1<<14))? device->slot.base_2 : 0; |
5480 | |
5481 | #ifndef SCSI_NCR_IOMAPPED |
5482 | np->base_va = remap_pci_mem(np->base_ba, np->base_ws); |
5483 | if (!np->base_va) { |
5484 | printk(KERN_ERR"<3>" "%s: can't map PCI MMIO region\n",ncr_name(np)); |
5485 | goto attach_error; |
5486 | } |
5487 | else if (bootverbose(np->verbose) > 1) |
5488 | printk(KERN_INFO"<6>" "%s: using memory mapped IO\n", ncr_name(np)); |
5489 | |
5490 | /* |
5491 | ** Make the controller's registers available. |
5492 | ** Now the INB INW INL OUTB OUTW OUTL macros |
5493 | ** can be used safely. |
5494 | */ |
5495 | |
5496 | np->reg = (struct ncr_reg *) np->base_va; |
5497 | |
5498 | #endif /* !defined SCSI_NCR_IOMAPPED */ |
5499 | |
5500 | /* |
5501 | ** If on-chip RAM is used, make sure SCRIPTS isn't too large. |
5502 | */ |
5503 | if (np->base2_ba && sizeof(struct script) > 4096) { |
5504 | printk(KERN_ERR"<3>" "%s: script too large.\n", ncr_name(np)); |
5505 | goto attach_error; |
5506 | } |
5507 | |
5508 | /* |
5509 | ** Try to map the controller chip into iospace. |
5510 | */ |
5511 | |
5512 | if (device->slot.io_port) { |
5513 | request_region(device->slot.io_port, np->base_ws, NAME53C8XX"sym53c8xx"); |
5514 | np->base_io = device->slot.io_port; |
5515 | } |
5516 | |
5517 | #ifdef SCSI_NCR_NVRAM_SUPPORT |
5518 | if (nvram) { |
5519 | switch(nvram->type) { |
5520 | case SCSI_NCR_SYMBIOS_NVRAM(1): |
5521 | #ifdef SCSI_NCR_DEBUG_NVRAM |
5522 | ncr_display_Symbios_nvram(np, &nvram->data.Symbios); |
5523 | #endif |
5524 | break; |
5525 | case SCSI_NCR_TEKRAM_NVRAM(2): |
5526 | #ifdef SCSI_NCR_DEBUG_NVRAM |
5527 | ncr_display_Tekram_nvram(np, &nvram->data.Tekram); |
5528 | #endif |
5529 | break; |
5530 | default: |
5531 | nvram = 0; |
5532 | #ifdef SCSI_NCR_DEBUG_NVRAM |
5533 | printk(KERN_DEBUG"<7>" "%s: NVRAM: None or invalid data.\n", ncr_name(np)); |
5534 | #endif |
5535 | } |
5536 | } |
5537 | #endif |
5538 | |
5539 | /* |
5540 | ** Save setting of some IO registers, so we will |
5541 | ** be able to probe specific implementations. |
5542 | */ |
5543 | ncr_save_initial_setting (np); |
5544 | |
5545 | /* |
5546 | ** Reset the chip now, since it has been reported |
5547 | ** that SCSI clock calibration may not work properly |
5548 | ** if the chip is currently active. |
5549 | */ |
5550 | ncr_chip_reset (np); |
5551 | |
5552 | /* |
5553 | ** Do chip dependent initialization. |
5554 | */ |
5555 | (void) ncr_prepare_setting(np, nvram); |
5556 | |
5557 | /* |
5558 | ** Check the PCI clock frequency if needed. |
5559 | ** |
5560 | ** Must be done after ncr_prepare_setting since it destroys |
5561 | ** STEST1 that is used to probe for the clock multiplier. |
5562 | ** |
5563 | ** The range is currently [22688 - 45375 Khz], given |
5564 | ** the values used by ncr_getclock(). |
5565 | 	** This calibration of the frequency measurement
5566 | ** algorithm against the PCI clock frequency is only |
5567 | ** performed if the driver has had to measure the SCSI |
5568 | ** clock due to other heuristics not having been enough |
5569 | ** to deduce the SCSI clock frequency. |
5570 | ** |
5571 | ** When the chip has been initialized correctly by the |
5572 | ** SCSI BIOS, the driver deduces the presence of the |
5573 | ** clock multiplier and the value of the SCSI clock from |
5574 | ** initial values of IO registers, and therefore no |
5575 | ** clock measurement is performed. |
5576 | ** Normally the driver should never have to measure any |
5577 | ** clock, unless the controller may use a 80 MHz clock |
5578 | ** or has a clock multiplier and any of the following |
5579 | ** condition is met: |
5580 | ** |
5581 | ** - No SCSI BIOS is present. |
5582 | 	** - SCSI BIOS didn't enable the multiplier for some reason.
5583 | ** - User has disabled the controller from the SCSI BIOS. |
5584 | 	** - User booted the O/S from another O/S that didn't enable
5585 | ** the multiplier for some reason. |
5586 | ** |
5587 | ** As a result, the driver may only have to measure some |
5588 | ** frequency in very unusual situations. |
5589 | ** |
5590 | ** For this reality test against the PCI clock to really |
5591 | ** protect against flaws in the udelay() calibration or |
5592 | 	** driver problems that affect the clock measurement
5593 | ** algorithm, the actual PCI clock frequency must be 33 MHz. |
5594 | */ |
5595 | i = np->pciclock_max ? ncr_getpciclock(np) : 0; |
5596 | if (i && (i < np->pciclock_min || i > np->pciclock_max)) { |
5597 | printk(KERN_ERR"<3>" "%s: PCI clock (%u KHz) is out of range " |
5598 | "[%u KHz - %u KHz].\n", |
5599 | ncr_name(np), i, np->pciclock_min, np->pciclock_max); |
5600 | goto attach_error; |
5601 | } |
5602 | |
5603 | /* |
5604 | ** Patch script to physical addresses |
5605 | */ |
5606 | ncr_script_fill (&script0, &scripth0); |
5607 | |
5608 | np->p_script = vtobus(np->script0)virt_to_phys(np->script0); |
5609 | np->p_scripth = vtobus(np->scripth0)virt_to_phys(np->scripth0); |
5610 | np->p_scripth0 = np->p_scripth; |
5611 | |
5612 | if (np->base2_ba) { |
5613 | np->p_script = pcivtobus(np->base2_ba)(np->base2_ba); |
5614 | if (np->features & FE_RAM8K(1<<16)) { |
5615 | np->base2_ws = 8192; |
5616 | np->p_scripth = np->p_script + 4096; |
5617 | #if BITS_PER_LONG32 > 32 |
5618 | np->scr_ram_seg = cpu_to_scr(np->base2_ba >> 32)(np->base2_ba >> 32); |
5619 | #endif |
5620 | } |
5621 | else |
5622 | np->base2_ws = 4096; |
5623 | #ifndef SCSI_NCR_PCI_MEM_NOT_SUPPORTED |
5624 | np->base2_va = remap_pci_mem(np->base2_ba, np->base2_ws); |
5625 | if (!np->base2_va) { |
5626 | printk(KERN_ERR"<3>" "%s: can't map PCI MEMORY region\n", |
5627 | ncr_name(np)); |
5628 | goto attach_error; |
5629 | } |
5630 | #endif |
5631 | } |
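	/*
	** In other words: with 8K of on-chip RAM the main SCRIPTS are
	** addressed in the first 4K and the scripth part in the second
	** 4K; with only 4K, the main SCRIPTS alone go on-chip (hence the
	** earlier check against 4096) and scripth stays in host memory.
	*/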
5632 | |
5633 | ncr_script_copy_and_bind (np, (ncrcmd *) &script0, (ncrcmd *) np->script0, sizeof(struct script)); |
5634 | ncr_script_copy_and_bind (np, (ncrcmd *) &scripth0, (ncrcmd *) np->scripth0, sizeof(struct scripth)); |
5635 | |
5636 | /* |
5637 | ** Patch some variables in SCRIPTS |
5638 | */ |
5639 | np->scripth0->pm0_data_addr[0] = |
5640 | cpu_to_scr(NCB_SCRIPT_PHYS(np, pm0_data))((np->p_script + ((size_t) (&((struct script *)0)-> pm0_data)))); |
5641 | np->scripth0->pm1_data_addr[0] = |
5642 | cpu_to_scr(NCB_SCRIPT_PHYS(np, pm1_data))((np->p_script + ((size_t) (&((struct script *)0)-> pm1_data)))); |
5643 | |
5644 | /* |
5645 | ** Patch if not Ultra 3 - Do not write to scntl4 |
5646 | */ |
5647 | if (np->features & FE_ULTRA3(1<<22)) { |
5648 | np->script0->resel_scntl4[0] = cpu_to_scr(SCR_LOAD_REL (scntl4, 1))((0xe1000000 | 0x02000000|0x10000000 | (((((((size_t) (&( (struct ncr_reg *)0)->nc_scntl4)))) & 0xff) << 16ul )) | (1))); |
5649 | np->script0->resel_scntl4[1] = cpu_to_scr(offsetof(struct tcb, uval))(((size_t) (&((struct tcb *)0)->uval))); |
5650 | } |
5651 | |
5652 | |
5653 | #ifdef SCSI_NCR_PCI_MEM_NOT_SUPPORTED |
5654 | np->scripth0->script0_ba[0] = cpu_to_scr(vtobus(np->script0))(virt_to_phys(np->script0)); |
5655 | np->scripth0->script0_ba64[0] = cpu_to_scr(vtobus(np->script0))(virt_to_phys(np->script0)); |
5656 | np->scripth0->scripth0_ba64[0] = cpu_to_scr(vtobus(np->scripth0))(virt_to_phys(np->scripth0)); |
5657 | np->scripth0->ram_seg64[0] = np->scr_ram_seg; |
5658 | #endif |
5659 | /* |
5660 | ** Prepare the idle and invalid task actions. |
5661 | */ |
5662 | np->idletask.start = cpu_to_scr(NCB_SCRIPT_PHYS (np, idle))((np->p_script + ((size_t) (&((struct script *)0)-> idle)))); |
5663 | np->idletask.restart = cpu_to_scr(NCB_SCRIPTH_PHYS (np, bad_i_t_l))((np->p_scripth + ((size_t) (&((struct scripth *)0)-> bad_i_t_l)))); |
5664 | np->p_idletask = NCB_PHYS(np, idletask)(np->p_ncb + ((size_t) (&((struct ncb *)0)->idletask ))); |
5665 | |
5666 | np->notask.start = cpu_to_scr(NCB_SCRIPT_PHYS (np, idle))((np->p_script + ((size_t) (&((struct script *)0)-> idle)))); |
5667 | np->notask.restart = cpu_to_scr(NCB_SCRIPTH_PHYS (np, bad_i_t_l))((np->p_scripth + ((size_t) (&((struct scripth *)0)-> bad_i_t_l)))); |
5668 | np->p_notask = NCB_PHYS(np, notask)(np->p_ncb + ((size_t) (&((struct ncb *)0)->notask) )); |
5669 | |
5670 | np->bad_i_t_l.start = cpu_to_scr(NCB_SCRIPT_PHYS (np, idle))((np->p_script + ((size_t) (&((struct script *)0)-> idle)))); |
5671 | np->bad_i_t_l.restart = cpu_to_scr(NCB_SCRIPTH_PHYS (np, bad_i_t_l))((np->p_scripth + ((size_t) (&((struct scripth *)0)-> bad_i_t_l)))); |
5672 | np->p_bad_i_t_l = NCB_PHYS(np, bad_i_t_l)(np->p_ncb + ((size_t) (&((struct ncb *)0)->bad_i_t_l ))); |
5673 | |
5674 | np->bad_i_t_l_q.start = cpu_to_scr(NCB_SCRIPT_PHYS (np, idle))((np->p_script + ((size_t) (&((struct script *)0)-> idle)))); |
5675 | np->bad_i_t_l_q.restart = cpu_to_scr(NCB_SCRIPTH_PHYS (np,bad_i_t_l_q))((np->p_scripth + ((size_t) (&((struct scripth *)0)-> bad_i_t_l_q)))); |
5676 | np->p_bad_i_t_l_q = NCB_PHYS(np, bad_i_t_l_q)(np->p_ncb + ((size_t) (&((struct ncb *)0)->bad_i_t_l_q ))); |
5677 | |
5678 | /* |
5679 | ** Allocate and prepare the bad lun table. |
5680 | */ |
5681 | np->badluntbl = m_calloc_dma(256, "BADLUNTBL")m_calloc(256, "BADLUNTBL"); |
5682 | if (!np->badluntbl) |
5683 | goto attach_error; |
5684 | |
5685 | assert (offsetof(struct lcb, resel_task) == 0){ if (!(((size_t) (&((struct lcb *)0)->resel_task)) == 0)) { (void)panic( "assertion \"%s\" failed: file \"%s\", line %d\n" , "offsetof(struct lcb, resel_task) == 0", "../linux/src/drivers/scsi/sym53c8xx.c" , 5685); } }; |
5686 | np->resel_badlun = cpu_to_scr(NCB_SCRIPTH_PHYS(np, resel_bad_lun))((np->p_scripth + ((size_t) (&((struct scripth *)0)-> resel_bad_lun)))); |
5687 | |
5688 | for (i = 0 ; i < 64 ; i++) |
5689 | np->badluntbl[i] = cpu_to_scr(NCB_PHYS(np, resel_badlun))((np->p_ncb + ((size_t) (&((struct ncb *)0)->resel_badlun )))); |
5690 | |
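	/*
	** Sizing note: 64 entries of 4 bytes each account for the 256
	** bytes allocated for badluntbl above; every LUN slot starts out
	** pointing at the resel_bad_lun handler and is presumably
	** overwritten later when a real LCB is set up for that LUN.
	*/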
5691 | /* |
5692 | ** Prepare the target bus address array. |
5693 | */ |
5694 | np->scripth0->targtbl[0] = cpu_to_scr(vtobus(np->targtbl))(virt_to_phys(np->targtbl)); |
5695 | for (i = 0 ; i < MAX_TARGET((16)) ; i++) { |
5696 | np->targtbl[i] = cpu_to_scr(NCB_PHYS(np, target[i]))((np->p_ncb + ((size_t) (&((struct ncb *)0)->target [i])))); |
5697 | np->target[i].b_luntbl = cpu_to_scr(vtobus(np->badluntbl))(virt_to_phys(np->badluntbl)); |
5698 | np->target[i].b_lun0 = cpu_to_scr(NCB_PHYS(np, resel_badlun))((np->p_ncb + ((size_t) (&((struct ncb *)0)->resel_badlun )))); |
5699 | } |
5700 | |
5701 | /* |
5702 | ** Patch the script for LED support. |
5703 | */ |
5704 | |
5705 | if (np->features & FE_LED0(1<<0)) { |
5706 | np->script0->idle[0] = |
5707 | cpu_to_scr(SCR_REG_REG(gpreg, SCR_OR, 0x01))((0x78000000 | ((((((((size_t) (&((struct ncr_reg *)0)-> nc_gpreg)))) & 0x7f) << 16ul) + (((((size_t) (& ((struct ncr_reg *)0)->nc_gpreg)))) & 0x80))) | (0x02000000 ) | (((0x01)&0xff)<<8ul))); |
5708 | np->script0->reselected[0] = |
5709 | cpu_to_scr(SCR_REG_REG(gpreg, SCR_AND, 0xfe))((0x78000000 | ((((((((size_t) (&((struct ncr_reg *)0)-> nc_gpreg)))) & 0x7f) << 16ul) + (((((size_t) (& ((struct ncr_reg *)0)->nc_gpreg)))) & 0x80))) | (0x04000000 ) | (((0xfe)&0xff)<<8ul))); |
5710 | np->script0->start[0] = |
5711 | cpu_to_scr(SCR_REG_REG(gpreg, SCR_AND, 0xfe))((0x78000000 | ((((((((size_t) (&((struct ncr_reg *)0)-> nc_gpreg)))) & 0x7f) << 16ul) + (((((size_t) (& ((struct ncr_reg *)0)->nc_gpreg)))) & 0x80))) | (0x04000000 ) | (((0xfe)&0xff)<<8ul))); |
5712 | } |
5713 | |
5714 | /* |
5715 | ** Patch the script to provide an extra clock cycle on |
5716 | ** data out phase - 53C1010_66MHz part only. |
5717 | */ |
5718 | if (np->device_id == PCI_DEVICE_ID_LSI_53C1010_660x21){ |
5719 | np->script0->datao_phase[0] = |
5720 | cpu_to_scr(SCR_REG_REG(scntl4, SCR_OR, 0x0c))((0x78000000 | ((((((((size_t) (&((struct ncr_reg *)0)-> nc_scntl4)))) & 0x7f) << 16ul) + (((((size_t) (& ((struct ncr_reg *)0)->nc_scntl4)))) & 0x80))) | (0x02000000 ) | (((0x0c)&0xff)<<8ul))); |
5721 | } |
5722 | |
5723 | #ifdef SCSI_NCR_IARB_SUPPORT |
5724 | /* |
5725 | ** If user does not want to use IMMEDIATE ARBITRATION |
5726 | ** when we are reselected while attempting to arbitrate, |
5727 | ** patch the SCRIPTS accordingly with a SCRIPT NO_OP. |
5728 | */ |
5729 | if (!(driver_setup.iarb & 1)) |
5730 | np->script0->ungetjob[0] = cpu_to_scr(SCR_NO_OP)(0x80000000); |
5731 | /* |
5732 | ** If user wants IARB to be set when we win arbitration |
5733 | ** and have other jobs, compute the max number of consecutive |
5734 | ** settings of IARB hint before we leave devices a chance to |
5735 | ** arbitrate for reselection. |
5736 | */ |
5737 | np->iarb_max = (driver_setup.iarb >> 4); |
5738 | #endif |
5739 | |
5740 | /* |
5741 | ** DEL 472 - 53C896 Rev 1 - Part Number 609-0393055 - ITEM 5. |
5742 | */ |
5743 | if (np->device_id == PCI_DEVICE_ID_NCR_53C8960x000b && |
5744 | np->revision_id <= 0x1 && (np->features & FE_NOPM(1<<19))) { |
5745 | np->scatter = ncr_scatter_896R1; |
5746 | np->script0->datai_phase[0] = cpu_to_scr(SCR_JUMP)(0x80080000); |
5747 | np->script0->datai_phase[1] = |
5748 | cpu_to_scr(NCB_SCRIPTH_PHYS (np, tweak_pmj))((np->p_scripth + ((size_t) (&((struct scripth *)0)-> tweak_pmj)))); |
5749 | np->script0->datao_phase[0] = cpu_to_scr(SCR_JUMP)(0x80080000); |
5750 | np->script0->datao_phase[1] = |
5751 | cpu_to_scr(NCB_SCRIPTH_PHYS (np, tweak_pmj))((np->p_scripth + ((size_t) (&((struct scripth *)0)-> tweak_pmj)))); |
5752 | } |
5753 | else |
5754 | #ifdef DEBUG_896R1 |
5755 | np->scatter = ncr_scatter_896R1; |
5756 | #else |
5757 | np->scatter = ncr_scatter; |
5758 | #endif |
5759 | |
5760 | /* |
5761 | ** Reset chip. |
5762 | 	** We should use ncr_soft_reset(), but we do not want to do
5763 | 	** so, since we may not be safe if an ABRT interrupt occurs due
5764 | 	** to the BIOS or a previous O/S having enabled this interrupt.
5765 | ** |
5766 | ** For C1010 need to set ABRT bit prior to SRST if SCRIPTs |
5767 | ** are running. Not true in this case. |
5768 | */ |
5769 | ncr_chip_reset(np); |
5770 | |
5771 | /* |
5772 | ** Now check the cache handling of the pci chipset. |
5773 | */ |
5774 | |
5775 | if (ncr_snooptest (np)) { |
5776 | printk (KERN_ERR"<3>" "CACHE INCORRECTLY CONFIGURED.\n"); |
5777 | goto attach_error; |
5778 | }; |
5779 | |
5780 | /* |
5781 | ** Install the interrupt handler. |
5782 | 	** If we synchronize the C code with SCRIPTS on interrupt,
5783 | 	** we do not want to share the INTR line at all.
5784 | */ |
5785 | if (request_irq(device->slot.irq, sym53c8xx_intr, |
5786 | #ifdef SCSI_NCR_PCIQ_SYNC_ON_INTR |
5787 | ((driver_setup.irqm & 0x20) ? 0 : SA_INTERRUPT0x20000000), |
5788 | #else |
5789 | ((driver_setup.irqm & 0x10) ? 0 : SA_SHIRQ0x04000000) | |
5790 | |
5791 | #if 0 && LINUX_VERSION_CODE131108 < LinuxVersionCode(2,2,0)(((2)<<16)+((2)<<8)+(0)) |
5792 | ((driver_setup.irqm & 0x20) ? 0 : SA_INTERRUPT0x20000000), |
5793 | #else |
5794 | 0, |
5795 | #endif |
5796 | #endif |
5797 | NAME53C8XX"sym53c8xx", np)) { |
5798 | printk(KERN_ERR"<3>" "%s: request irq %d failure\n", |
5799 | ncr_name(np), device->slot.irq); |
5800 | goto attach_error; |
5801 | } |
5802 | np->irq = device->slot.irq; |
5803 | |
5804 | /* |
5805 | ** After SCSI devices have been opened, we cannot |
5806 | ** reset the bus safely, so we do it here. |
5807 | ** Interrupt handler does the real work. |
5808 | ** Process the reset exception, |
5809 | ** if interrupts are not enabled yet. |
5810 | ** Then enable disconnects. |
5811 | */ |
5812 | NCR_LOCK_NCB(np, flags)do { __asm__ __volatile__("pushf ; pop %0" : "=r" (flags): :"memory" ); __asm__ __volatile__ ("cli": : :"memory"); } while (0); |
5813 | if (ncr_reset_scsi_bus(np, 0, driver_setup.settle_delay) != 0) { |
5814 | printk(KERN_ERR"<3>" "%s: FATAL ERROR: CHECK SCSI BUS - CABLES, TERMINATION, DEVICE POWER etc.!\n", ncr_name(np)); |
5815 | |
5816 | NCR_UNLOCK_NCB(np, flags)do { __asm__ __volatile__("push %0 ; popf": :"g" (flags):"memory" ); } while (0); |
5817 | goto attach_error; |
5818 | } |
5819 | ncr_exception (np); |
5820 | |
5821 | /* |
5822 | ** The middle-level SCSI driver does not |
5823 | ** wait for devices to settle. |
5824 | ** Wait synchronously if more than 2 seconds. |
5825 | */ |
5826 | if (driver_setup.settle_delay > 2) { |
5827 | printk(KERN_INFO"<6>" "%s: waiting %d seconds for scsi devices to settle...\n", |
5828 | ncr_name(np), driver_setup.settle_delay); |
5829 | MDELAY (1000 * driver_setup.settle_delay); |
5830 | } |
5831 | |
5832 | /* |
5833 | ** start the timeout daemon |
5834 | */ |
5835 | np->lasttime=0; |
5836 | ncr_timeout (np); |
5837 | |
5838 | /* |
5839 | ** use SIMPLE TAG messages by default |
5840 | */ |
5841 | #ifdef SCSI_NCR_ALWAYS_SIMPLE_TAG |
5842 | np->order = M_SIMPLE_TAG(0x20); |
5843 | #endif |
5844 | |
5845 | /* |
5846 | ** Done. |
5847 | */ |
5848 | if (!first_host) |
5849 | first_host = instance; |
5850 | |
5851 | /* |
5852 | ** Fill Linux host instance structure |
5853 | ** and return success. |
5854 | */ |
5855 | instance->max_channel = 0; |
5856 | instance->this_id = np->myaddr; |
5857 | instance->max_id = np->maxwide ? 16 : 8; |
5858 | instance->max_lun = MAX_LUN64; |
5859 | #ifndef SCSI_NCR_IOMAPPED |
5860 | #if LINUX_VERSION_CODE131108 >= LinuxVersionCode(2,3,29)(((2)<<16)+((3)<<8)+(29)) |
5861 | instance->base = (unsigned long) np->reg; |
5862 | #else |
5863 | instance->base = (char *) np->reg; |
5864 | #endif |
5865 | #endif |
5866 | instance->irq = np->irq; |
5867 | instance->unique_id = np->base_io; |
5868 | instance->io_port = np->base_io; |
5869 | instance->n_io_port = np->base_ws; |
5870 | instance->dma_channel = 0; |
5871 | instance->cmd_per_lun = MAX_TAGS(8); |
5872 | instance->can_queue = (MAX_START((8*(8) + 2*(16)) + 4)-4); |
5873 | |
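	/*
	** Given the expanded macro values above (MAX_TAGS = 8 and
	** MAX_START = (8*8 + 2*16) + 4 = 100), this advertises
	** cmd_per_lun = 8 and can_queue = 96 to the SCSI mid-layer.
	*/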
5874 | np->check_integrity = 0; |
5875 | |
5876 | #ifdef SCSI_NCR_INTEGRITY_CHECKING |
5877 | instance->check_integrity = 0; |
5878 | |
5879 | #ifdef SCSI_NCR_ENABLE_INTEGRITY_CHECK |
5880 | if ( !(driver_setup.bus_check & 0x04) ) { |
5881 | np->check_integrity = 1; |
5882 | instance->check_integrity = 1; |
5883 | } |
5884 | #endif |
5885 | #endif |
5886 | |
5887 | instance->select_queue_depths = sym53c8xx_select_queue_depths; |
5888 | |
5889 | NCR_UNLOCK_NCB(np, flags)do { __asm__ __volatile__("push %0 ; popf": :"g" (flags):"memory" ); } while (0); |
5890 | |
5891 | /* |
5892 | ** Now let the generic SCSI driver |
5893 | ** look for the SCSI devices on the bus .. |
5894 | */ |
5895 | return 0; |
5896 | |
5897 | attach_error: |
5898 | if (!instance) return -1; |
5899 | printk(KERN_INFO"<6>" "%s: giving up ...\n", ncr_name(np)); |
5900 | if (np) |
5901 | ncr_free_resources(np); |
5902 | scsi_unregister(instance); |
5903 | |
5904 | return -1; |
5905 | } |
5906 | |
5907 | |
5908 | /* |
5909 | ** Free controller resources. |
5910 | */ |
5911 | static void ncr_free_resources(ncb_p np) |
5912 | { |
5913 | ccb_p cp; |
5914 | tcb_p tp; |
5915 | lcb_p lp; |
5916 | int target, lun; |
5917 | |
5918 | if (np->irq) |
5919 | free_irq(np->irq, np); |
5920 | if (np->base_io) |
5921 | release_region(np->base_io, np->base_ws); |
5922 | #ifndef SCSI_NCR_PCI_MEM_NOT_SUPPORTED |
5923 | if (np->base_va) |
5924 | unmap_pci_mem(np->base_va, np->base_ws); |
5925 | if (np->base2_va) |
5926 | unmap_pci_mem(np->base2_va, np->base2_ws); |
5927 | #endif |
5928 | if (np->scripth0) |
5929 | m_free_dma(np->scripth0, sizeof(struct scripth), "SCRIPTH")m_free(np->scripth0, sizeof(struct scripth), "SCRIPTH"); |
5930 | if (np->script0) |
5931 | m_free_dma(np->script0, sizeof(struct script), "SCRIPT")m_free(np->script0, sizeof(struct script), "SCRIPT"); |
5932 | if (np->squeue) |
5933 | m_free_dma(np->squeue, sizeof(ncrcmd)*(MAX_START*2), "SQUEUE")m_free(np->squeue, sizeof(ncrcmd)*(((8*(8) + 2*(16)) + 4)* 2), "SQUEUE"); |
5934 | if (np->dqueue) |
5935 | m_free_dma(np->dqueue, sizeof(ncrcmd)*(MAX_START*2),"DQUEUE")m_free(np->dqueue, sizeof(ncrcmd)*(((8*(8) + 2*(16)) + 4)* 2), "DQUEUE"); |
5936 | |
5937 | while ((cp = np->ccbc) != NULL((void *) 0)) { |
5938 | np->ccbc = cp->link_ccb; |
5939 | m_free_dma(cp, sizeof(*cp), "CCB")m_free(cp, sizeof(*cp), "CCB"); |
5940 | } |
5941 | |
5942 | if (np->badluntbl) |
5943 | m_free_dma(np->badluntbl, 256,"BADLUNTBL")m_free(np->badluntbl, 256, "BADLUNTBL"); |
5944 | |
5945 | for (target = 0; target < MAX_TARGET((16)) ; target++) { |
5946 | tp = &np->target[target]; |
5947 | for (lun = 0 ; lun < MAX_LUN64 ; lun++) { |
5948 | lp = ncr_lp(np, tp, lun)(!lun) ? (tp)->l0p : (tp)->lmp ? (tp)->lmp[(lun)] : 0; |
5949 | if (!lp) |
5950 | continue; |
5951 | if (lp->tasktbl != &lp->tasktbl_0) |
5952 | m_free_dma(lp->tasktbl, MAX_TASKS*4, "TASKTBL")m_free(lp->tasktbl, (256/4)*4, "TASKTBL"); |
5953 | if (lp->cb_tags) |
5954 | m_free(lp->cb_tags, MAX_TAGS(8), "CB_TAGS"); |
5955 | m_free_dma(lp, sizeof(*lp), "LCB")m_free(lp, sizeof(*lp), "LCB"); |
5956 | } |
5957 | #if MAX_LUN64 > 1 |
5958 | if (tp->lmp) |
5959 | m_free(tp->lmp, MAX_LUN64 * sizeof(lcb_p), "LMP"); |
5960 | if (tp->luntbl) |
5961 | m_free_dma(tp->luntbl, 256, "LUNTBL")m_free(tp->luntbl, 256, "LUNTBL"); |
5962 | #endif |
5963 | } |
5964 | |
5965 | if (np->targtbl) |
5966 | m_free_dma(np->targtbl, 256, "TARGTBL")m_free(np->targtbl, 256, "TARGTBL"); |
5967 | |
5968 | m_free_dma(np, sizeof(*np), "NCB")m_free(np, sizeof(*np), "NCB"); |
5969 | } |
5970 | |
5971 | |
5972 | /*========================================================== |
5973 | ** |
5974 | ** |
5975 | ** Done SCSI commands list management. |
5976 | ** |
5977 | **  We do not enter the scsi_done() callback immediately
5978 | **  after a command has been seen as completed but we
5979 | **  insert it into a list which is flushed outside any kind
5980 | **  of driver critical section.
5981 | **  This allows us to do minimal work under interrupt and
5982 | **  inside critical sections and also to avoid locking up
5983 | ** on recursive calls to driver entry points under SMP. |
5984 | ** In fact, the only kernel point which is entered by the |
5985 | ** driver with a driver lock set is get_free_pages(GFP_ATOMIC...) |
5986 | ** that shall not reenter the driver under any circumstance. |
5987 | ** |
5988 | **========================================================== |
5989 | */ |
5990 | static inlineinline __attribute__((always_inline)) void ncr_queue_done_cmd(ncb_p np, Scsi_Cmnd *cmd) |
5991 | { |
5992 | unmap_scsi_data(np, cmd)do {; } while (0); |
5993 | cmd->host_scribble = (char *) np->done_list; |
5994 | np->done_list = cmd; |
5995 | } |
5996 | |
5997 | static inlineinline __attribute__((always_inline)) void ncr_flush_done_cmds(Scsi_Cmnd *lcmd) |
5998 | { |
5999 | Scsi_Cmnd *cmd; |
6000 | |
6001 | while (lcmd) { |
6002 | cmd = lcmd; |
6003 | lcmd = (Scsi_Cmnd *) cmd->host_scribble; |
6004 | cmd->scsi_done(cmd); |
6005 | } |
6006 | } |
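/*
** A rough usage sketch of the pair above (an illustration of the
** pattern described in the comment block, not code lifted from the
** driver): completed commands are chained under the lock through
** host_scribble, then handed to scsi_done() after the lock is dropped.
*/
#if 0
static void example_complete_one(ncb_p np, Scsi_Cmnd *cmd)
{
	Scsi_Cmnd *done_list;
	unsigned long flags = 0;

	NCR_LOCK_NCB(np, flags);
	ncr_queue_done_cmd(np, cmd);	/* chain cmd on np->done_list */
	done_list = np->done_list;	/* detach the whole list */
	np->done_list = 0;
	NCR_UNLOCK_NCB(np, flags);

	ncr_flush_done_cmds(done_list);	/* scsi_done() outside the lock */
}
#endif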
6007 | |
6008 | /*========================================================== |
6009 | ** |
6010 | ** |
6011 | ** Prepare the next negotiation message for integrity check, |
6012 | ** if needed. |
6013 | ** |
6014 | ** Fill in the part of message buffer that contains the |
6015 | ** negotiation and the nego_status field of the CCB. |
6016 | ** Returns the size of the message in bytes. |
6017 | ** |
6018 | ** If tp->ppr_negotiation is 1 and a M_REJECT occurs, then |
6019 | ** we disable ppr_negotiation. If the first ppr_negotiation is |
6020 | ** successful, set this flag to 2. |
6021 | ** |
6022 | **========================================================== |
6023 | */ |
6024 | #ifdef SCSI_NCR_INTEGRITY_CHECKING |
6025 | static int ncr_ic_nego(ncb_p np, ccb_p cp, Scsi_Cmnd *cmd, u_charunsigned char *msgptr) |
6026 | { |
6027 | tcb_p tp = &np->target[cp->target]; |
6028 | int msglen = 0; |
6029 | int nego = 0; |
6030 | u_charunsigned char new_width, new_offset, new_period; |
6031 | u_charunsigned char no_increase; |
6032 | |
6033 | if (tp->ppr_negotiation == 1) /* PPR message successful */ |
6034 | tp->ppr_negotiation = 2; |
6035 | |
6036 | if (tp->inq_done) { |
6037 | |
6038 | if (!tp->ic_maximums_set) { |
6039 | tp->ic_maximums_set = 1; |
6040 | |
6041 | /* |
6042 | * Check against target, host and user limits |
6043 | */ |
6044 | if ( (tp->inq_byte7 & INQ7_WIDE16(0x20)) && |
6045 | np->maxwide && tp->usrwide) |
6046 | tp->ic_max_width = 1; |
6047 | else |
6048 | tp->ic_max_width = 0; |
6049 | |
6050 | |
6051 | if ((tp->inq_byte7 & INQ7_SYNC(0x10)) && tp->maxoffs) |
6052 | tp->ic_min_sync = (tp->minsync < np->minsync) ? |
6053 | np->minsync : tp->minsync; |
6054 | else |
6055 | tp->ic_min_sync = 255; |
6056 | |
6057 | tp->period = 1; |
6058 | tp->widedone = 1; |
6059 | |
6060 | /* |
6061 | * Enable PPR negotiation - only if Ultra3 support |
6062 | * is accessible. |
6063 | */ |
6064 | |
6065 | #if 0 |
6066 | if (tp->ic_max_width && (tp->ic_min_sync != 255 )) |
6067 | tp->ppr_negotiation = 1; |
6068 | #endif |
6069 | tp->ppr_negotiation = 0; |
6070 | if (np->features & FE_ULTRA3(1<<22)) { |
6071 | if (tp->ic_max_width && (tp->ic_min_sync == 0x09)) |
6072 | tp->ppr_negotiation = 1; |
6073 | } |
6074 | |
6075 | if (!tp->ppr_negotiation) |
6076 | cmd->ic_nego &= ~NS_PPR(4); |
6077 | } |
6078 | |
6079 | if (DEBUG_FLAGSncr_debug & DEBUG_IC(0x0800)) { |
6080 | printk("%s: cmd->ic_nego %d, 1st byte 0x%2X\n", |
6081 | ncr_name(np), cmd->ic_nego, cmd->cmnd[0]); |
6082 | } |
6083 | |
6084 | /* Previous command recorded a parity or an initiator |
6085 | * detected error condition. Force bus to narrow for this |
6086 | 		 * target. Clear flag. Negotiation on request sense.
6087 | * Note: kernel forces 2 bus resets :o( but clears itself out. |
6088 | * Minor bug? in scsi_obsolete.c (ugly) |
6089 | */ |
6090 | if (np->check_integ_par) { |
6091 | printk("%s: Parity Error. Target set to narrow.\n", |
6092 | ncr_name(np)); |
6093 | tp->ic_max_width = 0; |
6094 | tp->widedone = tp->period = 0; |
6095 | } |
6096 | |
6097 | /* Initializing: |
6098 | * If ic_nego == NS_PPR, we are in the initial test for |
6099 | * PPR messaging support. If driver flag is clear, then |
6100 | * either we don't support PPR nego (narrow or async device) |
6101 | * or this is the second TUR and we have had a M. REJECT |
6102 | * or unexpected disconnect on the first PPR negotiation. |
6103 | * Do not negotiate, reset nego flags (in case a reset has |
6104 | * occurred), clear ic_nego and return. |
6105 | * General case: Kernel will clear flag on a fallback. |
6106 | * Do only SDTR or WDTR in the future. |
6107 | */ |
6108 | if (!tp->ppr_negotiation && (cmd->ic_nego == NS_PPR(4) )) { |
6109 | tp->ppr_negotiation = 0; |
6110 | cmd->ic_nego &= ~NS_PPR(4); |
6111 | tp->widedone = tp->period = 1; |
6112 | return msglen; |
6113 | } |
6114 | else if (( tp->ppr_negotiation && !(cmd->ic_nego & NS_PPR(4) )) || |
6115 | (!tp->ppr_negotiation && (cmd->ic_nego & NS_PPR(4) )) ) { |
6116 | tp->ppr_negotiation = 0; |
6117 | cmd->ic_nego &= ~NS_PPR(4); |
6118 | } |
6119 | |
6120 | /* |
6121 | * Always check the PPR nego. flag bit if ppr_negotiation |
6122 | * is set. If the ic_nego PPR bit is clear, |
6123 | * there must have been a fallback. Do only |
6124 | * WDTR / SDTR in the future. |
6125 | */ |
6126 | if ((tp->ppr_negotiation) && (!(cmd->ic_nego & NS_PPR(4)))) |
6127 | tp->ppr_negotiation = 0; |
6128 | |
6129 | /* In case of a bus reset, ncr_negotiate will reset |
6130 | * the flags tp->widedone and tp->period to 0, forcing |
6131 | * a new negotiation. Do WDTR then SDTR. If PPR, do both. |
6132 | * Do NOT increase the period. It is possible for the Scsi_Cmnd |
6133 | * flags to be set to increase the period when a bus reset |
6134 | * occurs - we don't want to change anything. |
6135 | */ |
6136 | |
6137 | no_increase = 0; |
6138 | |
6139 | if (tp->ppr_negotiation && (!tp->widedone) && (!tp->period) ) { |
6140 | cmd->ic_nego = NS_PPR(4); |
6141 | tp->widedone = tp->period = 1; |
6142 | no_increase = 1; |
6143 | } |
6144 | else if (!tp->widedone) { |
6145 | cmd->ic_nego = NS_WIDE(2); |
6146 | tp->widedone = 1; |
6147 | no_increase = 1; |
6148 | } |
6149 | else if (!tp->period) { |
6150 | cmd->ic_nego = NS_SYNC(1); |
6151 | tp->period = 1; |
6152 | no_increase = 1; |
6153 | } |
6154 | |
6155 | new_width = cmd->ic_nego_width & tp->ic_max_width; |
6156 | |
6157 | switch (cmd->ic_nego_sync) { |
6158 | case 2: /* increase the period */ |
6159 | if (!no_increase) { |
6160 | if (tp->ic_min_sync <= 0x09) |
6161 | tp->ic_min_sync = 0x0A; |
6162 | else if (tp->ic_min_sync <= 0x0A) |
6163 | tp->ic_min_sync = 0x0C; |
6164 | else if (tp->ic_min_sync <= 0x0C) |
6165 | tp->ic_min_sync = 0x19; |
6166 | else if (tp->ic_min_sync <= 0x19) |
6167 | tp->ic_min_sync *= 2; |
6168 | else { |
6169 | tp->ic_min_sync = 255; |
6170 | cmd->ic_nego_sync = 0; |
6171 | tp->maxoffs = 0; |
6172 | } |
6173 | } |
6174 | new_period = tp->maxoffs?tp->ic_min_sync:0; |
6175 | new_offset = tp->maxoffs; |
6176 | break; |
6177 | |
6178 | case 1: /* nego. to maximum */ |
6179 | new_period = tp->maxoffs?tp->ic_min_sync:0; |
6180 | new_offset = tp->maxoffs; |
6181 | break; |
6182 | |
6183 | case 0: /* nego to async */ |
6184 | default: |
6185 | new_period = 0; |
6186 | new_offset = 0; |
6187 | break; |
6188 | }; |
6189 | |
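		/*
		 * The back-off ladder above steps through the sync factors
		 * used elsewhere in this file: 0x09 (12.5 ns, Ultra3) ->
		 * 0x0A (25 ns, Ultra2) -> 0x0C (50 ns, Ultra) -> 0x19
		 * (100 ns, Fast-10) -> one doubling to 0x32 (200 ns),
		 * after which it gives up and falls back to asynchronous
		 * (offset 0).
		 */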
6190 | |
6191 | nego = NS_NOCHANGE(0); |
6192 | if (tp->ppr_negotiation) { |
6193 | u_charunsigned char options_byte = 0; |
6194 | |
6195 | /* |
6196 | ** Must make sure data is consistent. |
6197 | ** If period is 9 and sync, must be wide and DT bit set. |
6198 | ** else period must be larger. If the width is 0, |
6199 | ** reset bus to wide but increase the period to 0x0A. |
6200 | ** Note: The strange else clause is due to the integrity check. |
6201 | 		** If it fails at 0x09 wide, the I.C. code will redo at the same
6202 | 		** speed but on a narrow bus. The driver must take care of slowing
6203 | ** the bus speed down. |
6204 | ** |
6205 | ** The maximum offset in ST mode is 31, in DT mode 62 (1010/1010_66 only) |
6206 | */ |
6207 | if ( (new_period==0x09) && new_offset) { |
6208 | if (new_width) |
6209 | options_byte = 0x02; |
6210 | else { |
6211 | tp->ic_min_sync = 0x0A; |
6212 | new_period = 0x0A; |
6213 | cmd->ic_nego_width = 1; |
6214 | new_width = 1; |
6215 | new_offset &= 0x1f; |
6216 | } |
6217 | } |
6218 | else if (new_period > 0x09) |
6219 | new_offset &= 0x1f; |
6220 | |
6221 | nego = NS_PPR(4); |
6222 | |
6223 | msgptr[msglen++] = M_EXTENDED(0x01); |
6224 | msgptr[msglen++] = 6; |
6225 | msgptr[msglen++] = M_X_PPR_REQ(0x04); |
6226 | msgptr[msglen++] = new_period; |
6227 | msgptr[msglen++] = 0; |
6228 | msgptr[msglen++] = new_offset; |
6229 | msgptr[msglen++] = new_width; |
6230 | msgptr[msglen++] = options_byte; |
6231 | |
6232 | } |
6233 | else { |
6234 | switch (cmd->ic_nego & ~NS_PPR(4)) { |
6235 | case NS_WIDE(2): |
6236 | /* |
6237 | ** WDTR negotiation on if device supports |
6238 | ** wide or if wide device forced narrow |
6239 | ** due to a parity error. |
6240 | */ |
6241 | |
6242 | cmd->ic_nego_width &= tp->ic_max_width; |
6243 | |
6244 | if (tp->ic_max_width | np->check_integ_par) { |
6245 | nego = NS_WIDE(2); |
6246 | msgptr[msglen++] = M_EXTENDED(0x01); |
6247 | msgptr[msglen++] = 2; |
6248 | msgptr[msglen++] = M_X_WIDE_REQ(0x03); |
6249 | msgptr[msglen++] = new_width; |
6250 | } |
6251 | break; |
6252 | |
6253 | case NS_SYNC(1): |
6254 | /* |
6255 | ** negotiate synchronous transfers |
6256 | ** Target must support sync transfers. |
6257 | 			** Min. period = 0x0A, maximum offset 31 (0x1f).
6258 | */ |
6259 | |
6260 | if (tp->inq_byte7 & INQ7_SYNC(0x10)) { |
6261 | |
6262 | if (new_offset && (new_period < 0x0A)) { |
6263 | tp->ic_min_sync = 0x0A; |
6264 | new_period = 0x0A; |
6265 | } |
6266 | nego = NS_SYNC(1); |
6267 | msgptr[msglen++] = M_EXTENDED(0x01); |
6268 | msgptr[msglen++] = 3; |
6269 | msgptr[msglen++] = M_X_SYNC_REQ(0x01); |
6270 | msgptr[msglen++] = new_period; |
6271 | msgptr[msglen++] = new_offset & 0x1f; |
6272 | } |
6273 | else |
6274 | cmd->ic_nego_sync = 0; |
6275 | break; |
6276 | |
6277 | case NS_NOCHANGE(0): |
6278 | break; |
6279 | } |
6280 | } |
6281 | |
6282 | }; |
6283 | |
6284 | cp->nego_status = nego; |
6285 | np->check_integ_par = 0; |
6286 | |
6287 | if (nego) { |
6288 | tp->nego_cp = cp; |
6289 | if (DEBUG_FLAGSncr_debug & DEBUG_NEGO(0x0200)) { |
6290 | ncr_print_msg(cp, nego == NS_WIDE(2) ? |
6291 | "wide/narrow msgout": |
6292 | (nego == NS_SYNC(1) ? "sync/async msgout" : "ppr msgout"), |
6293 | msgptr); |
6294 | }; |
6295 | }; |
6296 | |
6297 | return msglen; |
6298 | } |
6299 | #endif /* SCSI_NCR_INTEGRITY_CHECKING */ |
6300 | |
6301 | /*========================================================== |
6302 | ** |
6303 | ** |
6304 | ** Prepare the next negotiation message if needed. |
6305 | ** |
6306 | ** Fill in the part of message buffer that contains the |
6307 | ** negotiation and the nego_status field of the CCB. |
6308 | ** Returns the size of the message in bytes. |
6309 | ** |
6310 | ** |
6311 | **========================================================== |
6312 | */ |
6313 | |
6314 | |
6315 | static int ncr_prepare_nego(ncb_p np, ccb_p cp, u_charunsigned char *msgptr) |
6316 | { |
6317 | tcb_p tp = &np->target[cp->target]; |
6318 | int msglen = 0; |
6319 | int nego = 0; |
6320 | u_charunsigned char width, offset, factor, last_byte; |
6321 | |
6322 | if (!np->check_integrity) { |
6323 | /* If integrity checking disabled, enable PPR messaging |
6324 | * if device supports wide, sync and ultra 3 |
6325 | */ |
6326 | if (tp->ppr_negotiation == 1) /* PPR message successful */ |
6327 | tp->ppr_negotiation = 2; |
6328 | |
6329 | if ((tp->inq_done) && (!tp->ic_maximums_set)) { |
6330 | tp->ic_maximums_set = 1; |
6331 | |
6332 | /* |
6333 | * Issue PPR only if board is capable |
6334 | * and set-up for Ultra3 transfers. |
6335 | */ |
6336 | tp->ppr_negotiation = 0; |
6337 | if ( (np->features & FE_ULTRA3(1<<22)) && |
6338 | (tp->usrwide) && (tp->maxoffs) && |
6339 | (tp->minsync == 0x09) ) |
6340 | tp->ppr_negotiation = 1; |
6341 | } |
6342 | } |
6343 | |
6344 | if (tp->inq_done) { |
6345 | /* |
6346 | * Get the current width, offset and period |
6347 | */ |
6348 | ncr_get_xfer_info( np, tp, &factor, |
6349 | &offset, &width); |
6350 | |
6351 | /* |
6352 | ** negotiate wide transfers ? |
6353 | */ |
6354 | |
6355 | if (!tp->widedone) { |
6356 | if (tp->inq_byte7 & INQ7_WIDE16(0x20)) { |
6357 | if (tp->ppr_negotiation) |
6358 | nego = NS_PPR(4); |
6359 | else |
6360 | nego = NS_WIDE(2); |
6361 | |
6362 | width = tp->usrwide; |
6363 | #ifdef SCSI_NCR_INTEGRITY_CHECKING |
6364 | if (tp->ic_done) |
6365 | width &= tp->ic_max_width; |
6366 | #endif |
6367 | } else |
6368 | tp->widedone=1; |
6369 | |
6370 | }; |
6371 | |
6372 | /* |
6373 | ** negotiate synchronous transfers? |
6374 | */ |
6375 | |
6376 | if ((nego != NS_WIDE(2)) && !tp->period) { |
6377 | if (tp->inq_byte7 & INQ7_SYNC(0x10)) { |
6378 | if (tp->ppr_negotiation) |
6379 | nego = NS_PPR(4); |
6380 | else |
6381 | nego = NS_SYNC(1); |
6382 | |
6383 | /* Check for async flag */ |
6384 | if (tp->maxoffs == 0) { |
6385 | offset = 0; |
6386 | factor = 0; |
6387 | } |
6388 | else { |
6389 | offset = tp->maxoffs; |
6390 | factor = tp->minsync; |
6391 | #ifdef SCSI_NCR_INTEGRITY_CHECKING |
6392 | if ((tp->ic_done) && |
6393 | (factor < tp->ic_min_sync)) |
6394 | factor = tp->ic_min_sync; |
6395 | #endif |
6396 | } |
6397 | |
6398 | } else { |
6399 | offset = 0; |
6400 | factor = 0; |
6401 | tp->period =0xffff; |
6402 | PRINT_TARGET(np, cp->target); |
6403 | printk ("target did not report SYNC.\n"); |
6404 | }; |
6405 | }; |
6406 | }; |
6407 | |
6408 | switch (nego) { |
6409 | case NS_PPR:
6410 | /* |
6411 | ** Must make sure data is consistent. |
6412 | ** If period is 9 and sync, must be wide and DT bit set |
6413 | ** else period must be larger. |
6414 | ** Maximum offset is 31=0x1f in ST mode, 62 in DT mode
6415 | */ |
6416 | last_byte = 0; |
6417 | if ( (factor==9) && offset) { |
6418 | if (!width) { |
6419 | factor = 0x0A; |
6420 | offset &= 0x1f; |
6421 | } |
6422 | else |
6423 | last_byte = 0x02; |
6424 | } |
6425 | else if (factor > 0x09) |
6426 | offset &= 0x1f; |
6427 | |
6428 | msgptr[msglen++] = M_EXTENDED;
6429 | msgptr[msglen++] = 6;
6430 | msgptr[msglen++] = M_X_PPR_REQ;
6431 | msgptr[msglen++] = factor; |
6432 | msgptr[msglen++] = 0; |
6433 | msgptr[msglen++] = offset; |
6434 | msgptr[msglen++] = width; |
6435 | msgptr[msglen++] = last_byte; |
6436 | break; |
6437 | case NS_SYNC:
6438 | /* |
6439 | ** Never negotiate faster than Ultra 2 (25ns periods) |
6440 | */ |
6441 | if (offset && (factor < 0x0A)) { |
6442 | factor = 0x0A; |
6443 | tp->minsync = 0x0A; |
6444 | } |
6445 | |
6446 | msgptr[msglen++] = M_EXTENDED;
6447 | msgptr[msglen++] = 3;
6448 | msgptr[msglen++] = M_X_SYNC_REQ;
6449 | msgptr[msglen++] = factor; |
6450 | msgptr[msglen++] = offset & 0x1f; |
6451 | break; |
6452 | case NS_WIDE:
6453 | msgptr[msglen++] = M_EXTENDED;
6454 | msgptr[msglen++] = 2;
6455 | msgptr[msglen++] = M_X_WIDE_REQ;
6456 | msgptr[msglen++] = width; |
6457 | break; |
6458 | }; |
6459 | |
6460 | cp->nego_status = nego; |
6461 | |
6462 | if (nego) { |
6463 | tp->nego_cp = cp; |
6464 | if (DEBUG_FLAGS & DEBUG_NEGO) {
6465 | ncr_print_msg(cp, nego == NS_WIDE ?
6466 | "wide msgout":
6467 | (nego == NS_SYNC ? "sync msgout" : "ppr msgout"),
6468 | msgptr); |
6469 | }; |
6470 | }; |
6471 | |
6472 | return msglen; |
6473 | } |
6474 | |
6475 | /*========================================================== |
6476 | ** |
6477 | ** |
6478 | ** Start execution of a SCSI command. |
6479 | ** This is called from the generic SCSI driver. |
6480 | ** |
6481 | ** |
6482 | **========================================================== |
6483 | */ |
6484 | static int ncr_queue_command (ncb_p np, Scsi_Cmnd *cmd) |
6485 | { |
6486 | /* Scsi_Device *device = cmd->device; */ |
6487 | tcb_p tp = &np->target[cmd->target]; |
6488 | lcb_p lp = ncr_lp(np, tp, cmd->lun);
6489 | ccb_p cp; |
6490 | |
6491 | u_char idmsg, *msgptr;
6492 | u_int msglen;
6493 | int direction; |
6494 | u_int32 lastp, goalp; |
6495 | |
6496 | /*--------------------------------------------- |
6497 | ** |
6498 | ** Some shortcuts ... |
6499 | ** |
6500 | **--------------------------------------------- |
6501 | */ |
6502 | if ((cmd->target == np->myaddr ) || |
6503 | (cmd->target >= MAX_TARGET) ||
6504 | (cmd->lun >= MAX_LUN )) {
6505 | return(DID_BAD_TARGET);
6506 | } |
6507 | |
6508 | /*--------------------------------------------- |
6509 | ** |
6510 | ** Complete the 1st TEST UNIT READY command |
6511 | ** with error condition if the device is |
6512 | ** flagged NOSCAN, in order to speed up |
6513 | ** the boot. |
6514 | ** |
6515 | **--------------------------------------------- |
6516 | */ |
6517 | if (cmd->cmnd[0] == 0 && (tp->usrflag & UF_NOSCAN)) {
6518 | tp->usrflag &= ~UF_NOSCAN;
6519 | return DID_BAD_TARGET;
6520 | } |
6521 | |
6522 | if (DEBUG_FLAGS & DEBUG_TINY) {
6523 | PRINT_ADDR(cmd); |
6524 | printk ("CMD=%x ", cmd->cmnd[0]); |
6525 | } |
6526 | |
6527 | /*--------------------------------------------------- |
6528 | ** |
6529 | ** Assign a ccb / bind cmd. |
6530 | ** If resetting, shorten settle_time if necessary |
6531 | ** in order to avoid spurious timeouts. |
6532 | ** If resetting or no free ccb, |
6533 | ** insert cmd into the waiting list. |
6534 | ** |
6535 | **---------------------------------------------------- |
6536 | */ |
6537 | if (np->settle_time && cmd->timeout_per_command >= HZ) {
6538 | u_long tlimit = ktime_get(cmd->timeout_per_command - HZ);
6539 | if (ktime_dif(np->settle_time, tlimit) > 0)
6540 | np->settle_time = tlimit; |
6541 | } |
6542 | |
6543 | if (np->settle_time || !(cp=ncr_get_ccb (np, cmd->target, cmd->lun))) { |
6544 | insert_into_waiting_list(np, cmd); |
6545 | return(DID_OK);
6546 | } |
6547 | cp->cmd = cmd; |
6548 | |
6549 | /*--------------------------------------------------- |
6550 | ** |
6551 | ** Enable tagged queue if asked by scsi ioctl |
6552 | ** |
6553 | **---------------------------------------------------- |
6554 | */ |
6555 | #if 0 /* This stuff was only useful for linux-1.2.13 */
6556 | if (lp && !lp->numtags && cmd->device && cmd->device->tagged_queue) { |
6557 | lp->numtags = tp->usrtags; |
6558 | ncr_setup_tags (np, cp->target, cp->lun); |
6559 | } |
6560 | #endif |
6561 | |
6562 | /*---------------------------------------------------- |
6563 | ** |
6564 | ** Build the identify / tag / sdtr message |
6565 | ** |
6566 | **---------------------------------------------------- |
6567 | */ |
6568 | |
6569 | idmsg = M_IDENTIFY | cp->lun;
6570 | 
6571 | if (cp->tag != NO_TAG || (lp && !(tp->usrflag & UF_NODISC)))
6572 | idmsg |= 0x40;
6573 | 
6574 | msgptr = cp->scsi_smsg;
6575 | msglen = 0;
6576 | msgptr[msglen++] = idmsg;
6577 | 
6578 | if (cp->tag != NO_TAG) {
6579 | char order = np->order; |
6580 | |
6581 | /* |
6582 | ** Force ordered tag if necessary to avoid timeouts |
6583 | ** and to preserve interactivity. |
6584 | */ |
6585 | if (lp && ktime_exp(lp->tags_stime)) {
6586 | lp->tags_si = !(lp->tags_si);
6587 | if (lp->tags_sum[lp->tags_si]) {
6588 | order = M_ORDERED_TAG;
6589 | if ((DEBUG_FLAGS & DEBUG_TAGS)||bootverbose>0){
6590 | PRINT_ADDR(cmd);
6591 | printk("ordered tag forced.\n");
6592 | }
6593 | }
6594 | lp->tags_stime = ktime_get(3*HZ);
6595 | } |
6596 | |
6597 | if (order == 0) { |
6598 | /* |
6599 | ** Ordered write ops, unordered read ops. |
6600 | */ |
6601 | switch (cmd->cmnd[0]) { |
6602 | case 0x08: /* READ_SMALL (6) */ |
6603 | case 0x28: /* READ_BIG (10) */ |
6604 | case 0xa8: /* READ_HUGE (12) */ |
6605 | order = M_SIMPLE_TAG;
6606 | break;
6607 | default:
6608 | order = M_ORDERED_TAG;
6609 | } |
6610 | } |
6611 | msgptr[msglen++] = order; |
6612 | /* |
6613 | ** For less than 128 tags, actual tags are numbered |
6614 | ** 1,3,5,...,2*MAXTAGS+1, since we may have to deal
6615 | ** with devices that have problems with #TAG 0 or too |
6616 | ** great #TAG numbers. For more tags (up to 256), |
6617 | ** we use directly our tag number. |
6618 | */ |
6619 | #if MAX_TASKS > (512/4)
6620 | msgptr[msglen++] = cp->tag; |
6621 | #else |
6622 | msgptr[msglen++] = (cp->tag << 1) + 1; |
6623 | #endif |
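/*
**	Example of the odd-numbered encoding above: with fewer than
**	128 tasks, internal tag 0 is sent on the bus as #TAG 1,
**	internal tag 3 as (3 << 1) + 1 = #TAG 7, and so on, which
**	keeps #TAG 0 and very large tag values off the wire.
*/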
6624 | } |
6625 | |
6626 | cp->host_flags = 0;
6627 | |
6628 | /*---------------------------------------------------- |
6629 | ** |
6630 | ** Build the data descriptors |
6631 | ** |
6632 | **---------------------------------------------------- |
6633 | */ |
6634 | |
6635 | direction = scsi_data_direction(cmd); |
6636 | if (direction != SCSI_DATA_NONE) {
6637 | cp->segments = np->scatter (np, cp, cp->cmd);
6638 | if (cp->segments < 0) {
6639 | ncr_free_ccb(np, cp);
6640 | return(DID_ERROR);
6641 | } |
6642 | } |
6643 | else { |
6644 | cp->data_len = 0; |
6645 | cp->segments = 0; |
6646 | } |
6647 | |
6648 | /*--------------------------------------------------- |
6649 | ** |
6650 | ** negotiation required? |
6651 | ** |
6652 | ** (nego_status is filled by ncr_prepare_nego()) |
6653 | ** |
6654 | **--------------------------------------------------- |
6655 | */ |
6656 | |
6657 | cp->nego_status = 0; |
6658 | |
6659 | #ifdef SCSI_NCR_INTEGRITY_CHECKING |
6660 | if ((np->check_integrity && tp->ic_done) || !np->check_integrity) { |
6661 | if ((!tp->widedone || !tp->period) && !tp->nego_cp && lp) { |
6662 | msglen += ncr_prepare_nego (np, cp, msgptr + msglen); |
6663 | } |
6664 | } |
6665 | else if (np->check_integrity && (cmd->ic_in_progress)) { |
6666 | msglen += ncr_ic_nego (np, cp, cmd, msgptr + msglen); |
6667 | } |
6668 | else if (np->check_integrity && cmd->ic_complete) { |
6669 | u_long current_period;
6670 | u_char current_offset, current_width, current_factor;
6671 | |
6672 | ncr_get_xfer_info (np, tp, ¤t_factor, |
6673 | ¤t_offset, ¤t_width); |
6674 | |
6675 | tp->ic_max_width = current_width; |
6676 | tp->ic_min_sync = current_factor; |
6677 | |
6678 | if (current_factor == 9) current_period = 125; |
6679 | else if (current_factor == 10) current_period = 250; |
6680 | else if (current_factor == 11) current_period = 303; |
6681 | else if (current_factor == 12) current_period = 500; |
6682 | else current_period = current_factor * 40; |
6683 | |
6684 | /* |
6685 | * Negotiation for this target is complete. Update flags. |
6686 | */ |
6687 | tp->period = current_period; |
6688 | tp->widedone = 1; |
6689 | tp->ic_done = 1; |
6690 | |
6691 | printk("%s: Integrity Check Complete: \n", ncr_name(np)); |
6692 | |
6693 | printk("%s: %s %s SCSI", ncr_name(np), |
6694 | current_offset?"SYNC":"ASYNC", |
6695 | tp->ic_max_width?"WIDE":"NARROW"); |
6696 | if (current_offset) { |
6697 | u_long mbs = 10000 * (tp->ic_max_width + 1);
6698 | |
6699 | printk(" %d.%d MB/s", |
6700 | (int) (mbs / current_period), (int) (mbs % current_period)); |
6701 | |
6702 | printk(" (%d ns, %d offset)\n", |
6703 | (int) current_period/10, current_offset); |
6704 | } |
6705 | else |
6706 | printk(" %d MB/s. \n ", (tp->ic_max_width+1)*5); |
6707 | } |
6708 | #else |
6709 | if ((!tp->widedone || !tp->period) && !tp->nego_cp && lp) { |
6710 | msglen += ncr_prepare_nego (np, cp, msgptr + msglen); |
6711 | } |
6712 | #endif /* SCSI_NCR_INTEGRITY_CHECKING */ |
6713 | |
6714 | |
6715 | /*---------------------------------------------------- |
6716 | ** |
6717 | ** Determine xfer direction. |
6718 | ** |
6719 | **---------------------------------------------------- |
6720 | */ |
6721 | if (!cp->data_len) |
6722 | direction = SCSI_DATA_NONE;
6723 | |
6724 | /* |
6725 | ** If data direction is UNKNOWN, speculate DATA_READ
6726 | ** but prepare alternate pointers for WRITE in case
6727 | ** our speculation turns out to be wrong.
6728 | ** SCRIPTS will swap values if needed.
6729 | */ |
6730 | switch(direction) {
6731 | case SCSI_DATA_UNKNOWN:
6732 | case SCSI_DATA_WRITE:
6733 | goalp = NCB_SCRIPT_PHYS (np, data_out2) + 8;
6734 | lastp = goalp - 8 - (cp->segments * (SCR_SG_SIZE*4));
6735 | if (direction != SCSI_DATA_UNKNOWN)
6736 | break;
6737 | cp->phys.header.wgoalp = cpu_to_scr(goalp);
6738 | cp->phys.header.wlastp = cpu_to_scr(lastp);
6739 | /* fall through */
6740 | case SCSI_DATA_READ:
6741 | cp->host_flags |= HF_DATA_IN;
6742 | goalp = NCB_SCRIPT_PHYS (np, data_in2) + 8;
6743 | lastp = goalp - 8 - (cp->segments * (SCR_SG_SIZE*4));
6744 | break;
6745 | default:
6746 | case SCSI_DATA_NONE:
6747 | lastp = goalp = NCB_SCRIPTH_PHYS (np, no_data);
6748 | break;
6749 | }
6750 | |
6751 | /* |
6752 | ** Set all pointers values needed by SCRIPTS. |
6753 | ** If direction is unknown, start at data_io. |
6754 | */ |
6755 | cp->phys.header.lastp = cpu_to_scr(lastp);
6756 | cp->phys.header.goalp = cpu_to_scr(goalp);
6757 | 
6758 | if (direction == SCSI_DATA_UNKNOWN)
6759 | cp->phys.header.savep =
6760 | cpu_to_scr(NCB_SCRIPTH_PHYS (np, data_io));
6761 | else
6762 | cp->phys.header.savep = cpu_to_scr(lastp);
6763 | |
6764 | /* |
6765 | ** Save the initial data pointer in order to be able |
6766 | ** to redo the command. |
6767 | ** We also have to save the initial lastp, since it |
6768 | ** will be changed to DATA_IO if we don't know the data |
6769 | ** direction and the device completes the command with |
6770 | ** QUEUE FULL status (without entering the data phase). |
6771 | */ |
6772 | cp->startp = cp->phys.header.savep; |
6773 | cp->lastp0 = cp->phys.header.lastp; |
6774 | |
6775 | /*---------------------------------------------------- |
6776 | ** |
6777 | ** fill in ccb |
6778 | ** |
6779 | **---------------------------------------------------- |
6780 | ** |
6781 | ** |
6782 | ** physical -> virtual backlink |
6783 | ** Generic SCSI command |
6784 | */ |
6785 | |
6786 | /* |
6787 | ** Startqueue |
6788 | */ |
6789 | cp->phys.header.go.start = cpu_to_scr(NCB_SCRIPT_PHYS (np,select));
6790 | cp->phys.header.go.restart = cpu_to_scr(NCB_SCRIPT_PHYS (np,resel_dsa));
6791 | /* |
6792 | ** select |
6793 | */ |
6794 | cp->phys.select.sel_id = cp->target; |
6795 | cp->phys.select.sel_scntl3 = tp->wval; |
6796 | cp->phys.select.sel_sxfer = tp->sval; |
6797 | cp->phys.select.sel_scntl4 = tp->uval; |
6798 | /* |
6799 | ** message |
6800 | */ |
6801 | cp->phys.smsg.addr = cpu_to_scr(CCB_PHYS (cp, scsi_smsg));
6802 | cp->phys.smsg.size = cpu_to_scr(msglen);
6803 | |
6804 | /* |
6805 | ** command |
6806 | */ |
6807 | memcpy(cp->cdb_buf, cmd->cmnd, MIN(cmd->cmd_len, sizeof(cp->cdb_buf)));
6808 | cp->phys.cmd.addr = cpu_to_scr(CCB_PHYS (cp, cdb_buf[0]));
6809 | cp->phys.cmd.size = cpu_to_scr(cmd->cmd_len);
6810 | |
6811 | /* |
6812 | ** status |
6813 | */ |
6814 | cp->actualquirks = tp->quirks;
6815 | cp->host_status = cp->nego_status ? HS_NEGOTIATE : HS_BUSY;
6816 | cp->scsi_status = S_ILLEGAL;
6817 | cp->xerr_status = 0; |
6818 | cp->extra_bytes = 0; |
6819 | |
6820 | /* |
6821 | ** extreme data pointer. |
6822 | ** shall be positive, so -1 is lower than lowest.:) |
6823 | */ |
6824 | cp->ext_sg = -1; |
6825 | cp->ext_ofs = 0; |
6826 | |
6827 | /*---------------------------------------------------- |
6828 | ** |
6829 | ** Critical region: start this job. |
6830 | ** |
6831 | **---------------------------------------------------- |
6832 | */ |
6833 | |
6834 | /* |
6835 | ** activate this job. |
6836 | */ |
6837 | |
6838 | /* |
6839 | ** insert next CCBs into start queue. |
6840 | ** 2 max at a time is enough to flush the CCB wait queue. |
6841 | */ |
6842 | if (lp) |
6843 | ncr_start_next_ccb(np, lp, 2); |
6844 | else |
6845 | ncr_put_start_queue(np, cp); |
6846 | |
6847 | /* |
6848 | ** Command is successfully queued. |
6849 | */ |
6850 | |
6851 | return(DID_OK);
6852 | } |
6853 | |
6854 | |
6855 | /*========================================================== |
6856 | ** |
6857 | ** |
6858 | ** Insert a CCB into the start queue and wake up the |
6859 | ** SCRIPTS processor. |
6860 | ** |
6861 | ** |
6862 | **========================================================== |
6863 | */ |
6864 | |
6865 | static void ncr_start_next_ccb(ncb_p np, lcb_p lp, int maxn) |
6866 | { |
6867 | XPT_QUEHEAD *qp; |
6868 | ccb_p cp; |
6869 | |
6870 | while (maxn-- && lp->queuedccbs < lp->queuedepth) { |
6871 | qp = xpt_remque_head(&lp->wait_ccbq); |
6872 | if (!qp) |
6873 | break; |
6874 | ++lp->queuedccbs; |
6875 | cp = xpt_que_entry(qp, struct ccb, link_ccbq);
6876 | xpt_insque_tail(qp, &lp->busy_ccbq);
6877 | lp->tasktbl[cp->tag == NO_TAG ? 0 : cp->tag] =
6878 | cpu_to_scr(cp->p_ccb);
6879 | ncr_put_start_queue(np, cp); |
6880 | } |
6881 | } |
6882 | |
6883 | static void ncr_put_start_queue(ncb_p np, ccb_p cp) |
6884 | { |
6885 | u_short qidx;
6886 | |
6887 | #ifdef SCSI_NCR_IARB_SUPPORT |
6888 | /* |
6889 | ** If the previously queued CCB is not yet done, |
6890 | ** set the IARB hint. The SCRIPTS will go with IARB |
6891 | ** for this job when starting the previous one. |
6892 | ** We leave devices a chance to win arbitration by |
6893 | ** not using more than 'iarb_max' consecutive |
6894 | ** immediate arbitrations. |
6895 | */ |
6896 | if (np->last_cp && np->iarb_count < np->iarb_max) { |
6897 | np->last_cp->host_flags |= HF_HINT_IARB;
6898 | ++np->iarb_count; |
6899 | } |
6900 | else |
6901 | np->iarb_count = 0; |
6902 | np->last_cp = cp; |
6903 | #endif |
6904 | |
6905 | /* |
6906 | ** insert into start queue. |
6907 | */ |
6908 | qidx = np->squeueput + 2; |
6909 | if (qidx >= MAX_START*2) qidx = 0;
6910 | 
6911 | np->squeue [qidx] = cpu_to_scr(np->p_idletask);
6912 | MEMORY_BARRIER();
6913 | np->squeue [np->squeueput] = cpu_to_scr(cp->p_ccb);
6914 | |
6915 | np->squeueput = qidx; |
6916 | cp->queued = 1; |
6917 | |
6918 | if (DEBUG_FLAGS & DEBUG_QUEUE)
6919 | printk ("%s: queuepos=%d.\n", ncr_name (np), np->squeueput); |
6920 | |
6921 | /* |
6922 | ** Script processor may be waiting for reselect. |
6923 | ** Wake it up. |
6924 | */ |
6925 | MEMORY_BARRIER();
6926 | OUTB (nc_istat, SIGP|np->istat_sem);
6927 | } |
6928 | |
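/*
**	Note on the start queue protocol used above (derived from the
**	code, not from chip documentation): each job uses two slots, the
**	CCB bus address followed by a link to the next entry.  The slot
**	after the one being filled is first set to the idle task so the
**	SCRIPTS processor always finds a valid end marker, a memory
**	barrier orders the two stores on weakly ordered CPUs, and SIGP
**	is finally set in ISTAT to wake the SCRIPTS processor in case it
**	is sleeping on the reselect wait.
*/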
6929 | |
6930 | /*========================================================== |
6931 | ** |
6932 | ** Soft reset the chip. |
6933 | ** |
6934 | ** Some 896 and 876 chip revisions may hang-up if we set |
6935 | ** the SRST (soft reset) bit at the wrong time when SCRIPTS |
6936 | ** are running. |
6937 | ** So, we need to abort the current operation prior to |
6938 | ** soft resetting the chip. |
6939 | ** |
6940 | **========================================================== |
6941 | */ |
6942 | |
6943 | static void ncr_chip_reset (ncb_p np) |
6944 | { |
6945 | OUTB (nc_istat, SRST);
6946 | UDELAY (10);
6947 | OUTB (nc_istat, 0);
6948 | } |
6949 | |
6950 | static void ncr_soft_reset(ncb_p np) |
6951 | { |
6952 | u_char istat;
6953 | int i; |
6954 | |
6955 | OUTB (nc_istat, CABRT);
6956 | for (i = 1000000 ; i ; --i) {
6957 | istat = INB (nc_istat);
6958 | if (istat & SIP) {
6959 | INW (nc_sist);
6960 | continue;
6961 | }
6962 | if (istat & DIP) {
6963 | OUTB (nc_istat, 0);
6964 | INB (nc_dstat);
6965 | break; |
6966 | } |
6967 | } |
6968 | if (!i) |
6969 | printk("%s: unable to abort current chip operation.\n", |
6970 | ncr_name(np)); |
6971 | ncr_chip_reset(np); |
6972 | } |
6973 | |
6974 | /*========================================================== |
6975 | ** |
6976 | ** |
6977 | ** Start reset process. |
6978 | ** The interrupt handler will reinitialize the chip. |
6979 | ** The timeout handler will wait for settle_time before |
6980 | ** clearing it and so resuming command processing. |
6981 | ** |
6982 | ** |
6983 | **========================================================== |
6984 | */ |
6985 | static void ncr_start_reset(ncb_p np) |
6986 | { |
6987 | (void) ncr_reset_scsi_bus(np, 1, driver_setup.settle_delay); |
6988 | } |
6989 | |
6990 | static int ncr_reset_scsi_bus(ncb_p np, int enab_int, int settle_delay) |
6991 | { |
6992 | u_int32 term; |
6993 | int retv = 0; |
6994 | |
6995 | np->settle_time = ktime_get(settle_delay * HZ);
6996 | 
6997 | if (bootverbose > 1)
6998 | printk("%s: resetting, " |
6999 | "command processing suspended for %d seconds\n", |
7000 | ncr_name(np), settle_delay); |
7001 | |
7002 | ncr_soft_reset(np); /* Soft reset the chip */ |
7003 | UDELAY (2000); /* The 895/6 need time for the bus mode to settle */ |
7004 | if (enab_int) |
7005 | OUTW (nc_sien, RST);
7006 | /* |
7007 | ** Enable Tolerant, reset IRQD if present and |
7008 | ** properly set IRQ mode, prior to resetting the bus. |
7009 | */ |
7010 | OUTB (nc_stest3, TE);
7011 | OUTB (nc_dcntl, (np->rv_dcntl & IRQM));
7012 | OUTB (nc_scntl1, CRST);
7013 | UDELAY (200); |
7014 | |
7015 | if (!driver_setup.bus_check) |
7016 | goto out; |
7017 | /* |
7018 | ** Check for no terminators or SCSI bus shorts to ground. |
7019 | ** Read SCSI data bus, data parity bits and control signals. |
7020 | ** We are expecting RESET to be TRUE and other signals to be |
7021 | ** FALSE. |
7022 | */ |
7023 | term = INB(nc_sstat0);
7024 | term = ((term & 2) << 7) + ((term & 1) << 17); /* rst sdp0 */
7025 | term |= ((INB(nc_sstat2) & 0x01) << 26) | /* sdp1 */
7026 | ((INW(nc_sbdl) & 0xff) << 9) | /* d7-0 */
7027 | ((INW(nc_sbdl) & 0xff00) << 10) | /* d15-8 */
7028 | INB(nc_sbcl); /* req ack bsy sel atn msg cd io */
7029 | |
7030 | if (!(np->features & FE_WIDE))
7031 | term &= 0x3ffff; |
7032 | |
7033 | if (term != (2<<7)) { |
7034 | printk("%s: suspicious SCSI data while resetting the BUS.\n", |
7035 | ncr_name(np)); |
7036 | printk("%s: %sdp0,d7-0,rst,req,ack,bsy,sel,atn,msg,c/d,i/o = " |
7037 | "0x%lx, expecting 0x%lx\n", |
7038 | ncr_name(np), |
7039 | (np->features & FE_WIDE) ? "dp1,d15-8," : "",
7040 | (u_long)term, (u_long)(2<<7));
7041 | if (driver_setup.bus_check == 1) |
7042 | retv = 1; |
7043 | } |
7044 | out: |
7045 | OUTB (nc_scntl1, 0);
7046 | return retv; |
7047 | } |
7048 | |
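/*
**	Note on the bus check performed above: `term' packs the sampled
**	bus state into one word -- SBCL control signals in bits 0-7,
**	RST in bit 8, data lines d7-0 in bits 9-16, SDP0 in bit 17,
**	d15-8 in bits 18-25 and SDP1 in bit 26 (the upper bits being
**	masked off for narrow chips).  While the chip asserts reset,
**	only RST should be seen, hence the expected value of 2<<7.
*/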
7049 | /*========================================================== |
7050 | ** |
7051 | ** |
7052 | ** Reset the SCSI BUS. |
7053 | ** This is called from the generic SCSI driver. |
7054 | ** |
7055 | ** |
7056 | **========================================================== |
7057 | */ |
7058 | static int ncr_reset_bus (ncb_p np, Scsi_Cmnd *cmd, int sync_reset) |
7059 | { |
7060 | /* Scsi_Device *device = cmd->device; */ |
7061 | ccb_p cp; |
7062 | int found; |
7063 | |
7064 | /* |
7065 | * Return immediately if reset is in progress. |
7066 | */ |
7067 | if (np->settle_time) { |
7068 | return SCSI_RESET_PUNT;
7069 | } |
7070 | /* |
7071 | * Start the reset process. |
7072 | * The script processor is then assumed to be stopped. |
7073 | * Commands will now be queued in the waiting list until a settle |
7074 | * delay of 2 seconds has elapsed.
7075 | */ |
7076 | ncr_start_reset(np); |
7077 | /* |
7078 | * First, look in the wakeup list |
7079 | */ |
7080 | for (found=0, cp=np->ccbc; cp; cp=cp->link_ccb) { |
7081 | /* |
7082 | ** look for the ccb of this command. |
7083 | */ |
7084 | if (cp->host_status == HS_IDLE) continue;
7085 | if (cp->cmd == cmd) { |
7086 | found = 1; |
7087 | break; |
7088 | } |
7089 | } |
7090 | /* |
7091 | * Then, look in the waiting list |
7092 | */ |
7093 | if (!found && retrieve_from_waiting_list(0, np, cmd)) |
7094 | found = 1; |
7095 | /* |
7096 | * Wake-up all awaiting commands with DID_RESET. |
7097 | */ |
7098 | reset_waiting_list(np);
7099 | /* |
7100 | * Wake-up all pending commands with HS_RESET -> DID_RESET. |
7101 | */ |
7102 | ncr_wakeup(np, HS_RESET);
7103 | /* |
7104 | * If the involved command was not in a driver queue, and the |
7105 | * scsi driver told us reset is synchronous, and the command is not |
7106 | * currently in the waiting list, complete it with DID_RESET status, |
7107 | * in order to keep it alive. |
7108 | */ |
7109 | if (!found && sync_reset && !retrieve_from_waiting_list(0, np, cmd)) { |
7110 | SetScsiResult(cmd, DID_RESET, 0);
7111 | ncr_queue_done_cmd(np, cmd); |
7112 | } |
7113 | |
7114 | return SCSI_RESET_SUCCESS;
7115 | } |
7116 | |
7117 | /*========================================================== |
7118 | ** |
7119 | ** |
7120 | ** Abort an SCSI command. |
7121 | ** This is called from the generic SCSI driver. |
7122 | ** |
7123 | ** |
7124 | **========================================================== |
7125 | */ |
7126 | static int ncr_abort_command (ncb_p np, Scsi_Cmnd *cmd) |
7127 | { |
7128 | /* Scsi_Device *device = cmd->device; */ |
7129 | ccb_p cp; |
7130 | |
7131 | /* |
7132 | * First, look for the scsi command in the waiting list |
7133 | */ |
7134 | if (remove_from_waiting_list(np, cmd)) {
7135 | SetScsiAbortResult(cmd);
7136 | ncr_queue_done_cmd(np, cmd);
7137 | return SCSI_ABORT_SUCCESS;
7138 | } |
7139 | |
7140 | /* |
7141 | * Then, look in the wakeup list |
7142 | */ |
7143 | for (cp=np->ccbc; cp; cp=cp->link_ccb) { |
7144 | /* |
7145 | ** look for the ccb of this command. |
7146 | */ |
7147 | if (cp->host_status == HS_IDLE) continue;
7148 | if (cp->cmd == cmd) |
7149 | break; |
7150 | } |
7151 | |
7152 | if (!cp) { |
7153 | return SCSI_ABORT_NOT_RUNNING;
7154 | } |
7155 | |
7156 | /* |
7157 | ** Keep track we have to abort this job. |
7158 | */ |
7159 | cp->to_abort = 1; |
7160 | |
7161 | /* |
7162 | ** Tell the SCRIPTS processor to stop |
7163 | ** and synchronize with us. |
7164 | */ |
7165 | np->istat_sem = SEM;
7166 | |
7167 | /* |
7168 | ** If there are no requests, the script |
7169 | ** processor will sleep on SEL_WAIT_RESEL. |
7170 | ** Let's wake it up, since it may have to work. |
7171 | */ |
7172 | OUTB (nc_istat, SIGP|SEM);
7173 | |
7174 | /* |
7175 | ** Tell user we are working for him. |
7176 | */ |
7177 | return SCSI_ABORT_PENDING;
7178 | } |
7179 | |
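/*
**	Note on the abort path above: the driver itself only marks the
**	CCB with `to_abort' and raises SEM (together with SIGP) in the
**	ISTAT register.  The abort message itself is generated later,
**	when the SCRIPTS processor synchronizes with the C code through
**	the semaphore, which is why the routine returns
**	SCSI_ABORT_PENDING rather than completing the command here.
*/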
7180 | /*========================================================== |
7181 | ** |
7182 | ** Linux release module stuff. |
7183 | ** |
7184 | ** Called before unloading the module |
7185 | ** Detach the host. |
7186 | ** We have to free resources and halt the NCR chip |
7187 | ** |
7188 | **========================================================== |
7189 | */ |
7190 | |
7191 | #ifdef MODULE |
7192 | static int ncr_detach(ncb_p np) |
7193 | { |
7194 | int i; |
7195 | |
7196 | printk("%s: detaching ...\n", ncr_name(np)); |
7197 | |
7198 | /* |
7199 | ** Stop the ncr_timeout process |
7200 | ** Set release_stage to 1 and wait that ncr_timeout() set it to 2. |
7201 | */ |
7202 | np->release_stage = 1; |
7203 | for (i = 50 ; i && np->release_stage != 2 ; i--) MDELAY (100); |
7204 | if (np->release_stage != 2) |
7205 | printk("%s: the timer seems to be already stopped\n", |
7206 | ncr_name(np)); |
7207 | else np->release_stage = 2; |
7208 | |
7209 | /* |
7210 | ** Reset NCR chip. |
7211 | ** We should use ncr_soft_reset(), but we do not want to do
7212 | ** so, since we may not be safe if interrupts occur. |
7213 | */ |
7214 | |
7215 | printk("%s: resetting chip\n", ncr_name(np)); |
7216 | ncr_chip_reset(np); |
7217 | |
7218 | /* |
7219 | ** Restore bios setting for automatic clock detection. |
7220 | */ |
7221 | OUTB(nc_dmode, np->sv_dmode);
7222 | OUTB(nc_dcntl, np->sv_dcntl);
7223 | OUTB(nc_ctest3, np->sv_ctest3);
7224 | OUTB(nc_ctest4, np->sv_ctest4);
7225 | OUTB(nc_ctest5, np->sv_ctest5);
7226 | OUTB(nc_gpcntl, np->sv_gpcntl);
7227 | OUTB(nc_stest2, np->sv_stest2);
7228 | |
7229 | ncr_selectclock(np, np->sv_scntl3); |
7230 | /* |
7231 | ** Free host resources |
7232 | */ |
7233 | ncr_free_resources(np); |
7234 | |
7235 | return 1; |
7236 | } |
7237 | #endif |
7238 | |
7239 | /*========================================================== |
7240 | ** |
7241 | ** |
7242 | ** Complete execution of a SCSI command. |
7243 | ** Signal completion to the generic SCSI driver. |
7244 | ** |
7245 | ** |
7246 | **========================================================== |
7247 | */ |
7248 | |
7249 | void ncr_complete (ncb_p np, ccb_p cp) |
7250 | { |
7251 | Scsi_Cmnd *cmd; |
7252 | tcb_p tp; |
7253 | lcb_p lp; |
7254 | |
7255 | /* |
7256 | ** Sanity check |
7257 | */ |
7258 | if (!cp || !cp->cmd) |
7259 | return; |
7260 | |
7261 | /* |
7262 | ** Print some debugging info. |
7263 | */ |
7264 | |
7265 | if (DEBUG_FLAGS & DEBUG_TINY)
7266 | printk ("CCB=%lx STAT=%x/%x\n", (unsigned long)cp,
7267 | cp->host_status,cp->scsi_status);
7268 | |
7269 | /* |
7270 | ** Get command, target and lun pointers. |
7271 | */ |
7272 | |
7273 | cmd = cp->cmd; |
7274 | cp->cmd = NULL;
7275 | tp = &np->target[cp->target];
7276 | lp = ncr_lp(np, tp, cp->lun);
7277 | |
7278 | /* |
7279 | ** We do not queue more than 1 ccb per target
7280 | ** with negotiation at any time. If this ccb was |
7281 | ** used for negotiation, clear this info in the tcb. |
7282 | */ |
7283 | |
7284 | if (cp == tp->nego_cp) |
7285 | tp->nego_cp = 0; |
7286 | |
7287 | #ifdef SCSI_NCR_IARB_SUPPORT |
7288 | /* |
7289 | ** We just complete the last queued CCB. |
7290 | ** Clear this info that is no more relevant. |
7291 | */ |
7292 | if (cp == np->last_cp) |
7293 | np->last_cp = 0; |
7294 | #endif |
7295 | |
7296 | /* |
7297 | ** If auto-sense performed, change scsi status, |
7298 | ** Otherwise, compute the residual. |
7299 | */ |
7300 | if (cp->host_flags & HF_AUTO_SENSE) {
7301 | cp->scsi_status = cp->sv_scsi_status;
7302 | cp->xerr_status = cp->sv_xerr_status; |
7303 | } |
7304 | else { |
7305 | cp->resid = 0; |
7306 | if (cp->xerr_status || |
7307 | cp->phys.header.lastp != cp->phys.header.goalp) |
7308 | cp->resid = ncr_compute_residual(np, cp); |
7309 | } |
7310 | |
7311 | /* |
7312 | ** Check for extended errors. |
7313 | */ |
7314 | |
7315 | if (cp->xerr_status) { |
7316 | if (cp->xerr_status & XE_PARITY_ERR) {
7317 | PRINT_ADDR(cmd);
7318 | printk ("unrecovered SCSI parity error.\n");
7319 | }
7320 | if (cp->xerr_status & XE_EXTRA_DATA) {
7321 | PRINT_ADDR(cmd);
7322 | printk ("extraneous data discarded.\n");
7323 | }
7324 | if (cp->xerr_status & XE_BAD_PHASE) {
7325 | PRINT_ADDR(cmd);
7326 | printk ("illegal scsi phase (4/5).\n");
7327 | }
7328 | if (cp->xerr_status & XE_SODL_UNRUN) {
7329 | PRINT_ADDR(cmd);
7330 | printk ("ODD transfer in DATA OUT phase.\n");
7331 | }
7332 | if (cp->xerr_status & XE_SWIDE_OVRUN){
7333 | PRINT_ADDR(cmd);
7334 | printk ("ODD transfer in DATA IN phase.\n");
7335 | }
7336 | 
7337 | if (cp->host_status==HS_COMPLETE)
7338 | cp->host_status = HS_FAIL;
7339 | } |
7340 | |
7341 | /* |
7342 | ** Print out any error for debugging purpose. |
7343 | */ |
7344 | if (DEBUG_FLAGS & (DEBUG_RESULT|DEBUG_TINY)) {
7345 | if (cp->host_status!=HS_COMPLETE || cp->scsi_status!=S_GOOD ||
7346 | cp->resid) {
7347 | PRINT_ADDR(cmd);
7348 | printk ("ERROR: cmd=%x host_status=%x scsi_status=%x "
7349 | "data_len=%d residual=%d\n",
7350 | cmd->cmnd[0], cp->host_status, cp->scsi_status,
7351 | cp->data_len, cp->resid); |
7352 | } |
7353 | } |
7354 | |
7355 | #if LINUX_VERSION_CODE >= LinuxVersionCode(2,3,99)
7356 | /* |
7357 | ** Move residual byte count to user structure. |
7358 | */ |
7359 | cmd->resid = cp->resid; |
7360 | #endif |
7361 | /* |
7362 | ** Check the status. |
7363 | */ |
7364 | if ( (cp->host_status == HS_COMPLETE)
7365 | && (cp->scsi_status == S_GOOD ||
7366 | cp->scsi_status == S_COND_MET)) {
7367 | /* |
7368 | ** All went well (GOOD status). |
7369 | ** CONDITION MET status is returned on |
7370 | ** `Pre-Fetch' or `Search data' success. |
7371 | */ |
7372 | SetScsiResult(cmd, DID_OK, cp->scsi_status);
7373 | |
7374 | /* |
7375 | ** Allocate the lcb if not yet. |
7376 | */ |
7377 | if (!lp) |
7378 | ncr_alloc_lcb (np, cp->target, cp->lun); |
7379 | |
7380 | /* |
7381 | ** On standard INQUIRY response (EVPD and CmDt |
7382 | ** not set), setup logical unit according to |
7383 | ** announced capabilities (we need the first 7 bytes).
7384 | */ |
7385 | if (cmd->cmnd[0] == 0x12 && !(cmd->cmnd[1] & 0x3) && |
7386 | cmd->cmnd[4] >= 7 && !cmd->use_sg) { |
7387 | sync_scsi_data(np, cmd); /* SYNC the data */
7388 | ncr_setup_lcb (np, cp->target, cp->lun, |
7389 | (char *) cmd->request_buffer); |
7390 | } |
7391 | |
7392 | /* |
7393 | ** If tags was reduced due to queue full, |
7394 | ** increase tags if 1000 good status received. |
7395 | */ |
7396 | if (lp && lp->usetags && lp->numtags < lp->maxtags) { |
7397 | ++lp->num_good; |
7398 | if (lp->num_good >= 1000) { |
7399 | lp->num_good = 0; |
7400 | ++lp->numtags; |
7401 | ncr_setup_tags (np, cp->target, cp->lun); |
7402 | } |
7403 | } |
7404 | } else if ((cp->host_status == HS_COMPLETE)
7405 | && (cp->scsi_status == S_CHECK_COND)) {
7406 | /* |
7407 | ** Check condition code |
7408 | */ |
7409 | SetScsiResult(cmd, DID_OK, S_CHECK_COND);
7410 | |
7411 | if (DEBUG_FLAGS & (DEBUG_RESULT|DEBUG_TINY)) {
7412 | PRINT_ADDR(cmd); |
7413 | ncr_printl_hex("sense data:", cmd->sense_buffer, 14); |
7414 | } |
7415 | } else if ((cp->host_status == HS_COMPLETE)
7416 | && (cp->scsi_status == S_CONFLICT)) {
7417 | /* |
7418 | ** Reservation Conflict condition code |
7419 | */ |
7420 | SetScsiResult(cmd, DID_OK, S_CONFLICT);
7421 | |
7422 | } else if ((cp->host_status == HS_COMPLETE)
7423 | && (cp->scsi_status == S_BUSY ||
7424 | cp->scsi_status == S_QUEUE_FULL)) {
7425 | |
7426 | /* |
7427 | ** Target is busy. |
7428 | */ |
7429 | SetScsiResult(cmd, DID_OK, cp->scsi_status);
7430 | |
7431 | } else if ((cp->host_status == HS_SEL_TIMEOUT)
7432 | || (cp->host_status == HS_TIMEOUT)) {
7433 | |
7434 | /* |
7435 | ** No response |
7436 | */ |
7437 | SetScsiResult(cmd, DID_TIME_OUT, cp->scsi_status);
7438 | |
7439 | } else if (cp->host_status == HS_RESET) {
7440 | |
7441 | /* |
7442 | ** SCSI bus reset |
7443 | */ |
7444 | SetScsiResult(cmd, DID_RESET, cp->scsi_status);
7445 | |
7446 | } else if (cp->host_status == HS_ABORTED) {
7447 | |
7448 | /* |
7449 | ** Transfer aborted |
7450 | */ |
7451 | SetScsiAbortResult(cmd);
7452 | |
7453 | } else { |
7454 | int did_status; |
7455 | |
7456 | /* |
7457 | ** Other protocol messes |
7458 | */ |
7459 | PRINT_ADDR(cmd); |
7460 | printk ("COMMAND FAILED (%x %x) @%p.\n", |
7461 | cp->host_status, cp->scsi_status, cp);
7462 | |
7463 | did_status = DID_ERROR;
7464 | if (cp->xerr_status & XE_PARITY_ERR)
7465 | did_status = DID_PARITY;
7466 | 
7467 | SetScsiResult(cmd, did_status, cp->scsi_status);
7468 | } |
7469 | |
7470 | /* |
7471 | ** trace output |
7472 | */ |
7473 | |
7474 | if (tp->usrflag & UF_TRACE) {
7475 | PRINT_ADDR(cmd); |
7476 | printk (" CMD:"); |
7477 | ncr_print_hex(cmd->cmnd, cmd->cmd_len); |
7478 | |
7479 | if (cp->host_status==HS_COMPLETE) {
7480 | switch (cp->scsi_status) {
7481 | case S_GOOD:
7482 | printk (" GOOD");
7483 | break;
7484 | case S_CHECK_COND:
7485 | printk (" SENSE:");
7486 | ncr_print_hex(cmd->sense_buffer, 14);
7487 | break;
7488 | default:
7489 | printk (" STAT: %x\n", cp->scsi_status);
7490 | break;
7491 | }
7492 | } else printk (" HOSTERROR: %x", cp->host_status);
7493 | printk ("\n"); |
7494 | } |
7495 | |
7496 | /* |
7497 | ** Free this ccb |
7498 | */ |
7499 | ncr_free_ccb (np, cp); |
7500 | |
7501 | /* |
7502 | ** requeue awaiting scsi commands for this lun. |
7503 | */ |
7504 | if (lp && lp->queuedccbs < lp->queuedepth && |
7505 | !xpt_que_empty(&lp->wait_ccbq)) |
7506 | ncr_start_next_ccb(np, lp, 2); |
7507 | |
7508 | /* |
7509 | ** requeue awaiting scsi commands for this controller. |
7510 | */ |
7511 | if (np->waiting_list) |
7512 | requeue_waiting_list(np);
7513 | |
7514 | /* |
7515 | ** signal completion to generic driver. |
7516 | */ |
7517 | ncr_queue_done_cmd(np, cmd); |
7518 | } |
7519 | |
7520 | /*========================================================== |
7521 | ** |
7522 | ** |
7523 | ** Signal all (or one) control block done. |
7524 | ** |
7525 | ** |
7526 | **========================================================== |
7527 | */ |
7528 | |
7529 | /* |
7530 | ** The NCR has completed CCBs. |
7531 | ** Look at the DONE QUEUE. |
7532 | ** |
7533 | ** On architectures that may reorder LOAD/STORE operations, |
7534 | ** a memory barrier may be needed after the reading of the |
7535 | ** so-called `flag' and prior to dealing with the data. |
7536 | */ |
7537 | int ncr_wakeup_done (ncb_p np) |
7538 | { |
7539 | ccb_p cp; |
7540 | int i, n; |
7541 | u_long dsa;
7542 | |
7543 | n = 0; |
7544 | i = np->dqueueget; |
7545 | while (1) { |
7546 | dsa = scr_to_cpu(np->dqueue[i]);
7547 | if (!dsa) |
7548 | break; |
7549 | np->dqueue[i] = 0; |
7550 | if ((i = i+2) >= MAX_START*2)
7551 | i = 0; |
7552 | |
7553 | cp = ncr_ccb_from_dsa(np, dsa); |
7554 | if (cp) { |
7555 | MEMORY_BARRIER();
7556 | ncr_complete (np, cp); |
7557 | ++n; |
7558 | } |
7559 | else |
7560 | printk (KERN_ERR "%s: bad DSA (%lx) in done queue.\n",
7561 | ncr_name(np), dsa); |
7562 | } |
7563 | np->dqueueget = i; |
7564 | |
7565 | return n; |
7566 | } |
7567 | |
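/*
**	Note on the done queue scanned above: like the start queue it is
**	a ring of MAX_START*2 entries, with the DSA (CCB bus address) in
**	even slots and the link to the next pair in odd slots.  The chip
**	writes the DSA of each completed CCB; the driver consumes entries
**	starting at `dqueueget', zeroing each slot as it goes so that a
**	null DSA marks the end of the current batch.
*/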
7568 | /* |
7569 | ** Complete all active CCBs. |
7570 | */ |
7571 | void ncr_wakeup (ncb_p np, u_long code)
7572 | { |
7573 | ccb_p cp = np->ccbc; |
7574 | |
7575 | while (cp) { |
7576 | if (cp->host_status != HS_IDLE) {
7577 | cp->host_status = code;
7578 | ncr_complete (np, cp); |
7579 | } |
7580 | cp = cp->link_ccb; |
7581 | } |
7582 | } |
7583 | |
7584 | /*========================================================== |
7585 | ** |
7586 | ** |
7587 | ** Start NCR chip. |
7588 | ** |
7589 | ** |
7590 | **========================================================== |
7591 | */ |
7592 | |
7593 | void ncr_init (ncb_p np, int reset, char * msg, u_long code)
7594 | { |
7595 | int i; |
7596 | u_long phys;
7597 | |
7598 | /* |
7599 | ** Reset chip if asked, otherwise just clear fifos. |
7600 | */ |
7601 | |
7602 | if (reset) |
7603 | ncr_soft_reset(np); |
7604 | else { |
7605 | OUTB (nc_stest3, TE|CSF);
7606 | OUTONB (nc_ctest3, CLF);
7607 | } |
7608 | |
7609 | /* |
7610 | ** Message. |
7611 | */ |
7612 | |
7613 | if (msg) printk (KERN_INFO "%s: restart (%s).\n", ncr_name (np), msg);
7614 | |
7615 | /* |
7616 | ** Clear Start Queue |
7617 | */ |
7618 | phys = np->p_squeue; |
7619 | np->queuedepth = MAX_START - 1; /* 1 entry needed as end marker */
7620 | for (i = 0; i < MAX_START*2; i += 2) {
7621 | np->squeue[i] = cpu_to_scr(np->p_idletask);
7622 | np->squeue[i+1] = cpu_to_scr(phys + (i+2)*4);
7623 | }
7624 | np->squeue[MAX_START*2-1] = cpu_to_scr(phys);
7625 | |
7626 | |
7627 | /* |
7628 | ** Start at first entry. |
7629 | */ |
7630 | np->squeueput = 0; |
7631 | np->scripth0->startpos[0] = cpu_to_scr(phys);
7632 | |
7633 | /* |
7634 | ** Clear Done Queue |
7635 | */ |
7636 | phys = vtobus(np->dqueue);
7637 | for (i = 0; i < MAX_START*2; i += 2) {
7638 | np->dqueue[i] = 0;
7639 | np->dqueue[i+1] = cpu_to_scr(phys + (i+2)*4);
7640 | }
7641 | np->dqueue[MAX_START*2-1] = cpu_to_scr(phys);
7642 | |
7643 | /* |
7644 | ** Start at first entry. |
7645 | */ |
7646 | np->scripth0->done_pos[0] = cpu_to_scr(phys);
7647 | np->dqueueget = 0; |
7648 | |
7649 | /* |
7650 | ** Wakeup all pending jobs. |
7651 | */ |
7652 | ncr_wakeup (np, code); |
7653 | |
7654 | /* |
7655 | ** Init chip. |
7656 | */ |
7657 | |
7658 | OUTB (nc_istat, 0x00 ); /* Remove Reset, abort */
7659 | UDELAY (2000); /* The 895 needs time for the bus mode to settle */
7660 | 
7661 | OUTB (nc_scntl0, np->rv_scntl0 | 0xc0);
7662 | /* full arb., ena parity, par->ATN */
7663 | OUTB (nc_scntl1, 0x00); /* odd parity, and remove CRST!! */
7664 | 
7665 | ncr_selectclock(np, np->rv_scntl3); /* Select SCSI clock */
7666 | 
7667 | OUTB (nc_scid , RRE|np->myaddr); /* Adapter SCSI address */
7668 | OUTW (nc_respid, 1ul<<np->myaddr); /* Id to respond to */
7669 | OUTB (nc_istat , SIGP ); /* Signal Process */
7670 | OUTB (nc_dmode , np->rv_dmode); /* Burst length, dma mode */
7671 | OUTB (nc_ctest5, np->rv_ctest5); /* Large fifo + large burst */
7672 | 
7673 | OUTB (nc_dcntl , NOCOM|np->rv_dcntl); /* Protect SFBR */
7674 | OUTB (nc_ctest3, np->rv_ctest3); /* Write and invalidate */
7675 | OUTB (nc_ctest4, np->rv_ctest4); /* Master parity checking */
7676 | 
7677 | if ((np->device_id != PCI_DEVICE_ID_LSI_53C1010) &&
7678 | (np->device_id != PCI_DEVICE_ID_LSI_53C1010_66)){
7679 | OUTB (nc_stest2, EXT|np->rv_stest2);
7680 | /* Extended Sreq/Sack filtering, not supported in C1010/C1010_66 */
7681 | }
7682 | OUTB (nc_stest3, TE); /* TolerANT enable */
7683 | OUTB (nc_stime0, 0x0c); /* HTH disabled STO 0.25 sec */
7684 | |
7685 | /* |
7686 | ** DEL 441 - 53C876 Rev 5 - Part Number 609-0392787/2788 - ITEM 2. |
7687 | ** Disable overlapped arbitration for all dual-function |
7688 | ** devices, regardless revision id. |
7689 | ** We may consider it a post-chip-design feature. ;-)
7690 | ** |
7691 | ** Errata applies to all 896 and 1010 parts. |
7692 | */ |
7693 | if (np->device_id == PCI_DEVICE_ID_NCR_53C875)
7694 | OUTB (nc_ctest0, (1<<5));
7695 | else if (np->device_id == PCI_DEVICE_ID_NCR_53C896 ||
7696 | np->device_id == PCI_DEVICE_ID_LSI_53C1010 ||
7697 | np->device_id == PCI_DEVICE_ID_LSI_53C1010_66 )
7698 | np->rv_ccntl0 |= DPR;
7699 | |
7700 | /* |
7701 | ** C1010_66MHz rev 0 part requires AIPCNTL1 bit 3 to be set.
7702 | */ |
7703 | if (np->device_id == PCI_DEVICE_ID_LSI_53C1010_66)
7704 | OUTB(nc_aipcntl1, (1<<3));
7705 | |
7706 | /* |
7707 | ** If 64 bit (895A/896/1010/1010_66) write the CCNTL1 register to |
7708 | ** enable 40 bit address table indirect addressing for MOVE. |
7709 | ** Also write CCNTL0 if 64 bit chip, since this register seems |
7710 | ** to only be used by 64 bit cores. |
7711 | */ |
7712 | if (np->features & FE_64BIT) {
7713 | OUTB (nc_ccntl0, np->rv_ccntl0);
7714 | OUTB (nc_ccntl1, np->rv_ccntl1);
7715 | } |
7716 | |
7717 | /* |
7718 | ** If phase mismatch handled by scripts (53C895A or 53C896 |
7719 | ** or 53C1010 or 53C1010_66), set PM jump addresses. |
7720 | */ |
7721 | |
7722 | if (np->features & FE_NOPM) {
7723 | printk(KERN_INFO "%s: handling phase mismatch from SCRIPTS.\n",
7724 | ncr_name(np));
7725 | OUTL (nc_pmjad1, NCB_SCRIPTH_PHYS (np, pm_handle));
7726 | OUTL (nc_pmjad2, NCB_SCRIPTH_PHYS (np, pm_handle));
7727 | } |
7728 | |
7729 | /* |
7730 | ** Enable GPIO0 pin for writing if LED support from SCRIPTS. |
7731 | ** Also set GPIO5 and clear GPIO6 if hardware LED control. |
7732 | */ |
7733 | |
7734 | if (np->features & FE_LED0)
7735 | OUTB(nc_gpcntl, INB(nc_gpcntl) & ~0x01);
7736 | else if (np->features & FE_LEDC)
7737 | OUTB(nc_gpcntl, (INB(nc_gpcntl) & ~0x41) | 0x20);
7738 | |
7739 | |
7740 | /* |
7741 | ** enable ints |
7742 | */ |
7743 | |
7744 | OUTW (nc_sien , STO|HTH|MA|SGE|UDC|RST|PAR);
7745 | OUTB (nc_dien , MDPE|BF|SSI|SIR|IID);
7746 | |
7747 | /* |
7748 | ** For 895/895A/896/c1010 |
7749 | ** Enable SBMC interrupt and save current SCSI bus mode. |
7750 | */ |
7751 | if ( (np->features & FE_ULTRA2) || (np->features & FE_ULTRA3) ) {
7752 | OUTONW (nc_sien, SBMC);
7753 | np->scsi_mode = INB (nc_stest4) & SMODE;
7754 | } |
7755 | |
7756 | /* |
7757 | ** Fill in target structure. |
7758 | ** Reinitialize usrsync. |
7759 | ** Reinitialize usrwide. |
7760 | ** Prepare sync negotiation according to actual SCSI bus mode. |
7761 | */ |
7762 | |
7763 | for (i=0;i<MAX_TARGET;i++) {
7764 | tcb_p tp = &np->target[i]; |
7765 | |
7766 | tp->to_reset = 0; |
7767 | |
7768 | tp->sval = 0; |
7769 | tp->wval = np->rv_scntl3; |
7770 | tp->uval = np->rv_scntl4; |
7771 | |
7772 | if (tp->usrsync != 255) { |
7773 | if (tp->usrsync <= np->maxsync) { |
7774 | if (tp->usrsync < np->minsync) { |
7775 | tp->usrsync = np->minsync; |
7776 | } |
7777 | } |
7778 | else |
7779 | tp->usrsync = 255; |
7780 | }; |
7781 | |
7782 | if (tp->usrwide > np->maxwide) |
7783 | tp->usrwide = np->maxwide; |
7784 | |
7785 | ncr_negotiate (np, tp); |
7786 | } |
7787 | |
7788 | /* |
7789 | ** Download SCSI SCRIPTS to on-chip RAM if present, |
7790 | ** and start script processor. |
7791 | ** We do the download preferably from the CPU.
7792 | ** For platforms that may not support PCI memory mapping, |
7793 | ** we use a simple SCRIPTS that performs MEMORY MOVEs. |
7794 | */ |
7795 | if (np->base2_ba) { |
7796 | if (bootverbose)
7797 | printk ("%s: Downloading SCSI SCRIPTS.\n", |
7798 | ncr_name(np)); |
7799 | #ifdef SCSI_NCR_PCI_MEM_NOT_SUPPORTED |
7800 | if (np->base2_ws == 8192) |
7801 | phys = NCB_SCRIPTH0_PHYS (np, start_ram64);
7802 | else
7803 | phys = NCB_SCRIPTH_PHYS (np, start_ram);
7804 | #else |
7805 | if (np->base2_ws == 8192) {
7806 | memcpy_to_pci(np->base2_va + 4096,
7807 | np->scripth0, sizeof(struct scripth));
7808 | OUTL (nc_mmws, np->scr_ram_seg);
7809 | OUTL (nc_mmrs, np->scr_ram_seg);
7810 | OUTL (nc_sfs, np->scr_ram_seg);
7811 | phys = NCB_SCRIPTH_PHYS (np, start64);
7812 | }
7813 | else
7814 | phys = NCB_SCRIPT_PHYS (np, init);
7815 | memcpy_to_pci(np->base2_va, np->script0, sizeof(struct script));
7816 | #endif /* SCSI_NCR_PCI_MEM_NOT_SUPPORTED */ |
7817 | } |
7818 | else |
7819 | phys = NCB_SCRIPT_PHYS (np, init);
7820 | |
7821 | np->istat_sem = 0; |
7822 | |
7823 | OUTL (nc_dsa, np->p_ncb);
7824 | OUTL_DSP (phys);
7825 | } |
7826 | |
7827 | /*========================================================== |
7828 | ** |
7829 | ** Prepare the negotiation values for wide and |
7830 | ** synchronous transfers. |
7831 | ** |
7832 | **========================================================== |
7833 | */ |
7834 | |
7835 | static void ncr_negotiate (struct ncb* np, struct tcb* tp) |
7836 | { |
7837 | /* |
7838 | ** minsync unit is 4ns ! |
7839 | */ |
7840 | |
7841 | u_longunsigned long minsync = tp->usrsync; |
7842 | |
7843 | /* |
7844 | ** SCSI bus mode limit |
7845 | */ |
7846 | |
7847 | if (np->scsi_mode && np->scsi_mode == SMODE_SE0x80) { |
7848 | if (minsync < 12) minsync = 12; |
7849 | } |
7850 | |
7851 | /* |
7852 | ** our limit .. |
7853 | */ |
7854 | |
7855 | if (minsync < np->minsync) |
7856 | minsync = np->minsync; |
7857 | |
7858 | /* |
7859 | ** divider limit |
7860 | */ |
7861 | |
7862 | if (minsync > np->maxsync) |
7863 | minsync = 255; |
7864 | |
7865 | tp->minsync = minsync; |
7866 | tp->maxoffs = (minsync<255 ? np->maxoffs : 0); |
7867 | |
7868 | /* |
7869 | ** period=0: has to negotiate sync transfer |
7870 | */ |
7871 | |
7872 | tp->period=0; |
7873 | |
7874 | /* |
7875 | ** widedone=0: has to negotiate wide transfer |
7876 | */ |
7877 | tp->widedone=0; |
7878 | } |
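
A minimal standalone sketch of the clamping policy used by ncr_negotiate() above; all names here are hypothetical, only the SE-mode floor of 12 and the 255 "no sync" sentinel come from the listing.

/* Illustrative sketch only -- not driver code. */
unsigned clamp_minsync(unsigned usrsync, int single_ended,
                       unsigned chip_minsync, unsigned chip_maxsync)
{
        unsigned minsync = usrsync;

        if (single_ended && minsync < 12)   /* SE bus: limit the rate   */
                minsync = 12;
        if (minsync < chip_minsync)         /* controller lower limit   */
                minsync = chip_minsync;
        if (minsync > chip_maxsync)         /* divider limit: no sync   */
                minsync = 255;
        return minsync;
}
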
7879 | |
7880 | /*========================================================== |
7881 | ** |
7882 | ** Get clock factor and sync divisor for a given |
7883 | ** synchronous factor period. |
7884 | ** Returns the clock factor (in sxfer) and scntl3 |
7885 | ** synchronous divisor field. |
7886 | ** |
7887 | **========================================================== |
7888 | */ |
7889 | |
7890 | static void ncr_getsync(ncb_p np, u_charunsigned char sfac, u_charunsigned char *fakp, u_charunsigned char *scntl3p) |
7891 | { |
7892 | u_longunsigned long clk = np->clock_khz; /* SCSI clock frequency in kHz */ |
7893 | int div = np->clock_divn; /* Number of divisors supported */ |
7894 | u_longunsigned long fak; /* Sync factor in sxfer */ |
7895 | u_longunsigned long per; /* Period in tenths of ns */ |
7896 | u_longunsigned long kpc; /* (per * clk) */ |
7897 | |
7898 | /* |
7899 | ** Compute the synchronous period in tenths of nano-seconds |
7900 | ** from sfac. |
7901 | ** |
7902 | ** Note, if sfac == 9, DT is being used. Double the period of 125 |
7903 | ** to 250. |
7904 | */ |
7905 | if (sfac <= 10) per = 250; |
7906 | else if (sfac == 11) per = 303; |
7907 | else if (sfac == 12) per = 500; |
7908 | else per = 40 * sfac; |
7909 | |
7910 | /* |
7911 | ** Look for the greatest clock divisor that allows an |
7912 | ** input speed faster than the period. |
7913 | */ |
7914 | kpc = per * clk; |
7915 | while (--div >= 0) |
7916 | if (kpc >= (div_10M[div] << 2)) break; |
7917 | |
7918 | /* |
7919 | ** Calculate the lowest clock factor that allows an output |
7920 | ** speed not faster than the period. |
7921 | */ |
7922 | fak = (kpc - 1) / div_10M[div] + 1; |
7923 | |
7924 | #if 0 /* This optimization does not seem very useful */ |
7925 | |
7926 | per = (fak * div_10M[div]) / clk; |
7927 | |
7928 | /* |
7929 | ** Why not try the immediately lower divisor and choose |
7930 | ** the one that allows the fastest output speed? |
7931 | ** We don't want the input speed to be much greater than the output speed. |
7932 | */ |
7933 | if (div >= 1 && fak < 8) { |
7934 | u_longunsigned long fak2, per2; |
7935 | fak2 = (kpc - 1) / div_10M[div-1] + 1; |
7936 | per2 = (fak2 * div_10M[div-1]) / clk; |
7937 | if (per2 < per && fak2 <= 8) { |
7938 | fak = fak2; |
7939 | per = per2; |
7940 | --div; |
7941 | } |
7942 | } |
7943 | #endif |
7944 | |
7945 | if (fak < 4) fak = 4; /* Should never happen, too bad ... */ |
7946 | |
7947 | /* |
7948 | ** Compute and return sync parameters for the ncr |
7949 | */ |
7950 | *fakp = fak - 4; |
7951 | |
7952 | /* |
7953 | ** If sfac < 25, and 8xx parts, desire that the chip operate at |
7954 | ** least at Ultra speeds. Must set bit 7 of scntl3. |
7955 | ** For C1010, do not set this bit. If operating at Ultra3 speeds, |
7956 | ** set the U3EN bit instead. |
7957 | */ |
7958 | if ((np->device_id == PCI_DEVICE_ID_LSI_53C10100x20) || |
7959 | (np->device_id == PCI_DEVICE_ID_LSI_53C1010_660x21)) { |
7960 | *scntl3p = (div+1) << 4; |
7961 | *fakp = 0; |
7962 | } |
7963 | else { |
7964 | *scntl3p = ((div+1) << 4) + (sfac < 25 ? 0x80 : 0); |
7965 | *fakp = fak - 4; |
7966 | } |
7967 | } |
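
The period/divisor/factor arithmetic in ncr_getsync() can be exercised in isolation. The following is a hedged user-space sketch: the div_10M table contents and the 160 MHz (160000 kHz) clock value are assumptions typical for this controller family, not values shown in this listing.

#include <stdio.h>

static const unsigned long div_10M[] = {
        2*5000000UL, 3*5000000UL, 4*5000000UL, 6*5000000UL,
        8*5000000UL, 12*5000000UL, 16*5000000UL
};

int main(void)
{
        unsigned sfac = 10;                  /* negotiated sync factor      */
        unsigned long clk = 160000;          /* SCSI clock in kHz (assumed) */
        int div = sizeof(div_10M) / sizeof(div_10M[0]);
        unsigned long per, kpc, fak;

        /* Synchronous period in tenths of ns, same mapping as the driver. */
        if (sfac <= 10)      per = 250;
        else if (sfac == 11) per = 303;
        else if (sfac == 12) per = 500;
        else                 per = 40UL * sfac;

        /* Greatest divisor giving an input speed faster than the period.  */
        kpc = per * clk;
        while (--div >= 0)
                if (kpc >= (div_10M[div] << 2))
                        break;
        if (div < 0)            /* sketch-only guard; the driver's maxsync */
                div = 0;        /* limit keeps this from happening         */

        /* Lowest clock factor not faster than the period.                 */
        fak = (kpc - 1) / div_10M[div] + 1;
        if (fak < 4)
                fak = 4;

        printf("per=%lu x0.1ns  div=%d  fak=%lu  sxfer.fak=%lu  scntl3.div=%d\n",
               per, div, fak, fak - 4, div + 1);
        return 0;
}
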
7968 | |
7969 | /*========================================================== |
7970 | ** |
7971 | ** Utility routine to return the current bus width |
7972 | ** synchronous period and offset. |
7973 | ** Utilizes target sval, wval and uval |
7974 | ** |
7975 | **========================================================== |
7976 | */ |
7977 | static void ncr_get_xfer_info(ncb_p np, tcb_p tp, u_charunsigned char *factor, |
7978 | u_charunsigned char *offset, u_charunsigned char *width) |
7979 | { |
7980 | |
7981 | u_charunsigned char idiv; |
7982 | u_longunsigned long period; |
7983 | |
7984 | *width = (tp->wval & EWS0x08) ? 1 : 0; |
7985 | |
7986 | if ((np->device_id == PCI_DEVICE_ID_LSI_53C10100x20) || |
7987 | (np->device_id == PCI_DEVICE_ID_LSI_53C1010_660x21)) |
7988 | *offset = (tp->sval & 0x3f); |
7989 | else |
7990 | *offset = (tp->sval & 0x1f); |
7991 | |
7992 | /* |
7993 | * The midlayer signals the driver that all of the SCSI commands |
7994 | * for the integrity check have completed. Save the negotiated |
7995 | * parameters (extracted from sval, wval and uval). |
7996 | * See ncr_setsync for alg. details. |
7997 | */ |
7998 | |
7999 | idiv = (tp->wval>>4) & 0x07; |
8000 | |
8001 | if ( *offset && idiv ) { |
8002 | if ((np->device_id == PCI_DEVICE_ID_LSI_53C10100x20) || |
8003 | (np->device_id == PCI_DEVICE_ID_LSI_53C1010_660x21)){ |
8004 | if (tp->uval & 0x80) |
8005 | period = (2*div_10M[idiv-1])/np->clock_khz; |
8006 | else |
8007 | period = (4*div_10M[idiv-1])/np->clock_khz; |
8008 | } |
8009 | else |
8010 | period = (((tp->sval>>5)+4)*div_10M[idiv-1])/np->clock_khz; |
8011 | } |
8012 | else |
8013 | period = 0xffff; |
8014 | |
8015 | if (period <= 125) *factor = 9; |
8016 | else if (period <= 250) *factor = 10; |
8017 | else if (period <= 303) *factor = 11; |
8018 | else if (period <= 500) *factor = 12; |
8019 | else *factor = (period + 40 - 1) / 40; |
8020 | |
8021 | } |
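
The mapping at the end of ncr_get_xfer_info() from an achieved period back to a SCSI sync factor is a simple threshold walk. A small hedged sketch of just that mapping (helper name hypothetical):

/* Illustrative sketch only: period is in tenths of ns, as in the listing. */
unsigned char period_to_factor(unsigned long period)
{
        if (period <= 125)  return  9;          /* DT, FAST-80            */
        if (period <= 250)  return 10;          /* FAST-40                */
        if (period <= 303)  return 11;
        if (period <= 500)  return 12;          /* FAST-20                */
        return (unsigned char)((period + 40 - 1) / 40); /* 4 ns units     */
}
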
8022 | |
8023 | |
8024 | /*========================================================== |
8025 | ** |
8026 | ** Set actual values, sync status and patch all ccbs of |
8027 | ** a target according to new sync/wide agreement. |
8028 | ** |
8029 | **========================================================== |
8030 | */ |
8031 | |
8032 | static void ncr_set_sync_wide_status (ncb_p np, u_charunsigned char target) |
8033 | { |
8034 | ccb_p cp; /* assigned by the loop below; the old initializer to np->ccbc was a dead store */ |
Value stored to 'cp' during its initialization is never read | |
8035 | tcb_p tp = &np->target[target]; |
8036 | |
8037 | /* |
8038 | ** set actual value and sync_status |
8039 | ** |
8040 | ** TEMP register contains current scripts address |
8041 | ** which is data type/direction/dependent. |
8042 | */ |
8043 | OUTB (nc_sxfer, tp->sval)((*(volatile unsigned char *) ((char *)np->reg + (((size_t ) (&((struct ncr_reg *)0)->nc_sxfer))))) = (((tp->sval )))); |
8044 | OUTB (nc_scntl3, tp->wval)((*(volatile unsigned char *) ((char *)np->reg + (((size_t ) (&((struct ncr_reg *)0)->nc_scntl3))))) = (((tp-> wval)))); |
8045 | if ((np->device_id == PCI_DEVICE_ID_LSI_53C10100x20) || |
8046 | (np->device_id == PCI_DEVICE_ID_LSI_53C1010_660x21)) |
8047 | OUTB (nc_scntl4, tp->uval)((*(volatile unsigned char *) ((char *)np->reg + (((size_t ) (&((struct ncr_reg *)0)->nc_scntl4))))) = (((tp-> uval)))); |
8048 | |
8049 | /* |
8050 | ** patch ALL ccbs of this target. |
8051 | */ |
8052 | for (cp = np->ccbc; cp; cp = cp->link_ccb) { |
8053 | if (cp->host_statusphys.header.status[1] == HS_IDLE(0)) |
8054 | continue; |
8055 | if (cp->target != target) |
8056 | continue; |
8057 | cp->phys.select.sel_scntl3 = tp->wval; |
8058 | cp->phys.select.sel_sxfer = tp->sval; |
8059 | if ((np->device_id == PCI_DEVICE_ID_LSI_53C10100x20) || |
8060 | (np->device_id == PCI_DEVICE_ID_LSI_53C1010_660x21)) |
8061 | cp->phys.select.sel_scntl4 = tp->uval; |
8062 | }; |
8063 | } |
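
The loop above is the context for the analyzer finding at line 8034: cp is reassigned by the for statement before its first use, so an initializer at the declaration is dead. A self-contained sketch of the same walk, with hypothetical types and field names standing in for the driver's CCB structures:

/* Illustrative sketch only -- not driver code. */
struct ccb_sketch {
        struct ccb_sketch *link_ccb;
        int target;
        int busy;                       /* stands in for host_status != HS_IDLE */
        unsigned char scntl3, sxfer;
};

void patch_all_ccbs(struct ccb_sketch *head, int target,
                    unsigned char wval, unsigned char sval)
{
        struct ccb_sketch *cp;          /* no initializer needed */

        for (cp = head; cp; cp = cp->link_ccb) {
                if (!cp->busy || cp->target != target)
                        continue;
                cp->scntl3 = wval;      /* new wide/divisor value  */
                cp->sxfer  = sval;      /* new sync offset/factor  */
        }
}
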
8064 | |
8065 | /*========================================================== |
8066 | ** |
8067 | ** Switch sync mode for the current job and its target |
8068 | ** |
8069 | **========================================================== |
8070 | */ |
8071 | |
8072 | static void ncr_setsync (ncb_p np, ccb_p cp, u_charunsigned char scntl3, u_charunsigned char sxfer, |
8073 | u_charunsigned char scntl4) |
8074 | { |
8075 | tcb_p tp; |
8076 | u_charunsigned char target = INB (nc_sdid)(*(volatile unsigned char *) ((char *)np->reg + (((size_t) (&((struct ncr_reg *)0)->nc_sdid))))) & 0x0f; |
8077 | u_charunsigned char idiv; |
8078 | u_charunsigned char offset; |
8079 | |
8080 | assert (cp){ if (!(cp)) { (void)panic( "assertion \"%s\" failed: file \"%s\", line %d\n" , "cp", "../linux/src/drivers/scsi/sym53c8xx.c", 8080); } }; |
8081 | if (!cp) return; |
8082 | |
8083 | assert (target == (cp->target & 0xf)){ if (!(target == (cp->target & 0xf))) { (void)panic( "assertion \"%s\" failed: file \"%s\", line %d\n" , "target == (cp->target & 0xf)", "../linux/src/drivers/scsi/sym53c8xx.c" , 8083); } }; |
8084 | |
8085 | tp = &np->target[target]; |
8086 | |
8087 | if ((np->device_id == PCI_DEVICE_ID_LSI_53C10100x20) || |
8088 | (np->device_id == PCI_DEVICE_ID_LSI_53C1010_660x21)) { |
8089 | offset = sxfer & 0x3f; /* bits 5-0 */ |
8090 | scntl3 = (scntl3 & 0xf0) | (tp->wval & EWS0x08); |
8091 | scntl4 = (scntl4 & 0x80); |
8092 | } |
8093 | else { |
8094 | offset = sxfer & 0x1f; /* bits 4-0 */ |
8095 | if (!scntl3 || !offset) |
8096 | scntl3 = np->rv_scntl3; |
8097 | |
8098 | scntl3 = (scntl3 & 0xf0) | (tp->wval & EWS0x08) | |
8099 | (np->rv_scntl3 & 0x07); |
8100 | } |
8101 | |
8102 | |
8103 | /* |
8104 | ** Deduce the value of controller sync period from scntl3. |
8105 | ** period is in tenths of nano-seconds. |
8106 | */ |
8107 | |
8108 | idiv = ((scntl3 >> 4) & 0x7); |
8109 | if ( offset && idiv) { |
8110 | if ((np->device_id == PCI_DEVICE_ID_LSI_53C10100x20) || |
8111 | (np->device_id == PCI_DEVICE_ID_LSI_53C1010_660x21)) { |
8112 | /* Note: If extra data hold clocks are used, |
8113 | * the formulas below must be modified. |
8114 | * When scntl4 == 0, ST mode. |
8115 | */ |
8116 | if (scntl4 & 0x80) |
8117 | tp->period = (2*div_10M[idiv-1])/np->clock_khz; |
8118 | else |
8119 | tp->period = (4*div_10M[idiv-1])/np->clock_khz; |
8120 | } |
8121 | else |
8122 | tp->period = (((sxfer>>5)+4)*div_10M[idiv-1])/np->clock_khz; |
8123 | } |
8124 | else |
8125 | tp->period = 0xffff; |
8126 | |
8127 | |
8128 | /* |
8129 | ** Stop there if sync parameters are unchanged |
8130 | */ |
8131 | if (tp->sval == sxfer && tp->wval == scntl3 && tp->uval == scntl4) return; |
8132 | tp->sval = sxfer; |
8133 | tp->wval = scntl3; |
8134 | tp->uval = scntl4; |
8135 | |
8136 | /* |
8137 | ** Bells and whistles ;-) |
8138 | ** Do not announce negotiations due to auto-sense, |
8139 | ** unless the user really wants us to be verbose. :) |
8140 | */ |
8141 | if ( bootverbose(np->verbose) < 2 && (cp->host_flagsphys.header.status[3] & HF_AUTO_SENSE(1u<<4))) |
8142 | goto next; |
8143 | PRINT_TARGET(np, target); |
8144 | if (offset) { |
8145 | unsigned f10 = 100000 << (tp->widedone ? tp->widedone -1 : 0); |
8146 | unsigned mb10 = (f10 + tp->period/2) / tp->period; |
8147 | char *scsi; |
8148 | |
8149 | /* |
8150 | ** Disable extended Sreq/Sack filtering |
8151 | */ |
8152 | if ((tp->period <= 2000) && |
8153 | (np->device_id != PCI_DEVICE_ID_LSI_53C10100x20) && |
8154 | (np->device_id != PCI_DEVICE_ID_LSI_53C1010_660x21)) |
8155 | OUTOFFB (nc_stest2, EXT)((*(volatile unsigned char *) ((char *)np->reg + (((size_t ) (&((struct ncr_reg *)0)->nc_stest2))))) = ((((*(volatile unsigned char *) ((char *)np->reg + (((size_t) (&((struct ncr_reg *)0)->nc_stest2))))) & ~(0x02))))); |
8156 | |
8157 | /* |
8158 | ** Bells and whistles ;-) |
8159 | */ |
8160 | if (tp->period < 250) scsi = "FAST-80"; |
8161 | else if (tp->period < 500) scsi = "FAST-40"; |
8162 | else if (tp->period < 1000) scsi = "FAST-20"; |
8163 | else if (tp->period < 2000) scsi = "FAST-10"; |
8164 | else scsi = "FAST-5"; |
8165 | |
8166 | printk ("%s %sSCSI %d.%d MB/s (%d ns, offset %d)\n", scsi, |
8167 | tp->widedone > 1 ? "WIDE " : "", |
8168 | mb10 / 10, mb10 % 10, tp->period / 10, offset); |
8169 | } else |
8170 | printk ("%sasynchronous.\n", tp->widedone > 1 ? "wide " : ""); |
8171 | next: |
8172 | /* |
8173 | ** set actual value and sync_status |
8174 | ** patch ALL ccbs of this target. |
8175 | */ |
8176 | ncr_set_sync_wide_status(np, target); |
8177 | } |
8178 | |
8179 | |
8180 | /*========================================================== |
8181 | ** |
8182 | ** Switch wide mode for the current job and its target |
8183 | ** SCSI specs say: a SCSI device that accepts a WDTR |
8184 | ** message shall reset the synchronous agreement to |
8185 | ** asynchronous mode. |
8186 | ** |
8187 | **========================================================== |
8188 | */ |
8189 | |
8190 | static void ncr_setwide (ncb_p np, ccb_p cp, u_charunsigned char wide, u_charunsigned char ack) |
8191 | { |
8192 | u_shortunsigned short target = INB (nc_sdid)(*(volatile unsigned char *) ((char *)np->reg + (((size_t) (&((struct ncr_reg *)0)->nc_sdid))))) & 0x0f; |
8193 | tcb_p tp; |
8194 | u_charunsigned char scntl3; |
8195 | u_charunsigned char sxfer; |
8196 | |
8197 | assert (cp){ if (!(cp)) { (void)panic( "assertion \"%s\" failed: file \"%s\", line %d\n" , "cp", "../linux/src/drivers/scsi/sym53c8xx.c", 8197); } }; |
8198 | if (!cp) return; |
8199 | |
8200 | assert (target == (cp->target & 0xf)){ if (!(target == (cp->target & 0xf))) { (void)panic( "assertion \"%s\" failed: file \"%s\", line %d\n" , "target == (cp->target & 0xf)", "../linux/src/drivers/scsi/sym53c8xx.c" , 8200); } }; |
8201 | |
8202 | tp = &np->target[target]; |
8203 | tp->widedone = wide+1; |
8204 | scntl3 = (tp->wval & (~EWS0x08)) | (wide ? EWS0x08 : 0); |
8205 | |
8206 | sxfer = ack ? 0 : tp->sval; |
8207 | |
8208 | /* |
8209 | ** Stop there if sync/wide parameters are unchanged |
8210 | */ |
8211 | if (tp->sval == sxfer && tp->wval == scntl3) return; |
8212 | tp->sval = sxfer; |
8213 | tp->wval = scntl3; |
8214 | |
8215 | /* |
8216 | ** Bells and whistles ;-) |
8217 | */ |
8218 | if (bootverbose(np->verbose) >= 2) { |
8219 | PRINT_TARGET(np, target); |
8220 | if (scntl3 & EWS0x08) |
8221 | printk ("WIDE SCSI (16 bit) enabled.\n"); |
8222 | else |
8223 | printk ("WIDE SCSI disabled.\n"); |
8224 | } |
8225 | |
8226 | /* |
8227 | ** set actual value and sync_status |
8228 | ** patch ALL ccbs of this target. |
8229 | */ |
8230 | ncr_set_sync_wide_status(np, target); |
8231 | } |
8232 | |
8233 | |
8234 | /*========================================================== |
8235 | ** |
8236 | ** Switch sync/wide mode for the current job and its target |
8237 | ** PPR negotiations only |
8238 | ** |
8239 | **========================================================== |
8240 | */ |
8241 | |
8242 | static void ncr_setsyncwide (ncb_p np, ccb_p cp, u_charunsigned char scntl3, u_charunsigned char sxfer, |
8243 | u_charunsigned char scntl4, u_charunsigned char wide) |
8244 | { |
8245 | tcb_p tp; |
8246 | u_charunsigned char target = INB (nc_sdid)(*(volatile unsigned char *) ((char *)np->reg + (((size_t) (&((struct ncr_reg *)0)->nc_sdid))))) & 0x0f; |
8247 | u_charunsigned char idiv; |
8248 | u_charunsigned char offset; |
8249 | |
8250 | assert (cp){ if (!(cp)) { (void)panic( "assertion \"%s\" failed: file \"%s\", line %d\n" , "cp", "../linux/src/drivers/scsi/sym53c8xx.c", 8250); } }; |
8251 | if (!cp) return; |
8252 | |
8253 | assert (target == (cp->target & 0xf)){ if (!(target == (cp->target & 0xf))) { (void)panic( "assertion \"%s\" failed: file \"%s\", line %d\n" , "target == (cp->target & 0xf)", "../linux/src/drivers/scsi/sym53c8xx.c" , 8253); } }; |
8254 | |
8255 | tp = &np->target[target]; |
8256 | tp->widedone = wide+1; |
8257 | |
8258 | if ((np->device_id == PCI_DEVICE_ID_LSI_53C10100x20) || |
8259 | (np->device_id == PCI_DEVICE_ID_LSI_53C1010_660x21)) { |
8260 | offset = sxfer & 0x3f; /* bits 5-0 */ |
8261 | scntl3 = (scntl3 & 0xf0) | (wide ? EWS0x08 : 0); |
8262 | scntl4 = (scntl4 & 0x80); |
8263 | } |
8264 | else { |
8265 | offset = sxfer & 0x1f; /* bits 4-0 */ |
8266 | if (!scntl3 || !offset) |
8267 | scntl3 = np->rv_scntl3; |
8268 | |
8269 | scntl3 = (scntl3 & 0xf0) | (wide ? EWS0x08 : 0) | |
8270 | (np->rv_scntl3 & 0x07); |
8271 | } |
8272 | |
8273 | |
8274 | /* |
8275 | ** Deduce the value of controller sync period from scntl3. |
8276 | ** period is in tenths of nano-seconds. |
8277 | */ |
8278 | |
8279 | idiv = ((scntl3 >> 4) & 0x7); |
8280 | if ( offset && idiv) { |
8281 | if ((np->device_id == PCI_DEVICE_ID_LSI_53C10100x20) || |
8282 | (np->device_id == PCI_DEVICE_ID_LSI_53C1010_660x21)) { |
8283 | /* Note: If extra data hold clocks are used, |
8284 | * the formulas below must be modified. |
8285 | * When scntl4 == 0, ST mode. |
8286 | */ |
8287 | if (scntl4 & 0x80) |
8288 | tp->period = (2*div_10M[idiv-1])/np->clock_khz; |
8289 | else |
8290 | tp->period = (4*div_10M[idiv-1])/np->clock_khz; |
8291 | } |
8292 | else |
8293 | tp->period = (((sxfer>>5)+4)*div_10M[idiv-1])/np->clock_khz; |
8294 | } |
8295 | else |
8296 | tp->period = 0xffff; |
8297 | |
8298 | |
8299 | /* |
8300 | ** Stop there if sync parameters are unchanged |
8301 | */ |
8302 | if (tp->sval == sxfer && tp->wval == scntl3 && tp->uval == scntl4) return; |
8303 | tp->sval = sxfer; |
8304 | tp->wval = scntl3; |
8305 | tp->uval = scntl4; |
8306 | |
8307 | /* |
8308 | ** Bells and whistles ;-) |
8309 | ** Do not announce negotiations due to auto-sense, |
8310 | ** unless the user really wants us to be verbose. :) |
8311 | */ |
8312 | if ( bootverbose(np->verbose) < 2 && (cp->host_flagsphys.header.status[3] & HF_AUTO_SENSE(1u<<4))) |
8313 | goto next; |
8314 | PRINT_TARGET(np, target); |
8315 | if (offset) { |
8316 | unsigned f10 = 100000 << (tp->widedone ? tp->widedone -1 : 0); |
8317 | unsigned mb10 = (f10 + tp->period/2) / tp->period; |
8318 | char *scsi; |
8319 | |
8320 | /* |
8321 | ** Disable extended Sreq/Sack filtering |
8322 | */ |
8323 | if ((tp->period <= 2000) && |
8324 | (np->device_id != PCI_DEVICE_ID_LSI_53C10100x20) && |
8325 | (np->device_id != PCI_DEVICE_ID_LSI_53C1010_660x21)) |
8326 | OUTOFFB (nc_stest2, EXT)((*(volatile unsigned char *) ((char *)np->reg + (((size_t ) (&((struct ncr_reg *)0)->nc_stest2))))) = ((((*(volatile unsigned char *) ((char *)np->reg + (((size_t) (&((struct ncr_reg *)0)->nc_stest2))))) & ~(0x02))))); |
8327 | |
8328 | /* |
8329 | ** Bells and whistles ;-) |
8330 | */ |
8331 | if (tp->period < 250) scsi = "FAST-80"; |
8332 | else if (tp->period < 500) scsi = "FAST-40"; |
8333 | else if (tp->period < 1000) scsi = "FAST-20"; |
8334 | else if (tp->period < 2000) scsi = "FAST-10"; |
8335 | else scsi = "FAST-5"; |
8336 | |
8337 | printk ("%s %sSCSI %d.%d MB/s (%d ns, offset %d)\n", scsi, |
8338 | tp->widedone > 1 ? "WIDE " : "", |
8339 | mb10 / 10, mb10 % 10, tp->period / 10, offset); |
8340 | } else |
8341 | printk ("%sasynchronous.\n", tp->widedone > 1 ? "wide " : ""); |
8342 | next: |
8343 | /* |
8344 | ** set actual value and sync_status |
8345 | ** patch ALL ccbs of this target. |
8346 | */ |
8347 | ncr_set_sync_wide_status(np, target); |
8348 | } |
8349 | |
8350 | |
8351 | |
8352 | |
8353 | /*========================================================== |
8354 | ** |
8355 | ** Switch tagged mode for a target. |
8356 | ** |
8357 | **========================================================== |
8358 | */ |
8359 | |
8360 | static void ncr_setup_tags (ncb_p np, u_charunsigned char tn, u_charunsigned char ln) |
8361 | { |
8362 | tcb_p tp = &np->target[tn]; |
8363 | lcb_p lp = ncr_lp(np, tp, ln)(!ln) ? (tp)->l0p : (tp)->lmp ? (tp)->lmp[(ln)] : 0; |
8364 | u_shortunsigned short reqtags, maxdepth; |
8365 | |
8366 | /* |
8367 | ** Just in case ... |
8368 | */ |
8369 | if ((!tp) || (!lp)) |
8370 | return; |
8371 | |
8372 | /* |
8373 | ** If SCSI device queue depth is not yet set, leave here. |
8374 | */ |
8375 | if (!lp->scdev_depth) |
8376 | return; |
8377 | |
8378 | /* |
8379 | ** Do not allow more tags than the SCSI driver can queue |
8380 | ** for this device. |
8381 | ** Do not allow more tags than we can handle. |
8382 | */ |
8383 | maxdepth = lp->scdev_depth; |
8384 | if (maxdepth > lp->maxnxs) maxdepth = lp->maxnxs; |
8385 | if (lp->maxtags > maxdepth) lp->maxtags = maxdepth; |
8386 | if (lp->numtags > maxdepth) lp->numtags = maxdepth; |
8387 | |
8388 | /* |
8389 | ** only devices conformant to ANSI Version >= 2 |
8390 | ** only devices capable of tagged commands |
8391 | ** only if enabled by user .. |
8392 | */ |
8393 | if ((lp->inq_byte7 & INQ7_QUEUE(0x02)) && lp->numtags > 1) { |
8394 | reqtags = lp->numtags; |
8395 | } else { |
8396 | reqtags = 1; |
8397 | }; |
8398 | |
8399 | /* |
8400 | ** Update max number of tags |
8401 | */ |
8402 | lp->numtags = reqtags; |
8403 | if (lp->numtags > lp->maxtags) |
8404 | lp->maxtags = lp->numtags; |
8405 | |
8406 | /* |
8407 | ** If we want to switch tag mode, we must wait |
8408 | ** until no CCB is active. |
8409 | */ |
8410 | if (reqtags > 1 && lp->usetags) { /* Stay in tagged mode */ |
8411 | if (lp->queuedepth == reqtags) /* Already announced */ |
8412 | return; |
8413 | lp->queuedepth = reqtags; |
8414 | } |
8415 | else if (reqtags <= 1 && !lp->usetags) { /* Stay in untagged mode */ |
8416 | lp->queuedepth = reqtags; |
8417 | return; |
8418 | } |
8419 | else { /* Want to switch tag mode */ |
8420 | if (lp->busyccbs) /* If not yet safe, return */ |
8421 | return; |
8422 | lp->queuedepth = reqtags; |
8423 | lp->usetags = reqtags > 1 ? 1 : 0; |
8424 | } |
8425 | |
8426 | /* |
8427 | ** Patch the lun mini-script, according to tag mode. |
8428 | */ |
8429 | lp->resel_task = lp->usetags? |
8430 | cpu_to_scr(NCB_SCRIPT_PHYS(np, resel_tag))((np->p_script + ((size_t) (&((struct script *)0)-> resel_tag)))) : |
8431 | cpu_to_scr(NCB_SCRIPT_PHYS(np, resel_notag))((np->p_script + ((size_t) (&((struct script *)0)-> resel_notag)))); |
8432 | |
8433 | /* |
8434 | ** Announce change to user. |
8435 | */ |
8436 | if (bootverbose(np->verbose)) { |
8437 | PRINT_LUN(np, tn, ln); |
8438 | if (lp->usetags) |
8439 | printk("tagged command queue depth set to %d\n", reqtags); |
8440 | else |
8441 | printk("tagged command queueing disabled\n"); |
8442 | } |
8443 | } |
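
The clamping rules at the top of ncr_setup_tags() reduce to a couple of min operations followed by the "tagged only if supported and requested" test. A hedged standalone sketch (all names hypothetical):

/* Illustrative sketch only -- not driver code. */
unsigned clamp_tags(unsigned scdev_depth, unsigned maxnxs,
                    unsigned numtags, int can_queue)
{
        unsigned maxdepth = scdev_depth;             /* SCSI layer's limit  */

        if (maxdepth > maxnxs)  maxdepth = maxnxs;   /* what we can handle  */
        if (numtags > maxdepth) numtags  = maxdepth;

        /* Tagged queuing only if the device supports it and >1 requested. */
        return (can_queue && numtags > 1) ? numtags : 1;
}
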
8444 | |
8445 | /*---------------------------------------------------- |
8446 | ** |
8447 | ** handle user commands |
8448 | ** |
8449 | **---------------------------------------------------- |
8450 | */ |
8451 | |
8452 | #ifdef SCSI_NCR_USER_COMMAND_SUPPORT |
8453 | |
8454 | static void ncr_usercmd (ncb_p np) |
8455 | { |
8456 | u_charunsigned char t; |
8457 | tcb_p tp; |
8458 | int ln; |
8459 | u_longunsigned long size; |
8460 | |
8461 | switch (np->user.cmd) { |
8462 | case 0: return; |
8463 | |
8464 | case UC_SETDEBUG12: |
8465 | #ifdef SCSI_NCR_DEBUG_INFO_SUPPORT |
8466 | ncr_debug = np->user.data; |
8467 | #endif |
8468 | break; |
8469 | |
8470 | case UC_SETORDER13: |
8471 | np->order = np->user.data; |
8472 | break; |
8473 | |
8474 | case UC_SETVERBOSE17: |
8475 | np->verbose = np->user.data; |
8476 | break; |
8477 | |
8478 | default: |
8479 | /* |
8480 | ** We assume that other commands apply to targets. |
8481 | ** This should always be the case, and it avoids repeating |
8482 | ** the 4 lines below 5 times. |
8483 | */ |
8484 | for (t = 0; t < MAX_TARGET((16)); t++) { |
8485 | if (!((np->user.target >> t) & 1)) |
8486 | continue; |
8487 | tp = &np->target[t]; |
8488 | |
8489 | switch (np->user.cmd) { |
8490 | |
8491 | case UC_SETSYNC10: |
8492 | tp->usrsync = np->user.data; |
8493 | ncr_negotiate (np, tp); |
8494 | break; |
8495 | |
8496 | case UC_SETWIDE14: |
8497 | size = np->user.data; |
8498 | if (size > np->maxwide) |
8499 | size=np->maxwide; |
8500 | tp->usrwide = size; |
8501 | ncr_negotiate (np, tp); |
8502 | break; |
8503 | |
8504 | case UC_SETTAGS11: |
8505 | tp->usrtags = np->user.data; |
8506 | for (ln = 0; ln < MAX_LUN64; ln++) { |
8507 | lcb_p lp; |
8508 | lp = ncr_lp(np, tp, ln)(!ln) ? (tp)->l0p : (tp)->lmp ? (tp)->lmp[(ln)] : 0; |
8509 | if (!lp) |
8510 | continue; |
8511 | lp->numtags = np->user.data; |
8512 | lp->maxtags = lp->numtags; |
8513 | ncr_setup_tags (np, t, ln); |
8514 | } |
8515 | break; |
8516 | |
8517 | case UC_RESETDEV18: |
8518 | tp->to_reset = 1; |
8519 | np->istat_sem = SEM0x10; |
8520 | OUTB (nc_istat, SIGP|SEM)((*(volatile unsigned char *) ((char *)np->reg + (((size_t ) (&((struct ncr_reg *)0)->nc_istat))))) = (((0x20|0x10 )))); |
8521 | break; |
8522 | |
8523 | case UC_CLEARDEV19: |
8524 | for (ln = 0; ln < MAX_LUN64; ln++) { |
8525 | lcb_p lp; |
8526 | lp = ncr_lp(np, tp, ln)(!ln) ? (tp)->l0p : (tp)->lmp ? (tp)->lmp[(ln)] : 0; |
8527 | if (lp) |
8528 | lp->to_clear = 1; |
8529 | } |
8530 | np->istat_sem = SEM0x10; |
8531 | OUTB (nc_istat, SIGP|SEM)((*(volatile unsigned char *) ((char *)np->reg + (((size_t ) (&((struct ncr_reg *)0)->nc_istat))))) = (((0x20|0x10 )))); |
8532 | break; |
8533 | |
8534 | case UC_SETFLAG15: |
8535 | tp->usrflag = np->user.data; |
8536 | break; |
8537 | } |
8538 | } |
8539 | break; |
8540 | } |
8541 | np->user.cmd=0; |
8542 | } |
8543 | #endif |
8544 | |
8545 | /*========================================================== |
8546 | ** |
8547 | ** |
8548 | ** ncr timeout handler. |
8549 | ** |
8550 | ** |
8551 | **========================================================== |
8552 | ** |
8553 | ** Misused to keep the driver running when |
8554 | ** interrupts are not configured correctly. |
8555 | ** |
8556 | **---------------------------------------------------------- |
8557 | */ |
8558 | |
8559 | static void ncr_timeout (ncb_p np) |
8560 | { |
8561 | u_longunsigned long thistime = ktime_get(0)(jiffies + (unsigned long) 0); |
8562 | |
8563 | /* |
8564 | ** If release process in progress, let's go |
8565 | ** Set the release stage from 1 to 2 to synchronize |
8566 | ** with the release process. |
8567 | */ |
8568 | |
8569 | if (np->release_stage) { |
8570 | if (np->release_stage == 1) np->release_stage = 2; |
8571 | return; |
8572 | } |
8573 | |
8574 | #ifdef SCSI_NCR_PCIQ_BROKEN_INTR |
8575 | np->timer.expires = ktime_get((HZ+9)/10)(jiffies + (unsigned long) (100 +9)/10); |
8576 | #else |
8577 | np->timer.expires = ktime_get(SCSI_NCR_TIMER_INTERVAL)(jiffies + (unsigned long) (100)); |
8578 | #endif |
8579 | add_timer(&np->timer); |
8580 | |
8581 | /* |
8582 | ** If we are resetting the ncr, wait for settle_time before |
8583 | ** clearing it. Then command processing will be resumed. |
8584 | */ |
8585 | if (np->settle_time) { |
8586 | if (np->settle_time <= thistime) { |
8587 | if (bootverbose(np->verbose) > 1) |
8588 | printk("%s: command processing resumed\n", ncr_name(np)); |
8589 | np->settle_time = 0; |
8590 | requeue_waiting_list(np)process_waiting_list((np), 0x00); |
8591 | } |
8592 | return; |
8593 | } |
8594 | |
8595 | /* |
8596 | ** Nothing to do for now, but that may come. |
8597 | */ |
8598 | if (np->lasttime + 4*HZ100 < thistime) { |
8599 | np->lasttime = thistime; |
8600 | } |
8601 | |
8602 | #ifdef SCSI_NCR_PCIQ_MAY_MISS_COMPLETIONS |
8603 | /* |
8604 | ** Some way-broken PCI bridges may lead to |
8605 | ** completions being lost when the clearing |
8606 | ** of the INTFLY flag by the CPU occurs |
8607 | ** concurrently with the chip raising this flag. |
8608 | ** If this ever happens, lost completions will |
8609 | ** be reaped here. |
8610 | */ |
8611 | ncr_wakeup_done(np); |
8612 | #endif |
8613 | |
8614 | #ifdef SCSI_NCR_PCIQ_BROKEN_INTR |
8615 | if (INB(nc_istat)(*(volatile unsigned char *) ((char *)np->reg + (((size_t) (&((struct ncr_reg *)0)->nc_istat))))) & (INTF0x04|SIP0x02|DIP0x01)) { |
8616 | |
8617 | /* |
8618 | ** Process pending interrupts. |
8619 | */ |
8620 | if (DEBUG_FLAGSncr_debug & DEBUG_TINY(0x0080)) printk ("{"); |
8621 | ncr_exception (np); |
8622 | if (DEBUG_FLAGSncr_debug & DEBUG_TINY(0x0080)) printk ("}"); |
8623 | } |
8624 | #endif /* SCSI_NCR_PCIQ_BROKEN_INTR */ |
8625 | } |
8626 | |
8627 | /*========================================================== |
8628 | ** |
8629 | ** log message for real hard errors |
8630 | ** |
8631 | ** "ncr0 targ 0?: ERROR (ds:si) (so-si-sd) (sxfer/scntl3) @ name (dsp:dbc)." |
8632 | ** " reg: r0 r1 r2 r3 r4 r5 r6 ..... rf." |
8633 | ** |
8634 | ** exception register: |
8635 | ** ds: dstat |
8636 | ** si: sist |
8637 | ** |
8638 | ** SCSI bus lines: |
8639 | ** so: control lines as driven by NCR. |
8640 | ** si: control lines as seen by NCR. |
8641 | ** sd: scsi data lines as seen by NCR. |
8642 | ** |
8643 | ** wide/fastmode: |
8644 | ** sxfer: (see the manual) |
8645 | ** scntl3: (see the manual) |
8646 | ** |
8647 | ** current script command: |
8648 | ** dsp: script address (relative to start of script). |
8649 | ** dbc: first word of script command. |
8650 | ** |
8651 | ** First 24 registers of the chip: |
8652 | ** r0..rf |
8653 | ** |
8654 | **========================================================== |
8655 | */ |
8656 | |
8657 | static void ncr_log_hard_error(ncb_p np, u_shortunsigned short sist, u_charunsigned char dstat) |
8658 | { |
8659 | u_int32 dsp; |
8660 | int script_ofs; |
8661 | int script_size; |
8662 | char *script_name; |
8663 | u_charunsigned char *script_base; |
8664 | int i; |
8665 | |
8666 | dsp = INL (nc_dsp)(*(volatile unsigned int *) ((char *)np->reg + (((size_t) ( &((struct ncr_reg *)0)->nc_dsp))))); |
8667 | |
8668 | if (dsp > np->p_script && dsp <= np->p_script + sizeof(struct script)) { |
8669 | script_ofs = dsp - np->p_script; |
8670 | script_size = sizeof(struct script); |
8671 | script_base = (u_charunsigned char *) np->script0; |
8672 | script_name = "script"; |
8673 | } |
8674 | else if (np->p_scripth < dsp && |
8675 | dsp <= np->p_scripth + sizeof(struct scripth)) { |
8676 | script_ofs = dsp - np->p_scripth; |
8677 | script_size = sizeof(struct scripth); |
8678 | script_base = (u_charunsigned char *) np->scripth0; |
8679 | script_name = "scripth"; |
8680 | } else { |
8681 | script_ofs = dsp; |
8682 | script_size = 0; |
8683 | script_base = 0; |
8684 | script_name = "mem"; |
8685 | } |
8686 | |
8687 | printk ("%s:%d: ERROR (%x:%x) (%x-%x-%x) (%x/%x) @ (%s %x:%08x).\n", |
8688 | ncr_name (np), (unsigned)INB (nc_sdid)(*(volatile unsigned char *) ((char *)np->reg + (((size_t) (&((struct ncr_reg *)0)->nc_sdid)))))&0x0f, dstat, sist, |
8689 | (unsigned)INB (nc_socl)(*(volatile unsigned char *) ((char *)np->reg + (((size_t) (&((struct ncr_reg *)0)->nc_socl))))), (unsigned)INB (nc_sbcl)(*(volatile unsigned char *) ((char *)np->reg + (((size_t) (&((struct ncr_reg *)0)->nc_sbcl))))), (unsigned)INB (nc_sbdl)(*(volatile unsigned char *) ((char *)np->reg + (((size_t) (&((struct ncr_reg *)0)->nc_sbdl))))), |
8690 | (unsigned)INB (nc_sxfer)(*(volatile unsigned char *) ((char *)np->reg + (((size_t) (&((struct ncr_reg *)0)->nc_sxfer))))),(unsigned)INB (nc_scntl3)(*(volatile unsigned char *) ((char *)np->reg + (((size_t) (&((struct ncr_reg *)0)->nc_scntl3))))), script_name, script_ofs, |
8691 | (unsigned)INL (nc_dbc)(*(volatile unsigned int *) ((char *)np->reg + (((size_t) ( &((struct ncr_reg *)0)->nc_dbc)))))); |
8692 | |
8693 | if (((script_ofs & 3) == 0) && |
8694 | (unsigned)script_ofs < script_size) { |
8695 | printk ("%s: script cmd = %08x\n", ncr_name(np), |
8696 | scr_to_cpu((int) *(ncrcmd *)(script_base + script_ofs))((int) *(ncrcmd *)(script_base + script_ofs))); |
8697 | } |
8698 | |
8699 | printk ("%s: regdump:", ncr_name(np)); |
8700 | for (i=0; i<24;i++) |
8701 | printk (" %02x", (unsigned)INB_OFF(i)(*(volatile unsigned char *) ((char *)np->reg + (i)))); |
8702 | printk (".\n"); |
8703 | } |
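
The region test at the start of ncr_log_hard_error() decides which SCRIPTS image the failing address (dsp) falls into. A hedged sketch of that classification, with the script bases and sizes passed in as plain numbers rather than read from the ncb:

/* Illustrative sketch only -- not driver code. */
const char *classify_dsp(unsigned long dsp,
                         unsigned long script_base, unsigned long script_size,
                         unsigned long scripth_base, unsigned long scripth_size)
{
        if (dsp > script_base && dsp <= script_base + script_size)
                return "script";        /* main SCRIPTS image      */
        if (dsp > scripth_base && dsp <= scripth_base + scripth_size)
                return "scripth";       /* high/helper SCRIPTS     */
        return "mem";                   /* somewhere else entirely */
}
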
8704 | |
8705 | /*============================================================ |
8706 | ** |
8707 | ** ncr chip exception handler. |
8708 | ** |
8709 | **============================================================ |
8710 | ** |
8711 | ** In normal situations, interrupt conditions occur one at |
8712 | ** a time. But when something bad happens on the SCSI BUS, |
8713 | ** the chip may raise several interrupt flags before |
8714 | ** stopping and interrupting the CPU. The additional |
8715 | ** interrupt flags are stacked in some extra registers |
8716 | ** after the SIP and/or DIP flag has been raised in the |
8717 | ** ISTAT. After the CPU has read the interrupt condition |
8718 | ** flag from SIST or DSTAT, the chip unstacks the other |
8719 | ** interrupt flags and sets the corresponding bits in |
8720 | ** SIST or DSTAT. Since the chip starts stacking once the |
8721 | ** SIP or DIP flag is set, there is a small window of time |
8722 | ** where the stacking does not occur. |
8723 | ** |
8724 | ** Typically, multiple interrupt conditions may happen in |
8725 | ** the following situations: |
8726 | ** |
8727 | ** - SCSI parity error + Phase mismatch (PAR|MA) |
8728 | ** When a parity error is detected in an input phase |
8729 | ** and the device switches to msg-in phase inside a |
8730 | ** block MOV. |
8731 | ** - SCSI parity error + Unexpected disconnect (PAR|UDC) |
8732 | ** When a stupid device does not want to handle the |
8733 | ** recovery of a SCSI parity error. |
8734 | ** - Some combinations of STO, PAR, UDC, ... |
8735 | ** When using non-compliant SCSI stuff, when the user is |
8736 | ** doing non-compliant hot tampering on the BUS, when |
8737 | ** something really bad happens to a device, etc ... |
8738 | ** |
8739 | ** The heuristic suggested by SYMBIOS to handle |
8740 | ** multiple interrupts is to try unstacking all |
8741 | ** interrupt conditions and to handle them with a |
8742 | ** priority based on error severity. |
8743 | ** This will work when the unstacking has been |
8744 | ** successful, but we cannot be 100 % sure of that, |
8745 | ** since the CPU may have been faster to unstack than |
8746 | ** the chip is able to stack. Hmmm ... But it seems that |
8747 | ** such a situation is very unlikely to happen. |
8748 | ** |
8749 | ** If this happens, for example an STO caught by the CPU |
8750 | ** and then a UDC happening before the CPU has restarted |
8751 | ** the SCRIPTS, the driver may wrongly complete the |
8752 | ** same command on UDC, since the SCRIPTS didn't restart |
8753 | ** and the DSA still points to the same command. |
8754 | ** We avoid this situation by setting the DSA to an |
8755 | ** invalid value when the CCB is completed and before |
8756 | ** restarting the SCRIPTS. |
8757 | ** |
8758 | ** Another issue is that we need some section of our |
8759 | ** recovery procedures to be somehow uninterruptible and |
8760 | ** that the SCRIPTS processor does not provide such a |
8761 | ** feature. For this reason, we prefer to handle recovery |
8762 | ** from the C code and check against some SCRIPTS |
8763 | ** critical sections from the C code. |
8764 | ** |
8765 | ** Hopefully, the interrupt handling of the driver is now |
8766 | ** able to resist weird BUS error conditions, but do not |
8767 | ** ask me for any guarantee that it will never fail. :-) |
8768 | ** Use it at your own discretion and risk. |
8769 | ** |
8770 | **============================================================ |
8771 | */ |
8772 | |
8773 | void ncr_exception (ncb_p np) |
8774 | { |
8775 | u_charunsigned char istat, istatc; |
8776 | u_charunsigned char dstat; |
8777 | u_shortunsigned short sist; |
8778 | int i; |
8779 | |
8780 | /* |
8781 | ** interrupt on the fly ? |
8782 | ** |
8783 | ** A `dummy read' is needed to ensure that the |
8784 | ** clear of the INTF flag reaches the device |
8785 | ** before the scanning of the DONE queue. |
8786 | */ |
8787 | istat = INB (nc_istat)(*(volatile unsigned char *) ((char *)np->reg + (((size_t) (&((struct ncr_reg *)0)->nc_istat))))); |
8788 | if (istat & INTF0x04) { |
8789 | OUTB (nc_istat, (istat & SIGP) | INTF | np->istat_sem)((*(volatile unsigned char *) ((char *)np->reg + (((size_t ) (&((struct ncr_reg *)0)->nc_istat))))) = ((((istat & 0x20) | 0x04 | np->istat_sem)))); |
8790 | istat = INB (nc_istat)(*(volatile unsigned char *) ((char *)np->reg + (((size_t) (&((struct ncr_reg *)0)->nc_istat))))); /* DUMMY READ */ |
8791 | if (DEBUG_FLAGSncr_debug & DEBUG_TINY(0x0080)) printk ("F "); |
8792 | (void)ncr_wakeup_done (np); |
8793 | }; |
8794 | |
8795 | if (!(istat & (SIP0x02|DIP0x01))) |
8796 | return; |
8797 | |
8798 | #if 0 /* We should never get this one */ |
8799 | if (istat & CABRT0x80) |
8800 | OUTB (nc_istat, CABRT)((*(volatile unsigned char *) ((char *)np->reg + (((size_t ) (&((struct ncr_reg *)0)->nc_istat))))) = (((0x80)))); |
8801 | #endif |
8802 | |
8803 | /* |
8804 | ** Steinbach's Guideline for Systems Programming: |
8805 | ** Never test for an error condition you don't know how to handle. |
8806 | */ |
8807 | |
8808 | /*======================================================== |
8809 | ** PAR and MA interrupts may occur at the same time, |
8810 | ** and we need to know of both in order to handle |
8811 | ** this situation properly. We try to unstack SCSI |
8812 | ** interrupts for that reason. BTW, I dislike a LOT |
8813 | ** such a loop inside the interrupt routine. |
8814 | ** Even if DMA interrupt stacking is very unlikely to |
8815 | ** happen, we also try unstacking these ones, since |
8816 | ** this has no performance impact. |
8817 | **========================================================= |
8818 | */ |
8819 | sist = 0; |
8820 | dstat = 0; |
8821 | istatc = istat; |
8822 | do { |
8823 | if (istatc & SIP0x02) |
8824 | sist |= INW (nc_sist)(*(volatile unsigned short *) ((char *)np->reg + (((size_t ) (&((struct ncr_reg *)0)->nc_sist))))); |
8825 | if (istatc & DIP0x01) |
8826 | dstat |= INB (nc_dstat)(*(volatile unsigned char *) ((char *)np->reg + (((size_t) (&((struct ncr_reg *)0)->nc_dstat))))); |
8827 | istatc = INB (nc_istat)(*(volatile unsigned char *) ((char *)np->reg + (((size_t) (&((struct ncr_reg *)0)->nc_istat))))); |
8828 | istat |= istatc; |
8829 | } while (istatc & (SIP0x02|DIP0x01)); |
8830 | |
8831 | if (DEBUG_FLAGSncr_debug & DEBUG_TINY(0x0080)) |
8832 | printk ("<%d|%x:%x|%x:%x>", |
8833 | (int)INB(nc_scr0)(*(volatile unsigned char *) ((char *)np->reg + (((size_t) (&((struct ncr_reg *)0)->nc_scr0))))), |
8834 | dstat,sist, |
8835 | (unsigned)INL(nc_dsp)(*(volatile unsigned int *) ((char *)np->reg + (((size_t) ( &((struct ncr_reg *)0)->nc_dsp))))), |
8836 | (unsigned)INL(nc_dbc)(*(volatile unsigned int *) ((char *)np->reg + (((size_t) ( &((struct ncr_reg *)0)->nc_dbc)))))); |
8837 | |
8838 | /* |
8839 | ** On paper, a memory barrier may be needed here. |
8840 | ** And since we are paranoid ... :) |
8841 | */ |
8842 | MEMORY_BARRIER()do { ; } while(0); |
8843 | |
8844 | /*======================================================== |
8845 | ** First, interrupts we want to service cleanly. |
8846 | ** |
8847 | ** Phase mismatch (MA) is the most frequent interrupt |
8848 | ** for chips earlier than the 896 and so we have to service |
8849 | ** it as quickly as possible. |
8850 | ** A SCSI parity error (PAR) may be combined with a phase |
8851 | ** mismatch condition (MA). |
8852 | ** Programmed interrupts (SIR) are used to call the C code |
8853 | ** from SCRIPTS. |
8854 | ** The single step interrupt (SSI) is not used in this |
8855 | ** driver. |
8856 | **========================================================= |
8857 | */ |
8858 | |
8859 | if (!(sist & (STO0x0400|GEN0x0200|HTH0x0100|SGE0x08|UDC0x04|SBMC0x1000|RST0x02)) && |
8860 | !(dstat & (MDPE0x40|BF0x20|ABRT0x10|IID0x01))) { |
8861 | if (sist & PAR0x01) ncr_int_par (np, sist); |
8862 | else if (sist & MA0x80) ncr_int_ma (np); |
8863 | else if (dstat & SIR0x04) ncr_int_sir (np); |
8864 | else if (dstat & SSI0x08) OUTONB_STD ()do { do { ; } while(0); ((*(volatile unsigned char *) ((char * )np->reg + (((size_t) (&((struct ncr_reg *)0)->nc_dcntl ))))) = ((((*(volatile unsigned char *) ((char *)np->reg + (((size_t) (&((struct ncr_reg *)0)->nc_dcntl))))) | ( (0x04|0x01)))))); } while (0); |
8865 | else goto unknown_int; |
8866 | return; |
8867 | }; |
8868 | |
8869 | /*======================================================== |
8870 | ** Now, interrupts that do not happen in normal |
8871 | ** situations and that we may need to recover from. |
8872 | ** |
8873 | ** On SCSI RESET (RST), we reset everything. |
8874 | ** On SCSI BUS MODE CHANGE (SBMC), we complete all |
8875 | ** active CCBs with RESET status, prepare all devices |
8876 | ** for negotiating again and restart the SCRIPTS. |
8877 | ** On STO and UDC, we complete the CCB with the corres- |
8878 | ** ponding status and restart the SCRIPTS. |
8879 | **========================================================= |
8880 | */ |
8881 | |
8882 | if (sist & RST0x02) { |
8883 | ncr_init (np, 1, bootverbose(np->verbose) ? "scsi reset" : NULL((void *) 0), HS_RESET(6|(0x80))); |
8884 | return; |
8885 | }; |
8886 | |
8887 | OUTB (nc_ctest3, np->rv_ctest3 | CLF)((*(volatile unsigned char *) ((char *)np->reg + (((size_t ) (&((struct ncr_reg *)0)->nc_ctest3))))) = (((np-> rv_ctest3 | 0x04)))); /* clear dma fifo */ |
8888 | OUTB (nc_stest3, TE|CSF)((*(volatile unsigned char *) ((char *)np->reg + (((size_t ) (&((struct ncr_reg *)0)->nc_stest3))))) = (((0x80|0x02 )))); /* clear scsi fifo */ |
8889 | |
8890 | if (!(sist & (GEN0x0200|HTH0x0100|SGE0x08)) && |
8891 | !(dstat & (MDPE0x40|BF0x20|ABRT0x10|IID0x01))) { |
8892 | if (sist & SBMC0x1000) ncr_int_sbmc (np); |
8893 | else if (sist & STO0x0400) ncr_int_sto (np); |
8894 | else if (sist & UDC0x04) ncr_int_udc (np); |
8895 | else goto unknown_int; |
8896 | return; |
8897 | }; |
8898 | |
8899 | /*========================================================= |
8900 | ** Now, interrupts we are not able to recover cleanly. |
8901 | ** |
8902 | ** Do the register dump. |
8903 | ** Log message for hard errors. |
8904 | ** Reset everything. |
8905 | **========================================================= |
8906 | */ |
8907 | if (ktime_exp(np->regtime)((long)(jiffies) - (long)(np->regtime) >= 0)) { |
8908 | np->regtime = ktime_get(10*HZ)(jiffies + (unsigned long) 10*100); |
8909 | for (i = 0; i<sizeof(np->regdump); i++) |
8910 | ((char*)&np->regdump)[i] = INB_OFF(i)(*(volatile unsigned char *) ((char *)np->reg + (i))); |
8911 | np->regdump.nc_dstat = dstat; |
8912 | np->regdump.nc_sist = sist; |
8913 | }; |
8914 | |
8915 | ncr_log_hard_error(np, sist, dstat); |
8916 | |
8917 | if ((np->device_id == PCI_DEVICE_ID_LSI_53C10100x20) || |
8918 | (np->device_id == PCI_DEVICE_ID_LSI_53C1010_660x21)) { |
8919 | u_charunsigned char ctest4_o, ctest4_m; |
8920 | u_charunsigned char shadow; |
8921 | |
8922 | /* |
8923 | * Get shadow register data |
8924 | * Write 1 to ctest4 |
8925 | */ |
8926 | ctest4_o = INB(nc_ctest4)(*(volatile unsigned char *) ((char *)np->reg + (((size_t) (&((struct ncr_reg *)0)->nc_ctest4))))); |
8927 | |
8928 | OUTB(nc_ctest4, ctest4_o | 0x10)((*(volatile unsigned char *) ((char *)np->reg + (((size_t ) (&((struct ncr_reg *)0)->nc_ctest4))))) = (((ctest4_o | 0x10)))); |
8929 | |
8930 | ctest4_m = INB(nc_ctest4)(*(volatile unsigned char *) ((char *)np->reg + (((size_t) (&((struct ncr_reg *)0)->nc_ctest4))))); |
8931 | shadow = INW_OFF(0x42)(*(volatile unsigned short *) ((char *)np->reg + (0x42))); |
8932 | |
8933 | OUTB(nc_ctest4, ctest4_o)((*(volatile unsigned char *) ((char *)np->reg + (((size_t ) (&((struct ncr_reg *)0)->nc_ctest4))))) = (((ctest4_o )))); |
8934 | |
8935 | printk("%s: ctest4/sist original 0x%x/0x%X mod: 0x%X/0x%x\n", |
8936 | ncr_name(np), ctest4_o, sist, ctest4_m, shadow); |
8937 | } |
8938 | |
8939 | if ((sist & (GEN0x0200|HTH0x0100|SGE0x08)) || |
8940 | (dstat & (MDPE0x40|BF0x20|ABRT0x10|IID0x01))) { |
8941 | ncr_start_reset(np); |
8942 | return; |
8943 | }; |
8944 | |
8945 | unknown_int: |
8946 | /*========================================================= |
8947 | ** We have just missed the cause of the interrupt. :( |
8948 | ** Print a message. The timeout will do the real work. |
8949 | **========================================================= |
8950 | */ |
8951 | printk( "%s: unknown interrupt(s) ignored, " |
8952 | "ISTAT=0x%x DSTAT=0x%x SIST=0x%x\n", |
8953 | ncr_name(np), istat, dstat, sist); |
8954 | } |
8955 | |
8956 | |
8957 | /*========================================================== |
8958 | ** |
8959 | ** generic recovery from scsi interrupt |
8960 | ** |
8961 | **========================================================== |
8962 | ** |
8963 | ** The doc says that when the chip gets an SCSI interrupt, |
8964 | ** it tries to stop in an orderly fashion, by completing |
8965 | ** an instruction fetch that had started or by flushing |
8966 | ** the DMA fifo for a write to memory that was executing. |
8967 | ** Such a fashion is not enough to know if the instruction |
8968 | ** that was just before the current DSP value has been |
8969 | ** executed or not. |
8970 | ** |
8971 | ** There are 3 small SCRIPTS sections that deal with the |
8972 | ** start queue and the done queue that may break any |
8973 | ** assumption from the C code if we are interrupted |
8974 | ** inside, so we reset if it happens. Btw, since these |
8975 | ** SCRIPTS sections are executed while the SCRIPTS hasn't |
8976 | ** started SCSI operations, it is very unlikely to happen. |
8977 | ** |
8978 | ** All the driver data structures are supposed to be |
8979 | ** allocated from the same 4 GB memory window, so there |
8980 | ** is a 1 to 1 relationship between DSA and driver data |
8981 | ** structures. Since we are careful :) to invalidate the |
8982 | ** DSA when we complete a command or when the SCRIPTS |
8983 | ** pushes a DSA into a queue, we can trust it when it |
8984 | ** points to a CCB. |
8985 | ** |
8986 | **---------------------------------------------------------- |
8987 | */ |
8988 | static void ncr_recover_scsi_int (ncb_p np, u_charunsigned char hsts) |
8989 | { |
8990 | u_int32 dsp = INL (nc_dsp)(*(volatile unsigned int *) ((char *)np->reg + (((size_t) ( &((struct ncr_reg *)0)->nc_dsp))))); |
8991 | u_int32 dsa = INL (nc_dsa)(*(volatile unsigned int *) ((char *)np->reg + (((size_t) ( &((struct ncr_reg *)0)->nc_dsa))))); |
8992 | ccb_p cp = ncr_ccb_from_dsa(np, dsa); |
8993 | |
8994 | /* |
8995 | ** If we haven't been interrupted inside the SCRIPTS |
8996 | ** critical pathes, we can safely restart the SCRIPTS |
8997 | ** and trust the DSA value if it matches a CCB. |
8998 | */ |
8999 | if ((!(dsp > NCB_SCRIPT_PHYS (np, getjob_begin)(np->p_script + ((size_t) (&((struct script *)0)->getjob_begin ))) && |
9000 | dsp < NCB_SCRIPT_PHYS (np, getjob_end)(np->p_script + ((size_t) (&((struct script *)0)->getjob_end ))) + 1)) && |
9001 | (!(dsp > NCB_SCRIPT_PHYS (np, ungetjob)(np->p_script + ((size_t) (&((struct script *)0)->ungetjob ))) && |
9002 | dsp < NCB_SCRIPT_PHYS (np, reselect)(np->p_script + ((size_t) (&((struct script *)0)->reselect ))) + 1)) && |
9003 | (!(dsp > NCB_SCRIPTH_PHYS (np, sel_for_abort)(np->p_scripth + ((size_t) (&((struct scripth *)0)-> sel_for_abort))) && |
9004 | dsp < NCB_SCRIPTH_PHYS (np, sel_for_abort_1)(np->p_scripth + ((size_t) (&((struct scripth *)0)-> sel_for_abort_1))) + 1)) && |
9005 | (!(dsp > NCB_SCRIPT_PHYS (np, done)(np->p_script + ((size_t) (&((struct script *)0)->done ))) && |
9006 | dsp < NCB_SCRIPT_PHYS (np, done_end)(np->p_script + ((size_t) (&((struct script *)0)->done_end ))) + 1))) { |
9007 | if (cp) { |
9008 | cp->host_statusphys.header.status[1] = hsts; |
9009 | ncr_complete (np, cp); |
9010 | } |
9011 | OUTL (nc_dsa, DSA_INVALID)((*(volatile unsigned int *) ((char *)np->reg + (((size_t) (&((struct ncr_reg *)0)->nc_dsa))))) = (((0xffffffff) ))); |
9012 | OUTB (nc_ctest3, np->rv_ctest3 | CLF)((*(volatile unsigned char *) ((char *)np->reg + (((size_t ) (&((struct ncr_reg *)0)->nc_ctest3))))) = (((np-> rv_ctest3 | 0x04)))); /* clear dma fifo */ |
9013 | OUTB (nc_stest3, TE|CSF)((*(volatile unsigned char *) ((char *)np->reg + (((size_t ) (&((struct ncr_reg *)0)->nc_stest3))))) = (((0x80|0x02 )))); /* clear scsi fifo */ |
9014 | OUTL_DSP (NCB_SCRIPT_PHYS (np, start))do { do { ; } while(0); ((*(volatile unsigned int *) ((char * )np->reg + (((size_t) (&((struct ncr_reg *)0)->nc_dsp ))))) = (((((np->p_script + ((size_t) (&((struct script *)0)->start)))))))); } while (0); |
9015 | } |
9016 | else |
9017 | goto reset_all; |
9018 | |
9019 | return; |
9020 | |
9021 | reset_all: |
9022 | ncr_start_reset(np); |
9023 | } |
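
The long compound condition in ncr_recover_scsi_int() amounts to "dsp does not lie inside any of four critical SCRIPTS windows". A hedged sketch of that test, with hypothetical region bounds supplied by the caller:

/* Illustrative sketch only -- not driver code. */
struct script_region { unsigned long begin, end; };

int dsp_in_critical_section(unsigned long dsp,
                            const struct script_region *r, int nr)
{
        int i;

        for (i = 0; i < nr; i++)
                if (dsp > r[i].begin && dsp <= r[i].end)
                        return 1;       /* unsafe: reset instead of recovering */
        return 0;                       /* safe to trust DSA and restart       */
}
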
9024 | |
9025 | /*========================================================== |
9026 | ** |
9027 | ** ncr chip exception handler for selection timeout |
9028 | ** |
9029 | **========================================================== |
9030 | ** |
9031 | ** There seems to be a bug in the 53c810. |
9032 | ** Although a STO-Interrupt is pending, |
9033 | ** it continues executing script commands. |
9034 | ** But it will fail and interrupt (IID) on |
9035 | ** the next instruction where it's looking |
9036 | ** for a valid phase. |
9037 | ** |
9038 | **---------------------------------------------------------- |
9039 | */ |
9040 | |
9041 | void ncr_int_sto (ncb_p np) |
9042 | { |
9043 | u_int32 dsp = INL (nc_dsp)(*(volatile unsigned int *) ((char *)np->reg + (((size_t) ( &((struct ncr_reg *)0)->nc_dsp))))); |
9044 | |
9045 | if (DEBUG_FLAGSncr_debug & DEBUG_TINY(0x0080)) printk ("T"); |
9046 | |
9047 | if (dsp == NCB_SCRIPT_PHYS (np, wf_sel_done)(np->p_script + ((size_t) (&((struct script *)0)->wf_sel_done ))) + 8 || |
9048 | !(driver_setup.recovery & 1)) |
9049 | ncr_recover_scsi_int(np, HS_SEL_TIMEOUT(5|(0x80))); |
9050 | else |
9051 | ncr_start_reset(np); |
9052 | } |
9053 | |
9054 | /*========================================================== |
9055 | ** |
9056 | ** ncr chip exception handler for unexpected disconnect |
9057 | ** |
9058 | **========================================================== |
9059 | ** |
9060 | **---------------------------------------------------------- |
9061 | */ |
9062 | void ncr_int_udc (ncb_p np) |
9063 | { |
9064 | u_int32 dsa = INL (nc_dsa)(*(volatile unsigned int *) ((char *)np->reg + (((size_t) ( &((struct ncr_reg *)0)->nc_dsa))))); |
9065 | ccb_p cp = ncr_ccb_from_dsa(np, dsa); |
9066 | tcb_p tp = &np->target[cp->target]; |
9067 | |
9068 | /* |
9069 | * Fix-up. Some disks respond to a PPR negotiation with |
9070 | * a bus free instead of a message reject. |
9071 | * Disable PPR negotiation if this is the first time |
9072 | * PPR negotiation has been tried. |
9073 | */ |
9074 | |
9075 | if (tp->ppr_negotiation == 1) |
9076 | tp->ppr_negotiation = 0; |
9077 | |
9078 | printk ("%s: unexpected disconnect\n", ncr_name(np)); |
9079 | ncr_recover_scsi_int(np, HS_UNEXPECTED(10|(0x80))); |
9080 | } |
9081 | |
9082 | /*========================================================== |
9083 | ** |
9084 | ** ncr chip exception handler for SCSI bus mode change |
9085 | ** |
9086 | **========================================================== |
9087 | ** |
9088 | ** spi2-r12 11.2.3 says a transceiver mode change must |
9089 | ** generate a reset event and a device that detects a reset |
9090 | ** event shall initiate a hard reset. It says also that a |
9091 | ** device that detects a mode change shall set data transfer |
9092 | ** mode to eight bit asynchronous, etc... |
9093 | ** So, just resetting should be enough. |
9094 | ** |
9095 | ** |
9096 | **---------------------------------------------------------- |
9097 | */ |
9098 | |
9099 | static void ncr_int_sbmc (ncb_p np) |
9100 | { |
9101 | u_charunsigned char scsi_mode = INB (nc_stest4)(*(volatile unsigned char *) ((char *)np->reg + (((size_t) (&((struct ncr_reg *)0)->nc_stest4))))) & SMODE0xc0; |
9102 | |
9103 | printk("%s: SCSI bus mode change from %x to %x.\n", |
9104 | ncr_name(np), np->scsi_mode, scsi_mode); |
9105 | |
9106 | np->scsi_mode = scsi_mode; |
9107 | |
9108 | |
9109 | /* |
9110 | ** Suspend command processing for 1 second and |
9111 | ** reinitialize all except the chip. |
9112 | */ |
9113 | np->settle_time = ktime_get(1*HZ)(jiffies + (unsigned long) 1*100); |
9114 | ncr_init (np, 0, bootverbose(np->verbose) ? "scsi mode change" : NULL((void *) 0), HS_RESET(6|(0x80))); |
9115 | } |
9116 | |
9117 | /*========================================================== |
9118 | ** |
9119 | ** ncr chip exception handler for SCSI parity error. |
9120 | ** |
9121 | **========================================================== |
9122 | ** |
9123 | ** When the chip detects a SCSI parity error and is |
9124 | ** currently executing a (CH)MOV instruction, it does |
9125 | ** not interrupt immediately, but tries to finish the |
9126 | ** transfer of the current scatter entry before |
9127 | ** interrupting. The following situations may occur: |
9128 | ** |
9129 | ** - The complete scatter entry has been transferred |
9130 | ** without the device having changed phase. |
9131 | ** The chip will then interrupt with the DSP pointing |
9132 | ** to the instruction that follows the MOV. |
9133 | ** |
9134 | ** - A phase mismatch occurs before the MOV finished |
9135 | ** and phase errors are to be handled by the C code. |
9136 | ** The chip will then interrupt with both PAR and MA |
9137 | ** conditions set. |
9138 | ** |
9139 | ** - A phase mismatch occurs before the MOV finished and |
9140 | ** phase errors are to be handled by SCRIPTS (895A or 896). |
9141 | ** The chip will load the DSP with the phase mismatch |
9142 | ** JUMP address and interrupt the host processor. |
9143 | ** |
9144 | **---------------------------------------------------------- |
9145 | */ |
9146 | |
9147 | static void ncr_int_par (ncb_p np, u_shortunsigned short sist) |
9148 | { |
9149 | u_charunsigned char hsts = INB (HS_PRT)(*(volatile unsigned char *) ((char *)np->reg + (((size_t) (&((struct ncr_reg *)0)->nc_scr1))))); |
9150 | u_int32 dsp = INL (nc_dsp)(*(volatile unsigned int *) ((char *)np->reg + (((size_t) ( &((struct ncr_reg *)0)->nc_dsp))))); |
9151 | u_int32 dbc = INL (nc_dbc)(*(volatile unsigned int *) ((char *)np->reg + (((size_t) ( &((struct ncr_reg *)0)->nc_dbc))))); |
9152 | u_int32 dsa = INL (nc_dsa)(*(volatile unsigned int *) ((char *)np->reg + (((size_t) ( &((struct ncr_reg *)0)->nc_dsa))))); |
9153 | u_charunsigned char sbcl = INB (nc_sbcl)(*(volatile unsigned char *) ((char *)np->reg + (((size_t) (&((struct ncr_reg *)0)->nc_sbcl))))); |
9154 | u_charunsigned char cmd = dbc >> 24; |
9155 | int phase = cmd & 7; |
9156 | ccb_p cp = ncr_ccb_from_dsa(np, dsa); |
9157 | |
9158 | printk("%s: SCSI parity error detected: SCR1=%d DBC=%x SBCL=%x\n", |
9159 | ncr_name(np), hsts, dbc, sbcl); |
9160 | |
9161 | /* |
9162 | ** Check that the chip is connected to the SCSI BUS. |
9163 | */ |
9164 | if (!(INB (nc_scntl1)(*(volatile unsigned char *) ((char *)np->reg + (((size_t) (&((struct ncr_reg *)0)->nc_scntl1))))) & ISCON0x10)) { |
9165 | if (!(driver_setup.recovery & 1)) { |
9166 | ncr_recover_scsi_int(np, HS_FAIL(9|(0x80))); |
9167 | return; |
9168 | } |
9169 | goto reset_all; |
9170 | } |
9171 | |
9172 | /* |
9173 | ** If the nexus is not clearly identified, reset the bus. |
9174 | ** We will try to do better later. |
9175 | */ |
9176 | if (!cp) |
9177 | goto reset_all; |
9178 | |
9179 | /* |
9180 | ** Check that the instruction was a MOV, the direction was INPUT, |
9181 | ** and ATN is asserted. |
9182 | */ |
9183 | if ((cmd & 0xc0) || !(phase & 1) || !(sbcl & 0x8)) |
9184 | goto reset_all; |
9185 | |
9186 | /* |
9187 | ** Keep track of the parity error. |
9188 | */ |
9189 | OUTONB (HF_PRT, HF_EXT_ERR)((*(volatile unsigned char *) ((char *)np->reg + (((size_t ) (&((struct ncr_reg *)0)->nc_scr3))))) = ((((*(volatile unsigned char *) ((char *)np->reg + (((size_t) (&((struct ncr_reg *)0)->nc_scr3))))) | ((1u<<7)))))); |
9190 | cp->xerr_status |= XE_PARITY_ERR(4); |
9191 | |
9192 | /* |
9193 | ** Prepare the message to send to the device. |
9194 | */ |
9195 | np->msgout[0] = (phase == 7) ? M_PARITY(0x09) : M_ID_ERROR(0x05); |
9196 | |
9197 | #ifdef SCSI_NCR_INTEGRITY_CHECKING |
9198 | /* |
9199 | ** Save error message. For integrity check use only. |
9200 | */ |
9201 | if (np->check_integrity) |
9202 | np->check_integ_par = np->msgout[0]; |
9203 | #endif |
9204 | |
9205 | /* |
9206 | ** If the old phase was DATA IN or DT DATA IN phase, |
9207 | ** we have to deal with the 3 situations described above. |
9208 | ** For other input phases (MSG IN and STATUS), the device |
9209 | ** must resend the whole thing that failed parity checking |
9210 | ** or signal error. So, jumping to dispatcher should be OK. |
9211 | */ |
9212 | if ((phase == 1) || (phase == 5)) { |
9213 | /* Phase mismatch handled by SCRIPTS */ |
9214 | if (dsp == NCB_SCRIPTH_PHYS (np, pm_handle))
9215 | OUTL_DSP (dsp);
9216 | /* Phase mismatch handled by the C code */
9217 | else if (sist & MA)
9218 | ncr_int_ma (np);
9219 | /* No phase mismatch occurred */
9220 | else {
9221 | OUTL (nc_temp, dsp);
9222 | OUTL_DSP (NCB_SCRIPT_PHYS (np, dispatch));
9223 | }
9224 | }
9225 | else
9226 | OUTL_DSP (NCB_SCRIPT_PHYS (np, clrack));
9227 | return; |
9228 | |
9229 | reset_all: |
9230 | ncr_start_reset(np); |
9231 | return; |
9232 | } |
9233 | |
9234 | /*========================================================== |
9235 | ** |
9236 | ** |
9237 | ** ncr chip exception handler for phase errors. |
9238 | ** |
9239 | ** |
9240 | **========================================================== |
9241 | ** |
9242 | ** We have to construct a new transfer descriptor, |
9243 | ** to transfer the rest of the current block. |
9244 | ** |
9245 | **---------------------------------------------------------- |
9246 | */ |
9247 | |
9248 | static void ncr_int_ma (ncb_p np) |
9249 | { |
9250 | u_int32 dbc; |
9251 | u_int32 rest; |
9252 | u_int32 dsp; |
9253 | u_int32 dsa; |
9254 | u_int32 nxtdsp; |
9255 | u_int32 *vdsp; |
9256 | u_int32 oadr, olen; |
9257 | u_int32 *tblp; |
9258 | u_int32 newcmd; |
9259 | u_int delta;
9260 | u_char cmd;
9261 | u_char hflags, hflags0;
9262 | struct pm_ctx *pm; |
9263 | ccb_p cp; |
9264 | |
9265 | dsp = INL (nc_dsp);
9266 | dbc = INL (nc_dbc);
9267 | dsa = INL (nc_dsa);
9268 | |
9269 | cmd = dbc >> 24; |
9270 | rest = dbc & 0xffffff; |
9271 | delta = 0; |
9272 | |
9273 | /* |
9274 | ** locate matching cp. |
9275 | */ |
9276 | cp = ncr_ccb_from_dsa(np, dsa); |
9277 | |
9278 | if (DEBUG_FLAGS & DEBUG_PHASE)
9279 | printk("CCB = %2x %2x %2x %2x %2x %2x\n", |
9280 | cp->cmd->cmnd[0], cp->cmd->cmnd[1], cp->cmd->cmnd[2], |
9281 | cp->cmd->cmnd[3], cp->cmd->cmnd[4], cp->cmd->cmnd[5]); |
9282 | |
9283 | /* |
9284 | ** Do not take into account the DMA fifo and various buffers in
9285 | ** INPUT phase since the chip flushes everything before |
9286 | ** raising the MA interrupt for interrupted INPUT phases. |
9287 | ** For DATA IN phase, we will check for the SWIDE later. |
9288 | */ |
9289 | if ((cmd & 7) != 1 && (cmd & 7) != 5) { |
9290 | u_int32 dfifo; |
9291 | u_char ss0, ss2;
9292 | |
9293 | /* |
9294 | ** If C1010, DFBC contains number of bytes in DMA fifo. |
9295 | ** else read DFIFO, CTEST[4-6] using 1 PCI bus ownership. |
9296 | */ |
9297 | if ((np->device_id == PCI_DEVICE_ID_LSI_53C1010) ||
9298 | (np->device_id == PCI_DEVICE_ID_LSI_53C1010_66))
9299 | delta = INL(nc_dfbc) & 0xffff;
9300 | else {
9301 | dfifo = INL(nc_dfifo);
9302 | |
9303 | /* |
9304 | ** Calculate remaining bytes in DMA fifo. |
9305 | ** C1010 - always large fifo, value in dfbc |
9306 | ** Otherwise, (CTEST5 = dfifo >> 16) |
9307 | */ |
9308 | if (dfifo & (DFS << 16))
9309 | delta = ((((dfifo >> 8) & 0x300) | |
9310 | (dfifo & 0xff)) - rest) & 0x3ff; |
9311 | else |
9312 | delta = ((dfifo & 0xff) - rest) & 0x7f; |
9313 | |
9314 | /* |
9315 | ** The data in the dma fifo has not been |
9316 | ** transferred to the target -> add the amount |
9317 | ** to the rest and clear the data. |
9318 | ** Check the sstat2 register in case of wide |
9319 | ** transfer. |
9320 | */ |
9321 | |
9322 | } |
9323 | |
9324 | rest += delta; |
9325 | ss0 = INB (nc_sstat0);
9326 | if (ss0 & OLF) rest++;
9327 | if ((np->device_id != PCI_DEVICE_ID_LSI_53C1010) &&
9328 | (np->device_id != PCI_DEVICE_ID_LSI_53C1010_66) && (ss0 & ORF))
9329 | rest++;
9330 | if (cp && (cp->phys.select.sel_scntl3 & EWS)) {
9331 | ss2 = INB (nc_sstat2);
9332 | if (ss2 & OLF1) rest++;
9333 | if ((np->device_id != PCI_DEVICE_ID_LSI_53C1010) &&
9334 | (np->device_id != PCI_DEVICE_ID_LSI_53C1010_66) && (ss2 & ORF))
9335 | rest++;
9336 | };
9337 | 
9338 | /*
9339 | ** Clear fifos.
9340 | */
9341 | OUTB (nc_ctest3, np->rv_ctest3 | CLF); /* dma fifo */
9342 | OUTB (nc_stest3, TE|CSF); /* scsi fifo */
9343 | } |
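      | /*
      | ** Worked example with hypothetical register values (not taken
      | ** from a real trace): small fifo, DFIFO & 0xff = 0x10 and
      | ** DBC & 0xffffff = 0x0c, so delta = (0x10 - 0x0c) & 0x7f = 4;
      | ** those 4 bytes never reached the target and are added back
      | ** to the residual before the fifos are cleared.
      | */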
9344 | |
9345 | /* |
9346 | ** log the information |
9347 | */ |
9348 | |
9349 | if (DEBUG_FLAGS & (DEBUG_TINY|DEBUG_PHASE))
9350 | printk ("P%x%x RL=%d D=%d ", cmd&7, INB(nc_sbcl)&7,
9351 | (unsigned) rest, (unsigned) delta); |
9352 | |
9353 | /* |
9354 | ** try to find the interrupted script command, |
9355 | ** and the address at which to continue. |
9356 | */ |
9357 | vdsp = 0; |
9358 | nxtdsp = 0; |
9359 | if (dsp > np->p_script && |
9360 | dsp <= np->p_script + sizeof(struct script)) { |
9361 | vdsp = (u_int32 *)((char*)np->script0 + (dsp-np->p_script-8)); |
9362 | nxtdsp = dsp; |
9363 | } |
9364 | else if (dsp > np->p_scripth && |
9365 | dsp <= np->p_scripth + sizeof(struct scripth)) { |
9366 | vdsp = (u_int32 *)((char*)np->scripth0 + (dsp-np->p_scripth-8)); |
9367 | nxtdsp = dsp; |
9368 | } |
9369 | |
9370 | /* |
9371 | ** log the information |
9372 | */ |
9373 | if (DEBUG_FLAGS & DEBUG_PHASE) {
9374 | printk ("\nCP=%p DSP=%x NXT=%x VDSP=%p CMD=%x ", |
9375 | cp, (unsigned)dsp, (unsigned)nxtdsp, vdsp, cmd); |
9376 | }; |
9377 | |
9378 | if (!vdsp) { |
9379 | printk ("%s: interrupted SCRIPT address not found.\n", |
9380 | ncr_name (np)); |
9381 | goto reset_all; |
9382 | } |
9383 | |
9384 | if (!cp) { |
9385 | printk ("%s: SCSI phase error fixup: CCB already dequeued.\n", |
9386 | ncr_name (np)); |
9387 | goto reset_all; |
9388 | } |
9389 | |
9390 | /* |
9391 | ** get old startaddress and old length. |
9392 | */ |
9393 | |
9394 | oadr = scr_to_cpu(vdsp[1]);
9395 | 
9396 | if (cmd & 0x10) { /* Table indirect */
9397 | tblp = (u_int32 *) ((char*) &cp->phys + oadr);
9398 | olen = scr_to_cpu(tblp[0]);
9399 | oadr = scr_to_cpu(tblp[1]);
9400 | } else {
9401 | tblp = (u_int32 *) 0;
9402 | olen = scr_to_cpu(vdsp[0]) & 0xffffff;
9403 | };
9404 | 
9405 | if (DEBUG_FLAGS & DEBUG_PHASE) {
9406 | printk ("OCMD=%x\nTBLP=%p OLEN=%x OADR=%x\n",
9407 | (unsigned) (scr_to_cpu(vdsp[0]) >> 24),
9408 | tblp, |
9409 | (unsigned) olen, |
9410 | (unsigned) oadr); |
9411 | }; |
9412 | |
9413 | /* |
9414 | ** Check cmd against the assumed interrupted script command.
9415 | ** In a DT DATA phase, the MOVE instruction does not carry
9416 | ** bit 4 of the phase.
9417 | */ |
9418 | |
9419 | if (((cmd & 2) ? cmd : (cmd & ~4)) != (scr_to_cpu(vdsp[0]) >> 24)) {
9420 | PRINT_ADDR(cp->cmd);
9421 | printk ("internal error: cmd=%02x != %02x=(vdsp[0] >> 24)\n",
9422 | (unsigned)cmd, (unsigned)scr_to_cpu(vdsp[0]) >> 24);
9423 | |
9424 | goto reset_all; |
9425 | }; |
9426 | |
9427 | /* |
9428 | ** if old phase not dataphase, leave here. |
9429 | ** C/D line is low if data. |
9430 | */ |
9431 | |
9432 | if (cmd & 0x02) { |
9433 | PRINT_ADDR(cp->cmd); |
9434 | printk ("phase change %x-%x %d@%08x resid=%d.\n", |
9435 | cmd&7, INB(nc_sbcl)&7, (unsigned)olen,
9436 | (unsigned)oadr, (unsigned)rest); |
9437 | goto unexpected_phase; |
9438 | }; |
9439 | |
9440 | /* |
9441 | ** Choose the correct PM save area. |
9442 | ** |
9443 | ** Look at the PM_SAVE SCRIPT if you want to understand |
9444 | ** this stuff. The equivalent code is implemented in |
9445 | ** SCRIPTS for the 895A and 896 that are able to handle |
9446 | ** PM from the SCRIPTS processor. |
9447 | */ |
9448 | |
9449 | hflags0 = INB (HF_PRT);
9450 | hflags = hflags0; |
9451 | |
9452 | if (hflags & (HF_IN_PM0 | HF_IN_PM1 | HF_DP_SAVED)) {
9453 | if (hflags & HF_IN_PM0)
9454 | nxtdsp = scr_to_cpu(cp->phys.pm0.ret);
9455 | else if (hflags & HF_IN_PM1)
9456 | nxtdsp = scr_to_cpu(cp->phys.pm1.ret);
9457 | 
9458 | if (hflags & HF_DP_SAVED)
9459 | hflags ^= HF_ACT_PM;
9460 | } |
9461 | |
9462 | if (!(hflags & HF_ACT_PM)) {
9463 | pm = &cp->phys.pm0;
9464 | newcmd = NCB_SCRIPT_PHYS(np, pm0_data);
9465 | }
9466 | else {
9467 | pm = &cp->phys.pm1;
9468 | newcmd = NCB_SCRIPT_PHYS(np, pm1_data);
9469 | }
9470 | 
9471 | hflags &= ~(HF_IN_PM0 | HF_IN_PM1 | HF_DP_SAVED);
9472 | if (hflags != hflags0)
9473 | OUTB (HF_PRT, hflags);
9474 | |
9475 | /* |
9476 | ** fillin the phase mismatch context |
9477 | */ |
9478 | |
9479 | pm->sg.addr = cpu_to_scr(oadr + olen - rest);
9480 | pm->sg.size = cpu_to_scr(rest);
9481 | pm->ret = cpu_to_scr(nxtdsp);
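      | /*
      | ** Illustrative numbers (hypothetical, not from a real trace):
      | ** if the interrupted MOVE described olen = 0x1000 bytes at
      | ** oadr and rest = 0x200 bytes were still to go, the saved
      | ** context restarts the transfer at oadr + 0xe00 for 0x200
      | ** bytes, then returns to nxtdsp in the data SCRIPT.
      | */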
9482 | |
9483 | /* |
9484 | ** If we have a SWIDE, |
9485 | ** - prepare the address to write the SWIDE from SCRIPTS, |
9486 | ** - compute the SCRIPTS address to restart from, |
9487 | ** - move current data pointer context by one byte. |
9488 | */ |
9489 | nxtdsp = NCB_SCRIPT_PHYS (np, dispatch);
9490 | if ( ((cmd & 7) == 1 || (cmd & 7) == 5)
9491 | && cp && (cp->phys.select.sel_scntl3 & EWS) &&
9492 | (INB (nc_scntl2) & WSR)) {
9493 | u32 tmp; |
9494 | |
9495 | #ifdef SYM_DEBUG_PM_WITH_WSR |
9496 | PRINT_ADDR(cp); |
9497 | printf ("MA interrupt with WSR set - " |
9498 | "pm->sg.addr=%x - pm->sg.size=%d\n", |
9499 | pm->sg.addr, pm->sg.size); |
9500 | #endif |
9501 | /* |
9502 | * Set up the table indirect for the MOVE |
9503 | * of the residual byte and adjust the data |
9504 | * pointer context. |
9505 | */ |
9506 | tmp = scr_to_cpu(pm->sg.addr);
9507 | cp->phys.wresid.addr = cpu_to_scr(tmp);
9508 | pm->sg.addr = cpu_to_scr(tmp + 1);
9509 | tmp = scr_to_cpu(pm->sg.size);
9510 | cp->phys.wresid.size = cpu_to_scr((tmp&0xff000000) | 1);
9511 | pm->sg.size = cpu_to_scr(tmp - 1);
9512 | |
9513 | /* |
9514 | * If only the residual byte is to be moved, |
9515 | * no PM context is needed. |
9516 | */ |
9517 | if ((tmp&0xffffff) == 1) |
9518 | newcmd = pm->ret; |
9519 | |
9520 | /* |
9521 | * Prepare the address of SCRIPTS that will |
9522 | * move the residual byte to memory. |
9523 | */ |
9524 | nxtdsp = NCB_SCRIPTH_PHYS (np, wsr_ma_helper);
9525 | }
9526 | 
9527 | if (DEBUG_FLAGS & DEBUG_PHASE) {
9528 | PRINT_ADDR(cp->cmd);
9529 | printk ("PM %x %x %x / %x %x %x.\n",
9530 | hflags0, hflags, newcmd,
9531 | (unsigned)scr_to_cpu(pm->sg.addr),
9532 | (unsigned)scr_to_cpu(pm->sg.size),
9533 | (unsigned)scr_to_cpu(pm->ret));
9534 | } |
9535 | |
9536 | /* |
9537 | ** Restart the SCRIPTS processor. |
9538 | */ |
9539 | |
9540 | OUTL (nc_temp, newcmd);
9541 | OUTL_DSP (nxtdsp);
9542 | return; |
9543 | |
9544 | /* |
9545 | ** Unexpected phase changes that occur when the current phase
9546 | ** is not a DATA IN or DATA OUT phase are due to error conditions.
9547 | ** Such an event may only happen when the SCRIPTS is using a
9548 | ** multibyte SCSI MOVE.
9549 | ** |
9550 | ** Phase change Some possible cause |
9551 | ** |
9552 | ** COMMAND --> MSG IN SCSI parity error detected by target. |
9553 | ** COMMAND --> STATUS Bad command or refused by target. |
9554 | ** MSG OUT --> MSG IN Message rejected by target. |
9555 | ** MSG OUT --> COMMAND Bogus target that discards extended |
9556 | ** negotiation messages. |
9557 | ** |
9558 | ** The code below does not care about the new phase and so
9559 | ** trusts the target. Why annoy it ?
9560 | ** If the interrupted phase is COMMAND phase, we restart at
9561 | ** dispatcher.
9562 | ** If a target does not get all the messages after selection,
9563 | ** the code assumes blindly that the target discards extended
9564 | ** messages and clears the negotiation status.
9565 | ** If the target does not want all our response to negotiation,
9566 | ** we force a SIR_NEGO_PROTO interrupt (it is a hack that avoids
9567 | ** bloat for such a should_not_happen situation).
9568 | ** In all other situations, we reset the BUS.
9569 | ** Are these assumptions reasonable ? (Wait and see ...)
9570 | */ |
9571 | unexpected_phase: |
9572 | dsp -= 8; |
9573 | nxtdsp = 0; |
9574 | |
9575 | switch (cmd & 7) { |
9576 | case 2: /* COMMAND phase */ |
9577 | nxtdsp = NCB_SCRIPT_PHYS (np, dispatch);
9578 | break;
9579 | #if 0
9580 | case 3: /* STATUS phase */
9581 | nxtdsp = NCB_SCRIPT_PHYS (np, dispatch);
9582 | break; |
9583 | #endif |
9584 | case 6: /* MSG OUT phase */ |
9585 | /* |
9586 | ** If the device may want to use untagged when we want |
9587 | ** tagged, we prepare an IDENTIFY without disc. granted, |
9588 | ** since we will not be able to handle reselect. |
9589 | ** Otherwise, we just don't care. |
9590 | */ |
9591 | if (dsp == NCB_SCRIPT_PHYS (np, send_ident)) {
9592 | if (cp->tag != NO_TAG && olen - rest <= 3) {
9593 | cp->host_status = HS_BUSY;
9594 | np->msgout[0] = M_IDENTIFY | cp->lun;
9595 | nxtdsp = NCB_SCRIPTH_PHYS (np, ident_break_atn);
9596 | }
9597 | else
9598 | nxtdsp = NCB_SCRIPTH_PHYS (np, ident_break);
9599 | }
9600 | else if (dsp == NCB_SCRIPTH_PHYS (np, send_wdtr) ||
9601 | dsp == NCB_SCRIPTH_PHYS (np, send_sdtr) ||
9602 | dsp == NCB_SCRIPTH_PHYS (np, send_ppr)) {
9603 | nxtdsp = NCB_SCRIPTH_PHYS (np, nego_bad_phase);
9604 | }
9605 | break;
9606 | #if 0
9607 | case 7: /* MSG IN phase */
9608 | nxtdsp = NCB_SCRIPT_PHYS (np, clrack);
9609 | break;
9610 | #endif
9611 | }
9612 | 
9613 | if (nxtdsp) {
9614 | OUTL_DSP (nxtdsp);
9615 | return; |
9616 | } |
9617 | |
9618 | reset_all: |
9619 | ncr_start_reset(np); |
9620 | } |
9621 | |
9622 | /*========================================================== |
9623 | ** |
9624 | ** ncr chip handler for QUEUE FULL and CHECK CONDITION |
9625 | ** |
9626 | **========================================================== |
9627 | ** |
9628 | ** On QUEUE FULL status, we set the actual tagged command |
9629 | ** queue depth to the number of disconnected CCBs that is |
9630 | ** hopefully a good value to avoid further QUEUE FULL. |
9631 | ** |
9632 | ** On CHECK CONDITION or COMMAND TERMINATED, we use the |
9633 | ** CCB of the failed command for performing a REQUEST |
9634 | ** SENSE SCSI command. |
9635 | ** |
9636 | ** We do not want to change the order commands will be |
9637 | ** actually queued to the device after we received a |
9638 | ** QUEUE FULL status. We also want to properly deal with |
9639 | ** contingent allegiance condition. For these reasons, |
9640 | ** we remove from the start queue all commands for this |
9641 | ** LUN that haven't been yet queued to the device and |
9642 | ** put them back in the corresponding LUN queue, then
9643 | ** requeue the CCB that failed in front of the LUN queue.
9644 | ** I just hope this does not have to be performed too often. :)
9645 | ** |
9646 | ** If we are using IMMEDIATE ARBITRATION, we clear the |
9647 | ** IARB hint for every command we encounter in order not
9648 | ** to be stuck with a won arbitration and no job to queue |
9649 | ** to a device. |
9650 | **---------------------------------------------------------- |
9651 | */ |
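      | /*
      | ** Example of the tag-depth adjustment (hypothetical numbers):
      | ** if 8 commands were queued and the device returns QUEUE FULL
      | ** while only 3 CCBs are disconnected, lp->numtags is lowered
      | ** to 3 so that further QUEUE FULL conditions become unlikely.
      | */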
9652 | |
9653 | static void ncr_sir_to_redo(ncb_p np, int num, ccb_p cp) |
9654 | { |
9655 | Scsi_Cmnd *cmd = cp->cmd; |
9656 | tcb_p tp = &np->target[cp->target]; |
9657 | lcb_p lp = ncr_lp(np, tp, cp->lun);
9658 | ccb_p cp2;
9659 | int busyccbs = 1;
9660 | u_int32 startp;
9661 | u_char s_status = INB (SS_PRT);
9662 | int msglen; |
9663 | int i, j; |
9664 | |
9665 | |
9666 | /* |
9667 | ** If the LCB is not yet available, then only |
9668 | ** 1 IO is accepted, so we should have it. |
9669 | */ |
9670 | if (!lp) |
9671 | goto next; |
9672 | /* |
9673 | ** Remove all CCBs queued to the chip for that LUN and put |
9674 | ** them back in the LUN CCB wait queue. |
9675 | */ |
9676 | busyccbs = lp->queuedccbs; |
9677 | i = (INL (nc_scratcha) - np->p_squeue) / 4;
9678 | j = i;
9679 | while (i != np->squeueput) {
9680 | cp2 = ncr_ccb_from_dsa(np, scr_to_cpu(np->squeue[i]));
9681 | assert(cp2);
9682 | #ifdef SCSI_NCR_IARB_SUPPORT
9683 | /* IARB hints may not be relevant any more. Forget them. */
9684 | cp2->host_flags &= ~HF_HINT_IARB;
9685 | #endif
9686 | if (cp2 && cp2->target == cp->target && cp2->lun == cp->lun) {
9687 | xpt_remque(&cp2->link_ccbq);
9688 | xpt_insque_head(&cp2->link_ccbq, &lp->wait_ccbq);
9689 | --lp->queuedccbs;
9690 | cp2->queued = 0;
9691 | }
9692 | else {
9693 | if (i != j)
9694 | np->squeue[j] = np->squeue[i];
9695 | if ((j += 2) >= MAX_START*2) j = 0;
9696 | }
9697 | if ((i += 2) >= MAX_START*2) i = 0;
9698 | } |
9699 | if (i != j) /* Copy back the idle task if needed */ |
9700 | np->squeue[j] = np->squeue[i]; |
9701 | np->squeueput = j; /* Update our current start queue pointer */ |
9702 | |
9703 | /* |
9704 | ** Requeue the interrupted CCB in front of the |
9705 | ** LUN CCB wait queue to preserve ordering. |
9706 | */ |
9707 | xpt_remque(&cp->link_ccbq);
9708 | xpt_insque_head(&cp->link_ccbq, &lp->wait_ccbq);
9709 | --lp->queuedccbs;
9710 | cp->queued = 0;
9711 | 
9712 | next:
9713 | 
9714 | #ifdef SCSI_NCR_IARB_SUPPORT
9715 | /* IARB hint may not be relevant any more. Forget it. */
9716 | cp->host_flags &= ~HF_HINT_IARB;
9717 | if (np->last_cp) |
9718 | np->last_cp = 0; |
9719 | #endif |
9720 | |
9721 | /* |
9722 | ** Now we can restart the SCRIPTS processor safely. |
9723 | */ |
9724 | OUTL_DSP (NCB_SCRIPT_PHYS (np, start));
9725 | 
9726 | switch(s_status) {
9727 | default:
9728 | case S_BUSY:
9729 | ncr_complete(np, cp);
9730 | break;
9731 | case S_QUEUE_FULL:
9732 | if (!lp || !lp->queuedccbs) {
9733 | ncr_complete(np, cp);
9734 | break;
9735 | }
9736 | if (bootverbose >= 1) {
9737 | PRINT_ADDR(cmd); |
9738 | printk ("QUEUE FULL! %d busy, %d disconnected CCBs\n", |
9739 | busyccbs, lp->queuedccbs); |
9740 | } |
9741 | /* |
9742 | ** Decrease number of tags to the number of |
9743 | ** disconnected commands. |
9744 | */ |
9745 | if (lp->queuedccbs < lp->numtags) { |
9746 | lp->numtags = lp->queuedccbs; |
9747 | lp->num_good = 0; |
9748 | ncr_setup_tags (np, cp->target, cp->lun); |
9749 | } |
9750 | /* |
9751 | ** Repair the offending CCB. |
9752 | */ |
9753 | cp->phys.header.savep = cp->startp; |
9754 | cp->phys.header.lastp = cp->lastp0; |
9755 | cp->host_status = HS_BUSY;
9756 | cp->scsi_status = S_ILLEGAL;
9757 | cp->xerr_status = 0;
9758 | cp->extra_bytes = 0;
9759 | cp->host_flags &= (HF_PM_TO_C|HF_DATA_IN);
9760 | |
9761 | break; |
9762 | |
9763 | case S_TERMINATED:
9764 | case S_CHECK_COND:
9765 | /*
9766 | ** If we were requesting sense, give up.
9767 | */
9768 | if (cp->host_flags & HF_AUTO_SENSE) {
9769 | ncr_complete(np, cp); |
9770 | break; |
9771 | } |
9772 | |
9773 | /* |
9774 | ** Save SCSI status and extended error. |
9775 | ** Compute the data residual now. |
9776 | */ |
9777 | cp->sv_scsi_status = cp->scsi_status;
9778 | cp->sv_xerr_status = cp->xerr_status;
9779 | cp->resid = ncr_compute_residual(np, cp);
9780 | 
9781 | /*
9782 | ** Device returned CHECK CONDITION status.
9783 | ** Prepare all needed data structures for getting
9784 | ** sense data. |
9785 | */ |
9786 | |
9787 | /* |
9788 | ** identify message |
9789 | */ |
9790 | cp->scsi_smsg2[0] = M_IDENTIFY | cp->lun;
9791 | msglen = 1; |
9792 | |
9793 | /* |
9794 | ** If we are currently using anything different from
9795 | ** async. 8 bit data transfers with that target,
9796 | ** start a negotiation, since the device may want
9797 | ** to report a UNIT ATTENTION condition due to
9798 | ** a cause we currently ignore, and we do not want
9799 | ** to be stuck with WIDE and/or SYNC data transfer.
9800 | ** |
9801 | ** cp->nego_status is filled by ncr_prepare_nego(). |
9802 | ** |
9803 | ** Do NOT negotiate if performing integrity check |
9804 | ** or if integrity check has completed, all check |
9805 | ** conditions will have been cleared. |
9806 | */ |
9807 | |
9808 | #ifdef SCSI_NCR_INTEGRITY_CHECKING |
9809 | if (DEBUG_FLAGS & DEBUG_IC) {
9810 | printk("%s: ncr_sir_to_redo: ic_done %2X, in_progress %2X\n", |
9811 | ncr_name(np), tp->ic_done, cp->cmd->ic_in_progress); |
9812 | } |
9813 | |
9814 | /* |
9815 | ** If parity error during integrity check, |
9816 | ** set the target width to narrow. Otherwise, |
9817 | ** do not negotiate on a request sense. |
9818 | */ |
9819 | if ( np->check_integ_par && np->check_integrity |
9820 | && cp->cmd->ic_in_progress ) { |
9821 | cp->nego_status = 0; |
9822 | msglen += |
9823 | ncr_ic_nego (np, cp, cmd ,&cp->scsi_smsg2[msglen]); |
9824 | } |
9825 | |
9826 | if (!np->check_integrity || |
9827 | (np->check_integrity && |
9828 | (!cp->cmd->ic_in_progress && !tp->ic_done)) ) { |
9829 | ncr_negotiate(np, tp); |
9830 | cp->nego_status = 0; |
9831 | { |
9832 | u_char sync_offset;
9833 | if ((np->device_id == PCI_DEVICE_ID_LSI_53C1010) ||
9834 | (np->device_id == PCI_DEVICE_ID_LSI_53C1010_66))
9835 | sync_offset = tp->sval & 0x3f;
9836 | else
9837 | sync_offset = tp->sval & 0x1f;
9838 | 
9839 | if ((tp->wval & EWS) || sync_offset)
9840 | msglen += |
9841 | ncr_prepare_nego (np, cp, &cp->scsi_smsg2[msglen]); |
9842 | } |
9843 | |
9844 | } |
9845 | #else |
9846 | ncr_negotiate(np, tp); |
9847 | cp->nego_status = 0; |
9848 | if ((tp->wval & EWS) || (tp->sval & 0x1f))
9849 | msglen += |
9850 | ncr_prepare_nego (np, cp, &cp->scsi_smsg2[msglen]); |
9851 | #endif /* SCSI_NCR_INTEGRITY_CHECKING */ |
9852 | |
9853 | /* |
9854 | ** Message table indirect structure. |
9855 | */ |
9856 | cp->phys.smsg.addr = cpu_to_scr(CCB_PHYS (cp, scsi_smsg2));
9857 | cp->phys.smsg.size = cpu_to_scr(msglen);
9858 | 
9859 | /*
9860 | ** sense command
9861 | */
9862 | cp->phys.cmd.addr = cpu_to_scr(CCB_PHYS (cp, sensecmd));
9863 | cp->phys.cmd.size = cpu_to_scr(6);
9864 | |
9865 | /* |
9866 | ** patch requested size into sense command |
9867 | */ |
9868 | cp->sensecmd[0] = 0x03; |
9869 | cp->sensecmd[1] = cp->lun << 5; |
9870 | cp->sensecmd[4] = sizeof(cp->sense_buf); |
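      | /*
      | ** For illustration only (byte values are hypothetical): the
      | ** resulting 6-byte CDB is REQUEST SENSE, e.g. for LUN 0 with
      | ** a 32-byte sense buffer: { 0x03, 0x00, 0x00, 0x00, 0x20, 0x00 }.
      | */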
9871 | |
9872 | /* |
9873 | ** sense data |
9874 | */ |
9875 | bzero(cp->sense_buf, sizeof(cp->sense_buf));
9876 | cp->phys.sense.addr = cpu_to_scr(CCB_PHYS(cp,sense_buf[0]));
9877 | cp->phys.sense.size = cpu_to_scr(sizeof(cp->sense_buf));
9878 | 
9879 | /*
9880 | ** requeue the command.
9881 | */
9882 | startp = NCB_SCRIPTH_PHYS (np, sdata_in);
9883 | |
9884 | cp->phys.header.savep = cpu_to_scr(startp);
9885 | cp->phys.header.goalp = cpu_to_scr(startp + 16);
9886 | cp->phys.header.lastp = cpu_to_scr(startp);
9887 | cp->phys.header.wgoalp = cpu_to_scr(startp + 16);
9888 | cp->phys.header.wlastp = cpu_to_scr(startp);
9889 | 
9890 | cp->host_status = cp->nego_status ? HS_NEGOTIATE : HS_BUSY;
9891 | cp->scsi_status = S_ILLEGAL;
9892 | cp->host_flags = (HF_AUTO_SENSE|HF_DATA_IN);
9893 | 
9894 | cp->phys.header.go.start =
9895 | cpu_to_scr(NCB_SCRIPT_PHYS (np, select));
9896 | |
9897 | /* |
9898 | ** If lp not yet allocated, requeue the command. |
9899 | */ |
9900 | if (!lp) |
9901 | ncr_put_start_queue(np, cp); |
9902 | break; |
9903 | } |
9904 | |
9905 | /* |
9906 | ** requeue awaiting scsi commands for this lun. |
9907 | */ |
9908 | if (lp) |
9909 | ncr_start_next_ccb(np, lp, 1); |
9910 | |
9911 | return; |
9912 | } |
9913 | |
9914 | /*---------------------------------------------------------- |
9915 | ** |
9916 | ** After a device has accepted some management message |
9917 | ** as BUS DEVICE RESET, ABORT TASK, etc ..., or when |
9918 | ** a device signals a UNIT ATTENTION condition, some |
9919 | ** tasks are thrown away by the device. We are required |
9920 | ** to reflect that on our tasks list since the device |
9921 | ** will never complete these tasks. |
9922 | ** |
9923 | ** This function completes all disconnected CCBs for a |
9924 | ** given target that matches the following criteria: |
9925 | ** - lun=-1 means any logical UNIT otherwise a given one. |
9926 | ** - task=-1 means any task, otherwise a given one. |
9927 | **---------------------------------------------------------- |
9928 | */ |
9929 | static int ncr_clear_tasks(ncb_p np, u_char hsts,
9930 | int target, int lun, int task) |
9931 | { |
9932 | int i = 0; |
9933 | ccb_p cp; |
9934 | |
9935 | for (cp = np->ccbc; cp; cp = cp->link_ccb) { |
9936 | if (cp->host_status != HS_DISCONNECT)
9937 | continue;
9938 | if (cp->target != target)
9939 | continue;
9940 | if (lun != -1 && cp->lun != lun)
9941 | continue;
9942 | if (task != -1 && cp->tag != NO_TAG && cp->scsi_smsg[2] != task)
9943 | continue;
9944 | cp->host_status = hsts;
9945 | cp->scsi_status = S_ILLEGAL;
9946 | ncr_complete(np, cp); |
9947 | ++i; |
9948 | } |
9949 | return i; |
9950 | } |
9951 | |
9952 | /*========================================================== |
9953 | ** |
9954 | ** ncr chip handler for TASKS recovery. |
9955 | ** |
9956 | **========================================================== |
9957 | ** |
9958 | ** We cannot safely abort a command, while the SCRIPTS |
9959 | ** processor is running, since we just would be in race |
9960 | ** with it. |
9961 | ** |
9962 | ** As long as we have tasks to abort, we keep the SEM |
9963 | ** bit set in the ISTAT. When this bit is set, the |
9964 | ** SCRIPTS processor interrupts (SIR_SCRIPT_STOPPED) |
9965 | ** each time it enters the scheduler. |
9966 | ** |
9967 | ** If we have to reset a target, clear tasks of a unit, |
9968 | ** or to perform the abort of a disconnected job, we |
9969 | ** restart the SCRIPTS for selecting the target. Once |
9970 | ** selected, the SCRIPTS interrupts (SIR_TARGET_SELECTED). |
9971 | ** If it loses arbitration, the SCRIPTS will interrupt again |
9972 | ** the next time it will enter its scheduler, and so on ... |
9973 | ** |
9974 | ** On SIR_TARGET_SELECTED, we scan for the most
9975 | ** appropriate thing to do:
9976 | ** |
9977 | ** - If nothing, we just send a M_ABORT message to the
9978 | ** target to get rid of the useless SCSI bus ownership. |
9979 | ** According to the specs, no tasks shall be affected. |
9980 | ** - If the target is to be reset, we send it a M_RESET |
9981 | ** message. |
9982 | ** - If a logical UNIT is to be cleared, we send the
9983 | ** IDENTIFY(lun) + M_ABORT. |
9984 | ** - If an untagged task is to be aborted, we send the |
9985 | ** IDENTIFY(lun) + M_ABORT. |
9986 | ** - If a tagged task is to be aborted, we send the |
9987 | ** IDENTIFY(lun) + task attributes + M_ABORT_TAG. |
9988 | ** |
9989 | ** Once our 'kiss of death' :) message has been accepted |
9990 | ** by the target, the SCRIPTS interrupts again |
9991 | ** (SIR_ABORT_SENT). On this interrupt, we complete |
9992 | ** all the CCBs that should have been aborted by the |
9993 | ** target according to our message. |
9994 | ** |
9995 | **---------------------------------------------------------- |
9996 | */ |
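      | /*
      | ** For illustration (hypothetical values): aborting the tagged
      | ** task with tag 5 on LUN 2 would build the message sequence
      | ** IDENTIFY(2) + SIMPLE QUEUE TAG + 5 + ABORT TAG, i.e.
      | ** { 0x82, 0x20, 0x05, 0x0d } in np->abrt_msg[].
      | */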
9997 | static void ncr_sir_task_recovery(ncb_p np, int num) |
9998 | { |
9999 | ccb_p cp; |
10000 | tcb_p tp; |
10001 | int target=-1, lun=-1, task; |
10002 | int i, k; |
10003 | u_char *p;
10004 | |
10005 | switch(num) { |
10006 | /* |
10007 | ** The SCRIPTS processor stopped before starting |
10008 | ** the next command in order to allow us to perform |
10009 | ** some task recovery. |
10010 | */ |
10011 | case SIR_SCRIPT_STOPPED:
10012 | 
10013 | /*
10014 | ** Do we have any target to reset or unit to clear ?
10015 | */
10016 | for (i = 0 ; i < MAX_TARGET ; i++) {
10017 | tp = &np->target[i];
10018 | if (tp->to_reset || (tp->l0p && tp->l0p->to_clear)) {
10019 | target = i;
10020 | break;
10021 | }
10022 | if (!tp->lmp)
10023 | continue;
10024 | for (k = 1 ; k < MAX_LUN ; k++) {
10025 | if (tp->lmp[k] && tp->lmp[k]->to_clear) { |
10026 | target = i; |
10027 | break; |
10028 | } |
10029 | } |
10030 | if (target != -1) |
10031 | break; |
10032 | } |
10033 | |
10034 | /* |
10035 | ** If not, look at the CCB list for any |
10036 | ** disconnected CCB to be aborted. |
10037 | */ |
10038 | if (target == -1) { |
10039 | for (cp = np->ccbc; cp; cp = cp->link_ccb) { |
10040 | if (cp->host_status != HS_DISCONNECT)
10041 | continue; |
10042 | if (cp->to_abort) { |
10043 | target = cp->target; |
10044 | break; |
10045 | } |
10046 | } |
10047 | } |
10048 | |
10049 | /* |
10050 | ** If some target is to be selected, |
10051 | ** prepare and start the selection. |
10052 | */ |
10053 | if (target != -1) { |
10054 | tp = &np->target[target]; |
10055 | np->abrt_sel.sel_id = target; |
10056 | np->abrt_sel.sel_scntl3 = tp->wval; |
10057 | np->abrt_sel.sel_sxfer = tp->sval; |
10058 | np->abrt_sel.sel_scntl4 = tp->uval; |
10059 | OUTL(nc_dsa, np->p_ncb);
10060 | OUTL_DSP (NCB_SCRIPTH_PHYS (np, sel_for_abort));
10061 | return;
10062 | }
10063 | 
10064 | /*
10065 | ** Nothing is to be selected, so we do not need
10066 | ** to synchronize with the SCRIPTS anymore.
10067 | ** Remove the SEM flag from the ISTAT.
10068 | */
10069 | np->istat_sem = 0;
10070 | OUTB (nc_istat, SIGP);
10071 | |
10072 | /* |
10073 | ** Now look at CCBs to abort that haven't started yet. |
10074 | ** Remove all those CCBs from the start queue and |
10075 | ** complete them with appropriate status. |
10076 | ** Btw, the SCRIPTS processor is still stopped, so |
10077 | ** we are not in race. |
10078 | */ |
10079 | for (cp = np->ccbc; cp; cp = cp->link_ccb) { |
10080 | if (cp->host_status != HS_BUSY &&
10081 | cp->host_status != HS_NEGOTIATE)
10082 | continue; |
10083 | if (!cp->to_abort) |
10084 | continue; |
10085 | #ifdef SCSI_NCR_IARB_SUPPORT |
10086 | /* |
10087 | ** If we are using IMMEDIATE ARBITRATION, we do not
10088 | ** want to cancel the last queued CCB, since the |
10089 | ** SCRIPTS may have anticipated the selection. |
10090 | */ |
10091 | if (cp == np->last_cp) { |
10092 | cp->to_abort = 0; |
10093 | continue; |
10094 | } |
10095 | #endif |
10096 | /* |
10097 | ** Compute index of next position in the start |
10098 | ** queue the SCRIPTS will schedule. |
10099 | */ |
10100 | i = (INL (nc_scratcha) - np->p_squeue) / 4;
10101 | |
10102 | /* |
10103 | ** Remove the job from the start queue. |
10104 | */ |
10105 | k = -1; |
10106 | while (1) { |
10107 | if (i == np->squeueput) |
10108 | break; |
10109 | if (k == -1) { /* Not found yet */ |
10110 | if (cp == ncr_ccb_from_dsa(np,
10111 | scr_to_cpu(np->squeue[i])))
10112 | k = i; /* Found */ |
10113 | } |
10114 | else { |
10115 | /* |
10116 | ** Once found, we have to move |
10117 | ** back all jobs by 1 position. |
10118 | */ |
10119 | np->squeue[k] = np->squeue[i]; |
10120 | k += 2; |
10121 | if (k >= MAX_START*2)
10122 | k = 0;
10123 | }
10124 | 
10125 | i += 2;
10126 | if (i >= MAX_START*2)
10127 | i = 0;
10128 | }
10129 | if (k != -1) {
10130 | np->squeue[k] = np->squeue[i]; /* Idle task */
10131 | np->squeueput = k; /* Start queue pointer */
10132 | }
10133 | cp->host_status = HS_ABORTED;
10134 | cp->scsi_status = S_ILLEGAL;
10135 | ncr_complete(np, cp); |
10136 | } |
10137 | break; |
10138 | /* |
10139 | ** The SCRIPTS processor has selected a target |
10140 | ** we may have some manual recovery to perform for. |
10141 | */ |
10142 | case SIR_TARGET_SELECTED:
10143 | target = (INB (nc_sdid) & 0xf);
10144 | tp = &np->target[target];
10145 | 
10146 | np->abrt_tbl.addr = vtobus(np->abrt_msg);
10147 | |
10148 | /* |
10149 | ** If the target is to be reset, prepare a |
10150 | ** M_RESET message and clear the to_reset flag |
10151 | ** since we do not expect this operation to fail.
10152 | */
10153 | if (tp->to_reset) {
10154 | np->abrt_msg[0] = M_RESET;
10155 | np->abrt_tbl.size = 1; |
10156 | tp->to_reset = 0; |
10157 | break; |
10158 | } |
10159 | |
10160 | /* |
10161 | ** Otherwise, look for some logical unit to be cleared. |
10162 | */ |
10163 | if (tp->l0p && tp->l0p->to_clear) |
10164 | lun = 0; |
10165 | else if (tp->lmp) { |
10166 | for (k = 1 ; k < MAX_LUN ; k++) {
10167 | if (tp->lmp[k] && tp->lmp[k]->to_clear) { |
10168 | lun = k; |
10169 | break; |
10170 | } |
10171 | } |
10172 | } |
10173 | |
10174 | /* |
10175 | ** If a logical unit is to be cleared, prepare |
10176 | ** an IDENTIFY(lun) + ABORT MESSAGE. |
10177 | */ |
10178 | if (lun != -1) { |
10179 | lcb_p lp = ncr_lp(np, tp, lun);
10180 | lp->to_clear = 0; /* We do not expect to fail here */
10181 | np->abrt_msg[0] = M_IDENTIFY | lun;
10182 | np->abrt_msg[1] = M_ABORT;
10183 | np->abrt_tbl.size = 2; |
10184 | break; |
10185 | } |
10186 | |
10187 | /* |
10188 | ** Otherwise, look for some disconnected job to |
10189 | ** abort for this target. |
10190 | */ |
10191 | for (cp = np->ccbc; cp; cp = cp->link_ccb) { |
10192 | if (cp->host_statusphys.header.status[1] != HS_DISCONNECT(3)) |
10193 | continue; |
10194 | if (cp->target != target) |
10195 | continue; |
10196 | if (cp->to_abort) |
10197 | break; |
10198 | } |
10199 | |
10200 | /* |
10201 | ** If we have none, probably since the device has |
10202 | ** completed the command before we won arbitration,
10203 | ** send a M_ABORT message without IDENTIFY. |
10204 | ** According to the specs, the device must just |
10205 | ** disconnect the BUS and not abort any task. |
10206 | */ |
10207 | if (!cp) { |
10208 | np->abrt_msg[0] = M_ABORT;
10209 | np->abrt_tbl.size = 1; |
10210 | break; |
10211 | } |
10212 | |
10213 | /* |
10214 | ** We have some task to abort. |
10215 | ** Set the IDENTIFY(lun) |
10216 | */ |
10217 | np->abrt_msg[0] = M_IDENTIFY | cp->lun;
10218 | |
10219 | /* |
10220 | ** If we want to abort an untagged command, we
10221 | ** will send an IDENTIFY + M_ABORT.
10222 | ** Otherwise (tagged command), we will send
10223 | ** an IDENTIFY + task attributes + ABORT TAG.
10224 | */ |
10225 | if (cp->tag == NO_TAG) {
10226 | np->abrt_msg[1] = M_ABORT;
10227 | np->abrt_tbl.size = 2;
10228 | }
10229 | else {
10230 | np->abrt_msg[1] = cp->scsi_smsg[1];
10231 | np->abrt_msg[2] = cp->scsi_smsg[2];
10232 | np->abrt_msg[3] = M_ABORT_TAG;
10233 | np->abrt_tbl.size = 4;
10234 | }
10235 | cp->to_abort = 0; /* We do not expect to fail here */
10236 | break; |
10237 | |
10238 | /* |
10239 | ** The target has accepted our message and switched |
10240 | ** to BUS FREE phase as we expected. |
10241 | */ |
10242 | case SIR_ABORT_SENT(17): |
10243 | target = (INB (nc_sdid)(*(volatile unsigned char *) ((char *)np->reg + (((size_t) (&((struct ncr_reg *)0)->nc_sdid))))) & 0xf); |
10244 | tp = &np->target[target]; |
10245 | |
10246 | /* |
10247 | ** If we didn't abort anything, leave here. |
10248 | */ |
10249 | if (np->abrt_msg[0] == M_ABORT(0x06)) |
10250 | break; |
10251 | |
10252 | /* |
10253 | ** If we sent a M_RESET, then a hardware reset has |
10254 | ** been performed by the target. |
10255 | ** - Reset everything to async 8 bit |
10256 | ** - Tell ourselves to negotiate next time :-)
10257 | ** - Prepare to clear all disconnected CCBs for |
10258 | ** this target from our task list (lun=task=-1) |
10259 | */ |
10260 | lun = -1; |
10261 | task = -1; |
10262 | if (np->abrt_msg[0] == M_RESET) {
10263 | tp->sval = 0; |
10264 | tp->wval = np->rv_scntl3; |
10265 | tp->uval = np->rv_scntl4; |
10266 | ncr_set_sync_wide_status(np, target); |
10267 | ncr_negotiate(np, tp); |
10268 | } |
10269 | |
10270 | /* |
10271 | ** Otherwise, check for the LUN and TASK(s) |
10272 | ** concerned by the cancellation.
10273 | ** If it is not ABORT_TAG then it is CLEAR_QUEUE |
10274 | ** or an ABORT message :-) |
10275 | */ |
10276 | else { |
10277 | lun = np->abrt_msg[0] & 0x3f; |
10278 | if (np->abrt_msg[1] == M_ABORT_TAG)
10279 | task = np->abrt_msg[2];
10280 | }
10281 | 
10282 | /*
10283 | ** Complete all the CCBs the device should have
10284 | ** aborted due to our 'kiss of death' message.
10285 | */
10286 | (void) ncr_clear_tasks(np, HS_ABORTED, target, lun, task);
10287 | break; |
10288 | |
10289 | /* |
10290 | ** We have performed an auto-sense that succeeded.
10291 | ** If the device reports a UNIT ATTENTION condition
10292 | ** due to a RESET condition, we must complete all
10293 | ** disconnected CCBs for this unit since the device
10294 | ** shall have thrown them away. |
10295 | ** Since I haven't time to guess what the specs are |
10296 | ** expecting for other UNIT ATTENTION conditions, I |
10297 | ** decided to only care about RESET conditions. :) |
10298 | */ |
10299 | case SIR_AUTO_SENSE_DONE:
10300 | cp = ncr_ccb_from_dsa(np, INL (nc_dsa));
10301 | if (!cp)
10302 | break;
10303 | memcpy(cp->cmd->sense_buffer, cp->sense_buf,
10304 | sizeof(cp->cmd->sense_buffer));
10305 | p = &cp->cmd->sense_buffer[0];
10306 | 
10307 | if (p[0] != 0x70 || p[2] != 0x6 || p[12] != 0x29)
10308 | break;
10309 | #if 0
10310 | (void) ncr_clear_tasks(np, HS_RESET, cp->target, cp->lun, -1);
10311 | #endif |
10312 | break; |
10313 | } |
10314 | |
10315 | /* |
10316 | ** Print to the log the message we intend to send. |
10317 | */ |
10318 | if (num == SIR_TARGET_SELECTED) {
10319 | PRINT_TARGET(np, target);
10320 | ncr_printl_hex("control msgout:", np->abrt_msg,
10321 | np->abrt_tbl.size);
10322 | np->abrt_tbl.size = cpu_to_scr(np->abrt_tbl.size);
10323 | } |
10324 | |
10325 | /* |
10326 | ** Let the SCRIPTS processor continue. |
10327 | */ |
10328 | OUTONB_STD ();
10329 | } |
10330 | |
10331 | |
10332 | /*========================================================== |
10333 | ** |
10334 | ** Gérard's alchemy:) that deals with the data
10335 | ** pointer for both MDP and the residual calculation. |
10336 | ** |
10337 | **========================================================== |
10338 | ** |
10339 | ** I didn't want to bloat the code by more than 200 |
10340 | ** lines for the handling of both MDP and the residual.
10341 | ** This has been achieved by using a data pointer |
10342 | ** representation consisting of an index into the data
10343 | ** array (dp_sg) and a negative offset (dp_ofs) that |
10344 | ** have the following meaning: |
10345 | ** |
10346 | ** - dp_sg = MAX_SCATTER |
10347 | ** we are at the end of the data script. |
10348 | ** - dp_sg < MAX_SCATTER |
10349 | ** dp_sg points to the next entry of the scatter array |
10350 | ** we want to transfer. |
10351 | ** - dp_ofs < 0 |
10352 | ** dp_ofs represents the residual of bytes of the |
10353 | ** previous scatter entry we will send first.
10354 | ** - dp_ofs = 0 |
10355 | ** no residual to send first. |
10356 | ** |
10357 | ** The function ncr_evaluate_dp() accepts an arbitrary
10358 | ** offset (basically from the MDP message) and returns |
10359 | ** the corresponding values of dp_sg and dp_ofs. |
10360 | ** |
10361 | **---------------------------------------------------------- |
10362 | */ |
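      | /*
      | ** Worked example (hypothetical numbers): with MAX_SCATTER = 127
      | ** and 3 segments of 0x1000 bytes each (indices 124..126), a data
      | ** pointer 0x800 bytes into the second segment evaluates to
      | ** dp_sg = 126 and dp_ofs = -0x800: the last 0x800 bytes of
      | ** entry 125 still have to be transferred before entry 126.
      | */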
10363 | |
10364 | static int ncr_evaluate_dp(ncb_p np, ccb_p cp, u_int32 scr, int *ofs) |
10365 | { |
10366 | u_int32 dp_scr; |
10367 | int dp_ofs, dp_sg, dp_sgmin; |
10368 | int tmp; |
10369 | struct pm_ctx *pm; |
10370 | |
10371 | /* |
10372 | ** Compute the resulting data pointer in terms of a script
10373 | ** address within some DATA script and a signed byte offset. |
10374 | */ |
10375 | dp_scr = scr; |
10376 | dp_ofs = *ofs; |
10377 | if (dp_scr == NCB_SCRIPT_PHYS (np, pm0_data))
10378 | pm = &cp->phys.pm0;
10379 | else if (dp_scr == NCB_SCRIPT_PHYS (np, pm1_data))
10380 | pm = &cp->phys.pm1;
10381 | else
10382 | pm = 0;
10383 | 
10384 | if (pm) {
10385 | dp_scr = scr_to_cpu(pm->ret);
10386 | dp_ofs -= scr_to_cpu(pm->sg.size);
10387 | } |
10388 | |
10389 | /* |
10390 | ** Deduce the index of the sg entry. |
10391 | ** Keep track of the index of the first valid entry. |
10392 | ** If result is dp_sg = MAX_SCATTER, then we are at the |
10393 | ** end of the data and vice-versa. |
10394 | */ |
10395 | tmp = scr_to_cpu(cp->phys.header.goalp);
10396 | dp_sg = MAX_SCATTER;
10397 | if (dp_scr != tmp)
10398 | dp_sg -= (tmp - 8 - (int)dp_scr) / (SCR_SG_SIZE*4);
10399 | dp_sgmin = MAX_SCATTER - cp->segments;
10400 | |
10401 | /* |
10402 | ** Move to the sg entry the data pointer belongs to. |
10403 | ** |
10404 | ** If we are inside the data area, we expect result to be: |
10405 | ** |
10406 | ** Either, |
10407 | ** dp_ofs = 0 and dp_sg is the index of the sg entry |
10408 | ** the data pointer belongs to (or the end of the data) |
10409 | ** Or, |
10410 | ** dp_ofs < 0 and dp_sg is the index of the sg entry |
10411 | ** the data pointer belongs to + 1. |
10412 | */ |
10413 | if (dp_ofs < 0) { |
10414 | int n; |
10415 | while (dp_sg > dp_sgmin) { |
10416 | --dp_sg; |
10417 | tmp = scr_to_cpu(cp->phys.data[dp_sg].size);
10418 | n = dp_ofs + (tmp & 0xffffff); |
10419 | if (n > 0) { |
10420 | ++dp_sg; |
10421 | break; |
10422 | } |
10423 | dp_ofs = n; |
10424 | } |
10425 | } |
10426 | else if (dp_ofs > 0) { |
10427 | while (dp_sg < MAX_SCATTER) {
10428 | tmp = scr_to_cpu(cp->phys.data[dp_sg].size);
10429 | dp_ofs -= (tmp & 0xffffff); |
10430 | ++dp_sg; |
10431 | if (dp_ofs <= 0) |
10432 | break; |
10433 | } |
10434 | } |
10435 | |
10436 | /* |
10437 | ** Make sure the data pointer is inside the data area. |
10438 | ** If not, return some error. |
10439 | */ |
10440 | if (dp_sg < dp_sgmin || (dp_sg == dp_sgmin && dp_ofs < 0)) |
10441 | goto out_err; |
10442 | else if (dp_sg > MAX_SCATTER || (dp_sg == MAX_SCATTER && dp_ofs > 0))
10443 | goto out_err; |
10444 | |
10445 | /* |
10446 | ** Save the extreme pointer if needed. |
10447 | */ |
10448 | if (dp_sg > cp->ext_sg || |
10449 | (dp_sg == cp->ext_sg && dp_ofs > cp->ext_ofs)) { |
10450 | cp->ext_sg = dp_sg; |
10451 | cp->ext_ofs = dp_ofs; |
10452 | } |
10453 | |
10454 | /* |
10455 | ** Return data. |
10456 | */ |
10457 | *ofs = dp_ofs; |
10458 | return dp_sg; |
10459 | |
10460 | out_err: |
10461 | return -1; |
10462 | } |
10463 | |
10464 | /*========================================================== |
10465 | ** |
10466 | ** ncr chip handler for MODIFY DATA POINTER MESSAGE |
10467 | ** |
10468 | **========================================================== |
10469 | ** |
10470 | ** We also call this function on IGNORE WIDE RESIDUE |
10471 | ** messages that do not match a SWIDE full condition. |
10472 | ** Btw, we assume in that situation that such a message |
10473 | ** is equivalent to a MODIFY DATA POINTER (offset=-1). |
10474 | ** |
10475 | **---------------------------------------------------------- |
10476 | */ |
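      | /*
      | ** For reference (standard SCSI-2 layout, shown for illustration):
      | ** MODIFY DATA POINTER arrives as the extended message
      | ** { 0x01, 0x05, 0x00, arg3, arg2, arg1, arg0 }, where the
      | ** 4-byte MSB-first signed argument is the offset passed to
      | ** this handler in 'ofs'.
      | */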
10477 | |
10478 | static void ncr_modify_dp(ncb_p np, tcb_p tp, ccb_p cp, int ofs) |
10479 | { |
10480 | int dp_ofs = ofs; |
10481 | u_int32 dp_scr = INL (nc_temp);
10482 | u_int32 dp_ret;
10483 | u_int32 tmp;
10484 | u_char hflags;
10485 | int dp_sg;
10486 | struct pm_ctx *pm;
10487 | 
10488 | /*
10489 | ** Not supported for auto_sense;
10490 | */
10491 | if (cp->host_flags & HF_AUTO_SENSE)
10492 | goto out_reject; |
10493 | |
10494 | /* |
10495 | ** Apply our alchemy:) (see comments in ncr_evaluate_dp()), |
10496 | ** to the resulting data pointer.
10497 | */ |
10498 | dp_sg = ncr_evaluate_dp(np, cp, dp_scr, &dp_ofs); |
10499 | if (dp_sg < 0) |
10500 | goto out_reject; |
10501 | |
10502 | /* |
10503 | ** And our alchemy:) allows us to easily calculate the data
10504 | ** script address we want to return for the next data phase. |
10505 | */ |
10506 | dp_ret = cpu_to_scr(cp->phys.header.goalp);
10507 | dp_ret = dp_ret - 8 - (MAX_SCATTER - dp_sg) * (SCR_SG_SIZE*4);
10508 | |
10509 | /* |
10510 | ** If offset / scatter entry is zero we do not need
10511 | ** a context for the new current data pointer. |
10512 | */ |
10513 | if (dp_ofs == 0) { |
10514 | dp_scr = dp_ret; |
10515 | goto out_ok; |
10516 | } |
10517 | |
10518 | /* |
10519 | ** Get a context for the new current data pointer. |
10520 | */ |
10521 | hflags = INB (HF_PRT);
10522 | 
10523 | if (hflags & HF_DP_SAVED)
10524 | hflags ^= HF_ACT_PM;
10525 | 
10526 | if (!(hflags & HF_ACT_PM)) {
10527 | pm = &cp->phys.pm0;
10528 | dp_scr = NCB_SCRIPT_PHYS (np, pm0_data);
10529 | }
10530 | else {
10531 | pm = &cp->phys.pm1;
10532 | dp_scr = NCB_SCRIPT_PHYS (np, pm1_data);
10533 | }
10534 | 
10535 | hflags &= ~(HF_DP_SAVED);
10536 | 
10537 | OUTB (HF_PRT, hflags);
10538 | |
10539 | /* |
10540 | ** Set up the new current data pointer. |
10541 | ** ofs < 0 there, and for the next data phase, we |
10542 | ** want to transfer part of the data of the sg entry |
10543 | ** corresponding to index dp_sg-1 prior to returning |
10544 | ** to the main data script. |
10545 | */ |
10546 | pm->ret = cpu_to_scr(dp_ret);
10547 | tmp = scr_to_cpu(cp->phys.data[dp_sg-1].addr);
10548 | tmp += scr_to_cpu(cp->phys.data[dp_sg-1].size) + dp_ofs;
10549 | pm->sg.addr = cpu_to_scr(tmp);
10550 | pm->sg.size = cpu_to_scr(-dp_ofs);
10551 | 
10552 | out_ok:
10553 | OUTL (nc_temp, dp_scr);
10554 | OUTL_DSP (NCB_SCRIPT_PHYS (np, clrack));
10555 | return;
10556 | 
10557 | out_reject:
10558 | OUTL_DSP (NCB_SCRIPTH_PHYS (np, msg_bad));
10559 | } |
10560 | |
10561 | |
10562 | /*========================================================== |
10563 | ** |
10564 | ** ncr chip calculation of the data residual. |
10565 | ** |
10566 | **========================================================== |
10567 | ** |
10568 | ** As I used to say, the requirement of data residual |
10569 | ** in SCSI is broken, useless and cannot be achieved |
10570 | ** without huge complexity. |
10571 | ** But most OSes and even the official CAM require it. |
10572 | ** When stupidity happens to be so widely spread inside |
10573 | ** a community, it gets hard to convince. |
10574 | ** |
10575 | ** Anyway, I don't care, since I am not going to use |
10576 | ** any software that considers this data residual as |
10577 | ** a relevant information. :) |
10578 | ** |
10579 | **---------------------------------------------------------- |
10580 | */ |
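      | /*
      | ** Worked example (hypothetical numbers): if 0x200 bytes of the
      | ** scatter entry just before ext_sg were never transferred
      | ** (ext_ofs = -0x200) and one full 0x1000-byte entry follows,
      | ** the computed residual is 0x200 + 0x1000 = 0x1200 bytes.
      | */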
10581 | |
10582 | static int ncr_compute_residual(ncb_p np, ccb_p cp) |
10583 | { |
10584 | int dp_sg, dp_sgmin, tmp; |
10585 | int resid=0; |
10586 | int dp_ofs = 0; |
10587 | |
10588 | /* |
10589 | * Check for some data lost or just thrown away. |
10590 | * We are not required to be quite accurate in this |
10591 | * situation. Btw, if we are odd for output and the |
10592 | * device claims some more data, it may well happen |
10593 | * that our residual is zero. :-)
10594 | */ |
10595 | if (cp->xerr_status & (XE_EXTRA_DATA|XE_SODL_UNRUN|XE_SWIDE_OVRUN)) {
10596 | if (cp->xerr_status & XE_EXTRA_DATA)
10597 | resid -= cp->extra_bytes;
10598 | if (cp->xerr_status & XE_SODL_UNRUN)
10599 | ++resid;
10600 | if (cp->xerr_status & XE_SWIDE_OVRUN)
10601 | --resid;
10602 | } |
10603 | |
10604 | |
10605 | /* |
10606 | ** If SCRIPTS reaches its goal point, then |
10607 | ** there is no additional residual.
10608 | */ |
10609 | if (cp->phys.header.lastp == cp->phys.header.goalp) |
10610 | return resid; |
10611 | |
10612 | /* |
10613 | ** If the last data pointer is data_io (direction |
10614 | ** unknown), then no data transfer should have |
10615 | ** taken place. |
10616 | */ |
10617 | if (cp->phys.header.lastp == NCB_SCRIPTH_PHYS (np, data_io))
10618 | return cp->data_len; |
10619 | |
10620 | /* |
10621 | ** If no data transfer occurs, or if the data |
10622 | ** pointer is weird, return full residual. |
10623 | */ |
10624 | if (cp->startp == cp->phys.header.lastp || |
10625 | ncr_evaluate_dp(np, cp, scr_to_cpu(cp->phys.header.lastp),
10626 | &dp_ofs) < 0) { |
10627 | return cp->data_len; |
10628 | } |
10629 | |
10630 | /* |
10631 | ** We are now fully comfortable with the computation
10632 | ** of the data residual (2's complement).
10633 | */ |
10634 | dp_sgmin = MAX_SCATTER - cp->segments;
10635 | resid = -cp->ext_ofs;
10636 | for (dp_sg = cp->ext_sg; dp_sg < MAX_SCATTER; ++dp_sg) {
10637 | tmp = scr_to_cpu(cp->phys.data[dp_sg].size);
10638 | resid += (tmp & 0xffffff); |
10639 | } |
10640 | |
10641 | /* |
10642 | ** Hopefully, the result is not too wrong. |
10643 | */ |
10644 | return resid; |
10645 | } |
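
#if 0	/* Illustrative sketch (not part of the driver build).
	** A minimal standalone model of the residual computation above:
	** negate the offset already transferred inside the current
	** segment, then add the sizes of the segments that were never
	** started. The function name, the parameter list and the use of
	** a plain array are assumptions made for this example only.
	*/
static int example_residual(int ext_ofs, const unsigned int *sizes,
                            int ext_sg, int nsegs)
{
	int resid = -ext_ofs;	/* 2's complement offset inside current segment */
	int sg;

	for (sg = ext_sg; sg < nsegs; sg++)
		resid += (sizes[sg] & 0xffffff);	/* low 24 bits hold the length */
	return resid;
}
#endif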
10646 | |
10647 | /*========================================================== |
10648 | ** |
10649 | **	Print out the content of a SCSI message.
10650 | ** |
10651 | **========================================================== |
10652 | */ |
10653 | |
10654 | static int ncr_show_msg (u_charunsigned char * msg) |
10655 | { |
10656 | u_charunsigned char i; |
10657 | printk ("%x",*msg); |
10658 | if (*msg==M_EXTENDED(0x01)) { |
10659 | for (i=1;i<8;i++) { |
10660 | if (i-1>msg[1]) break; |
10661 | printk ("-%x",msg[i]); |
10662 | }; |
10663 | return (i+1); |
10664 | } else if ((*msg & 0xf0) == 0x20) { |
10665 | printk ("-%x",msg[1]); |
10666 | return (2); |
10667 | }; |
10668 | return (1); |
10669 | } |
10670 | |
10671 | static void ncr_print_msg (ccb_p cp, char *label, u_charunsigned char *msg) |
10672 | { |
10673 | if (cp) |
10674 | PRINT_ADDR(cp->cmd); |
10675 | if (label) |
10676 | printk ("%s: ", label); |
10677 | |
10678 | (void) ncr_show_msg (msg); |
10679 | printk (".\n"); |
10680 | } |
10681 | |
10682 | /*=================================================================== |
10683 | ** |
10684 | ** Negotiation for WIDE and SYNCHRONOUS DATA TRANSFER. |
10685 | ** |
10686 | **=================================================================== |
10687 | ** |
10688 | **	Everything you always wanted to know about transfer mode negotiation ...
10689 | ** |
10690 | ** We try to negotiate sync and wide transfer only after |
10691 | ** a successfull inquire command. We look at byte 7 of the |
10692 | **	a successful INQUIRY command. We look at byte 7 of the
10693 | **	INQUIRY data to determine the capabilities of the target.
10694 | ** When we try to negotiate, we append the negotiation message |
10695 | ** to the identify and (maybe) simple tag message. |
10696 | ** The host status field is set to HS_NEGOTIATE to mark this |
10697 | ** situation. |
10698 | ** |
10699 | ** If the target doesn't answer this message immediately |
10700 | ** (as required by the standard), the SIR_NEGO_FAILED interrupt |
10701 | ** will be raised eventually. |
10702 | ** The handler removes the HS_NEGOTIATE status, and sets the |
10703 | ** negotiated value to the default (async / nowide). |
10704 | ** |
10705 | ** If we receive a matching answer immediately, we check it |
10706 | ** for validity, and set the values. |
10707 | ** |
10708 | ** If we receive a Reject message immediately, we assume the |
10709 | ** negotiation has failed, and fall back to standard values. |
10710 | ** |
10711 | ** If we receive a negotiation message while not in HS_NEGOTIATE |
10712 | ** state, it's a target initiated negotiation. We prepare a |
10713 | ** (hopefully) valid answer, set our parameters, and send back |
10714 | ** this answer to the target. |
10715 | ** |
10716 | ** If the target doesn't fetch the answer (no message out phase), |
10717 | ** we assume the negotiation has failed, and fall back to default |
10718 | ** settings (SIR_NEGO_PROTO interrupt). |
10719 | ** |
10720 | ** When we set the values, we adjust them in all ccbs belonging |
10721 | ** to this target, in the controller's register, and in the "phys" |
10722 | ** field of the controller's struct ncb. |
10723 | ** |
10724 | **--------------------------------------------------------------------- |
10725 | */ |
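
#if 0	/* Illustrative sketch (not part of the driver build).
	** How the negotiation messages built by the handlers below are
	** laid out on the wire (SCSI-2 extended messages). The buffer
	** and helper name are assumptions for this example only; the
	** driver fills np->msgout[] directly.
	*/
static int example_build_sdtr(unsigned char *buf,
                              unsigned char period, unsigned char offset)
{
	buf[0] = 0x01;		/* M_EXTENDED */
	buf[1] = 3;		/* extended message length */
	buf[2] = 0x01;		/* M_X_SYNC_REQ (SDTR) */
	buf[3] = period;	/* transfer period factor */
	buf[4] = offset;	/* REQ/ACK offset, 0 = async */
	return 5;		/* total message length */
}
#endif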
10726 | |
10727 | /*========================================================== |
10728 | ** |
10729 | ** ncr chip handler for SYNCHRONOUS DATA TRANSFER |
10730 | ** REQUEST (SDTR) message. |
10731 | ** |
10732 | **========================================================== |
10733 | ** |
10734 | ** Read comments above. |
10735 | ** |
10736 | **---------------------------------------------------------- |
10737 | */ |
10738 | static void ncr_sync_nego(ncb_p np, tcb_p tp, ccb_p cp) |
10739 | { |
10740 | u_charunsigned char scntl3, scntl4; |
10741 | u_charunsigned char chg, ofs, per, fak; |
10742 | |
10743 | /* |
10744 | ** Synchronous request message received. |
10745 | */ |
10746 | |
10747 | if (DEBUG_FLAGSncr_debug & DEBUG_NEGO(0x0200)) { |
10748 | ncr_print_msg(cp, "sync msg in", np->msgin); |
10749 | }; |
10750 | |
10751 | /* |
10752 | ** get requested values. |
10753 | */ |
10754 | |
10755 | chg = 0; |
10756 | per = np->msgin[3]; |
10757 | ofs = np->msgin[4]; |
10758 | if (ofs==0) per=255; |
10759 | |
10760 | /* |
10761 | ** if target sends SDTR message, |
10762 | ** it CAN transfer synch. |
10763 | */ |
10764 | |
10765 | if (ofs) |
10766 | tp->inq_byte7 |= INQ7_SYNC(0x10); |
10767 | |
10768 | /* |
10769 | ** check values against driver limits. |
10770 | */ |
10771 | |
10772 | if (per < np->minsync) |
10773 | {chg = 1; per = np->minsync;} |
10774 | if (per < tp->minsync) |
10775 | {chg = 1; per = tp->minsync;} |
10776 | if (ofs > tp->maxoffs) |
10777 | {chg = 1; ofs = tp->maxoffs;} |
10778 | |
10779 | /* |
10780 | ** Check against controller limits. |
10781 | */ |
10782 | fak = 7; |
10783 | scntl3 = 0; |
10784 | scntl4 = 0; |
10785 | if (ofs != 0) { |
10786 | ncr_getsync(np, per, &fak, &scntl3); |
10787 | if (fak > 7) { |
10788 | chg = 1; |
10789 | ofs = 0; |
10790 | } |
10791 | } |
10792 | if (ofs == 0) { |
10793 | fak = 7; |
10794 | per = 0; |
10795 | scntl3 = 0; |
10796 | scntl4 = 0; |
10797 | tp->minsync = 0; |
10798 | } |
10799 | |
10800 | if (DEBUG_FLAGSncr_debug & DEBUG_NEGO(0x0200)) { |
10801 | PRINT_ADDR(cp->cmd); |
10802 | printk ("sync: per=%d scntl3=0x%x scntl4=0x%x ofs=%d fak=%d chg=%d.\n", |
10803 | per, scntl3, scntl4, ofs, fak, chg); |
10804 | } |
10805 | |
10806 | if (INB (HS_PRT)(*(volatile unsigned char *) ((char *)np->reg + (((size_t) (&((struct ncr_reg *)0)->nc_scr1))))) == HS_NEGOTIATE(2)) { |
10807 | OUTB (HS_PRT, HS_BUSY)((*(volatile unsigned char *) ((char *)np->reg + (((size_t ) (&((struct ncr_reg *)0)->nc_scr1))))) = ((((1))))); |
10808 | switch (cp->nego_status) { |
10809 | case NS_SYNC(1): |
10810 | /* |
10811 | ** This was an answer message |
10812 | */ |
10813 | if (chg) { |
10814 | /* |
10815 | ** Answer wasn't acceptable. |
10816 | */ |
10817 | ncr_setsync (np, cp, 0, 0xe0, 0); |
10818 | OUTL_DSP (NCB_SCRIPTH_PHYS (np, msg_bad))do { do { ; } while(0); ((*(volatile unsigned int *) ((char * )np->reg + (((size_t) (&((struct ncr_reg *)0)->nc_dsp ))))) = (((((np->p_scripth + ((size_t) (&((struct scripth *)0)->msg_bad)))))))); } while (0); |
10819 | } else { |
10820 | /* |
10821 | ** Answer is ok. |
10822 | */ |
10823 | if ((np->device_id != PCI_DEVICE_ID_LSI_53C10100x20) && |
10824 | (np->device_id != PCI_DEVICE_ID_LSI_53C1010_660x21)) |
10825 | ncr_setsync (np, cp, scntl3, (fak<<5)|ofs,0); |
10826 | else |
10827 | ncr_setsync (np, cp, scntl3, ofs, scntl4); |
10828 | |
10829 | OUTL_DSP (NCB_SCRIPT_PHYS (np, clrack))do { do { ; } while(0); ((*(volatile unsigned int *) ((char * )np->reg + (((size_t) (&((struct ncr_reg *)0)->nc_dsp ))))) = (((((np->p_script + ((size_t) (&((struct script *)0)->clrack)))))))); } while (0); |
10830 | }; |
10831 | return; |
10832 | |
10833 | case NS_WIDE(2): |
10834 | ncr_setwide (np, cp, 0, 0); |
10835 | break; |
10836 | }; |
10837 | }; |
10838 | |
10839 | /* |
10840 | ** It was a request. Set value and |
10841 | ** prepare an answer message |
10842 | */ |
10843 | |
10844 | if ((np->device_id != PCI_DEVICE_ID_LSI_53C10100x20) && |
10845 | (np->device_id != PCI_DEVICE_ID_LSI_53C1010_660x21)) |
10846 | ncr_setsync (np, cp, scntl3, (fak<<5)|ofs,0); |
10847 | else |
10848 | ncr_setsync (np, cp, scntl3, ofs, scntl4); |
10849 | |
10850 | np->msgout[0] = M_EXTENDED(0x01); |
10851 | np->msgout[1] = 3; |
10852 | np->msgout[2] = M_X_SYNC_REQ(0x01); |
10853 | np->msgout[3] = per; |
10854 | np->msgout[4] = ofs; |
10855 | |
10856 | cp->nego_status = NS_SYNC(1); |
10857 | |
10858 | if (DEBUG_FLAGSncr_debug & DEBUG_NEGO(0x0200)) { |
10859 | ncr_print_msg(cp, "sync msgout", np->msgout); |
10860 | } |
10861 | |
10862 | np->msgin [0] = M_NOOP(0x08); |
10863 | |
10864 | if (!ofs) |
10865 | OUTL_DSP (NCB_SCRIPTH_PHYS (np, msg_bad))do { do { ; } while(0); ((*(volatile unsigned int *) ((char * )np->reg + (((size_t) (&((struct ncr_reg *)0)->nc_dsp ))))) = (((((np->p_scripth + ((size_t) (&((struct scripth *)0)->msg_bad)))))))); } while (0); |
10866 | else |
10867 | OUTL_DSP (NCB_SCRIPTH_PHYS (np, sdtr_resp))do { do { ; } while(0); ((*(volatile unsigned int *) ((char * )np->reg + (((size_t) (&((struct ncr_reg *)0)->nc_dsp ))))) = (((((np->p_scripth + ((size_t) (&((struct scripth *)0)->sdtr_resp)))))))); } while (0); |
10868 | } |
10869 | |
10870 | /*========================================================== |
10871 | ** |
10872 | ** ncr chip handler for WIDE DATA TRANSFER REQUEST |
10873 | ** (WDTR) message. |
10874 | ** |
10875 | **========================================================== |
10876 | ** |
10877 | ** Read comments above. |
10878 | ** |
10879 | **---------------------------------------------------------- |
10880 | */ |
10881 | static void ncr_wide_nego(ncb_p np, tcb_p tp, ccb_p cp) |
10882 | { |
10883 | u_charunsigned char chg, wide; |
10884 | |
10885 | /* |
10886 | ** Wide request message received. |
10887 | */ |
10888 | if (DEBUG_FLAGSncr_debug & DEBUG_NEGO(0x0200)) { |
10889 | ncr_print_msg(cp, "wide msgin", np->msgin); |
10890 | }; |
10891 | |
10892 | /* |
10893 | ** get requested values. |
10894 | */ |
10895 | |
10896 | chg = 0; |
10897 | wide = np->msgin[3]; |
10898 | |
10899 | /* |
10900 | ** if target sends WDTR message, |
10901 | ** it CAN transfer wide. |
10902 | */ |
10903 | |
10904 | if (wide) |
10905 | tp->inq_byte7 |= INQ7_WIDE16(0x20); |
10906 | |
10907 | /* |
10908 | ** check values against driver limits. |
10909 | */ |
10910 | |
10911 | if (wide > tp->usrwide) |
10912 | {chg = 1; wide = tp->usrwide;} |
10913 | |
10914 | if (DEBUG_FLAGSncr_debug & DEBUG_NEGO(0x0200)) { |
10915 | PRINT_ADDR(cp->cmd); |
10916 | printk ("wide: wide=%d chg=%d.\n", wide, chg); |
10917 | } |
10918 | |
10919 | if (INB (HS_PRT)(*(volatile unsigned char *) ((char *)np->reg + (((size_t) (&((struct ncr_reg *)0)->nc_scr1))))) == HS_NEGOTIATE(2)) { |
10920 | OUTB (HS_PRT, HS_BUSY)((*(volatile unsigned char *) ((char *)np->reg + (((size_t ) (&((struct ncr_reg *)0)->nc_scr1))))) = ((((1))))); |
10921 | switch (cp->nego_status) { |
10922 | case NS_WIDE(2): |
10923 | /* |
10924 | ** This was an answer message |
10925 | */ |
10926 | if (chg) { |
10927 | /* |
10928 | ** Answer wasn't acceptable. |
10929 | */ |
10930 | ncr_setwide (np, cp, 0, 1); |
10931 | OUTL_DSP (NCB_SCRIPTH_PHYS (np, msg_bad))do { do { ; } while(0); ((*(volatile unsigned int *) ((char * )np->reg + (((size_t) (&((struct ncr_reg *)0)->nc_dsp ))))) = (((((np->p_scripth + ((size_t) (&((struct scripth *)0)->msg_bad)))))))); } while (0); |
10932 | } else { |
10933 | /* |
10934 | ** Answer is ok. |
10935 | */ |
10936 | ncr_setwide (np, cp, wide, 1); |
10937 | OUTL_DSP (NCB_SCRIPT_PHYS (np, clrack))do { do { ; } while(0); ((*(volatile unsigned int *) ((char * )np->reg + (((size_t) (&((struct ncr_reg *)0)->nc_dsp ))))) = (((((np->p_script + ((size_t) (&((struct script *)0)->clrack)))))))); } while (0); |
10938 | }; |
10939 | return; |
10940 | |
10941 | case NS_SYNC(1): |
10942 | ncr_setsync (np, cp, 0, 0xe0, 0); |
10943 | break; |
10944 | }; |
10945 | }; |
10946 | |
10947 | /* |
10948 | ** It was a request, set value and |
10949 | ** prepare an answer message |
10950 | */ |
10951 | |
10952 | ncr_setwide (np, cp, wide, 1); |
10953 | |
10954 | np->msgout[0] = M_EXTENDED(0x01); |
10955 | np->msgout[1] = 2; |
10956 | np->msgout[2] = M_X_WIDE_REQ(0x03); |
10957 | np->msgout[3] = wide; |
10958 | |
10959 | np->msgin [0] = M_NOOP(0x08); |
10960 | |
10961 | cp->nego_status = NS_WIDE(2); |
10962 | |
10963 | if (DEBUG_FLAGSncr_debug & DEBUG_NEGO(0x0200)) { |
10964 | ncr_print_msg(cp, "wide msgout", np->msgout); |
10965 | } |
10966 | |
10967 | OUTL_DSP (NCB_SCRIPTH_PHYS (np, wdtr_resp))do { do { ; } while(0); ((*(volatile unsigned int *) ((char * )np->reg + (((size_t) (&((struct ncr_reg *)0)->nc_dsp ))))) = (((((np->p_scripth + ((size_t) (&((struct scripth *)0)->wdtr_resp)))))))); } while (0); |
10968 | } |
10969 | /*========================================================== |
10970 | ** |
10971 | ** ncr chip handler for PARALLEL PROTOCOL REQUEST |
10972 | ** (PPR) message. |
10973 | ** |
10974 | **========================================================== |
10975 | ** |
10976 | ** Read comments above. |
10977 | ** |
10978 | **---------------------------------------------------------- |
10979 | */ |
10980 | static void ncr_ppr_nego(ncb_p np, tcb_p tp, ccb_p cp) |
10981 | { |
10982 | u_charunsigned char scntl3, scntl4; |
10983 | u_charunsigned char chg, ofs, per, fak, wth, dt; |
10984 | |
10985 | /* |
10986 | ** PPR message received. |
10987 | */ |
10988 | |
10989 | if (DEBUG_FLAGSncr_debug & DEBUG_NEGO(0x0200)) { |
10990 | ncr_print_msg(cp, "ppr msg in", np->msgin); |
10991 | }; |
10992 | |
10993 | /* |
10994 | ** get requested values. |
10995 | */ |
10996 | |
10997 | chg = 0; |
10998 | per = np->msgin[3]; |
10999 | ofs = np->msgin[5]; |
11000 | wth = np->msgin[6]; |
11001 | dt = np->msgin[7]; |
11002 | if (ofs==0) per=255; |
11003 | |
11004 | /* |
11005 | ** if target sends sync (wide), |
11006 | ** it CAN transfer synch (wide). |
11007 | */ |
11008 | |
11009 | if (ofs) |
11010 | tp->inq_byte7 |= INQ7_SYNC(0x10); |
11011 | |
11012 | if (wth) |
11013 | tp->inq_byte7 |= INQ7_WIDE16(0x20); |
11014 | |
11015 | /* |
11016 | ** check values against driver limits. |
11017 | */ |
11018 | |
11019 | if (wth > tp->usrwide) |
11020 | {chg = 1; wth = tp->usrwide;} |
11021 | if (per < np->minsync) |
11022 | {chg = 1; per = np->minsync;} |
11023 | if (per < tp->minsync) |
11024 | {chg = 1; per = tp->minsync;} |
11025 | if (ofs > tp->maxoffs) |
11026 | {chg = 1; ofs = tp->maxoffs;} |
11027 | |
11028 | /* |
11029 | ** Check against controller limits. |
11030 | */ |
11031 | fak = 7; |
11032 | scntl3 = 0; |
11033 | scntl4 = 0; |
11034 | if (ofs != 0) { |
11035 | scntl4 = dt ? 0x80 : 0; |
11036 | ncr_getsync(np, per, &fak, &scntl3); |
11037 | if (fak > 7) { |
11038 | chg = 1; |
11039 | ofs = 0; |
11040 | } |
11041 | } |
11042 | if (ofs == 0) { |
11043 | fak = 7; |
11044 | per = 0; |
11045 | scntl3 = 0; |
11046 | scntl4 = 0; |
11047 | tp->minsync = 0; |
11048 | } |
11049 | |
11050 | /* |
11051 | ** If target responds with Ultra 3 speed |
11052 | ** but narrow or not DT, reject. |
11053 | ** If target responds with DT request |
11054 | ** but not Ultra3 speeds, reject message, |
11055 | ** reset min sync for target to 0x0A and |
11056 | ** set flags to re-negotiate. |
11057 | */ |
11058 | |
11059 | if ((per == 0x09) && ofs && (!wth || !dt)) |
11060 | chg = 1; |
11061 | else if (( (per > 0x09) && dt) ) |
11062 | chg = 2; |
11063 | |
11064 | |
11065 | if (DEBUG_FLAGSncr_debug & DEBUG_NEGO(0x0200)) { |
11066 | PRINT_ADDR(cp->cmd); |
11067 | printk ("ppr: wth=%d per=%d scntl3=0x%x scntl4=0x%x ofs=%d fak=%d chg=%d.\n", |
11068 | wth, per, scntl3, scntl4, ofs, fak, chg); |
11069 | } |
11070 | |
11071 | if (INB (HS_PRT)(*(volatile unsigned char *) ((char *)np->reg + (((size_t) (&((struct ncr_reg *)0)->nc_scr1))))) == HS_NEGOTIATE(2)) { |
11072 | OUTB (HS_PRT, HS_BUSY)((*(volatile unsigned char *) ((char *)np->reg + (((size_t ) (&((struct ncr_reg *)0)->nc_scr1))))) = ((((1))))); |
11073 | switch (cp->nego_status) { |
11074 | case NS_PPR(4): |
11075 | /* |
11076 | ** This was an answer message |
11077 | */ |
11078 | if (chg) { |
11079 | /* |
11080 | ** Answer wasn't acceptable. |
11081 | */ |
11082 | if (chg == 2) { |
11083 | /* Send message reject and reset flags for |
11084 | ** host to re-negotiate with min period 0x0A. |
11085 | */ |
11086 | tp->minsync = 0x0A; |
11087 | tp->period = 0; |
11088 | tp->widedone = 0; |
11089 | } |
11090 | ncr_setsyncwide (np, cp, 0, 0xe0, 0, 0); |
11091 | OUTL_DSP (NCB_SCRIPTH_PHYS (np, msg_bad))do { do { ; } while(0); ((*(volatile unsigned int *) ((char * )np->reg + (((size_t) (&((struct ncr_reg *)0)->nc_dsp ))))) = (((((np->p_scripth + ((size_t) (&((struct scripth *)0)->msg_bad)))))))); } while (0); |
11092 | } else { |
11093 | /* |
11094 | ** Answer is ok. |
11095 | */ |
11096 | |
11097 | if ((np->device_id != PCI_DEVICE_ID_LSI_53C10100x20) && |
11098 | (np->device_id != PCI_DEVICE_ID_LSI_53C1010_660x21)) |
11099 | ncr_setsyncwide (np, cp, scntl3, (fak<<5)|ofs,0, wth); |
11100 | else |
11101 | ncr_setsyncwide (np, cp, scntl3, ofs, scntl4, wth); |
11102 | |
11103 | OUTL_DSP (NCB_SCRIPT_PHYS (np, clrack))do { do { ; } while(0); ((*(volatile unsigned int *) ((char * )np->reg + (((size_t) (&((struct ncr_reg *)0)->nc_dsp ))))) = (((((np->p_script + ((size_t) (&((struct script *)0)->clrack)))))))); } while (0); |
11104 | |
11105 | }; |
11106 | return; |
11107 | |
11108 | case NS_SYNC(1): |
11109 | ncr_setsync (np, cp, 0, 0xe0, 0); |
11110 | break; |
11111 | |
11112 | case NS_WIDE(2): |
11113 | ncr_setwide (np, cp, 0, 0); |
11114 | break; |
11115 | }; |
11116 | }; |
11117 | |
11118 | /* |
11119 | ** It was a request. Set value and |
11120 | ** prepare an answer message |
11121 | ** |
11122 | 	**	If the request is for Ultra3 but narrow or not DT,
11123 | 	**	slow the bus down and force ST. If not
11124 | 	**	requesting Ultra3, force ST.
11125 | 	**	Max offset is 31 (0x1f) in ST mode.
11126 | */ |
11127 | |
11128 | if ((per == 0x09) && ofs && (!wth || !dt)) { |
11129 | per = 0x0A; |
11130 | dt = 0; |
11131 | ofs &= 0x1f; |
11132 | } |
11133 | else if ( (per > 0x09) && dt) { |
11134 | dt = 0; |
11135 | ofs &= 0x1f; |
11136 | } |
11137 | |
11138 | if ((np->device_id != PCI_DEVICE_ID_LSI_53C10100x20) && |
11139 | (np->device_id != PCI_DEVICE_ID_LSI_53C1010_660x21)) |
11140 | ncr_setsyncwide (np, cp, scntl3, (fak<<5)|ofs,0, wth); |
11141 | else |
11142 | ncr_setsyncwide (np, cp, scntl3, ofs, scntl4, wth); |
11143 | |
11144 | np->msgout[0] = M_EXTENDED(0x01); |
11145 | np->msgout[1] = 6; |
11146 | np->msgout[2] = M_X_PPR_REQ(0x04); |
11147 | np->msgout[3] = per; |
11148 | np->msgout[4] = 0; |
11149 | np->msgout[5] = ofs; |
11150 | np->msgout[6] = wth; |
11151 | np->msgout[7] = dt; |
11152 | |
11153 | cp->nego_status = NS_PPR(4); |
11154 | |
11155 | if (DEBUG_FLAGSncr_debug & DEBUG_NEGO(0x0200)) { |
11156 | ncr_print_msg(cp, "ppr msgout", np->msgout); |
11157 | } |
11158 | |
11159 | np->msgin [0] = M_NOOP(0x08); |
11160 | |
11161 | if (!ofs) |
11162 | OUTL_DSP (NCB_SCRIPTH_PHYS (np, msg_bad))do { do { ; } while(0); ((*(volatile unsigned int *) ((char * )np->reg + (((size_t) (&((struct ncr_reg *)0)->nc_dsp ))))) = (((((np->p_scripth + ((size_t) (&((struct scripth *)0)->msg_bad)))))))); } while (0); |
11163 | else |
11164 | OUTL_DSP (NCB_SCRIPTH_PHYS (np, ppr_resp))do { do { ; } while(0); ((*(volatile unsigned int *) ((char * )np->reg + (((size_t) (&((struct ncr_reg *)0)->nc_dsp ))))) = (((((np->p_scripth + ((size_t) (&((struct scripth *)0)->ppr_resp)))))))); } while (0); |
11165 | } |
11166 | |
11167 | |
11168 | |
11169 | /* |
11170 | ** Reset SYNC or WIDE to default settings. |
11171 | ** Called when a negotiation does not succeed either |
11172 | ** on rejection or on protocol error. |
11173 | */ |
11174 | static void ncr_nego_default(ncb_p np, tcb_p tp, ccb_p cp) |
11175 | { |
11176 | /* |
11177 | ** any error in negotiation: |
11178 | ** fall back to default mode. |
11179 | */ |
11180 | switch (cp->nego_status) { |
11181 | |
11182 | case NS_SYNC(1): |
11183 | ncr_setsync (np, cp, 0, 0xe0, 0); |
11184 | break; |
11185 | |
11186 | case NS_WIDE(2): |
11187 | ncr_setwide (np, cp, 0, 0); |
11188 | break; |
11189 | |
11190 | case NS_PPR(4): |
11191 | /* |
11192 | * ppr_negotiation is set to 1 on the first ppr nego command. |
11193 | * If ppr is successful, it is reset to 2. |
11194 | * If unsuccessful it is reset to 0. |
11195 | */ |
11196 | if (DEBUG_FLAGSncr_debug & DEBUG_NEGO(0x0200)) { |
11197 | tcb_p tp=&np->target[cp->target]; |
11198 | u_charunsigned char factor, offset, width; |
11199 | |
11200 | ncr_get_xfer_info ( np, tp, &factor, &offset, &width); |
11201 | |
11202 | printk("Current factor %d offset %d width %d\n", |
11203 | factor, offset, width); |
11204 | } |
11205 | if (tp->ppr_negotiation == 2) |
11206 | ncr_setsyncwide (np, cp, 0, 0xe0, 0, 0); |
11207 | else if (tp->ppr_negotiation == 1) { |
11208 | |
11209 | 		/* The first PPR command has received a MESSAGE REJECT.
11210 | 		 * Do not change the existing wide/sync parameter
11211 | 		 * values (async/narrow if this is the first nego;
11212 | * may be different if target initiates nego.). |
11213 | */ |
11214 | tp->ppr_negotiation = 0; |
11215 | } |
11216 | else |
11217 | { |
11218 | tp->ppr_negotiation = 0; |
11219 | ncr_setwide (np, cp, 0, 0); |
11220 | } |
11221 | break; |
11222 | }; |
11223 | np->msgin [0] = M_NOOP(0x08); |
11224 | np->msgout[0] = M_NOOP(0x08); |
11225 | cp->nego_status = 0; |
11226 | } |
11227 | |
11228 | /*========================================================== |
11229 | ** |
11230 | ** ncr chip handler for MESSAGE REJECT received for |
11231 | ** a WIDE or SYNCHRONOUS negotiation. |
11232 | ** |
11233 | ** clear the PPR negotiation flag, all future nego. |
11234 | ** will be SDTR and WDTR |
11235 | ** |
11236 | **========================================================== |
11237 | ** |
11238 | ** Read comments above. |
11239 | ** |
11240 | **---------------------------------------------------------- |
11241 | */ |
11242 | static void ncr_nego_rejected(ncb_p np, tcb_p tp, ccb_p cp) |
11243 | { |
11244 | ncr_nego_default(np, tp, cp); |
11245 | OUTB (HS_PRT, HS_BUSY)((*(volatile unsigned char *) ((char *)np->reg + (((size_t ) (&((struct ncr_reg *)0)->nc_scr1))))) = ((((1))))); |
11246 | } |
11247 | |
11248 | |
11249 | /*========================================================== |
11250 | ** |
11251 | ** |
11252 | ** ncr chip exception handler for programmed interrupts. |
11253 | ** |
11254 | ** |
11255 | **========================================================== |
11256 | */ |
11257 | |
11258 | void ncr_int_sir (ncb_p np) |
11259 | { |
11260 | u_charunsigned char num = INB (nc_dsps)(*(volatile unsigned char *) ((char *)np->reg + (((size_t) (&((struct ncr_reg *)0)->nc_dsps))))); |
11261 | u_longunsigned long dsa = INL (nc_dsa)(*(volatile unsigned int *) ((char *)np->reg + (((size_t) ( &((struct ncr_reg *)0)->nc_dsa))))); |
11262 | ccb_p cp = ncr_ccb_from_dsa(np, dsa); |
11263 | u_charunsigned char target = INB (nc_sdid)(*(volatile unsigned char *) ((char *)np->reg + (((size_t) (&((struct ncr_reg *)0)->nc_sdid))))) & 0x0f; |
11264 | tcb_p tp = &np->target[target]; |
11265 | int tmp; |
11266 | |
11267 | if (DEBUG_FLAGSncr_debug & DEBUG_TINY(0x0080)) printk ("I#%d", num); |
11268 | |
11269 | switch (num) { |
11270 | /* |
11271 | ** See comments in the SCRIPTS code. |
11272 | */ |
11273 | #ifdef SCSI_NCR_PCIQ_SYNC_ON_INTR |
11274 | case SIR_DUMMY_INTERRUPT(21): |
11275 | goto out; |
11276 | #endif |
11277 | |
11278 | /* |
11279 | ** The C code is currently trying to recover from something. |
11280 | 	**	Typically, the user wants to abort some command.
11281 | */ |
11282 | case SIR_SCRIPT_STOPPED(7): |
11283 | case SIR_TARGET_SELECTED(14): |
11284 | case SIR_ABORT_SENT(17): |
11285 | case SIR_AUTO_SENSE_DONE(20): |
11286 | ncr_sir_task_recovery(np, num); |
11287 | return; |
11288 | /* |
11289 | ** The device didn't go to MSG OUT phase after having |
11290 | 	**	been selected with ATN. We do not want to handle
11291 | ** that. |
11292 | */ |
11293 | case SIR_SEL_ATN_NO_MSG_OUT(2): |
11294 | printk ("%s:%d: No MSG OUT phase after selection with ATN.\n", |
11295 | ncr_name (np), target); |
11296 | goto out_stuck; |
11297 | /* |
11298 | ** The device didn't switch to MSG IN phase after |
11299 | 	**	having reselected the initiator.
11300 | */ |
11301 | case SIR_RESEL_NO_MSG_IN(11): |
11302 | /* |
11303 | ** After reselection, the device sent a message that wasn't |
11304 | ** an IDENTIFY. |
11305 | */ |
11306 | case SIR_RESEL_NO_IDENTIFY(12): |
11307 | /* |
11308 | ** If devices reselecting without sending an IDENTIFY |
11309 | ** message still exist, this should help. |
11310 | ** We just assume lun=0, 1 CCB, no tag. |
11311 | */ |
11312 | if (tp->l0p) { |
11313 | OUTL (nc_dsa, scr_to_cpu(tp->l0p->tasktbl[0]))((*(volatile unsigned int *) ((char *)np->reg + (((size_t) (&((struct ncr_reg *)0)->nc_dsa))))) = ((((tp->l0p ->tasktbl[0]))))); |
11314 | OUTL_DSP (NCB_SCRIPT_PHYS (np, resel_go))do { do { ; } while(0); ((*(volatile unsigned int *) ((char * )np->reg + (((size_t) (&((struct ncr_reg *)0)->nc_dsp ))))) = (((((np->p_script + ((size_t) (&((struct script *)0)->resel_go)))))))); } while (0); |
11315 | return; |
11316 | } |
11317 | /* |
11318 | 	**	The device reselected a LUN we do not know of.
11319 | */ |
11320 | case SIR_RESEL_BAD_LUN(13): |
11321 | np->msgout[0] = M_RESET(0x0c); |
11322 | goto out; |
11323 | /* |
11324 | ** The device reselected for an untagged nexus and we |
11325 | ** haven't any. |
11326 | */ |
11327 | case SIR_RESEL_BAD_I_T_L(15): |
11328 | np->msgout[0] = M_ABORT(0x06); |
11329 | goto out; |
11330 | /* |
11331 | 	**	The device reselected for a tagged nexus that we do not
11332 | ** have. |
11333 | */ |
11334 | case SIR_RESEL_BAD_I_T_L_Q(16): |
11335 | np->msgout[0] = M_ABORT_TAG(0x0d); |
11336 | goto out; |
11337 | /* |
11338 | ** The SCRIPTS let us know that the device has grabbed |
11339 | ** our message and will abort the job. |
11340 | */ |
11341 | case SIR_RESEL_ABORTED(18): |
11342 | np->lastmsg = np->msgout[0]; |
11343 | np->msgout[0] = M_NOOP(0x08); |
11344 | printk ("%s:%d: message %x sent on bad reselection.\n", |
11345 | ncr_name (np), target, np->lastmsg); |
11346 | goto out; |
11347 | /* |
11348 | ** The SCRIPTS let us know that a message has been |
11349 | ** successfully sent to the device. |
11350 | */ |
11351 | case SIR_MSG_OUT_DONE(19): |
11352 | np->lastmsg = np->msgout[0]; |
11353 | np->msgout[0] = M_NOOP(0x08); |
11354 | 		/* Should we really care about that?	*/
11355 | if (np->lastmsg == M_PARITY(0x09) || np->lastmsg == M_ID_ERROR(0x05)) { |
11356 | if (cp) { |
11357 | cp->xerr_status &= ~XE_PARITY_ERR(4); |
11358 | if (!cp->xerr_status) |
11359 | OUTOFFB (HF_PRT, HF_EXT_ERR)((*(volatile unsigned char *) ((char *)np->reg + (((size_t ) (&((struct ncr_reg *)0)->nc_scr3))))) = ((((*(volatile unsigned char *) ((char *)np->reg + (((size_t) (&((struct ncr_reg *)0)->nc_scr3))))) & ~((1u<<7)))))); |
11360 | } |
11361 | } |
11362 | goto out; |
11363 | /* |
11364 | ** The device didn't send a GOOD SCSI status. |
11365 | ** We may have some work to do prior to allow |
11366 | ** the SCRIPTS processor to continue. |
11367 | */ |
11368 | case SIR_BAD_STATUS(1): |
11369 | if (!cp) |
11370 | goto out; |
11371 | ncr_sir_to_redo(np, num, cp); |
11372 | return; |
11373 | /* |
11374 | ** We are asked by the SCRIPTS to prepare a |
11375 | ** REJECT message. |
11376 | */ |
11377 | case SIR_REJECT_TO_SEND(8): |
11378 | ncr_print_msg(cp, "M_REJECT to send for ", np->msgin); |
11379 | np->msgout[0] = M_REJECT(0x07); |
11380 | goto out; |
11381 | /* |
11382 | ** We have been ODD at the end of a DATA IN |
11383 | 	**	transfer and the device didn't send an
11384 | ** IGNORE WIDE RESIDUE message. |
11385 | ** It is a data overrun condition. |
11386 | */ |
11387 | case SIR_SWIDE_OVERRUN(9): |
11388 | if (cp) { |
11389 | OUTONB (HF_PRT, HF_EXT_ERR)((*(volatile unsigned char *) ((char *)np->reg + (((size_t ) (&((struct ncr_reg *)0)->nc_scr3))))) = ((((*(volatile unsigned char *) ((char *)np->reg + (((size_t) (&((struct ncr_reg *)0)->nc_scr3))))) | ((1u<<7)))))); |
11390 | cp->xerr_status |= XE_SWIDE_OVRUN(1<<4); |
11391 | } |
11392 | goto out; |
11393 | /* |
11394 | ** We have been ODD at the end of a DATA OUT |
11395 | ** transfer. |
11396 | ** It is a data underrun condition. |
11397 | */ |
11398 | case SIR_SODL_UNDERRUN(10): |
11399 | if (cp) { |
11400 | OUTONB (HF_PRT, HF_EXT_ERR)((*(volatile unsigned char *) ((char *)np->reg + (((size_t ) (&((struct ncr_reg *)0)->nc_scr3))))) = ((((*(volatile unsigned char *) ((char *)np->reg + (((size_t) (&((struct ncr_reg *)0)->nc_scr3))))) | ((1u<<7)))))); |
11401 | cp->xerr_status |= XE_SODL_UNRUN(1<<3); |
11402 | } |
11403 | goto out; |
11404 | /* |
11405 | 	**	The device wants us to transfer more data than
11406 | ** expected or in the wrong direction. |
11407 | ** The number of extra bytes is in scratcha. |
11408 | ** It is a data overrun condition. |
11409 | */ |
11410 | case SIR_DATA_OVERRUN(22): |
11411 | if (cp) { |
11412 | OUTONB (HF_PRT, HF_EXT_ERR)((*(volatile unsigned char *) ((char *)np->reg + (((size_t ) (&((struct ncr_reg *)0)->nc_scr3))))) = ((((*(volatile unsigned char *) ((char *)np->reg + (((size_t) (&((struct ncr_reg *)0)->nc_scr3))))) | ((1u<<7)))))); |
11413 | cp->xerr_status |= XE_EXTRA_DATA(1); |
11414 | cp->extra_bytes += INL (nc_scratcha)(*(volatile unsigned int *) ((char *)np->reg + (((size_t) ( &((struct ncr_reg *)0)->nc_scratcha))))); |
11415 | } |
11416 | goto out; |
11417 | /* |
11418 | ** The device switched to an illegal phase (4/5). |
11419 | */ |
11420 | case SIR_BAD_PHASE(23): |
11421 | if (cp) { |
11422 | OUTONB (HF_PRT, HF_EXT_ERR)((*(volatile unsigned char *) ((char *)np->reg + (((size_t ) (&((struct ncr_reg *)0)->nc_scr3))))) = ((((*(volatile unsigned char *) ((char *)np->reg + (((size_t) (&((struct ncr_reg *)0)->nc_scr3))))) | ((1u<<7)))))); |
11423 | cp->xerr_status |= XE_BAD_PHASE(2); |
11424 | } |
11425 | goto out; |
11426 | /* |
11427 | ** We received a message. |
11428 | */ |
11429 | case SIR_MSG_RECEIVED(3): |
11430 | if (!cp) |
11431 | goto out_stuck; |
11432 | switch (np->msgin [0]) { |
11433 | /* |
11434 | ** We received an extended message. |
11435 | ** We handle MODIFY DATA POINTER, SDTR, WDTR |
11436 | ** and reject all other extended messages. |
11437 | */ |
11438 | case M_EXTENDED(0x01): |
11439 | switch (np->msgin [2]) { |
11440 | case M_X_MODIFY_DP(0x00): |
11441 | if (DEBUG_FLAGSncr_debug & DEBUG_POINTER(0x0020)) |
11442 | ncr_print_msg(cp,"modify DP",np->msgin); |
11443 | tmp = (np->msgin[3]<<24) + (np->msgin[4]<<16) + |
11444 | (np->msgin[5]<<8) + (np->msgin[6]); |
11445 | ncr_modify_dp(np, tp, cp, tmp); |
11446 | return; |
11447 | case M_X_SYNC_REQ(0x01): |
11448 | ncr_sync_nego(np, tp, cp); |
11449 | return; |
11450 | case M_X_WIDE_REQ(0x03): |
11451 | ncr_wide_nego(np, tp, cp); |
11452 | return; |
11453 | case M_X_PPR_REQ(0x04): |
11454 | ncr_ppr_nego(np, tp, cp); |
11455 | return; |
11456 | default: |
11457 | goto out_reject; |
11458 | } |
11459 | break; |
11460 | /* |
11461 | 		**	We received a 1- or 2-byte message not handled by SCRIPTS.
11462 | ** We are only expecting MESSAGE REJECT and IGNORE WIDE |
11463 | ** RESIDUE messages that haven't been anticipated by |
11464 | ** SCRIPTS on SWIDE full condition. Unanticipated IGNORE |
11465 | ** WIDE RESIDUE messages are aliased as MODIFY DP (-1). |
11466 | */ |
11467 | case M_IGN_RESIDUE(0x23): |
11468 | if (DEBUG_FLAGSncr_debug & DEBUG_POINTER(0x0020)) |
11469 | ncr_print_msg(cp,"ign wide residue", np->msgin); |
11470 | ncr_modify_dp(np, tp, cp, -1); |
11471 | return; |
11472 | case M_REJECT(0x07): |
11473 | if (INB (HS_PRT)(*(volatile unsigned char *) ((char *)np->reg + (((size_t) (&((struct ncr_reg *)0)->nc_scr1))))) == HS_NEGOTIATE(2)) |
11474 | ncr_nego_rejected(np, tp, cp); |
11475 | else { |
11476 | PRINT_ADDR(cp->cmd); |
11477 | printk ("M_REJECT received (%x:%x).\n", |
11478 | scr_to_cpu(np->lastmsg)(np->lastmsg), np->msgout[0]); |
11479 | } |
11480 | goto out_clrack; |
11481 | break; |
11482 | default: |
11483 | goto out_reject; |
11484 | } |
11485 | break; |
11486 | /* |
11487 | ** We received an unknown message. |
11488 | ** Ignore all MSG IN phases and reject it. |
11489 | */ |
11490 | case SIR_MSG_WEIRD(4): |
11491 | ncr_print_msg(cp, "WEIRD message received", np->msgin); |
11492 | OUTL_DSP (NCB_SCRIPTH_PHYS (np, msg_weird))do { do { ; } while(0); ((*(volatile unsigned int *) ((char * )np->reg + (((size_t) (&((struct ncr_reg *)0)->nc_dsp ))))) = (((((np->p_scripth + ((size_t) (&((struct scripth *)0)->msg_weird)))))))); } while (0); |
11493 | return; |
11494 | /* |
11495 | ** Negotiation failed. |
11496 | ** Target does not send us the reply. |
11497 | ** Remove the HS_NEGOTIATE status. |
11498 | */ |
11499 | case SIR_NEGO_FAILED(5): |
11500 | OUTB (HS_PRT, HS_BUSY)((*(volatile unsigned char *) ((char *)np->reg + (((size_t ) (&((struct ncr_reg *)0)->nc_scr1))))) = ((((1))))); |
11501 | /* |
11502 | ** Negotiation failed. |
11503 | ** Target does not want answer message. |
11504 | */ |
11505 | case SIR_NEGO_PROTO(6): |
11506 | ncr_nego_default(np, tp, cp); |
11507 | goto out; |
11508 | }; |
11509 | |
11510 | out: |
11511 | OUTONB_STD ()do { do { ; } while(0); ((*(volatile unsigned char *) ((char * )np->reg + (((size_t) (&((struct ncr_reg *)0)->nc_dcntl ))))) = ((((*(volatile unsigned char *) ((char *)np->reg + (((size_t) (&((struct ncr_reg *)0)->nc_dcntl))))) | ( (0x04|0x01)))))); } while (0); |
11512 | return; |
11513 | out_reject: |
11514 | OUTL_DSP (NCB_SCRIPTH_PHYS (np, msg_bad))do { do { ; } while(0); ((*(volatile unsigned int *) ((char * )np->reg + (((size_t) (&((struct ncr_reg *)0)->nc_dsp ))))) = (((((np->p_scripth + ((size_t) (&((struct scripth *)0)->msg_bad)))))))); } while (0); |
11515 | return; |
11516 | out_clrack: |
11517 | OUTL_DSP (NCB_SCRIPT_PHYS (np, clrack))do { do { ; } while(0); ((*(volatile unsigned int *) ((char * )np->reg + (((size_t) (&((struct ncr_reg *)0)->nc_dsp ))))) = (((((np->p_script + ((size_t) (&((struct script *)0)->clrack)))))))); } while (0); |
11518 | return; |
11519 | out_stuck: |
11520 | return; |
11521 | } |
11522 | |
11523 | |
11524 | /*========================================================== |
11525 | ** |
11526 | ** |
11527 | **	Acquire a control block
11528 | ** |
11529 | ** |
11530 | **========================================================== |
11531 | */ |
11532 | |
11533 | static ccb_p ncr_get_ccb (ncb_p np, u_charunsigned char tn, u_charunsigned char ln) |
11534 | { |
11535 | tcb_p tp = &np->target[tn]; |
11536 | lcb_p lp = ncr_lp(np, tp, ln)(!ln) ? (tp)->l0p : (tp)->lmp ? (tp)->lmp[(ln)] : 0; |
11537 | u_shortunsigned short tag = NO_TAG(256); |
11538 | XPT_QUEHEAD *qp; |
11539 | ccb_p cp = (ccb_p) 0; |
11540 | |
11541 | /* |
11542 | ** Allocate a new CCB if needed. |
11543 | */ |
11544 | if (xpt_que_empty(&np->free_ccbq)) |
11545 | (void) ncr_alloc_ccb(np); |
11546 | |
11547 | /* |
11548 | ** Look for a free CCB |
11549 | */ |
11550 | qp = xpt_remque_head(&np->free_ccbq); |
11551 | if (!qp) |
11552 | goto out; |
11553 | cp = xpt_que_entry(qp, struct ccb, link_ccbq)((struct ccb *)((char *)(qp)-(unsigned long)(&((struct ccb *)0)->link_ccbq))); |
11554 | |
11555 | /* |
11556 | ** If the LCB is not yet available and we already |
11557 | ** have queued a CCB for a LUN without LCB, |
11558 | ** give up. Otherwise all is fine. :-) |
11559 | */ |
11560 | if (!lp) { |
11561 | if (xpt_que_empty(&np->b0_ccbq)) |
11562 | xpt_insque_head(&cp->link_ccbq, &np->b0_ccbq)__xpt_que_add(&cp->link_ccbq, &np->b0_ccbq, (& np->b0_ccbq)->flink); |
11563 | else |
11564 | goto out_free; |
11565 | } else { |
11566 | /* |
11567 | ** Tune tag mode if asked by user. |
11568 | */ |
11569 | if (lp->queuedepth != lp->numtags) { |
11570 | ncr_setup_tags(np, tn, ln); |
11571 | } |
11572 | |
11573 | /* |
11574 | ** Get a tag for this nexus if required. |
11575 | ** Keep from using more tags than we can handle. |
11576 | */ |
11577 | if (lp->usetags) { |
11578 | if (lp->busyccbs < lp->maxnxs) { |
11579 | tag = lp->cb_tags[lp->ia_tag]; |
11580 | ++lp->ia_tag; |
11581 | if (lp->ia_tag == MAX_TAGS(8)) |
11582 | lp->ia_tag = 0; |
11583 | cp->tags_si = lp->tags_si; |
11584 | ++lp->tags_sum[cp->tags_si]; |
11585 | } |
11586 | else |
11587 | goto out_free; |
11588 | } |
11589 | |
11590 | /* |
11591 | ** Put the CCB in the LUN wait queue and |
11592 | ** count it as busy. |
11593 | */ |
11594 | xpt_insque_tail(&cp->link_ccbq, &lp->wait_ccbq)__xpt_que_add(&cp->link_ccbq, (&lp->wait_ccbq)-> blink, &lp->wait_ccbq); |
11595 | ++lp->busyccbs; |
11596 | } |
11597 | |
11598 | /* |
11599 | 	**	Remember all information needed to free this CCB.
11600 | */ |
11601 | cp->to_abort = 0; |
11602 | cp->tag = tag; |
11603 | cp->target = tn; |
11604 | cp->lun = ln; |
11605 | |
11606 | if (DEBUG_FLAGSncr_debug & DEBUG_TAGS(0x0400)) { |
11607 | PRINT_LUN(np, tn, ln); |
11608 | printk ("ccb @%p using tag %d.\n", cp, tag); |
11609 | } |
11610 | |
11611 | out: |
11612 | return cp; |
11613 | out_free: |
11614 | xpt_insque_head(&cp->link_ccbq, &np->free_ccbq)__xpt_que_add(&cp->link_ccbq, &np->free_ccbq, ( &np->free_ccbq)->flink); |
11615 | return (ccb_p) 0; |
11616 | } |
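
#if 0	/* Illustrative sketch (not part of the driver build).
	** The per-LUN tag management used above is a simple circular
	** buffer of free tag numbers: ia_tag is where the next tag is
	** taken, if_tag is where a freed tag is put back. The structure
	** and names below are assumptions made for this example only.
	*/
struct example_tag_ring {
	unsigned char tags[8];	/* MAX_TAGS entries, preloaded with 0..7 */
	unsigned char ia_tag;	/* next tag to allocate */
	unsigned char if_tag;	/* next slot to receive a freed tag */
};

static unsigned char example_get_tag(struct example_tag_ring *r)
{
	unsigned char tag = r->tags[r->ia_tag];
	if (++r->ia_tag == 8)
		r->ia_tag = 0;
	return tag;
}

static void example_put_tag(struct example_tag_ring *r, unsigned char tag)
{
	r->tags[r->if_tag] = tag;
	if (++r->if_tag == 8)
		r->if_tag = 0;
}
#endif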
11617 | |
11618 | /*========================================================== |
11619 | ** |
11620 | ** |
11621 | ** Release one control block |
11622 | ** |
11623 | ** |
11624 | **========================================================== |
11625 | */ |
11626 | |
11627 | static void ncr_free_ccb (ncb_p np, ccb_p cp) |
11628 | { |
11629 | tcb_p tp = &np->target[cp->target]; |
11630 | lcb_p lp = ncr_lp(np, tp, cp->lun)(!cp->lun) ? (tp)->l0p : (tp)->lmp ? (tp)->lmp[(cp ->lun)] : 0; |
11631 | |
11632 | if (DEBUG_FLAGSncr_debug & DEBUG_TAGS(0x0400)) { |
11633 | PRINT_LUN(np, cp->target, cp->lun); |
11634 | printk ("ccb @%p freeing tag %d.\n", cp, cp->tag); |
11635 | } |
11636 | |
11637 | /* |
11638 | ** If lun control block available, make available |
11639 | ** the task slot and the tag if any. |
11640 | ** Decrement counters. |
11641 | */ |
11642 | if (lp) { |
11643 | if (cp->tag != NO_TAG(256)) { |
11644 | lp->cb_tags[lp->if_tag++] = cp->tag; |
11645 | if (lp->if_tag == MAX_TAGS(8)) |
11646 | lp->if_tag = 0; |
11647 | --lp->tags_sum[cp->tags_si]; |
11648 | lp->tasktbl[cp->tag] = cpu_to_scr(np->p_bad_i_t_l_q)(np->p_bad_i_t_l_q); |
11649 | } else { |
11650 | lp->tasktbl[0] = cpu_to_scr(np->p_bad_i_t_l)(np->p_bad_i_t_l); |
11651 | } |
11652 | --lp->busyccbs; |
11653 | if (cp->queued) { |
11654 | --lp->queuedccbs; |
11655 | } |
11656 | } |
11657 | |
11658 | /* |
11659 | ** Make this CCB available. |
11660 | */ |
11661 | xpt_remque(&cp->link_ccbq)__xpt_que_del((&cp->link_ccbq)->blink, (&cp-> link_ccbq)->flink); |
11662 | xpt_insque_head(&cp->link_ccbq, &np->free_ccbq)__xpt_que_add(&cp->link_ccbq, &np->free_ccbq, ( &np->free_ccbq)->flink); |
11663 | cp -> host_statusphys.header.status[1] = HS_IDLE(0); |
11664 | cp -> queued = 0; |
11665 | } |
11666 | |
11667 | /*------------------------------------------------------------------------ |
11668 | ** Allocate a CCB and initialize its fixed part. |
11669 | **------------------------------------------------------------------------ |
11670 | **------------------------------------------------------------------------ |
11671 | */ |
11672 | static ccb_p ncr_alloc_ccb(ncb_p np) |
11673 | { |
11674 | ccb_p cp = 0; |
11675 | int hcode; |
11676 | |
11677 | /* |
11678 | ** Allocate memory for this CCB. |
11679 | */ |
11680 | cp = m_calloc_dma(sizeof(struct ccb), "CCB")m_calloc(sizeof(struct ccb), "CCB"); |
11681 | if (!cp) |
11682 | return 0; |
11683 | |
11684 | /* |
11685 | 	**	Count it and initialize it.
11686 | */ |
11687 | np->actccbs++; |
11688 | |
11689 | /* |
11690 | ** Remember virtual and bus address of this ccb. |
11691 | */ |
11692 | cp->p_ccb = vtobus(cp)virt_to_phys(cp); |
11693 | |
11694 | /* |
11695 | ** Insert this ccb into the hashed list. |
11696 | */ |
11697 | hcode = CCB_HASH_CODE(cp->p_ccb)(((cp->p_ccb) >> 11) & ((1UL << 8)-1)); |
11698 | cp->link_ccbh = np->ccbh[hcode]; |
11699 | np->ccbh[hcode] = cp; |
11700 | |
11701 | /* |
11702 | 	**	Initialize the start and restart actions.
11703 | */ |
11704 | cp->phys.header.go.start = cpu_to_scr(NCB_SCRIPT_PHYS (np, idle))((np->p_script + ((size_t) (&((struct script *)0)-> idle)))); |
11705 | cp->phys.header.go.restart = cpu_to_scr(NCB_SCRIPTH_PHYS(np,bad_i_t_l))((np->p_scripth + ((size_t) (&((struct scripth *)0)-> bad_i_t_l)))); |
11706 | |
11707 | /* |
11708 | 	**	Initialize some other fields.
11709 | */ |
11710 | cp->phys.smsg_ext.addr = cpu_to_scr(NCB_PHYS(np, msgin[2]))((np->p_ncb + ((size_t) (&((struct ncb *)0)->msgin[ 2])))); |
11711 | |
11712 | /* |
11713 | ** Chain into wakeup list and free ccb queue. |
11714 | */ |
11715 | cp->link_ccb = np->ccbc; |
11716 | np->ccbc = cp; |
11717 | |
11718 | xpt_insque_head(&cp->link_ccbq, &np->free_ccbq)__xpt_que_add(&cp->link_ccbq, &np->free_ccbq, ( &np->free_ccbq)->flink); |
11719 | |
11720 | return cp; |
11721 | } |
11722 | |
11723 | /*------------------------------------------------------------------------ |
11724 | ** Look up a CCB from a DSA value. |
11725 | **------------------------------------------------------------------------ |
11726 | **------------------------------------------------------------------------ |
11727 | */ |
11728 | static ccb_p ncr_ccb_from_dsa(ncb_p np, u_longunsigned long dsa) |
11729 | { |
11730 | int hcode; |
11731 | ccb_p cp; |
11732 | |
11733 | hcode = CCB_HASH_CODE(dsa)(((dsa) >> 11) & ((1UL << 8)-1)); |
11734 | cp = np->ccbh[hcode]; |
11735 | while (cp) { |
11736 | if (cp->p_ccb == dsa) |
11737 | break; |
11738 | cp = cp->link_ccbh; |
11739 | } |
11740 | |
11741 | return cp; |
11742 | } |
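
#if 0	/* Illustrative sketch (not part of the driver build).
	** The CCB lookup above hashes the DSA (the CCB bus address) into
	** one of 256 buckets and walks a singly linked chain. A minimal
	** standalone model, with names assumed for this example only:
	*/
#define EXAMPLE_HASH(dsa)	(((dsa) >> 11) & 0xff)

struct example_ccb {
	unsigned long p_ccb;		/* bus address of this CCB */
	struct example_ccb *link_ccbh;	/* next CCB in the same bucket */
};

static struct example_ccb *example_lookup(struct example_ccb **buckets,
                                          unsigned long dsa)
{
	struct example_ccb *cp = buckets[EXAMPLE_HASH(dsa)];
	while (cp && cp->p_ccb != dsa)
		cp = cp->link_ccbh;
	return cp;	/* NULL if no CCB matches this DSA */
}
#endif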
11743 | |
11744 | /*========================================================== |
11745 | ** |
11746 | ** |
11747 | ** Allocation of resources for Targets/Luns/Tags. |
11748 | ** |
11749 | ** |
11750 | **========================================================== |
11751 | */ |
11752 | |
11753 | |
11754 | /*------------------------------------------------------------------------ |
11755 | ** Target control block initialisation. |
11756 | **------------------------------------------------------------------------ |
11757 | ** This data structure is fully initialized after a SCSI command |
11758 | ** has been successfully completed for this target. |
11759 | **------------------------------------------------------------------------ |
11760 | */ |
11761 | static void ncr_init_tcb (ncb_p np, u_charunsigned char tn) |
11762 | { |
11763 | /* |
11764 | ** Check some alignments required by the chip. |
11765 | */ |
11766 | assert (( (offsetof(struct ncr_reg, nc_sxfer) ^{ if (!(( (((size_t) (&((struct ncr_reg *)0)->nc_sxfer )) ^ ((size_t) (&((struct tcb *)0)->sval))) &3) == 0)) { (void)panic( "assertion \"%s\" failed: file \"%s\", line %d\n" , "( (offsetof(struct ncr_reg, nc_sxfer) ^ offsetof(struct tcb , sval )) &3) == 0" , "../linux/src/drivers/scsi/sym53c8xx.c", 11767); } } |
11767 | offsetof(struct tcb , sval )) &3) == 0){ if (!(( (((size_t) (&((struct ncr_reg *)0)->nc_sxfer )) ^ ((size_t) (&((struct tcb *)0)->sval))) &3) == 0)) { (void)panic( "assertion \"%s\" failed: file \"%s\", line %d\n" , "( (offsetof(struct ncr_reg, nc_sxfer) ^ offsetof(struct tcb , sval )) &3) == 0" , "../linux/src/drivers/scsi/sym53c8xx.c", 11767); } }; |
11768 | assert (( (offsetof(struct ncr_reg, nc_scntl3) ^{ if (!(( (((size_t) (&((struct ncr_reg *)0)->nc_scntl3 )) ^ ((size_t) (&((struct tcb *)0)->wval))) &3) == 0)) { (void)panic( "assertion \"%s\" failed: file \"%s\", line %d\n" , "( (offsetof(struct ncr_reg, nc_scntl3) ^ offsetof(struct tcb , wval )) &3) == 0" , "../linux/src/drivers/scsi/sym53c8xx.c", 11769); } } |
11769 | offsetof(struct tcb , wval )) &3) == 0){ if (!(( (((size_t) (&((struct ncr_reg *)0)->nc_scntl3 )) ^ ((size_t) (&((struct tcb *)0)->wval))) &3) == 0)) { (void)panic( "assertion \"%s\" failed: file \"%s\", line %d\n" , "( (offsetof(struct ncr_reg, nc_scntl3) ^ offsetof(struct tcb , wval )) &3) == 0" , "../linux/src/drivers/scsi/sym53c8xx.c", 11769); } }; |
11770 | if ((np->device_id == PCI_DEVICE_ID_LSI_53C10100x20) || |
11771 | (np->device_id == PCI_DEVICE_ID_LSI_53C1010_660x21)){ |
11772 | assert (( (offsetof(struct ncr_reg, nc_scntl4) ^{ if (!(( (((size_t) (&((struct ncr_reg *)0)->nc_scntl4 )) ^ ((size_t) (&((struct tcb *)0)->uval))) &3) == 0)) { (void)panic( "assertion \"%s\" failed: file \"%s\", line %d\n" , "( (offsetof(struct ncr_reg, nc_scntl4) ^ offsetof(struct tcb , uval )) &3) == 0" , "../linux/src/drivers/scsi/sym53c8xx.c", 11773); } } |
11773 | offsetof(struct tcb , uval )) &3) == 0){ if (!(( (((size_t) (&((struct ncr_reg *)0)->nc_scntl4 )) ^ ((size_t) (&((struct tcb *)0)->uval))) &3) == 0)) { (void)panic( "assertion \"%s\" failed: file \"%s\", line %d\n" , "( (offsetof(struct ncr_reg, nc_scntl4) ^ offsetof(struct tcb , uval )) &3) == 0" , "../linux/src/drivers/scsi/sym53c8xx.c", 11773); } }; |
11774 | } |
11775 | } |
11776 | |
11777 | /*------------------------------------------------------------------------ |
11778 | ** Lun control block allocation and initialization. |
11779 | **------------------------------------------------------------------------ |
11780 | ** This data structure is allocated and initialized after a SCSI |
11781 | ** command has been successfully completed for this target/lun. |
11782 | **------------------------------------------------------------------------ |
11783 | */ |
11784 | static lcb_p ncr_alloc_lcb (ncb_p np, u_charunsigned char tn, u_charunsigned char ln) |
11785 | { |
11786 | tcb_p tp = &np->target[tn]; |
11787 | lcb_p lp = ncr_lp(np, tp, ln)(!ln) ? (tp)->l0p : (tp)->lmp ? (tp)->lmp[(ln)] : 0; |
11788 | |
11789 | /* |
11790 | ** Already done, return. |
11791 | */ |
11792 | if (lp) |
11793 | return lp; |
11794 | |
11795 | /* |
11796 | ** Initialize the target control block if not yet. |
11797 | */ |
11798 | ncr_init_tcb(np, tn); |
11799 | |
11800 | /* |
11801 | ** Allocate the lcb bus address array. |
11802 | ** Compute the bus address of this table. |
11803 | */ |
11804 | if (ln && !tp->luntbl) { |
11805 | int i; |
11806 | |
11807 | tp->luntbl = m_calloc_dma(256, "LUNTBL")m_calloc(256, "LUNTBL"); |
11808 | if (!tp->luntbl) |
11809 | goto fail; |
11810 | for (i = 0 ; i < 64 ; i++) |
11811 | tp->luntbl[i] = cpu_to_scr(NCB_PHYS(np, resel_badlun))((np->p_ncb + ((size_t) (&((struct ncb *)0)->resel_badlun )))); |
11812 | tp->b_luntbl = cpu_to_scr(vtobus(tp->luntbl))(virt_to_phys(tp->luntbl)); |
11813 | } |
11814 | |
11815 | /* |
11816 | ** Allocate the table of pointers for LUN(s) > 0, if needed. |
11817 | */ |
11818 | if (ln && !tp->lmp) { |
11819 | tp->lmp = m_calloc(MAX_LUN64 * sizeof(lcb_p), "LMP"); |
11820 | if (!tp->lmp) |
11821 | goto fail; |
11822 | } |
11823 | |
11824 | /* |
11825 | ** Allocate the lcb. |
11826 | ** Make it available to the chip. |
11827 | */ |
11828 | lp = m_calloc_dma(sizeof(struct lcb), "LCB")m_calloc(sizeof(struct lcb), "LCB"); |
11829 | if (!lp) |
11830 | goto fail; |
11831 | if (ln) { |
11832 | tp->lmp[ln] = lp; |
11833 | tp->luntbl[ln] = cpu_to_scr(vtobus(lp))(virt_to_phys(lp)); |
11834 | } |
11835 | else { |
11836 | tp->l0p = lp; |
11837 | tp->b_lun0 = cpu_to_scr(vtobus(lp))(virt_to_phys(lp)); |
11838 | } |
11839 | |
11840 | /* |
11841 | ** Initialize the CCB queue headers. |
11842 | */ |
11843 | xpt_que_init(&lp->busy_ccbq)do { (&lp->busy_ccbq)->flink = (&lp->busy_ccbq ); (&lp->busy_ccbq)->blink = (&lp->busy_ccbq ); } while (0); |
11844 | xpt_que_init(&lp->wait_ccbq)do { (&lp->wait_ccbq)->flink = (&lp->wait_ccbq ); (&lp->wait_ccbq)->blink = (&lp->wait_ccbq ); } while (0); |
11845 | |
11846 | /* |
11847 | ** Set max CCBs to 1 and use the default task array |
11848 | ** by default. |
11849 | */ |
11850 | lp->maxnxs = 1; |
11851 | lp->tasktbl = &lp->tasktbl_0; |
11852 | lp->b_tasktbl = cpu_to_scr(vtobus(lp->tasktbl))(virt_to_phys(lp->tasktbl)); |
11853 | lp->tasktbl[0] = cpu_to_scr(np->p_notask)(np->p_notask); |
11854 | lp->resel_task = cpu_to_scr(NCB_SCRIPT_PHYS(np, resel_notag))((np->p_script + ((size_t) (&((struct script *)0)-> resel_notag)))); |
11855 | |
11856 | /* |
11857 | ** Initialize command queuing control. |
11858 | */ |
11859 | lp->busyccbs = 1; |
11860 | lp->queuedccbs = 1; |
11861 | lp->queuedepth = 1; |
11862 | fail: |
11863 | return lp; |
11864 | } |
11865 | |
11866 | |
11867 | /*------------------------------------------------------------------------ |
11868 | ** Lun control block setup on INQUIRY data received. |
11869 | **------------------------------------------------------------------------ |
11870 | ** We only support WIDE, SYNC for targets and CMDQ for logical units. |
11871 | **	This setup is done on each INQUIRY since we expect the user
11872 | **	to play with CHANGE DEFINITION commands. :-)
11873 | **------------------------------------------------------------------------ |
11874 | */ |
11875 | static lcb_p ncr_setup_lcb (ncb_p np, u_charunsigned char tn, u_charunsigned char ln, u_charunsigned char *inq_data) |
11876 | { |
11877 | tcb_p tp = &np->target[tn]; |
11878 | lcb_p lp = ncr_lp(np, tp, ln)(!ln) ? (tp)->l0p : (tp)->lmp ? (tp)->lmp[(ln)] : 0; |
11879 | u_charunsigned char inq_byte7; |
11880 | int i; |
11881 | |
11882 | /* |
11883 | ** If no lcb, try to allocate it. |
11884 | */ |
11885 | if (!lp && !(lp = ncr_alloc_lcb(np, tn, ln))) |
11886 | goto fail; |
11887 | |
11888 | #if 0 /* No more used. Left here as provision */ |
11889 | /* |
11890 | ** Get device quirks. |
11891 | */ |
11892 | tp->quirks = 0; |
11893 | if (tp->quirks && bootverbose(np->verbose)) { |
11894 | PRINT_LUN(np, tn, ln); |
11895 | printk ("quirks=%x.\n", tp->quirks); |
11896 | } |
11897 | #endif |
11898 | |
11899 | /* |
11900 | 	**	Evaluate the target/unit capabilities we can trust.
11901 | 	**	We only believe devices of version >= SCSI-2 that
11902 | 	**	use the appropriate response data format (2).
11903 | 	**	But it seems that some CCS devices also
11904 | 	**	support SYNC and I do not want to frustrate
11905 | ** anybody. ;-) |
11906 | */ |
11907 | inq_byte7 = 0; |
11908 | if ((inq_data[2] & 0x7) >= 2 && (inq_data[3] & 0xf) == 2) |
11909 | inq_byte7 = inq_data[7]; |
11910 | else if ((inq_data[2] & 0x7) == 1 && (inq_data[3] & 0xf) == 1) |
11911 | inq_byte7 = INQ7_SYNC(0x10); |
11912 | |
11913 | /* |
11914 | ** Throw away announced LUN capabilities if we are told |
11915 | ** that there is no real device supported by the logical unit. |
11916 | */ |
11917 | if ((inq_data[0] & 0xe0) > 0x20 || (inq_data[0] & 0x1f) == 0x1f) |
11918 | inq_byte7 &= (INQ7_SYNC(0x10) | INQ7_WIDE16(0x20)); |
11919 | |
11920 | /* |
11921 | ** If user is wanting SYNC, force this feature. |
11922 | */ |
11923 | if (driver_setup.force_sync_nego) |
11924 | inq_byte7 |= INQ7_SYNC(0x10); |
11925 | |
11926 | /* |
11927 | ** Prepare negotiation if SIP capabilities have changed. |
11928 | */ |
11929 | tp->inq_done = 1; |
11930 | if ((inq_byte7 ^ tp->inq_byte7) & (INQ7_SYNC(0x10) | INQ7_WIDE16(0x20))) { |
11931 | tp->inq_byte7 = inq_byte7; |
11932 | ncr_negotiate(np, tp); |
11933 | } |
11934 | |
11935 | /* |
11936 | ** If unit supports tagged commands, allocate and |
11937 | 	**	initialize the task table if not done yet.
11938 | */ |
11939 | if ((inq_byte7 & INQ7_QUEUE(0x02)) && lp->tasktbl == &lp->tasktbl_0) { |
11940 | lp->tasktbl = m_calloc_dma(MAX_TASKS*4, "TASKTBL")m_calloc((256/4)*4, "TASKTBL"); |
11941 | if (!lp->tasktbl) { |
11942 | lp->tasktbl = &lp->tasktbl_0; |
11943 | goto fail; |
11944 | } |
11945 | lp->b_tasktbl = cpu_to_scr(vtobus(lp->tasktbl))(virt_to_phys(lp->tasktbl)); |
11946 | for (i = 0 ; i < MAX_TASKS(256/4) ; i++) |
11947 | lp->tasktbl[i] = cpu_to_scr(np->p_notask)(np->p_notask); |
11948 | |
11949 | lp->cb_tags = m_calloc(MAX_TAGS(8), "CB_TAGS"); |
11950 | if (!lp->cb_tags) |
11951 | goto fail; |
11952 | for (i = 0 ; i < MAX_TAGS(8) ; i++) |
11953 | lp->cb_tags[i] = i; |
11954 | |
11955 | lp->maxnxs = MAX_TAGS(8); |
11956 | lp->tags_stime = ktime_get(3*HZ)(jiffies + (unsigned long) 3*100); |
11957 | } |
11958 | |
11959 | /* |
11960 | ** Adjust tagged queueing status if needed. |
11961 | */ |
11962 | if ((inq_byte7 ^ lp->inq_byte7) & INQ7_QUEUE(0x02)) { |
11963 | lp->inq_byte7 = inq_byte7; |
11964 | lp->numtags = lp->maxtags; |
11965 | ncr_setup_tags (np, tn, ln); |
11966 | } |
11967 | |
11968 | fail: |
11969 | return lp; |
11970 | } |
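
#if 0	/* Illustrative sketch (not part of the driver build).
	** How the capability byte is derived from INQUIRY data above:
	** trust byte 7 only for devices reporting SCSI-2 or later with
	** response data format 2, and assume SYNC only for CCS devices.
	** The helper name is an assumption for this example only.
	*/
static unsigned char example_trusted_inq_byte7(const unsigned char *inq_data)
{
	if ((inq_data[2] & 0x7) >= 2 && (inq_data[3] & 0xf) == 2)
		return inq_data[7];		/* SCSI-2: believe the device */
	if ((inq_data[2] & 0x7) == 1 && (inq_data[3] & 0xf) == 1)
		return 0x10;			/* CCS: assume SYNC only (INQ7_SYNC) */
	return 0;				/* otherwise trust nothing */
}
#endif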
11971 | |
11972 | /*========================================================== |
11973 | ** |
11974 | ** |
11975 | ** Build Scatter Gather Block |
11976 | ** |
11977 | ** |
11978 | **========================================================== |
11979 | ** |
11980 | ** The transfer area may be scattered among |
11981 | ** several non adjacent physical pages. |
11982 | ** |
11983 | ** We may use MAX_SCATTER blocks. |
11984 | ** |
11985 | **---------------------------------------------------------- |
11986 | */ |
11987 | |
11988 | /* |
11989 | ** We try to reduce the number of interrupts caused |
11990 | ** by unexpected phase changes due to disconnects. |
11991 | **	A typical hard disk may disconnect before ANY block.
11992 | **	If we wanted to avoid unexpected phase changes at all,
11993 | **	we would have to use a break point every 512 bytes.
11994 | ** Of course the number of scatter/gather blocks is |
11995 | ** limited. |
11996 | **	Under Linux, the scatter/gather blocks are provided by
11997 | ** the generic driver. We just have to copy addresses and |
11998 | ** sizes to the data segment array. |
11999 | */ |
12000 | |
12001 | /* |
12002 | ** For 64 bit systems, we use the 8 upper bits of the size field |
12003 | ** to provide bus address bits 32-39 to the SCRIPTS processor. |
12004 | ** This allows the 895A and 896 to address up to 1 TB of memory. |
12005 | ** For 32 bit chips on 64 bit systems, we must be provided with |
12006 | ** memory addresses that fit into the first 32 bit bus address |
12007 | **	range, so this does not matter, and we expect an error from
12008 | **	the chip if this ever happens.
12009 | ** |
12010 | ** We use a separate function for the case Linux does not provide |
12011 | ** a scatter list in order to allow better code optimization |
12012 | ** for the case we have a scatter list (BTW, for now this just wastes |
12013 | ** about 40 bytes of code for x86, but my guess is that the scatter |
12014 | ** code will get more complex later). |
12015 | */ |
12016 | |
12017 | #ifdef SCSI_NCR_USE_64BIT_DAC |
12018 | #define SCATTER_ONE(data, badd, len)(data)->addr = (badd); (data)->size = (len); \ |
12019 | (data)->addr = cpu_to_scr(badd)(badd); \ |
12020 | (data)->size = cpu_to_scr((((badd) >> 8) & 0xff000000) + len)((((badd) >> 8) & 0xff000000) + len); |
12021 | #else |
12022 | #define SCATTER_ONE(data, badd, len)(data)->addr = (badd); (data)->size = (len); \ |
12023 | (data)->addr = cpu_to_scr(badd)(badd); \ |
12024 | (data)->size = cpu_to_scr(len)(len); |
12025 | #endif |
12026 | |
12027 | #define CROSS_16MB(p, n)(((((unsigned long) p) + n - 1) ^ ((unsigned long) p)) & ~ 0xffffff) (((((u_longunsigned long) p) + n - 1) ^ ((u_longunsigned long) p)) & ~0xffffff) |
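
#if 0	/* Illustrative sketch (not part of the driver build).
	** What the two macros above compute: the 64-bit DAC variant of
	** SCATTER_ONE packs bus address bits 32-39 into the upper 8 bits
	** of the 32-bit size field (the low 24 bits carry the length),
	** and CROSS_16MB tests whether a transfer spans a 16 MB boundary.
	** The values below are made up for this example only.
	*/
static void example_encode(void)
{
	unsigned long long badd = 0x0000002345f00000ULL;	/* sample bus address */
	unsigned int len = 0x2000;				/* sample length (8 KB) */

	unsigned int size_field = (unsigned int)(((badd >> 8) & 0xff000000) + len);
	/* size_field == 0x23002000: bits 32-39 of badd in the top byte,
	** the length in the low 24 bits. */

	int crosses = ((((unsigned long)badd + len - 1) ^ (unsigned long)badd)
			& ~0xffffffUL) != 0;
	/* crosses == 0 here: start and end lie in the same 16 MB frame. */
	(void)size_field; (void)crosses;
}
#endif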
12028 | |
12029 | static int ncr_scatter_no_sglist(ncb_p np, ccb_p cp, Scsi_Cmnd *cmd) |
12030 | { |
12031 | struct scr_tblmove *data = &cp->phys.data[MAX_SCATTER((127))-1]; |
12032 | int segment; |
12033 | |
12034 | cp->data_len = cmd->request_bufflen; |
12035 | |
12036 | if (cmd->request_bufflen) { |
12037 | u_longunsigned long baddr = map_scsi_single_data(np, cmd)(virt_to_phys((cmd)->request_buffer)); |
12038 | |
12039 | SCATTER_ONE(data, baddr, cmd->request_bufflen)(data)->addr = (baddr); (data)->size = (cmd->request_bufflen );; |
12040 | if (CROSS_16MB(baddr, cmd->request_bufflen)(((((unsigned long) baddr) + cmd->request_bufflen - 1) ^ ( (unsigned long) baddr)) & ~0xffffff)) { |
12041 | cp->host_flagsphys.header.status[3] |= HF_PM_TO_C(1u<<6); |
12042 | #ifdef DEBUG_896R1 |
12043 | printk("He! we are crossing a 16 MB boundary (0x%lx, 0x%x)\n", |
12044 | baddr, cmd->request_bufflen); |
12045 | #endif |
12046 | } |
12047 | segment = 1; |
12048 | } |
12049 | else |
12050 | segment = 0; |
12051 | |
12052 | return segment; |
12053 | } |
12054 | |
12055 | /* |
12056 | ** DEL 472 - 53C896 Rev 1 - Part Number 609-0393055 - ITEM 5. |
12057 | ** |
12058 | ** We disable data phase mismatch handling from SCRIPTS for data |
12059 | ** transfers that contains scatter/gather entries that cross |
12060 | ** a 16 MB boundary. |
12061 | ** We use a different scatter function for 896 rev. 1 that needs |
12062 | ** such a work-around. Doing so, we do not affect performance for |
12063 | ** other chips. |
12064 | ** This problem should not be triggered for disk IOs under Linux, |
12065 | ** since such IOs are performed using pages and buffers that are |
12066 | ** nicely power-of-two sized and aligned. But, since this may change |
12067 | ** at any time, a work-around was required. |
12068 | */ |
12069 | static int ncr_scatter_896R1(ncb_p np, ccb_p cp, Scsi_Cmnd *cmd) |
12070 | { |
12071 | int segn; |
12072 | int use_sg = (int) cmd->use_sg; |
12073 | |
12074 | cp->data_len = 0; |
12075 | |
12076 | if (!use_sg) |
12077 | segn = ncr_scatter_no_sglist(np, cp, cmd); |
12078 | 	else if (use_sg > MAX_SCATTER)
12079 | 		segn = -1;
12080 | 	else {
12081 | 		struct scatterlist *scatter = (struct scatterlist *)cmd->buffer;
12082 | 		struct scr_tblmove *data;
12083 | 
12084 | 		use_sg = map_scsi_sg_data(np, cmd);
12085 | 		data = &cp->phys.data[MAX_SCATTER - use_sg];
12086 | 
12087 | 		for (segn = 0; segn < use_sg; segn++) {
12088 | 			u_long baddr = scsi_sg_dma_address(&scatter[segn]);
12089 | 			unsigned int len = scsi_sg_dma_len(&scatter[segn]);
12090 | 
12091 | 			SCATTER_ONE(&data[segn],
12092 | 				    baddr,
12093 | 				    len);
12094 | 			if (CROSS_16MB(baddr, scatter[segn].length)) {
12095 | 				cp->host_flags |= HF_PM_TO_C;
12096 | #ifdef DEBUG_896R1 |
12097 | printk("He! we are crossing a 16 MB boundary (0x%lx, 0x%x)\n", |
12098 | baddr, scatter[segn].length); |
12099 | #endif |
12100 | } |
12101 | cp->data_len += len; |
12102 | } |
12103 | } |
12104 | |
12105 | return segn; |
12106 | } |
12107 | |
12108 | static int ncr_scatter(ncb_p np, ccb_p cp, Scsi_Cmnd *cmd) |
12109 | { |
12110 | int segment; |
12111 | int use_sg = (int) cmd->use_sg; |
12112 | |
12113 | cp->data_len = 0; |
12114 | |
12115 | if (!use_sg) |
12116 | segment = ncr_scatter_no_sglist(np, cp, cmd); |
12117 | 	else if (use_sg > MAX_SCATTER)
12118 | 		segment = -1;
12119 | 	else {
12120 | 		struct scatterlist *scatter = (struct scatterlist *)cmd->buffer;
12121 | 		struct scr_tblmove *data;
12122 | 
12123 | 		use_sg = map_scsi_sg_data(np, cmd);
12124 | 		data = &cp->phys.data[MAX_SCATTER - use_sg];
12125 | 
12126 | 		for (segment = 0; segment < use_sg; segment++) {
12127 | 			u_long baddr = scsi_sg_dma_address(&scatter[segment]);
12128 | 			unsigned int len = scsi_sg_dma_len(&scatter[segment]);
12129 | 
12130 | 			SCATTER_ONE(&data[segment],
12131 | 				    baddr,
12132 | 				    len);
12133 | cp->data_len += len; |
12134 | } |
12135 | } |
12136 | |
12137 | return segment; |
12138 | } |
12139 | |
12140 | /*========================================================== |
12141 | ** |
12142 | ** |
12143 | ** Test the pci bus snoop logic :-( |
12144 | ** |
12145 | ** Has to be called with interrupts disabled. |
12146 | ** |
12147 | ** |
12148 | **========================================================== |
12149 | */ |
12150 | |
12151 | #ifndef SCSI_NCR_IOMAPPED |
12152 | static int __init ncr_regtest (struct ncb* np) |
12153 | { |
12154 | register volatile u_int32 data; |
12155 | /* |
12156 | ** ncr registers may NOT be cached. |
12157 | ** write 0xffffffff to a read only register area, |
12158 | ** and try to read it back. |
12159 | */ |
12160 | data = 0xffffffff; |
12161 | 	OUTL_OFF(offsetof(struct ncr_reg, nc_dstat), data);
12162 | 	data = INL_OFF(offsetof(struct ncr_reg, nc_dstat));
12163 | #if 1 |
12164 | if (data == 0xffffffff) { |
12165 | #else |
12166 | if ((data & 0xe2f0fffd) != 0x02000080) { |
12167 | #endif |
12168 | printk ("CACHE TEST FAILED: reg dstat-sstat2 readback %x.\n", |
12169 | (unsigned) data); |
12170 | return (0x10); |
12171 | }; |
12172 | return (0); |
12173 | } |
12174 | #endif |
12175 | |
12176 | static int __init ncr_snooptest (struct ncb* np) |
12177 | { |
12178 | u_int32 ncr_rd, ncr_wr, ncr_bk, host_rd, host_wr, pc; |
12179 | int i, err=0; |
12180 | #ifndef SCSI_NCR_IOMAPPED |
12181 | if (np->reg) { |
12182 | err |= ncr_regtest (np); |
12183 | if (err) return (err); |
12184 | } |
12185 | #endif |
12186 | /* |
12187 | ** init |
12188 | */ |
12189 | 	pc = NCB_SCRIPTH0_PHYS (np, snooptest);
12190 | host_wr = 1; |
12191 | ncr_wr = 2; |
12192 | /* |
12193 | ** Set memory and register. |
12194 | */ |
12195 | 	np->ncr_cache = cpu_to_scr(host_wr);
12196 | 	OUTL (nc_temp, ncr_wr);
12197 | /* |
12198 | ** Start script (exchange values) |
12199 | */ |
12200 | 	OUTL (nc_dsa, np->p_ncb);
12201 | 	OUTL_DSP (pc);
12202 | /* |
12203 | ** Wait 'til done (with timeout) |
12204 | */ |
12205 | 	for (i=0; i<NCR_SNOOP_TIMEOUT; i++)
12206 | 		if (INB(nc_istat) & (INTF|SIP|DIP))
12207 | break; |
12208 | /* |
12209 | ** Save termination position. |
12210 | */ |
12211 | 	pc = INL (nc_dsp);
12212 | /* |
12213 | ** Read memory and register. |
12214 | */ |
12215 | 	host_rd = scr_to_cpu(np->ncr_cache);
12216 | 	ncr_rd  = INL (nc_scratcha);
12217 | 	ncr_bk  = INL (nc_temp);
12218 | |
12219 | /* |
12220 | ** check for timeout |
12221 | */ |
12222 | 	if (i>=NCR_SNOOP_TIMEOUT) {
12223 | printk ("CACHE TEST FAILED: timeout.\n"); |
12224 | return (0x20); |
12225 | }; |
12226 | /* |
12227 | ** Check termination position. |
12228 | */ |
12229 | 	if (pc != NCB_SCRIPTH0_PHYS (np, snoopend)+8) {
12230 | printk ("CACHE TEST FAILED: script execution failed.\n"); |
12231 | printk ("start=%08lx, pc=%08lx, end=%08lx\n", |
12232 | 			(u_long) NCB_SCRIPTH0_PHYS (np, snooptest), (u_long) pc,
12233 | 			(u_long) NCB_SCRIPTH0_PHYS (np, snoopend) +8);
12234 | return (0x40); |
12235 | }; |
12236 | /* |
12237 | ** Show results. |
12238 | */ |
12239 | if (host_wr != ncr_rd) { |
12240 | printk ("CACHE TEST FAILED: host wrote %d, ncr read %d.\n", |
12241 | (int) host_wr, (int) ncr_rd); |
12242 | err |= 1; |
12243 | }; |
12244 | if (host_rd != ncr_wr) { |
12245 | printk ("CACHE TEST FAILED: ncr wrote %d, host read %d.\n", |
12246 | (int) ncr_wr, (int) host_rd); |
12247 | err |= 2; |
12248 | }; |
12249 | if (ncr_bk != ncr_wr) { |
12250 | printk ("CACHE TEST FAILED: ncr wrote %d, read back %d.\n", |
12251 | (int) ncr_wr, (int) ncr_bk); |
12252 | err |= 4; |
12253 | }; |
12254 | return (err); |
12255 | } |
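      | /*
      | **	Quick reference, derived from the checks above: the return
      | **	value is a bit mask.  0x10 means the register readback test
      | **	failed, 0x20 a SCRIPTS timeout, 0x40 a wrong SCRIPTS stop
      | **	address, and bits 0x1/0x2/0x4 flag the three host/chip value
      | **	mismatches.  A return of 0 means PCI bus snooping looks sane.
      | */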
12256 | |
12257 | /*========================================================== |
12258 | ** |
12259 | ** Determine the ncr's clock frequency. |
12260 | ** This is essential for the negotiation |
12261 | ** of the synchronous transfer rate. |
12262 | ** |
12263 | **========================================================== |
12264 | ** |
12265 | ** Note: we have to return the correct value. |
12266 | ** THERE IS NO SAFE DEFAULT VALUE. |
12267 | ** |
12268 | **	Most NCR/SYMBIOS boards are delivered with a 40 MHz clock.
12269 | ** 53C860 and 53C875 rev. 1 support fast20 transfers but |
12270 | ** do not have a clock doubler and so are provided with a |
12271 | ** 80 MHz clock. All other fast20 boards incorporate a doubler |
12272 | ** and so should be delivered with a 40 MHz clock. |
12273 | ** The recent fast40 chips (895/896/895A) and the |
12274 | **	fast80 chip (C1010) use a 40 MHz base clock
12275 | **	and provide a clock quadrupler (160 MHz). The code below
12276 | ** tries to deal as cleverly as possible with all this stuff. |
12277 | ** |
12278 | **---------------------------------------------------------- |
12279 | */ |
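      | /*
      | **	Worked example (figures taken from the code below): ncrgetfreq()
      | **	programs the GEN timer for a nominal delay of (1<<gen) * 125 us
      | **	and measures the real elapsed time in ms.  With gen = 11 the
      | **	measurement is converted as f = (1 << 11) * 4340 / ms, so a
      | **	delay of about 222 ms yields roughly 40000 KHz and about 111 ms
      | **	yields roughly 80000 KHz, which is how ncr_getclock() tells a
      | **	40 MHz oscillator from an 80 MHz one.
      | */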
12280 | |
12281 | /* |
12282 | * Select NCR SCSI clock frequency |
12283 | */ |
12284 | static void ncr_selectclock(ncb_p np, u_char scntl3)
12285 | {
12286 | 	if (np->multiplier < 2) {
12287 | 		OUTB(nc_scntl3, scntl3);
12288 | 		return;
12289 | 	}
12290 | 
12291 | 	if (bootverbose >= 2)
12292 | 		printk ("%s: enabling clock multiplier\n", ncr_name(np));
12293 | 
12294 | 	OUTB(nc_stest1, DBLEN);		/* Enable clock multiplier	*/
12295 | 
12296 | 	if ( (np->device_id != PCI_DEVICE_ID_LSI_53C1010) &&
12297 | 	     (np->device_id != PCI_DEVICE_ID_LSI_53C1010_66) &&
12298 | 	     (np->multiplier > 2)) {
12299 | 		int i = 20;	/* Poll bit 5 of stest4 for quadrupler */
12300 | 		while (!(INB(nc_stest4) & LCKFRQ) && --i > 0)
12301 | 			UDELAY (20);
12302 | 		if (!i)
12303 | 			printk("%s: the chip cannot lock the frequency\n",
12304 | 				ncr_name(np));
12305 | 
12306 | 	} else			/* Wait 120 micro-seconds for multiplier*/
12307 | 		UDELAY (120);
12308 | 
12309 | 	OUTB(nc_stest3, HSC);		/* Halt the scsi clock		*/
12310 | 	OUTB(nc_scntl3, scntl3);
12311 | 	OUTB(nc_stest1, (DBLEN|DBLSEL));/* Select clock multiplier	*/
12312 | 	OUTB(nc_stest3, 0x00);		/* Restart scsi clock		*/
12313 | }
12314 | |
12315 | |
12316 | /* |
12317 | * calculate NCR SCSI clock frequency (in KHz) |
12318 | */ |
12319 | static unsigned __init ncrgetfreq (ncb_p np, int gen) |
12320 | { |
12321 | unsigned int ms = 0; |
12322 | unsigned int f; |
12323 | int count; |
12324 | |
12325 | /* |
12326 | * Measure GEN timer delay in order |
12327 | * to calculate SCSI clock frequency |
12328 | * |
12329 | * This code will never execute too |
12330 | * many loop iterations (if DELAY is |
12331 | * reasonably correct). It could get |
12332 | * too low a delay (too high a freq.) |
12333 | * if the CPU is slow executing the |
12334 | * loop for some reason (an NMI, for |
12335 | 	 * example). For this reason, when
12336 | 	 * multiple measurements are performed,
12337 | 	 * we trust the higher delay (i.e. the
12338 | 	 * lower frequency returned).
12339 | */ |
12340 | 	OUTW (nc_sien , 0x0);	/* mask all scsi interrupts */
12341 | 				/* enable general purpose timer */
12342 | 	(void) INW (nc_sist);	/* clear pending scsi interrupt */
12343 | 	OUTB (nc_dien , 0);	/* mask all dma interrupts */
12344 | 	(void) INW (nc_sist);	/* another one, just to be sure :) */
12345 | 	OUTB (nc_scntl3, 4);	/* set pre-scaler to divide by 3 */
12346 | 	OUTB (nc_stime1, 0);	/* disable general purpose timer */
12347 | 	OUTB (nc_stime1, gen);	/* set to nominal delay of 1<<gen * 125us */
12348 | /* Temporary fix for udelay issue with Alpha |
12349 | platform */ |
12350 | 	while (!(INW(nc_sist) & GEN) && ms++ < 100000) {
12351 | /* count 1ms */ |
12352 | for (count = 0; count < 10; count++) |
12353 | UDELAY (100); |
12354 | } |
12355 | 	OUTB (nc_stime1, 0);	/* disable general purpose timer */
12356 | /* |
12357 | * set prescaler to divide by whatever 0 means |
12358 | * 0 ought to choose divide by 2, but appears |
12359 | * to set divide by 3.5 mode in my 53c810 ... |
12360 | */ |
12361 | 	OUTB (nc_scntl3, 0);
12362 | |
12363 | /* |
12364 | 	 * adjust for prescaler, and convert into KHz;
12365 | 	 * the scale values were derived empirically.
12366 | 	 * The C1010 uses different dividers.
12367 | */ |
12368 | #if 0 |
12369 | 	if (np->device_id == PCI_DEVICE_ID_LSI_53C1010)
12370 | f = ms ? ((1 << gen) * 2866 ) / ms : 0; |
12371 | else |
12372 | #endif |
12373 | f = ms ? ((1 << gen) * 4340) / ms : 0; |
12374 | |
12375 | 	if (bootverbose >= 2)
12376 | printk ("%s: Delay (GEN=%d): %u msec, %u KHz\n", |
12377 | ncr_name(np), gen, ms, f); |
12378 | |
12379 | return f; |
12380 | } |
12381 | |
12382 | static unsigned __init ncr_getfreq (ncb_p np) |
12383 | { |
12384 | 	u_int f1, f2;
12385 | int gen = 11; |
12386 | |
12387 | (void) ncrgetfreq (np, gen); /* throw away first result */ |
12388 | f1 = ncrgetfreq (np, gen); |
12389 | f2 = ncrgetfreq (np, gen); |
12390 | if (f1 > f2) f1 = f2; /* trust lower result */ |
12391 | return f1; |
12392 | } |
12393 | |
12394 | /* |
12395 | * Get/probe NCR SCSI clock frequency |
12396 | */ |
12397 | static void __init ncr_getclock (ncb_p np, int mult) |
12398 | { |
12399 | unsigned char scntl3 = np->sv_scntl3; |
12400 | unsigned char stest1 = np->sv_stest1; |
12401 | unsigned f1; |
12402 | |
12403 | np->multiplier = 1; |
12404 | f1 = 40000; |
12405 | |
12406 | /* |
12407 | ** True with 875/895/896/895A with clock multiplier selected |
12408 | */ |
12409 | 	if (mult > 1 && (stest1 & (DBLEN+DBLSEL)) == DBLEN+DBLSEL) {
12410 | 		if (bootverbose >= 2)
12411 | printk ("%s: clock multiplier found\n", ncr_name(np)); |
12412 | np->multiplier = mult; |
12413 | } |
12414 | |
12415 | /* |
12416 | ** If multiplier not found but a C1010, assume a mult of 4. |
12417 | ** If multiplier not found or scntl3 not 7,5,3, |
12418 | ** reset chip and get frequency from general purpose timer. |
12419 | ** Otherwise trust scntl3 BIOS setting. |
12420 | */ |
12421 | 	if ((np->device_id == PCI_DEVICE_ID_LSI_53C1010) ||
12422 | 	    (np->device_id == PCI_DEVICE_ID_LSI_53C1010_66)) {
12423 | f1=40000; |
12424 | np->multiplier = mult; |
12425 | 		if (bootverbose >= 2)
12426 | printk ("%s: clock multiplier assumed\n", ncr_name(np)); |
12427 | } |
12428 | else if (np->multiplier != mult || (scntl3 & 7) < 3 || !(scntl3 & 1)) { |
12429 | 		OUTB (nc_stest1, 0);		/* make sure doubler is OFF */
12430 | f1 = ncr_getfreq (np); |
12431 | |
12432 | 		if (bootverbose)
12433 | printk ("%s: NCR clock is %uKHz\n", ncr_name(np), f1); |
12434 | |
12435 | if (f1 < 55000) f1 = 40000; |
12436 | else f1 = 80000; |
12437 | |
12438 | /* |
12439 | 		**	Also check the PCI clock frequency later, to make sure
12440 | 		**	our frequency calculation algorithm is not too biased.
12441 | 		**	The bounds computed below are used for that check.
12442 | */ |
12443 | 		if (np->features & FE_66MHZ) {
12444 | np->pciclock_min = (66000*55+80-1)/80; |
12445 | np->pciclock_max = (66000*55)/40; |
12446 | } |
12447 | else { |
12448 | np->pciclock_min = (33000*55+80-1)/80; |
12449 | np->pciclock_max = (33000*55)/40; |
12450 | } |
12451 | |
12452 | if (f1 == 40000 && mult > 1) { |
12453 | 			if (bootverbose >= 2)
12454 | printk ("%s: clock multiplier assumed\n", ncr_name(np)); |
12455 | np->multiplier = mult; |
12456 | } |
12457 | } else { |
12458 | if ((scntl3 & 7) == 3) f1 = 40000; |
12459 | else if ((scntl3 & 7) == 5) f1 = 80000; |
12460 | else f1 = 160000; |
12461 | |
12462 | f1 /= np->multiplier; |
12463 | } |
12464 | |
12465 | /* |
12466 | ** Compute controller synchronous parameters. |
12467 | */ |
12468 | f1 *= np->multiplier; |
12469 | np->clock_khz = f1; |
12470 | } |
12471 | |
12472 | /* |
12473 | * Get/probe PCI clock frequency |
12474 | */ |
12475 | static u_int __init ncr_getpciclock (ncb_p np)
12476 | {
12477 | 	static u_int f;
12478 | 
12479 | 	OUTB (nc_stest1, SCLK);	/* Use the PCI clock as SCSI clock */
12480 | 	f = ncr_getfreq (np);
12481 | 	OUTB (nc_stest1, 0);
12482 | 
12483 | 	return f;
12484 | }
12485 | |
12486 | /*===================== LINUX ENTRY POINTS SECTION ==========================*/ |
12487 | |
12488 | #ifndef uchar
12489 | #define uchar unsigned char
12490 | #endif
12491 | 
12492 | #ifndef ushort
12493 | #define ushort unsigned short
12494 | #endif
12495 | 
12496 | #ifndef ulong
12497 | #define ulong unsigned long
12498 | #endif
12499 | |
12500 | /* --------------------------------------------------------------------- |
12501 | ** |
12502 | ** Driver setup from the boot command line |
12503 | ** |
12504 | ** --------------------------------------------------------------------- |
12505 | */ |
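      | /*
      | **	Example (illustrative, not from the original source): a boot
      | **	line such as
      | **
      | **		sym53c8xx=tags:4,sync:12,verb:1
      | **
      | **	is parsed below as a list of "option:value" items separated by
      | **	ARG_SEP (',' when built in, ' ' when loaded as a module), with
      | **	'y' and 'n' also accepted as boolean values.
      | */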
12506 | |
12507 | #ifdef MODULE
12508 | #define	ARG_SEP	' '
12509 | #else
12510 | #define	ARG_SEP	','
12511 | #endif
12512 | 
12513 | #define OPT_TAGS		1
12514 | #define OPT_MASTER_PARITY	2
12515 | #define OPT_SCSI_PARITY		3
12516 | #define OPT_DISCONNECTION	4
12517 | #define OPT_SPECIAL_FEATURES	5
12518 | #define OPT_ULTRA_SCSI		6
12519 | #define OPT_FORCE_SYNC_NEGO	7
12520 | #define OPT_REVERSE_PROBE	8
12521 | #define OPT_DEFAULT_SYNC	9
12522 | #define OPT_VERBOSE		10
12523 | #define OPT_DEBUG		11
12524 | #define OPT_BURST_MAX		12
12525 | #define OPT_LED_PIN		13
12526 | #define OPT_MAX_WIDE		14
12527 | #define OPT_SETTLE_DELAY	15
12528 | #define OPT_DIFF_SUPPORT	16
12529 | #define OPT_IRQM		17
12530 | #define OPT_PCI_FIX_UP		18
12531 | #define OPT_BUS_CHECK		19
12532 | #define OPT_OPTIMIZE		20
12533 | #define OPT_RECOVERY		21
12534 | #define OPT_SAFE_SETUP		22
12535 | #define OPT_USE_NVRAM		23
12536 | #define OPT_EXCLUDE		24
12537 | #define OPT_HOST_ID		25
12538 | |
12539 | #ifdef SCSI_NCR_IARB_SUPPORT |
12540 | #define OPT_IARB 26 |
12541 | #endif |
12542 | |
12543 | static char setup_token[] __initdata = |
12544 | "tags:" "mpar:" |
12545 | "spar:" "disc:" |
12546 | "specf:" "ultra:" |
12547 | "fsn:" "revprob:" |
12548 | "sync:" "verb:" |
12549 | "debug:" "burst:" |
12550 | "led:" "wide:" |
12551 | "settle:" "diff:" |
12552 | "irqm:" "pcifix:" |
12553 | "buschk:" "optim:" |
12554 | "recovery:" |
12555 | "safe:" "nvram:" |
12556 | "excl:" "hostid:" |
12557 | #ifdef SCSI_NCR_IARB_SUPPORT |
12558 | "iarb:" |
12559 | #endif |
12560 | 	;	/* DO NOT REMOVE THIS ';' */
12561 | |
12562 | #ifdef MODULE
12563 | #define	ARG_SEP	' '
12564 | #else
12565 | #define	ARG_SEP	','
12566 | #endif
12567 | |
12568 | static int __init get_setup_token(char *p) |
12569 | { |
12570 | char *cur = setup_token; |
12571 | char *pc; |
12572 | int i = 0; |
12573 | |
12574 | 	while (cur != NULL && (pc = strchr(cur, ':')) != NULL) {
12575 | ++pc; |
12576 | ++i; |
12577 | if (!strncmp(p, cur, pc - cur)) |
12578 | return i; |
12579 | cur = pc; |
12580 | } |
12581 | return 0; |
12582 | } |
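      | /*
      | **	For instance (illustrative): get_setup_token("tags:4") compares
      | **	"tags:" against each colon terminated entry of setup_token[]
      | **	and returns its 1-based position, i.e. OPT_TAGS.  An unknown
      | **	option returns 0 and is reported by the default case of the
      | **	switch in sym53c8xx_setup() below.
      | */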
12583 | |
12584 | |
12585 | int __init sym53c8xx_setup(char *str) |
12586 | { |
12587 | #ifdef SCSI_NCR_BOOT_COMMAND_LINE_SUPPORT |
12588 | char *cur = str; |
12589 | char *pc, *pv; |
12590 | unsigned long val; |
12591 | int i, c; |
12592 | int xi = 0; |
12593 | |
12594 | 	while (cur != NULL && (pc = strchr(cur, ':')) != NULL) {
12595 | char *pe; |
12596 | |
12597 | val = 0; |
12598 | pv = pc; |
12599 | c = *++pv; |
12600 | |
12601 | if (c == 'n') |
12602 | val = 0; |
12603 | else if (c == 'y') |
12604 | val = 1; |
12605 | else |
12606 | val = (int) simple_strtoul(pv, &pe, 0); |
12607 | |
12608 | switch (get_setup_token(cur)) { |
12609 | 		case OPT_TAGS:
12610 | 			driver_setup.default_tags = val;
12611 | 			if (pe && *pe == '/') {
12612 | 				i = 0;
12613 | 				while (*pe && *pe != ARG_SEP &&
12614 | 					i < sizeof(driver_setup.tag_ctrl)-1) {
12615 | 					driver_setup.tag_ctrl[i++] = *pe++;
12616 | 				}
12617 | 				driver_setup.tag_ctrl[i] = '\0';
12618 | 			}
12619 | 			break;
12620 | 		case OPT_MASTER_PARITY:
12621 | 			driver_setup.master_parity = val;
12622 | 			break;
12623 | 		case OPT_SCSI_PARITY:
12624 | 			driver_setup.scsi_parity = val;
12625 | 			break;
12626 | 		case OPT_DISCONNECTION:
12627 | 			driver_setup.disconnection = val;
12628 | 			break;
12629 | 		case OPT_SPECIAL_FEATURES:
12630 | 			driver_setup.special_features = val;
12631 | 			break;
12632 | 		case OPT_ULTRA_SCSI:
12633 | 			driver_setup.ultra_scsi = val;
12634 | 			break;
12635 | 		case OPT_FORCE_SYNC_NEGO:
12636 | 			driver_setup.force_sync_nego = val;
12637 | 			break;
12638 | 		case OPT_REVERSE_PROBE:
12639 | 			driver_setup.reverse_probe = val;
12640 | 			break;
12641 | 		case OPT_DEFAULT_SYNC:
12642 | 			driver_setup.default_sync = val;
12643 | 			break;
12644 | 		case OPT_VERBOSE:
12645 | 			driver_setup.verbose = val;
12646 | 			break;
12647 | 		case OPT_DEBUG:
12648 | 			driver_setup.debug = val;
12649 | 			break;
12650 | 		case OPT_BURST_MAX:
12651 | 			driver_setup.burst_max = val;
12652 | 			break;
12653 | 		case OPT_LED_PIN:
12654 | 			driver_setup.led_pin = val;
12655 | 			break;
12656 | 		case OPT_MAX_WIDE:
12657 | 			driver_setup.max_wide = val? 1:0;
12658 | 			break;
12659 | 		case OPT_SETTLE_DELAY:
12660 | 			driver_setup.settle_delay = val;
12661 | 			break;
12662 | 		case OPT_DIFF_SUPPORT:
12663 | 			driver_setup.diff_support = val;
12664 | 			break;
12665 | 		case OPT_IRQM:
12666 | 			driver_setup.irqm = val;
12667 | 			break;
12668 | 		case OPT_PCI_FIX_UP:
12669 | 			driver_setup.pci_fix_up = val;
12670 | 			break;
12671 | 		case OPT_BUS_CHECK:
12672 | 			driver_setup.bus_check = val;
12673 | 			break;
12674 | 		case OPT_OPTIMIZE:
12675 | 			driver_setup.optimize = val;
12676 | 			break;
12677 | 		case OPT_RECOVERY:
12678 | 			driver_setup.recovery = val;
12679 | 			break;
12680 | 		case OPT_USE_NVRAM:
12681 | 			driver_setup.use_nvram = val;
12682 | 			break;
12683 | 		case OPT_SAFE_SETUP:
12684 | 			memcpy(&driver_setup, &driver_safe_setup,
12685 | 				sizeof(driver_setup));
12686 | 			break;
12687 | 		case OPT_EXCLUDE:
12688 | 			if (xi < SCSI_NCR_MAX_EXCLUDES)
12689 | 				driver_setup.excludes[xi++] = val;
12690 | 			break;
12691 | 		case OPT_HOST_ID:
12692 | 			driver_setup.host_id = val;
12693 | 			break;
12694 | #ifdef SCSI_NCR_IARB_SUPPORT
12695 | 		case OPT_IARB:
12696 | 			driver_setup.iarb = val;
12697 | 			break;
12698 | #endif
12699 | 		default:
12700 | 			printk("sym53c8xx_setup: unexpected boot option '%.*s' ignored\n", (int)(pc-cur+1), cur);
12701 | 			break;
12702 | 		}
12703 | |
12704 | 		if ((cur = strchr(cur, ARG_SEP)) != NULL)
12705 | ++cur; |
12706 | } |
12707 | #endif /* SCSI_NCR_BOOT_COMMAND_LINE_SUPPORT */ |
12708 | return 1; |
12709 | } |
12710 | |
12711 | #if LINUX_VERSION_CODE >= LinuxVersionCode(2,3,13)
12712 | #ifndef MODULE |
12713 | __setup("sym53c8xx=", sym53c8xx_setup); |
12714 | #endif |
12715 | #endif |
12716 | |
12717 | static int |
12718 | sym53c8xx_pci_init(Scsi_Host_Template *tpnt, pcidev_t pdev, ncr_device *device); |
12719 | |
12720 | /* |
12721 | ** Linux entry point for SYM53C8XX devices detection routine. |
12722 | ** |
12723 | ** Called by the middle-level scsi drivers at initialization time, |
12724 | ** or at module installation. |
12725 | ** |
12726 | ** Read the PCI configuration and try to attach each |
12727 | ** detected NCR board. |
12728 | ** |
12729 | ** If NVRAM is present, try to attach boards according to |
12730 | **	the user defined boot order.
12731 | ** |
12732 | ** Returns the number of boards successfully attached. |
12733 | */ |
12734 | |
12735 | static void __init ncr_print_driver_setup(void) |
12736 | { |
12737 | #define YesNo(y) y ? 'y' : 'n' |
12738 | 	printk (NAME53C8XX ": setup=disc:%c,specf:%d,ultra:%d,tags:%d,sync:%d,"
12739 | "burst:%d,wide:%c,diff:%d,revprob:%c,buschk:0x%x\n", |
12740 | YesNo(driver_setup.disconnection), |
12741 | driver_setup.special_features, |
12742 | driver_setup.ultra_scsi, |
12743 | driver_setup.default_tags, |
12744 | driver_setup.default_sync, |
12745 | driver_setup.burst_max, |
12746 | YesNo(driver_setup.max_wide), |
12747 | driver_setup.diff_support, |
12748 | YesNo(driver_setup.reverse_probe), |
12749 | driver_setup.bus_check); |
12750 | |
12751 | 	printk (NAME53C8XX ": setup=mpar:%c,spar:%c,fsn=%c,verb:%d,debug:0x%x,"
12752 | "led:%c,settle:%d,irqm:0x%x,nvram:0x%x,pcifix:0x%x\n", |
12753 | YesNo(driver_setup.master_parity), |
12754 | YesNo(driver_setup.scsi_parity), |
12755 | YesNo(driver_setup.force_sync_nego), |
12756 | driver_setup.verbose, |
12757 | driver_setup.debug, |
12758 | YesNo(driver_setup.led_pin), |
12759 | driver_setup.settle_delay, |
12760 | driver_setup.irqm, |
12761 | driver_setup.use_nvram, |
12762 | driver_setup.pci_fix_up); |
12763 | #undef YesNo |
12764 | } |
12765 | |
12766 | /*=================================================================== |
12767 | ** SYM53C8XX devices description table and chip ids list. |
12768 | **=================================================================== |
12769 | */ |
12770 | |
12771 | static ncr_chip	ncr_chip_table[] __initdata	= SCSI_NCR_CHIP_TABLE;
12772 | static ushort	ncr_chip_ids[]   __initdata	= SCSI_NCR_CHIP_IDS;
12773 | |
12774 | #ifdef SCSI_NCR_PQS_PDS_SUPPORT |
12775 | /*=================================================================== |
12776 | ** Detect all NCR PQS/PDS boards and keep track of their bus nr. |
12777 | ** |
12778 | ** The NCR PQS or PDS card is constructed as a DEC bridge |
12779 | ** behind which sit a proprietary NCR memory controller and |
12780 | ** four or two 53c875s as separate devices. In its usual mode |
12781 | ** of operation, the 875s are slaved to the memory controller |
12782 | ** for all transfers. We can tell if an 875 is part of a |
12783 | ** PQS/PDS or not since if it is, it will be on the same bus |
12784 | ** as the memory controller. To operate with the Linux |
12785 | ** driver, the memory controller is disabled and the 875s |
12786 | ** freed to function independently. The only wrinkle is that |
12787 | ** the preset SCSI ID (which may be zero) must be read in from |
12788 | ** a special configuration space register of the 875 |
12789 | **=================================================================== |
12790 | */ |
12791 | #define SCSI_NCR_MAX_PQS_BUS 16 |
12792 | static int pqs_bus[SCSI_NCR_MAX_PQS_BUS] __initdata = { 0 }; |
12793 | |
12794 | static void __init ncr_detect_pqs_pds(void)
12795 | {
12796 | 	short index;
12797 | 	pcidev_t dev = PCIDEV_NULL;
12798 | 
12799 | 	for(index=0; index < SCSI_NCR_MAX_PQS_BUS; index++) {
12800 | 		u_char tmp;
12801 | 
12802 | 		dev = pci_find_device(0x101a, 0x0009, dev);
12803 | 		if (dev == PCIDEV_NULL) {
12804 | 			pqs_bus[index] = -1;
12805 | 			break;
12806 | 		}
12807 | 		printk(KERN_INFO NAME53C8XX ": NCR PQS/PDS memory controller detected on bus %d\n", PciBusNumber(dev));
12808 | 		pci_read_config_byte(dev, 0x44, &tmp);
12809 | 		/* bit 1: allow individual 875 configuration */
12810 | 		tmp |= 0x2;
12811 | 		pci_write_config_byte(dev, 0x44, tmp);
12812 | 		pci_read_config_byte(dev, 0x45, &tmp);
12813 | 		/* bit 2: drive individual 875 interrupts to the bus */
12814 | 		tmp |= 0x4;
12815 | 		pci_write_config_byte(dev, 0x45, tmp);
12816 | 
12817 | 		pqs_bus[index] = PciBusNumber(dev);
12818 | 	}
12819 | }
12820 | #endif /* SCSI_NCR_PQS_PDS_SUPPORT */ |
12821 | |
12822 | /*=================================================================== |
12823 | ** Detect all 53c8xx hosts and then attach them. |
12824 | ** |
12825 | ** If we are using NVRAM, once all hosts are detected, we need to |
12826 | ** check any NVRAM for boot order in case detect and boot order |
12827 | ** differ and attach them using the order in the NVRAM. |
12828 | ** |
12829 | ** If no NVRAM is found or data appears invalid attach boards in |
12830 | **	the order they are detected.
12831 | **=================================================================== |
12832 | */ |
12833 | int __init sym53c8xx_detect(Scsi_Host_Template *tpnt) |
12834 | { |
12835 | pcidev_t pcidev; |
12836 | int i, j, chips, hosts, count; |
12837 | int attach_count = 0; |
12838 | ncr_device *devtbl, *devp; |
12839 | #ifdef SCSI_NCR_NVRAM_SUPPORT |
12840 | ncr_nvram nvram0, nvram, *nvp; |
12841 | #endif |
12842 | |
12843 | /* |
12844 | ** PCI is required. |
12845 | */ |
12846 | 	if (!pci_present())
12847 | return 0; |
12848 | |
12849 | /* |
12850 | ** Initialize driver general stuff. |
12851 | */ |
12852 | #ifdef SCSI_NCR_PROC_INFO_SUPPORT |
12853 | #if LINUX_VERSION_CODE < LinuxVersionCode(2,3,27)
12854 | 	tpnt->proc_dir  = &proc_scsi_sym53c8xx;
12855 | #else
12856 | 	tpnt->proc_name = NAME53C8XX;
12857 | #endif |
12858 | tpnt->proc_info = sym53c8xx_proc_info; |
12859 | #endif |
12860 | |
12861 | #if defined(SCSI_NCR_BOOT_COMMAND_LINE_SUPPORT) && defined(MODULE) |
12862 | if (sym53c8xx) |
12863 | sym53c8xx_setup(sym53c8xx); |
12864 | #endif |
12865 | #ifdef SCSI_NCR_DEBUG_INFO_SUPPORT |
12866 | ncr_debug = driver_setup.debug; |
12867 | #endif |
12868 | |
12869 | 	if (initverbose >= 2)
12870 | ncr_print_driver_setup(); |
12871 | |
12872 | /* |
12873 | **	Allocate the device table since we do not want to
12874 | ** overflow the kernel stack. |
12875 | ** 1 x 4K PAGE is enough for more than 40 devices for i386. |
12876 | */ |
12877 | 	devtbl = m_calloc(PAGE_SIZE, "devtbl");
12878 | if (!devtbl) |
12879 | return 0; |
12880 | |
12881 | /* |
12882 | ** Detect all NCR PQS/PDS memory controllers. |
12883 | */ |
12884 | #ifdef SCSI_NCR_PQS_PDS_SUPPORT |
12885 | ncr_detect_pqs_pds(); |
12886 | #endif |
12887 | |
12888 | /* |
12889 | ** Detect all 53c8xx hosts. |
12890 | ** Save the first Symbios NVRAM content if any |
12891 | ** for the boot order. |
12892 | */ |
12893 | chips = sizeof(ncr_chip_ids) / sizeof(ncr_chip_ids[0]); |
12894 | 	hosts = PAGE_SIZE / sizeof(*devtbl);
12895 | #ifdef SCSI_NCR_NVRAM_SUPPORT |
12896 | nvp = (driver_setup.use_nvram & 0x1) ? &nvram0 : 0; |
12897 | #endif |
12898 | j = 0; |
12899 | count = 0; |
12900 | 	pcidev = PCIDEV_NULL;
12901 | while (1) { |
12902 | char *msg = ""; |
12903 | if (count >= hosts) |
12904 | break; |
12905 | if (j >= chips) |
12906 | break; |
12907 | i = driver_setup.reverse_probe ? chips - 1 - j : j; |
12908 | 		pcidev = pci_find_device(PCI_VENDOR_ID_NCR, ncr_chip_ids[i],
12909 | pcidev); |
12910 | 		if (pcidev == PCIDEV_NULL) {
12911 | ++j; |
12912 | continue; |
12913 | } |
12914 | 		/* Some hardware, such as the HP LH4, may report PCI devices twice */
12915 | for (i = 0; i < count ; i++) { |
12916 | 			if (devtbl[i].slot.bus	      == PciBusNumber(pcidev) &&
12917 | 			    devtbl[i].slot.device_fn  == PciDeviceFn(pcidev))
12918 | break; |
12919 | } |
12920 | if (i != count) /* Ignore this device if we already have it */ |
12921 | continue; |
12922 | devp = &devtbl[count]; |
12923 | devp->host_id = driver_setup.host_id; |
12924 | devp->attach_done = 0; |
12925 | if (sym53c8xx_pci_init(tpnt, pcidev, devp)) { |
12926 | continue; |
12927 | } |
12928 | ++count; |
12929 | #ifdef SCSI_NCR_NVRAM_SUPPORT |
12930 | if (nvp) { |
12931 | ncr_get_nvram(devp, nvp); |
12932 | switch(nvp->type) { |
12933 | 			case SCSI_NCR_SYMBIOS_NVRAM:
12934 | /* |
12935 | * Switch to the other nvram buffer, so that |
12936 | * nvram0 will contain the first Symbios |
12937 | * format NVRAM content with boot order. |
12938 | */ |
12939 | nvp = &nvram; |
12940 | msg = "with Symbios NVRAM"; |
12941 | break; |
12942 | 			case SCSI_NCR_TEKRAM_NVRAM:
12943 | msg = "with Tekram NVRAM"; |
12944 | break; |
12945 | } |
12946 | } |
12947 | #endif |
12948 | #ifdef SCSI_NCR_PQS_PDS_SUPPORT |
12949 | if (devp->pqs_pds) |
12950 | msg = "(NCR PQS/PDS)"; |
12951 | #endif |
12952 | 		printk(KERN_INFO NAME53C8XX ": 53c%s detected %s\n",
12953 | devp->chip.name, msg); |
12954 | } |
12955 | |
12956 | /* |
12957 | ** If we have found a SYMBIOS NVRAM, use first the NVRAM boot |
12958 | ** sequence as device boot order. |
12959 | ** check devices in the boot record against devices detected. |
12960 | ** attach devices if we find a match. boot table records that |
12961 | ** do not match any detected devices will be ignored. |
12962 | ** devices that do not match any boot table will not be attached |
12963 | ** here but will attempt to be attached during the device table |
12964 | ** rescan. |
12965 | */ |
12966 | #ifdef SCSI_NCR_NVRAM_SUPPORT |
12967 | 	if (!nvp || nvram0.type != SCSI_NCR_SYMBIOS_NVRAM)
12968 | goto next; |
12969 | for (i = 0; i < 4; i++) { |
12970 | Symbios_host *h = &nvram0.data.Symbios.host[i]; |
12971 | for (j = 0 ; j < count ; j++) { |
12972 | devp = &devtbl[j]; |
12973 | if (h->device_fn != devp->slot.device_fn || |
12974 | h->bus_nr != devp->slot.bus || |
12975 | h->device_id != devp->chip.device_id) |
12976 | continue; |
12977 | if (devp->attach_done) |
12978 | continue; |
12979 | 			if (h->flags & SYMBIOS_INIT_SCAN_AT_BOOT) {
12980 | ncr_get_nvram(devp, nvp); |
12981 | if (!ncr_attach (tpnt, attach_count, devp)) |
12982 | attach_count++; |
12983 | } |
12984 | else if (!(driver_setup.use_nvram & 0x80)) |
12985 | 				printk(KERN_INFO NAME53C8XX
12986 | ": 53c%s state OFF thus not attached\n", |
12987 | devp->chip.name); |
12988 | else |
12989 | continue; |
12990 | |
12991 | devp->attach_done = 1; |
12992 | break; |
12993 | } |
12994 | } |
12995 | next: |
12996 | #endif |
12997 | |
12998 | /* |
12999 | ** Rescan device list to make sure all boards attached. |
13000 | ** Devices without boot records will not be attached yet |
13001 | ** so try to attach them here. |
13002 | */ |
13003 | for (i= 0; i < count; i++) { |
13004 | devp = &devtbl[i]; |
13005 | if (!devp->attach_done) { |
13006 | #ifdef SCSI_NCR_NVRAM_SUPPORT |
13007 | ncr_get_nvram(devp, nvp); |
13008 | #endif |
13009 | if (!ncr_attach (tpnt, attach_count, devp)) |
13010 | attach_count++; |
13011 | } |
13012 | } |
13013 | |
13014 | 	m_free(devtbl, PAGE_SIZE, "devtbl");
13015 | |
13016 | return attach_count; |
13017 | } |
13018 | |
13019 | /*=================================================================== |
13020 | ** Read and check the PCI configuration for any detected NCR |
13021 | ** boards and save data for attaching after all boards have |
13022 | ** been detected. |
13023 | **=================================================================== |
13024 | */ |
13025 | static int __init |
13026 | sym53c8xx_pci_init(Scsi_Host_Template *tpnt, pcidev_t pdev, ncr_device *device) |
13027 | { |
13028 | 	u_short vendor_id, device_id, command, status_reg;
13029 | 	u_char cache_line_size, latency_timer;
13030 | 	u_char suggested_cache_line_size = 0;
13031 | 	u_char pci_fix_up = driver_setup.pci_fix_up;
13032 | 	u_char revision;
13033 | 	u_int irq;
13034 | 	u_long base, base_2, io_port;
13035 | int i; |
13036 | ncr_chip *chip; |
13037 | |
13038 | 	printk(KERN_INFO NAME53C8XX ": at PCI bus %d, device %d, function %d\n",
13039 | 		PciBusNumber(pdev),
13040 | 		(int) (PciDeviceFn(pdev) & 0xf8) >> 3,
13041 | 		(int) (PciDeviceFn(pdev) & 7));
13042 | |
13043 | #ifdef SCSI_NCR_DYNAMIC_DMA_MAPPING |
13044 | if (!pci_dma_supported(pdev, (dma_addr_t) (0xffffffffUL))) { |
13045 | 		printk(KERN_WARNING NAME53C8XX
13046 | "32 BIT PCI BUS DMA ADDRESSING NOT SUPPORTED\n"); |
13047 | return -1; |
13048 | } |
13049 | #endif |
13050 | |
13051 | /* |
13052 | ** Read info from the PCI config space. |
13053 | ** pci_read_config_xxx() functions are assumed to be used for |
13054 | ** successfully detected PCI devices. |
13055 | */ |
13056 | vendor_id = PciVendorId(pdev); |
13057 | device_id = PciDeviceId(pdev); |
13058 | irq = PciIrqLine(pdev); |
13059 | i = 0; |
13060 | i = pci_get_base_address(pdev, i, &io_port); |
13061 | i = pci_get_base_address(pdev, i, &base); |
13062 | (void) pci_get_base_address(pdev, i, &base_2); |
13063 | |
13064 | 	pci_read_config_word(pdev, PCI_COMMAND, &command);
13065 | 	pci_read_config_byte(pdev, PCI_CLASS_REVISION, &revision);
13066 | 	pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &cache_line_size);
13067 | 	pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &latency_timer);
13068 | 	pci_read_config_word(pdev, PCI_STATUS, &status_reg);
13069 | |
13070 | #ifdef SCSI_NCR_PQS_PDS_SUPPORT |
13071 | /* |
13072 | ** Match the BUS number for PQS/PDS devices. |
13073 | ** Read the SCSI ID from a special register mapped |
13074 | ** into the configuration space of the individual |
13075 | ** 875s. This register is set up by the PQS bios |
13076 | */ |
13077 | 	for(i = 0; i < SCSI_NCR_MAX_PQS_BUS && pqs_bus[i] != -1; i++) {
13078 | 		u_char tmp;
13079 | 		if (pqs_bus[i] == PciBusNumber(pdev)) {
13080 | 			pci_read_config_byte(pdev, 0x84, &tmp);
13081 | device->pqs_pds = 1; |
13082 | device->host_id = tmp; |
13083 | break; |
13084 | } |
13085 | } |
13086 | #endif /* SCSI_NCR_PQS_PDS_SUPPORT */ |
13087 | |
13088 | /* |
13089 | ** If user excludes this chip, donnot initialize it. |
13090 | */ |
13091 | 	for (i = 0 ; i < SCSI_NCR_MAX_EXCLUDES ; i++) {
13092 | 		if (driver_setup.excludes[i] ==
13093 | 			(io_port & PCI_BASE_ADDRESS_IO_MASK))
13094 | return -1; |
13095 | } |
13096 | /* |
13097 | ** Check if the chip is supported |
13098 | */ |
13099 | chip = 0; |
13100 | for (i = 0; i < sizeof(ncr_chip_table)/sizeof(ncr_chip_table[0]); i++) { |
13101 | if (device_id != ncr_chip_table[i].device_id) |
13102 | continue; |
13103 | if (revision > ncr_chip_table[i].revision_id) |
13104 | continue; |
13105 | 		if (!(ncr_chip_table[i].features & FE_LDSTR))
13106 | 			break;
13107 | 		chip = &device->chip;
13108 | 		memcpy(chip, &ncr_chip_table[i], sizeof(*chip));
13109 | chip->revision_id = revision; |
13110 | break; |
13111 | } |
13112 | |
13113 | /* |
13114 | ** Ignore Symbios chips controlled by SISL RAID controller. |
13115 | ** This controller sets value 0x52414944 at RAM end - 16. |
13116 | */ |
13117 | #if defined(__i386__) && !defined(SCSI_NCR_PCI_MEM_NOT_SUPPORTED)
13118 | 	if (chip && (base_2 & PCI_BASE_ADDRESS_MEM_MASK)) {
13119 | 		unsigned int ram_size, ram_val;
13120 | 		u_long ram_ptr;
13121 | 
13122 | 		if (chip->features & FE_RAM8K)
13123 | 			ram_size = 8192;
13124 | 		else
13125 | 			ram_size = 4096;
13126 | 
13127 | 		ram_ptr = remap_pci_mem(base_2 & PCI_BASE_ADDRESS_MEM_MASK,
13128 | 					ram_size);
13129 | 		if (ram_ptr) {
13130 | 			ram_val = readl_raw(ram_ptr + ram_size - 16);
13131 | unmap_pci_mem(ram_ptr, ram_size); |
13132 | if (ram_val == 0x52414944) { |
13133 | printk(NAME53C8XX"sym53c8xx"": not initializing, " |
13134 | "driven by SISL RAID controller.\n"); |
13135 | return -1; |
13136 | } |
13137 | } |
13138 | } |
13139 | #endif /* i386 and PCI MEMORY accessible */ |
13140 | |
13141 | if (!chip) { |
13142 | printk(NAME53C8XX"sym53c8xx" ": not initializing, device not supported\n"); |
13143 | return -1; |
13144 | } |
13145 | |
13146 | #ifdef __powerpc__ |
13147 | /* |
13148 | ** Fix-up for power/pc. |
13149 | ** Should not be performed by the driver. |
13150 | */ |
13151 | if ((command & (PCI_COMMAND_IO0x1 | PCI_COMMAND_MEMORY0x2)) |
13152 | != (PCI_COMMAND_IO0x1 | PCI_COMMAND_MEMORY0x2)) { |
13153 | printk(NAME53C8XX"sym53c8xx" ": setting%s%s...\n", |
13154 | (command & PCI_COMMAND_IO0x1) ? "" : " PCI_COMMAND_IO", |
13155 | (command & PCI_COMMAND_MEMORY0x2) ? "" : " PCI_COMMAND_MEMORY"); |
13156 | command |= (PCI_COMMAND_IO0x1 | PCI_COMMAND_MEMORY0x2); |
13157 | pci_write_config_word(pdev, PCI_COMMAND, command)pcibios_write_config_word(((pdev)>>8), ((pdev)&0xff ), 0x04, command); |
13158 | } |
13159 | |
13160 | #if LINUX_VERSION_CODE131108 < LinuxVersionCode(2,2,0)(((2)<<16)+((2)<<8)+(0)) |
13161 | if ( is_prep ) { |
13162 | if (io_port >= 0x10000000) { |
13163 | printk(NAME53C8XX"sym53c8xx" ": reallocating io_port (Wacky IBM)"); |
13164 | io_port = (io_port & 0x00FFFFFF) | 0x01000000; |
13165 | pci_write_config_dword(pdev,pcibios_write_config_dword(((pdev)>>8), ((pdev)&0xff ), 0x10, io_port) |
13166 | PCI_BASE_ADDRESS_0, io_port)pcibios_write_config_dword(((pdev)>>8), ((pdev)&0xff ), 0x10, io_port); |
13167 | } |
13168 | if (base >= 0x10000000) { |
13169 | printk(NAME53C8XX"sym53c8xx" ": reallocating base (Wacky IBM)"); |
13170 | base = (base & 0x00FFFFFF) | 0x01000000; |
13171 | pci_write_config_dword(pdev,pcibios_write_config_dword(((pdev)>>8), ((pdev)&0xff ), 0x14, base) |
13172 | PCI_BASE_ADDRESS_1, base)pcibios_write_config_dword(((pdev)>>8), ((pdev)&0xff ), 0x14, base); |
13173 | } |
13174 | if (base_2 >= 0x10000000) { |
13175 | printk(NAME53C8XX"sym53c8xx" ": reallocating base2 (Wacky IBM)"); |
13176 | base_2 = (base_2 & 0x00FFFFFF) | 0x01000000; |
13177 | pci_write_config_dword(pdev,pcibios_write_config_dword(((pdev)>>8), ((pdev)&0xff ), 0x18, base_2) |
13178 | PCI_BASE_ADDRESS_2, base_2)pcibios_write_config_dword(((pdev)>>8), ((pdev)&0xff ), 0x18, base_2); |
13179 | } |
13180 | } |
13181 | #endif |
13182 | #endif /* __powerpc__ */ |
13183 | |
13184 | #if defined(__sparc__) && (LINUX_VERSION_CODE131108 < LinuxVersionCode(2,3,0)(((2)<<16)+((3)<<8)+(0))) |
13185 | /* |
13186 | ** Fix-ups for sparc. |
13187 | ** |
13188 | ** I wrote: Should not be performed by the driver, |
13189 | ** Guy wrote: but how can OBP know each and every PCI card, |
13190 | ** if they don't use Fcode? |
13191 | ** I replied: no need to know each and every PCI card, just |
13192 | ** be skilled enough to understand the PCI specs. |
13193 | */ |
13194 | |
13195 | /* |
13196 | ** PCI configuration is based on configuration registers being |
13197 | ** coherent with hardware and software resource identifications. |
13198 | ** This is fairly simple, but seems still too complex for Sparc. |
13199 | */ |
13200 | base = __pa(base); |
13201 | base_2 = __pa(base_2); |
13202 | |
13203 | if (!cache_line_size) |
13204 | suggested_cache_line_size = 16; |
13205 | |
13206 | driver_setup.pci_fix_up |= 0x7; |
13207 | |
13208 | #endif /* __sparc__ */ |
13209 | |
13210 | #if defined(__i386__1) && !defined(MODULE) |
13211 | if (!cache_line_size) { |
13212 | #if LINUX_VERSION_CODE131108 < LinuxVersionCode(2,1,75)(((2)<<16)+((1)<<8)+(75)) |
13213 | extern char x86; |
13214 | switch(x86) { |
13215 | #else |
13216 | switch(boot_cpu_data.x86) { |
13217 | #endif |
13218 | case 4: suggested_cache_line_size = 4; break; |
13219 | case 6: |
13220 | case 5: suggested_cache_line_size = 8; break; |
13221 | } |
13222 | } |
13223 | #endif /* __i386__ */ |
13224 | |
13225 | /* |
13226 | ** Check availability of IO space, memory space. |
13227 | ** Enable master capability if not yet. |
13228 | ** |
13229 | ** We shouldn't have to care about the IO region when |
13230 | ** we are using MMIO. But calling check_region() from |
13231 | **	both the ncr53c8xx and the sym53c8xx drivers prevents
13232 | **	the same device from being attached by both drivers.
13233 | ** If you have a better idea, let me know. |
13234 | */ |
13235 | /* #ifdef SCSI_NCR_IOMAPPED */ |
13236 | #if 1 |
13237 | if (!(command & PCI_COMMAND_IO0x1)) { |
13238 | printk(NAME53C8XX"sym53c8xx" ": I/O base address (0x%lx) disabled.\n", |
13239 | (long) io_port); |
13240 | io_port = 0; |
13241 | } |
13242 | #endif |
13243 | if (!(command & PCI_COMMAND_MEMORY0x2)) { |
13244 | printk(NAME53C8XX"sym53c8xx" ": PCI_COMMAND_MEMORY not set.\n"); |
13245 | base = 0; |
13246 | base_2 = 0; |
13247 | } |
13248 | io_port &= PCI_BASE_ADDRESS_IO_MASK(~0x03); |
13249 | base &= PCI_BASE_ADDRESS_MEM_MASK(~0x0f); |
13250 | base_2 &= PCI_BASE_ADDRESS_MEM_MASK(~0x0f); |
13251 | |
13252 | /* #ifdef SCSI_NCR_IOMAPPED */ |
13253 | #if 1 |
13254 | if (io_port && check_region (io_port, 128)) { |
13255 | printk(NAME53C8XX"sym53c8xx" ": IO region 0x%lx[0..127] is in use\n", |
13256 | (long) io_port); |
13257 | io_port = 0; |
13258 | } |
13259 | if (!io_port) |
13260 | return -1; |
13261 | #endif |
13262 | #ifndef SCSI_NCR_IOMAPPED |
13263 | if (!base) { |
13264 | printk(NAME53C8XX"sym53c8xx" ": MMIO base address disabled.\n"); |
13265 | return -1; |
13266 | } |
13267 | #endif |
13268 | |
13269 | /* |
13270 | ** Set MASTER capable and PARITY bit, if not yet. |
13271 | */ |
13272 | if ((command & (PCI_COMMAND_MASTER0x4 | PCI_COMMAND_PARITY0x40)) |
13273 | != (PCI_COMMAND_MASTER0x4 | PCI_COMMAND_PARITY0x40)) { |
13274 | printk(NAME53C8XX"sym53c8xx" ": setting%s%s...(fix-up)\n", |
13275 | (command & PCI_COMMAND_MASTER0x4) ? "" : " PCI_COMMAND_MASTER", |
13276 | (command & PCI_COMMAND_PARITY0x40) ? "" : " PCI_COMMAND_PARITY"); |
13277 | command |= (PCI_COMMAND_MASTER0x4 | PCI_COMMAND_PARITY0x40); |
13278 | pci_write_config_word(pdev, PCI_COMMAND, command)pcibios_write_config_word(((pdev)>>8), ((pdev)&0xff ), 0x04, command); |
13279 | } |
13280 | |
13281 | /* |
13282 | ** Fix some features according to driver setup. |
13283 | */ |
13284 | if (!(driver_setup.special_features & 1)) |
13285 | chip->features &= ~FE_SPECIAL_SET(((1<<6)|(1<<7)|(1<<8)|(1<<9))|(1<< 10)|(1<<11)|(1<<13)|(1<<12)|(1<<14)); |
13286 | else { |
13287 | if (driver_setup.special_features & 2) |
13288 | chip->features &= ~FE_WRIE(1<<8); |
13289 | if (driver_setup.special_features & 4) |
13290 | chip->features &= ~FE_NOPM(1<<19); |
13291 | } |
13292 | |
13293 | /* |
13294 | ** Work around for errant bit in 895A. The 66Mhz |
13295 | ** capable bit is set erroneously. Clear this bit. |
13296 | ** (Item 1 DEL 533) |
13297 | ** |
13298 | ** Make sure Config space and Features agree. |
13299 | ** |
13300 | ** Recall: writes are not normal to status register - |
13301 | ** write a 1 to clear and a 0 to leave unchanged. |
13302 | ** Can only reset bits. |
13303 | */ |
13304 | if (chip->features & FE_66MHZ(1<<23)) { |
13305 | if (!(status_reg & PCI_STATUS_66MHZ0x20)) |
13306 | chip->features &= ~FE_66MHZ(1<<23); |
13307 | } |
13308 | else { |
13309 | if (status_reg & PCI_STATUS_66MHZ0x20) { |
13310 | status_reg = PCI_STATUS_66MHZ0x20; |
13311 | pci_write_config_word(pdev, PCI_STATUS, status_reg)pcibios_write_config_word(((pdev)>>8), ((pdev)&0xff ), 0x06, status_reg); |
13312 | pci_read_config_word(pdev, PCI_STATUS, &status_reg)pcibios_read_config_word(((pdev)>>8), ((pdev)&0xff) , 0x06, &status_reg); |
13313 | } |
13314 | } |
13315 | |
13316 | if (driver_setup.ultra_scsi < 3 && (chip->features & FE_ULTRA3(1<<22))) { |
13317 | chip->features |= FE_ULTRA2(1<<3); |
13318 | chip->features &= ~FE_ULTRA3(1<<22); |
13319 | } |
13320 | if (driver_setup.ultra_scsi < 2 && (chip->features & FE_ULTRA2(1<<3))) { |
13321 | chip->features |= FE_ULTRA(1<<2); |
13322 | chip->features &= ~FE_ULTRA2(1<<3); |
13323 | } |
13324 | if (driver_setup.ultra_scsi < 1) |
13325 | chip->features &= ~FE_ULTRA(1<<2); |
13326 | |
13327 | if (!driver_setup.max_wide) |
13328 | chip->features &= ~FE_WIDE(1<<1); |
13329 | |
13330 | /* |
13331 | * C1010 Ultra3 support requires 16 bit data transfers. |
13332 | */ |
13333 | 	if (!driver_setup.max_wide && (chip->features & FE_ULTRA3)) {
13334 | 		chip->features |= FE_ULTRA2;
13335 | 		chip->features &= ~FE_ULTRA3;
13336 | 	}
13337 | |
13338 | /* |
13339 | ** Some features are required to be enabled in order to |
13340 | ** work around some chip problems. :) ;) |
13341 | **	(ITEM 12 of a DEL about the 896 that I do not have yet).
13342 | ** We must ensure the chip will use WRITE AND INVALIDATE. |
13343 | ** The revision number limit is for now arbitrary. |
13344 | */ |
13345 | if (device_id == PCI_DEVICE_ID_NCR_53C8960x000b && revision <= 0x10) { |
13346 | chip->features |= (FE_WRIE(1<<8) | FE_CLSE(1<<7)); |
13347 | pci_fix_up |= 3; /* Force appropriate PCI fix-up */ |
13348 | } |
13349 | |
13350 | #ifdef SCSI_NCR_PCI_FIX_UP_SUPPORT |
13351 | /* |
13352 | ** Try to fix up PCI config according to wished features. |
13353 | */ |
13354 | if ((pci_fix_up & 1) && (chip->features & FE_CLSE(1<<7)) && |
13355 | !cache_line_size && suggested_cache_line_size) { |
13356 | cache_line_size = suggested_cache_line_size; |
13357 | pci_write_config_byte(pdev,pcibios_write_config_byte(((pdev)>>8), ((pdev)&0xff ), 0x0c, cache_line_size) |
13358 | PCI_CACHE_LINE_SIZE, cache_line_size)pcibios_write_config_byte(((pdev)>>8), ((pdev)&0xff ), 0x0c, cache_line_size); |
13359 | printk(NAME53C8XX"sym53c8xx" ": PCI_CACHE_LINE_SIZE set to %d (fix-up).\n", |
13360 | cache_line_size); |
13361 | } |
13362 | |
13363 | if ((pci_fix_up & 2) && cache_line_size && |
13364 | (chip->features & FE_WRIE(1<<8)) && !(command & PCI_COMMAND_INVALIDATE0x10)) { |
13365 | printk(NAME53C8XX"sym53c8xx"": setting PCI_COMMAND_INVALIDATE (fix-up)\n"); |
13366 | command |= PCI_COMMAND_INVALIDATE0x10; |
13367 | pci_write_config_word(pdev, PCI_COMMAND, command)pcibios_write_config_word(((pdev)>>8), ((pdev)&0xff ), 0x04, command); |
13368 | } |
13369 | |
13370 | /* |
13371 | ** Tune PCI LATENCY TIMER according to burst max length transfer. |
13372 | ** (latency timer >= burst length + 6, we add 10 to be quite sure) |
13373 | */ |
13374 | |
13375 | if (chip->burst_max && (latency_timer == 0 || (pci_fix_up & 4))) { |
13376 | 		uchar lt = (1 << chip->burst_max) + 6 + 10;
13377 | if (latency_timer < lt) { |
13378 | printk(NAME53C8XX"sym53c8xx" |
13379 | ": changing PCI_LATENCY_TIMER from %d to %d.\n", |
13380 | (int) latency_timer, (int) lt); |
13381 | latency_timer = lt; |
13382 | pci_write_config_byte(pdev,pcibios_write_config_byte(((pdev)>>8), ((pdev)&0xff ), 0x0d, latency_timer) |
13383 | PCI_LATENCY_TIMER, latency_timer)pcibios_write_config_byte(((pdev)>>8), ((pdev)&0xff ), 0x0d, latency_timer); |
13384 | } |
13385 | } |
13386 | |
13387 | #endif /* SCSI_NCR_PCI_FIX_UP_SUPPORT */ |
13388 | |
13389 | /* |
13390 | ** Initialise ncr_device structure with items required by ncr_attach. |
13391 | */ |
13392 | device->pdev = pdev; |
13393 | device->slot.bus = PciBusNumber(pdev)((pdev)>>8); |
13394 | device->slot.device_fn = PciDeviceFn(pdev)((pdev)&0xff); |
13395 | device->slot.base = base; |
13396 | device->slot.base_2 = base_2; |
13397 | device->slot.io_port = io_port; |
13398 | device->slot.irq = irq; |
13399 | device->attach_done = 0; |
13400 | |
13401 | return 0; |
13402 | } |
13403 | |
13404 | |
13405 | /*=================================================================== |
13406 | ** Detect and try to read SYMBIOS and TEKRAM NVRAM. |
13407 | ** |
13408 | ** Data can be used to order booting of boards. |
13409 | ** |
13410 | ** Data is saved in ncr_device structure if NVRAM found. This |
13411 | ** is then used to find drive boot order for ncr_attach(). |
13412 | ** |
13413 | ** NVRAM data is passed to Scsi_Host_Template later during |
13414 | ** ncr_attach() for any device set up. |
13415 | *=================================================================== |
13416 | */ |
13417 | #ifdef SCSI_NCR_NVRAM_SUPPORT |
13418 | static void __init ncr_get_nvram(ncr_device *devp, ncr_nvram *nvp) |
13419 | { |
13420 | devp->nvram = nvp; |
13421 | if (!nvp) |
13422 | return; |
13423 | /* |
13424 | ** Get access to chip IO registers |
13425 | */ |
13426 | #ifdef SCSI_NCR_IOMAPPED |
13427 | request_region(devp->slot.io_port, 128, NAME53C8XX);
13428 | devp->slot.base_io = devp->slot.io_port; |
13429 | #else |
13430 | devp->slot.reg = (struct ncr_reg *) remap_pci_mem(devp->slot.base, 128); |
13431 | if (!devp->slot.reg) |
13432 | return; |
13433 | #endif |
13434 | |
13435 | /* |
13436 | ** Try to read SYMBIOS nvram. |
13437 | ** Try to read TEKRAM nvram if Symbios nvram not found. |
13438 | */ |
13439 | if (!sym_read_Symbios_nvram(&devp->slot, &nvp->data.Symbios)) |
13440 | nvp->type = SCSI_NCR_SYMBIOS_NVRAM;
13441 | else if (!sym_read_Tekram_nvram(&devp->slot, devp->chip.device_id, |
13442 | &nvp->data.Tekram)) |
13443 | nvp->type = SCSI_NCR_TEKRAM_NVRAM;
13444 | else { |
13445 | nvp->type = 0; |
13446 | devp->nvram = 0; |
13447 | } |
13448 | |
13449 | /* |
13450 | ** Release access to chip IO registers |
13451 | */ |
13452 | #ifdef SCSI_NCR_IOMAPPED |
13453 | release_region(devp->slot.base_io, 128); |
13454 | #else |
13455 | unmap_pci_mem((u_long) devp->slot.reg, 128ul);
13456 | #endif |
13457 | |
13458 | } |
13459 | #endif /* SCSI_NCR_NVRAM_SUPPORT */ |
13460 | |
13461 | /* |
13462 | ** Linux select queue depths function |
13463 | */ |
13464 | |
13465 | #define DEF_DEPTH   (driver_setup.default_tags)
13466 | #define ALL_TARGETS -2
13467 | #define NO_TARGET   -1
13468 | #define ALL_LUNS    -2
13469 | #define NO_LUN      -1
13470 | |
13471 | static int device_queue_depth(ncb_p np, int target, int lun) |
13472 | { |
13473 | int c, h, t, u, v; |
13474 | char *p = driver_setup.tag_ctrl; |
13475 | char *ep; |
13476 | |
13477 | h = -1; |
13478 | t = NO_TARGET;
13479 | u = NO_LUN;
13480 | while ((c = *p++) != 0) { |
13481 | v = simple_strtoul(p, &ep, 0); |
13482 | switch(c) { |
13483 | case '/': |
13484 | ++h; |
13485 | t = ALL_TARGETS;
13486 | u = ALL_LUNS;
13487 | break; |
13488 | case 't': |
13489 | if (t != target) |
13490 | t = (target == v) ? v : NO_TARGET;
13491 | u = ALL_LUNS;
13492 | break; |
13493 | case 'u': |
13494 | if (u != lun) |
13495 | u = (lun == v) ? v : NO_LUN;
13496 | break; |
13497 | case 'q': |
13498 | if (h == np->unit && |
13499 | (t == ALL_TARGETS || t == target) &&
13500 | (u == ALL_LUNS || u == lun))
13501 | return v; |
13502 | break; |
13503 | case '-': |
13504 | t = ALL_TARGETS;
13505 | u = ALL_LUNS;
13506 | break; |
13507 | default: |
13508 | break; |
13509 | } |
13510 | p = ep; |
13511 | } |
13512 | return DEF_DEPTH;
13513 | } |
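/*
** Example (illustrative only, not an exhaustive description of the
** control string syntax): if driver_setup.tag_ctrl were set to
** "/t1q8/q16", the parser above would return 8 tags for target 1 on
** host unit 0, 16 tags for every device on host unit 1, and
** driver_setup.default_tags for everything else.
*/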
13514 | |
13515 | static void sym53c8xx_select_queue_depths(struct Scsi_Host *host, struct scsi_device *devlist) |
13516 | { |
13517 | struct scsi_device *device; |
13518 | |
13519 | for (device = devlist; device; device = device->next) { |
13520 | ncb_p np; |
13521 | tcb_p tp; |
13522 | lcb_p lp; |
13523 | int numtags; |
13524 | |
13525 | if (device->host != host) |
13526 | continue; |
13527 | |
13528 | np = ((struct host_data *) host->hostdata)->ncb; |
13529 | tp = &np->target[device->id]; |
13530 | lp = ncr_lp(np, tp, device->lun);
13531 | |
13532 | /* |
13533 | ** Select queue depth from driver setup. |
13534 | ** Do not use more than configured by user.
13535 | ** Use at least 2.
13536 | ** Do not use more than our maximum.
13537 | */ |
13538 | numtags = device_queue_depth(np, device->id, device->lun); |
13539 | if (numtags > tp->usrtags) |
13540 | numtags = tp->usrtags; |
13541 | if (!device->tagged_supported) |
13542 | numtags = 1; |
13543 | device->queue_depth = numtags; |
13544 | if (device->queue_depth < 2) |
13545 | device->queue_depth = 2; |
13546 | if (device->queue_depth > MAX_TAGS)
13547 | device->queue_depth = MAX_TAGS;
13548 | |
13549 | /* |
13550 | ** Since the queue depth is not tunable under Linux, |
13551 | ** we need to know this value in order not to |
13552 | ** announce misleading things to the user.
13553 | */ |
13554 | if (lp) { |
13555 | lp->numtags = lp->maxtags = numtags; |
13556 | lp->scdev_depth = device->queue_depth; |
13557 | } |
13558 | ncr_setup_tags (np, device->id, device->lun); |
13559 | |
13560 | #ifdef DEBUG_SYM53C8XX |
13561 | printk("sym53c8xx_select_queue_depth: host=%d, id=%d, lun=%d, depth=%d\n", |
13562 | np->unit, device->id, device->lun, device->queue_depth); |
13563 | #endif |
13564 | } |
13565 | } |
13566 | |
13567 | /* |
13568 | ** Linux entry point for info() function |
13569 | */ |
13570 | const char *sym53c8xx_info (struct Scsi_Host *host) |
13571 | { |
13572 | return SCSI_NCR_DRIVER_NAME;
13573 | } |
13574 | |
13575 | /* |
13576 | ** Linux entry point of queuecommand() function |
13577 | */ |
13578 | |
13579 | int sym53c8xx_queue_command (Scsi_Cmnd *cmd, void (* done)(Scsi_Cmnd *)) |
13580 | { |
13581 | ncb_p np = ((struct host_data *) cmd->host->hostdata)->ncb; |
13582 | unsigned long flags; |
13583 | int sts; |
13584 | |
13585 | #ifdef DEBUG_SYM53C8XX |
13586 | printk("sym53c8xx_queue_command\n"); |
13587 | #endif |
13588 | |
13589 | cmd->scsi_done = done; |
13590 | cmd->host_scribble = NULL;
13591 | cmd->SCp.ptr = NULL;
13592 | cmd->SCp.buffer = NULL;
13593 | #ifdef SCSI_NCR_DYNAMIC_DMA_MAPPING |
13594 | cmd->__data_mapped = 0; |
13595 | cmd->__data_mapping = 0; |
13596 | #endif |
13597 | |
13598 | NCR_LOCK_NCB(np, flags);
13599 | |
13600 | if ((sts = ncr_queue_command(np, cmd)) != DID_OK) {
13601 | SetScsiResult(cmd, sts, 0);
13602 | #ifdef DEBUG_SYM53C8XX |
13603 | printk("sym53c8xx : command not queued - result=%d\n", sts); |
13604 | #endif |
13605 | } |
13606 | #ifdef DEBUG_SYM53C8XX |
13607 | else |
13608 | printk("sym53c8xx : command successfully queued\n"); |
13609 | #endif |
13610 | |
13611 | NCR_UNLOCK_NCB(np, flags);
13612 | |
13613 | if (sts != DID_OK) {
13614 | unmap_scsi_data(np, cmd);
13615 | done(cmd); |
13616 | } |
13617 | |
13618 | return sts; |
13619 | } |
13620 | |
13621 | /* |
13622 | ** Linux entry point of the interrupt handler. |
13623 | ** Since linux versions > 1.3.70, we trust the kernel for |
13624 | ** passing the internal host descriptor as 'dev_id'. |
13625 | ** Otherwise, we scan the host list and call the interrupt |
13626 | ** routine for each host that uses this IRQ. |
13627 | */ |
13628 | |
13629 | static void sym53c8xx_intr(int irq, void *dev_id, struct pt_regs * regs) |
13630 | { |
13631 | unsigned long flags; |
13632 | ncb_p np = (ncb_p) dev_id; |
13633 | Scsi_Cmnd *done_list; |
13634 | |
13635 | #ifdef DEBUG_SYM53C8XX |
13636 | printk("sym53c8xx : interrupt received\n"); |
13637 | #endif |
13638 | |
13639 | if (DEBUG_FLAGS & DEBUG_TINY) printk ("[");
13640 |
13641 | NCR_LOCK_NCB(np, flags);
13642 | ncr_exception(np);
13643 | done_list = np->done_list;
13644 | np->done_list = 0;
13645 | NCR_UNLOCK_NCB(np, flags);
13646 |
13647 | if (DEBUG_FLAGS & DEBUG_TINY) printk ("]\n");
13648 |
13649 | if (done_list) {
13650 | NCR_LOCK_SCSI_DONE(np, flags);
13651 | ncr_flush_done_cmds(done_list);
13652 | NCR_UNLOCK_SCSI_DONE(np, flags);
13653 | } |
13654 | } |
13655 | |
13656 | /* |
13657 | ** Linux entry point of the timer handler |
13658 | */ |
13659 | |
13660 | static void sym53c8xx_timeout(unsigned long npref) |
13661 | { |
13662 | ncb_p np = (ncb_p) npref; |
13663 | unsigned long flags; |
13664 | Scsi_Cmnd *done_list; |
13665 | |
13666 | NCR_LOCK_NCB(np, flags);
13667 | ncr_timeout((ncb_p) np);
13668 | done_list = np->done_list;
13669 | np->done_list = 0;
13670 | NCR_UNLOCK_NCB(np, flags);
13671 |
13672 | if (done_list) {
13673 | NCR_LOCK_SCSI_DONE(np, flags);
13674 | ncr_flush_done_cmds(done_list);
13675 | NCR_UNLOCK_SCSI_DONE(np, flags);
13676 | } |
13677 | } |
13678 | |
13679 | /* |
13680 | ** Linux entry point of reset() function |
13681 | */ |
13682 | |
13683 | #if defined SCSI_RESET_SYNCHRONOUS && defined SCSI_RESET_ASYNCHRONOUS
13684 | int sym53c8xx_reset(Scsi_Cmnd *cmd, unsigned int reset_flags) |
13685 | #else |
13686 | int sym53c8xx_reset(Scsi_Cmnd *cmd) |
13687 | #endif |
13688 | { |
13689 | ncb_p np = ((struct host_data *) cmd->host->hostdata)->ncb; |
13690 | int sts; |
13691 | unsigned long flags; |
13692 | Scsi_Cmnd *done_list; |
13693 | |
13694 | #if defined SCSI_RESET_SYNCHRONOUS && defined SCSI_RESET_ASYNCHRONOUS
13695 | printk("sym53c8xx_reset: pid=%lu reset_flags=%x serial_number=%ld serial_number_at_timeout=%ld\n", |
13696 | cmd->pid, reset_flags, cmd->serial_number, cmd->serial_number_at_timeout); |
13697 | #else |
13698 | printk("sym53c8xx_reset: command pid %lu\n", cmd->pid); |
13699 | #endif |
13700 | |
13701 | NCR_LOCK_NCB(np, flags);
13702 | |
13703 | /* |
13704 | * We have to just ignore reset requests in some situations. |
13705 | */ |
13706 | #if defined SCSI_RESET_NOT_RUNNING
13707 | if (cmd->serial_number != cmd->serial_number_at_timeout) {
13708 | sts = SCSI_RESET_NOT_RUNNING;
13709 | goto out; |
13710 | } |
13711 | #endif |
13712 | /* |
13713 | * If the mid-level driver told us reset is synchronous, it seems |
13714 | * that we must call the done() callback for the involved command, |
13715 | * even if this command was not queued to the low-level driver, |
13716 | * before returning SCSI_RESET_SUCCESS. |
13717 | */ |
13718 | |
13719 | #if defined SCSI_RESET_SYNCHRONOUS && defined SCSI_RESET_ASYNCHRONOUS
13720 | sts = ncr_reset_bus(np, cmd,
13721 | (reset_flags & (SCSI_RESET_SYNCHRONOUS | SCSI_RESET_ASYNCHRONOUS)) == SCSI_RESET_SYNCHRONOUS);
13722 | #else |
13723 | sts = ncr_reset_bus(np, cmd, 0); |
13724 | #endif |
13725 | |
13726 | /* |
13727 | * Since we always reset the controller, when we return success, |
13728 | * we add this information to the return code. |
13729 | */ |
13730 | #if defined SCSI_RESET_HOST_RESET
13731 | if (sts == SCSI_RESET_SUCCESS)
13732 | sts |= SCSI_RESET_HOST_RESET;
13733 | #endif |
13734 | |
13735 | out: |
13736 | done_list = np->done_list; |
13737 | np->done_list = 0; |
13738 | NCR_UNLOCK_NCB(np, flags);
13739 | |
13740 | ncr_flush_done_cmds(done_list); |
13741 | |
13742 | return sts; |
13743 | } |
13744 | |
13745 | /* |
13746 | ** Linux entry point of abort() function |
13747 | */ |
13748 | |
13749 | int sym53c8xx_abort(Scsi_Cmnd *cmd) |
13750 | { |
13751 | ncb_p np = ((struct host_data *) cmd->host->hostdata)->ncb; |
13752 | int sts; |
13753 | unsigned long flags; |
13754 | Scsi_Cmnd *done_list; |
13755 | |
13756 | #if defined SCSI_RESET_SYNCHRONOUS && defined SCSI_RESET_ASYNCHRONOUS
13757 | printk("sym53c8xx_abort: pid=%lu serial_number=%ld serial_number_at_timeout=%ld\n", |
13758 | cmd->pid, cmd->serial_number, cmd->serial_number_at_timeout); |
13759 | #else |
13760 | printk("sym53c8xx_abort: command pid %lu\n", cmd->pid); |
13761 | #endif |
13762 | |
13763 | NCR_LOCK_NCB(np, flags);
13764 | |
13765 | #if defined SCSI_RESET_SYNCHRONOUS && defined SCSI_RESET_ASYNCHRONOUS
13766 | /* |
13767 | * We have to just ignore abort requests in some situations. |
13768 | */ |
13769 | if (cmd->serial_number != cmd->serial_number_at_timeout) { |
13770 | sts = SCSI_ABORT_NOT_RUNNING;
13771 | goto out; |
13772 | } |
13773 | #endif |
13774 | |
13775 | sts = ncr_abort_command(np, cmd); |
13776 | out: |
13777 | done_list = np->done_list; |
13778 | np->done_list = 0; |
13779 | NCR_UNLOCK_NCB(np, flags);
13780 | |
13781 | ncr_flush_done_cmds(done_list); |
13782 | |
13783 | return sts; |
13784 | } |
13785 | |
13786 | |
13787 | #ifdef MODULE |
13788 | int sym53c8xx_release(struct Scsi_Host *host) |
13789 | { |
13790 | #ifdef DEBUG_SYM53C8XX |
13791 | printk("sym53c8xx : release\n"); |
13792 | #endif |
13793 | ncr_detach(((struct host_data *) host->hostdata)->ncb); |
13794 | |
13795 | return 1; |
13796 | } |
13797 | #endif |
13798 | |
13799 | |
13800 | /* |
13801 | ** Scsi command waiting list management. |
13802 | ** |
13803 | ** It may happen that we cannot insert a scsi command into the start queue, |
13804 | ** in the following circumstances. |
13805 | ** Too few preallocated ccb(s), |
13806 | ** maxtags < cmd_per_lun of the Linux host control block, |
13807 | ** etc... |
13808 | ** Such scsi commands are inserted into a waiting list. |
13809 | ** When a scsi command completes, we try to requeue the commands of the
13810 | ** waiting list. |
13811 | */ |
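/*
** The waiting list needs no extra storage: it is a singly linked list
** threaded through the commands themselves, using the host_scribble
** field of each Scsi_Cmnd (aliased to next_wcmd just below) as the
** link to the next waiting command.
*/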
13812 | |
13813 | #define next_wcmd host_scribble |
13814 | |
13815 | static void insert_into_waiting_list(ncb_p np, Scsi_Cmnd *cmd) |
13816 | { |
13817 | Scsi_Cmnd *wcmd; |
13818 | |
13819 | #ifdef DEBUG_WAITING_LIST |
13820 | printk("%s: cmd %lx inserted into waiting list\n", ncr_name(np), (u_long) cmd);
13821 | #endif |
13822 | cmd->next_wcmd = 0; |
13823 | if (!(wcmd = np->waiting_list)) np->waiting_list = cmd; |
13824 | else { |
13825 | while ((wcmd->next_wcmd) != 0) |
13826 | wcmd = (Scsi_Cmnd *) wcmd->next_wcmd; |
13827 | wcmd->next_wcmd = (char *) cmd; |
13828 | } |
13829 | } |
13830 | |
13831 | static Scsi_Cmnd *retrieve_from_waiting_list(int to_remove, ncb_p np, Scsi_Cmnd *cmd) |
13832 | { |
13833 | Scsi_Cmnd **pcmd = &np->waiting_list; |
13834 | |
13835 | while (*pcmd) { |
13836 | if (cmd == *pcmd) { |
13837 | if (to_remove) { |
13838 | *pcmd = (Scsi_Cmnd *) cmd->next_wcmd; |
13839 | cmd->next_wcmd = 0; |
13840 | } |
13841 | #ifdef DEBUG_WAITING_LIST |
13842 | printk("%s: cmd %lx retrieved from waiting list\n", ncr_name(np), (u_long) cmd);
13843 | #endif |
13844 | return cmd; |
13845 | } |
13846 | pcmd = (Scsi_Cmnd **) &(*pcmd)->next_wcmd; |
13847 | } |
13848 | return 0; |
13849 | } |
13850 | |
13851 | static void process_waiting_list(ncb_p np, int sts) |
13852 | { |
13853 | Scsi_Cmnd *waiting_list, *wcmd; |
13854 | |
13855 | waiting_list = np->waiting_list; |
13856 | np->waiting_list = 0; |
13857 | |
13858 | #ifdef DEBUG_WAITING_LIST |
13859 | if (waiting_list) printk("%s: waiting_list=%lx processing sts=%d\n", ncr_name(np), (u_long) waiting_list, sts);
13860 | #endif |
13861 | while ((wcmd = waiting_list) != 0) { |
13862 | waiting_list = (Scsi_Cmnd *) wcmd->next_wcmd; |
13863 | wcmd->next_wcmd = 0; |
13864 | if (sts == DID_OK) {
13865 | #ifdef DEBUG_WAITING_LIST
13866 | printk("%s: cmd %lx trying to requeue\n", ncr_name(np), (u_long) wcmd);
13867 | #endif
13868 | sts = ncr_queue_command(np, wcmd);
13869 | }
13870 | if (sts != DID_OK) {
13871 | #ifdef DEBUG_WAITING_LIST
13872 | printk("%s: cmd %lx done forced sts=%d\n", ncr_name(np), (u_long) wcmd, sts);
13873 | #endif
13874 | SetScsiResult(wcmd, sts, 0);
13875 | ncr_queue_done_cmd(np, wcmd); |
13876 | } |
13877 | } |
13878 | } |
13879 | |
13880 | #undef next_wcmd |
13881 | |
13882 | #ifdef SCSI_NCR_PROC_INFO_SUPPORT |
13883 | |
13884 | /*========================================================================= |
13885 | ** Proc file system stuff |
13886 | ** |
13887 | ** A read operation returns adapter information. |
13888 | ** A write operation is a control command. |
13889 | ** The string is parsed in the driver code and the command is passed |
13890 | ** to the ncr_usercmd() function. |
13891 | **========================================================================= |
13892 | */ |
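/*
** Typical usage from user space (illustrative; the exact /proc node
** depends on how the SCSI mid-layer registered this host, here 0):
**
**     cat /proc/scsi/sym53c8xx/0                    (read adapter info)
**     echo "setverbose 1" >/proc/scsi/sym53c8xx/0   (send a control command)
*/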
13893 | |
13894 | #ifdef SCSI_NCR_USER_COMMAND_SUPPORT |
13895 | |
13896 | #define is_digit(c) ((c) >= '0' && (c) <= '9') |
13897 | #define digit_to_bin(c) ((c) - '0') |
13898 | #define is_space(c) ((c) == ' ' || (c) == '\t') |
13899 | |
13900 | static int skip_spaces(char *ptr, int len) |
13901 | { |
13902 | int cnt, c; |
13903 | |
13904 | for (cnt = len; cnt > 0 && (c = *ptr++) && is_space(c); cnt--); |
13905 | |
13906 | return (len - cnt); |
13907 | } |
13908 | |
13909 | static int get_int_arg(char *ptr, int len, u_long *pv)
13910 | {
13911 | int cnt, c;
13912 | u_long v;
13913 | |
13914 | for (v = 0, cnt = len; cnt > 0 && (c = *ptr++) && is_digit(c); cnt--) { |
13915 | v = (v * 10) + digit_to_bin(c); |
13916 | } |
13917 | |
13918 | if (pv) |
13919 | *pv = v; |
13920 | |
13921 | return (len - cnt); |
13922 | } |
13923 | |
13924 | static int is_keyword(char *ptr, int len, char *verb) |
13925 | { |
13926 | int verb_len = strlen(verb); |
13927 | |
13928 | if (len >= strlen(verb) && !memcmp(verb, ptr, verb_len))
13929 | return verb_len; |
13930 | else |
13931 | return 0; |
13932 | |
13933 | } |
13934 | |
13935 | #define SKIP_SPACES(min_spaces) \ |
13936 | if ((arg_len = skip_spaces(ptr, len)) < (min_spaces)) \ |
13937 | return -EINVAL; \
13938 | ptr += arg_len; len -= arg_len; |
13939 | |
13940 | #define GET_INT_ARG(v) \ |
13941 | if (!(arg_len = get_int_arg(ptr, len, &(v)))) \ |
13942 | return -EINVAL; \
13943 | ptr += arg_len; len -= arg_len; |
13944 | |
13945 | |
13946 | /* |
13947 | ** Parse a control command |
13948 | */ |
13949 | |
13950 | static int ncr_user_command(ncb_p np, char *buffer, int length) |
13951 | { |
13952 | char *ptr = buffer; |
13953 | int len = length; |
13954 | struct usrcmd *uc = &np->user; |
13955 | int arg_len; |
13956 | u_long target;
13957 | |
13958 | bzero(uc, sizeof(*uc));
13959 | |
13960 | if (len > 0 && ptr[len-1] == '\n') |
13961 | --len; |
13962 | |
13963 | if ((arg_len = is_keyword(ptr, len, "setsync")) != 0)
13964 | uc->cmd = UC_SETSYNC;
13965 | else if ((arg_len = is_keyword(ptr, len, "settags")) != 0)
13966 | uc->cmd = UC_SETTAGS;
13967 | else if ((arg_len = is_keyword(ptr, len, "setorder")) != 0)
13968 | uc->cmd = UC_SETORDER;
13969 | else if ((arg_len = is_keyword(ptr, len, "setverbose")) != 0)
13970 | uc->cmd = UC_SETVERBOSE;
13971 | else if ((arg_len = is_keyword(ptr, len, "setwide")) != 0)
13972 | uc->cmd = UC_SETWIDE;
13973 | else if ((arg_len = is_keyword(ptr, len, "setdebug")) != 0)
13974 | uc->cmd = UC_SETDEBUG;
13975 | else if ((arg_len = is_keyword(ptr, len, "setflag")) != 0)
13976 | uc->cmd = UC_SETFLAG;
13977 | else if ((arg_len = is_keyword(ptr, len, "resetdev")) != 0)
13978 | uc->cmd = UC_RESETDEV;
13979 | else if ((arg_len = is_keyword(ptr, len, "cleardev")) != 0)
13980 | uc->cmd = UC_CLEARDEV;
13981 | else |
13982 | arg_len = 0; |
13983 | |
13984 | #ifdef DEBUG_PROC_INFO |
13985 | printk("ncr_user_command: arg_len=%d, cmd=%ld\n", arg_len, uc->cmd); |
13986 | #endif |
13987 | |
13988 | if (!arg_len) |
13989 | return -EINVAL;
13990 | ptr += arg_len; len -= arg_len; |
13991 | |
13992 | switch(uc->cmd) { |
13993 | case UC_SETSYNC:
13994 | case UC_SETTAGS:
13995 | case UC_SETWIDE:
13996 | case UC_SETFLAG:
13997 | case UC_RESETDEV:
13998 | case UC_CLEARDEV:
13999 | SKIP_SPACES(1); |
14000 | if ((arg_len = is_keyword(ptr, len, "all")) != 0) { |
14001 | ptr += arg_len; len -= arg_len; |
14002 | uc->target = ~0; |
14003 | } else { |
14004 | GET_INT_ARG(target); |
14005 | uc->target = (1<<target); |
14006 | #ifdef DEBUG_PROC_INFO |
14007 | printk("ncr_user_command: target=%ld\n", target); |
14008 | #endif |
14009 | } |
14010 | break; |
14011 | } |
14012 | |
14013 | switch(uc->cmd) { |
14014 | case UC_SETVERBOSE:
14015 | case UC_SETSYNC:
14016 | case UC_SETTAGS:
14017 | case UC_SETWIDE:
14018 | SKIP_SPACES(1); |
14019 | GET_INT_ARG(uc->data); |
14020 | #ifdef DEBUG_PROC_INFO |
14021 | printk("ncr_user_command: data=%ld\n", uc->data); |
14022 | #endif |
14023 | break; |
14024 | case UC_SETORDER:
14025 | SKIP_SPACES(1);
14026 | if ((arg_len = is_keyword(ptr, len, "simple")))
14027 | uc->data = M_SIMPLE_TAG;
14028 | else if ((arg_len = is_keyword(ptr, len, "ordered")))
14029 | uc->data = M_ORDERED_TAG;
14030 | else if ((arg_len = is_keyword(ptr, len, "default")))
14031 | uc->data = 0;
14032 | else
14033 | return -EINVAL;
14034 | break;
14035 | case UC_SETDEBUG:
14036 | while (len > 0) {
14037 | SKIP_SPACES(1);
14038 | if ((arg_len = is_keyword(ptr, len, "alloc")))
14039 | uc->data |= DEBUG_ALLOC;
14040 | else if ((arg_len = is_keyword(ptr, len, "phase")))
14041 | uc->data |= DEBUG_PHASE;
14042 | else if ((arg_len = is_keyword(ptr, len, "queue")))
14043 | uc->data |= DEBUG_QUEUE;
14044 | else if ((arg_len = is_keyword(ptr, len, "result")))
14045 | uc->data |= DEBUG_RESULT;
14046 | else if ((arg_len = is_keyword(ptr, len, "pointer")))
14047 | uc->data |= DEBUG_POINTER;
14048 | else if ((arg_len = is_keyword(ptr, len, "script")))
14049 | uc->data |= DEBUG_SCRIPT;
14050 | else if ((arg_len = is_keyword(ptr, len, "tiny")))
14051 | uc->data |= DEBUG_TINY;
14052 | else if ((arg_len = is_keyword(ptr, len, "timing")))
14053 | uc->data |= DEBUG_TIMING;
14054 | else if ((arg_len = is_keyword(ptr, len, "nego")))
14055 | uc->data |= DEBUG_NEGO;
14056 | else if ((arg_len = is_keyword(ptr, len, "tags")))
14057 | uc->data |= DEBUG_TAGS;
14058 | else
14059 | return -EINVAL;
14060 | ptr += arg_len; len -= arg_len; |
14061 | } |
14062 | #ifdef DEBUG_PROC_INFO |
14063 | printk("ncr_user_command: data=%ld\n", uc->data); |
14064 | #endif |
14065 | break; |
14066 | case UC_SETFLAG:
14067 | while (len > 0) {
14068 | SKIP_SPACES(1);
14069 | if ((arg_len = is_keyword(ptr, len, "trace")))
14070 | uc->data |= UF_TRACE;
14071 | else if ((arg_len = is_keyword(ptr, len, "no_disc")))
14072 | uc->data |= UF_NODISC;
14073 | else
14074 | return -EINVAL;
14075 | ptr += arg_len; len -= arg_len; |
14076 | } |
14077 | break; |
14078 | default: |
14079 | break; |
14080 | } |
14081 | |
14082 | if (len) |
14083 | return -EINVAL;
14084 | else { |
14085 | long flags; |
14086 | |
14087 | NCR_LOCK_NCB(np, flags);
14088 | ncr_usercmd (np);
14089 | NCR_UNLOCK_NCB(np, flags);
14090 | } |
14091 | return length; |
14092 | } |
14093 | |
14094 | #endif /* SCSI_NCR_USER_COMMAND_SUPPORT */ |
14095 | |
14096 | #ifdef SCSI_NCR_USER_INFO_SUPPORT |
14097 | |
14098 | struct info_str |
14099 | { |
14100 | char *buffer; |
14101 | int length; |
14102 | int offset; |
14103 | int pos; |
14104 | }; |
14105 | |
14106 | static void copy_mem_info(struct info_str *info, char *data, int len) |
14107 | { |
14108 | if (info->pos + len > info->length) |
14109 | len = info->length - info->pos; |
14110 | |
14111 | if (info->pos + len < info->offset) { |
14112 | info->pos += len; |
14113 | return; |
14114 | } |
14115 | if (info->pos < info->offset) { |
14116 | data += (info->offset - info->pos); |
14117 | len -= (info->offset - info->pos); |
14118 | } |
14119 | |
14120 | if (len > 0) { |
14121 | memcpy(info->buffer + info->pos, data, len);
14122 | info->pos += len; |
14123 | } |
14124 | } |
14125 | |
14126 | static int copy_info(struct info_str *info, char *fmt, ...) |
14127 | { |
14128 | va_list args; |
14129 | char buf[81]; |
14130 | int len; |
14131 | |
14132 | va_start(args, fmt);
14133 | len = vsprintf(buf, fmt, args);
14134 | va_end(args);
14135 | |
14136 | copy_mem_info(info, buf, len); |
14137 | return len; |
14138 | } |
14139 | |
14140 | /* |
14141 | ** Copy formatted information into the input buffer. |
14142 | */ |
14143 | |
14144 | static int ncr_host_info(ncb_p np, char *ptr, off_t offset, int len) |
14145 | { |
14146 | struct info_str info; |
14147 | #ifdef CONFIG_ALL_PPC |
14148 | struct device_node* of_node; |
14149 | #endif |
14150 | |
14151 | info.buffer = ptr; |
14152 | info.length = len; |
14153 | info.offset = offset; |
14154 | info.pos = 0; |
14155 | |
14156 | copy_info(&info, "General information:\n"); |
14157 | copy_info(&info, " Chip " NAME53C "%s, device id 0x%x, "
14158 | "revision id 0x%x\n", |
14159 | np->chip_name, np->device_id, np->revision_id); |
14160 | copy_info(&info, " On PCI bus %d, device %d, function %d, " |
14161 | #ifdef __sparc__ |
14162 | "IRQ %s\n", |
14163 | #else |
14164 | "IRQ %d\n", |
14165 | #endif |
14166 | np->bus, (np->device_fn & 0xf8) >> 3, np->device_fn & 7, |
14167 | #ifdef __sparc__ |
14168 | __irq_itoa(np->irq)); |
14169 | #else |
14170 | (int) np->irq); |
14171 | #endif |
14172 | #ifdef CONFIG_ALL_PPC |
14173 | of_node = find_pci_device_OFnode(np->bus, np->device_fn); |
14174 | if (of_node && of_node->full_name) |
14175 | copy_info(&info, "PPC OpenFirmware path : %s\n", of_node->full_name); |
14176 | #endif |
14177 | copy_info(&info, " Synchronous period factor %d, " |
14178 | "max commands per lun %d\n", |
14179 | (int) np->minsync, MAX_TAGS);
14180 | |
14181 | if (driver_setup.debug || driver_setup.verbose > 1) { |
14182 | copy_info(&info, " Debug flags 0x%x, verbosity level %d\n", |
14183 | driver_setup.debug, driver_setup.verbose); |
14184 | } |
14185 | |
14186 | return info.pos > info.offset? info.pos - info.offset : 0; |
14187 | } |
14188 | |
14189 | #endif /* SCSI_NCR_USER_INFO_SUPPORT */ |
14190 | |
14191 | /* |
14192 | ** Entry point of the scsi proc fs of the driver. |
14193 | ** - func = 0 means read (returns adapter infos) |
14194 | ** - func = 1 means write (parse user control command) |
14195 | */ |
14196 | |
14197 | static int sym53c8xx_proc_info(char *buffer, char **start, off_t offset, |
14198 | int length, int hostno, int func) |
14199 | { |
14200 | struct Scsi_Host *host; |
14201 | struct host_data *host_data; |
14202 | ncb_p ncb = 0; |
14203 | int retv; |
14204 | |
14205 | #ifdef DEBUG_PROC_INFO |
14206 | printk("sym53c8xx_proc_info: hostno=%d, func=%d\n", hostno, func); |
14207 | #endif |
14208 | |
14209 | for (host = first_host; host; host = host->next) { |
14210 | if (host->hostt != first_host->hostt) |
14211 | continue; |
14212 | if (host->host_no == hostno) { |
14213 | host_data = (struct host_data *) host->hostdata; |
14214 | ncb = host_data->ncb; |
14215 | break; |
14216 | } |
14217 | } |
14218 | |
14219 | if (!ncb) |
14220 | return -EINVAL;
14221 | |
14222 | if (func) { |
14223 | #ifdef SCSI_NCR_USER_COMMAND_SUPPORT |
14224 | retv = ncr_user_command(ncb, buffer, length); |
14225 | #else |
14226 | retv = -EINVAL;
14227 | #endif |
14228 | } |
14229 | else { |
14230 | if (start) |
14231 | *start = buffer; |
14232 | #ifdef SCSI_NCR_USER_INFO_SUPPORT |
14233 | retv = ncr_host_info(ncb, buffer, offset, length); |
14234 | #else |
14235 | retv = -EINVAL;
14236 | #endif |
14237 | } |
14238 | |
14239 | return retv; |
14240 | } |
14241 | |
14242 | |
14243 | /*========================================================================= |
14244 | ** End of proc file system stuff |
14245 | **========================================================================= |
14246 | */ |
14247 | #endif |
14248 | |
14249 | |
14250 | #ifdef SCSI_NCR_NVRAM_SUPPORT |
14251 | |
14252 | /* |
14253 | * 24C16 EEPROM reading. |
14254 | * |
14255 | * GPIO0 - data in/data out
14256 | * GPIO1 - clock |
14257 | * Symbios NVRAM wiring now also used by Tekram. |
14258 | */ |
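/*
 * A random read of the 24C16 follows the usual two-wire serial EEPROM
 * protocol, bit-banged through GPIO0/GPIO1 by the helpers below:
 * START, device code plus address MSB (write), address LSB, repeated
 * START, device code with the read bit set, then the data bytes (each
 * one acknowledged), and finally a STOP condition.
 */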
14259 | |
14260 | #define SET_BIT 0 |
14261 | #define CLR_BIT 1 |
14262 | #define SET_CLK 2 |
14263 | #define CLR_CLK 3 |
14264 | |
14265 | /* |
14266 | * Set/clear data/clock bit in GPIO0 |
14267 | */ |
14268 | static void __init |
14269 | S24C16_set_bit(ncr_slot *np, u_char write_bit, u_char *gpreg, int bit_mode)
14270 | { |
14271 | UDELAY (5); |
14272 | switch (bit_mode){ |
14273 | case SET_BIT: |
14274 | *gpreg |= write_bit; |
14275 | break; |
14276 | case CLR_BIT: |
14277 | *gpreg &= 0xfe; |
14278 | break; |
14279 | case SET_CLK: |
14280 | *gpreg |= 0x02; |
14281 | break; |
14282 | case CLR_CLK: |
14283 | *gpreg &= 0xfd; |
14284 | break; |
14285 | |
14286 | } |
14287 | OUTB (nc_gpreg, *gpreg);
14288 | UDELAY (5); |
14289 | } |
14290 | |
14291 | /* |
14292 | * Send START condition to NVRAM to wake it up. |
14293 | */ |
14294 | static void __init S24C16_start(ncr_slot *np, u_char *gpreg)
14295 | { |
14296 | S24C16_set_bit(np, 1, gpreg, SET_BIT); |
14297 | S24C16_set_bit(np, 0, gpreg, SET_CLK); |
14298 | S24C16_set_bit(np, 0, gpreg, CLR_BIT); |
14299 | S24C16_set_bit(np, 0, gpreg, CLR_CLK); |
14300 | } |
14301 | |
14302 | /* |
14303 | * Send STOP condition to NVRAM - puts NVRAM to sleep... ZZzzzz!! |
14304 | */ |
14305 | static void __init S24C16_stop(ncr_slot *np, u_char *gpreg)
14306 | { |
14307 | S24C16_set_bit(np, 0, gpreg, SET_CLK); |
14308 | S24C16_set_bit(np, 1, gpreg, SET_BIT); |
14309 | } |
14310 | |
14311 | /* |
14312 | * Read or write a bit to the NVRAM, |
14313 | * read if GPIO0 input else write if GPIO0 output |
14314 | */ |
14315 | static void __init |
14316 | S24C16_do_bit(ncr_slot *np, u_char *read_bit, u_char write_bit, u_char *gpreg)
14317 | { |
14318 | S24C16_set_bit(np, write_bit, gpreg, SET_BIT); |
14319 | S24C16_set_bit(np, 0, gpreg, SET_CLK); |
14320 | if (read_bit) |
14321 | *read_bit = INB (nc_gpreg);
14322 | S24C16_set_bit(np, 0, gpreg, CLR_CLK); |
14323 | S24C16_set_bit(np, 0, gpreg, CLR_BIT); |
14324 | } |
14325 | |
14326 | /* |
14327 | * Output an ACK to the NVRAM after reading, |
14328 | * change GPIO0 to output and when done back to an input |
14329 | */ |
14330 | static void __init |
14331 | S24C16_write_ack(ncr_slot *np, u_char write_bit, u_char *gpreg, u_char *gpcntl)
14332 | {
14333 | OUTB (nc_gpcntl, *gpcntl & 0xfe);
14334 | S24C16_do_bit(np, 0, write_bit, gpreg);
14335 | OUTB (nc_gpcntl, *gpcntl);
14336 | } |
14337 | |
14338 | /* |
14339 | * Input an ACK from NVRAM after writing, |
14340 | * change GPIO0 to input and when done back to an output |
14341 | */ |
14342 | static void __init |
14343 | S24C16_read_ack(ncr_slot *np, u_char *read_bit, u_char *gpreg, u_char *gpcntl)
14344 | {
14345 | OUTB (nc_gpcntl, *gpcntl | 0x01);
14346 | S24C16_do_bit(np, read_bit, 1, gpreg);
14347 | OUTB (nc_gpcntl, *gpcntl);
14348 | } |
14349 | |
14350 | /* |
14351 | * WRITE a byte to the NVRAM and then get an ACK to see it was accepted OK, |
14352 | * GPIO0 must already be set as an output |
14353 | */ |
14354 | static void __init |
14355 | S24C16_write_byte(ncr_slot *np, u_char *ack_data, u_char write_data,
14356 | u_char *gpreg, u_char *gpcntl)
14357 | { |
14358 | int x; |
14359 | |
14360 | for (x = 0; x < 8; x++) |
14361 | S24C16_do_bit(np, 0, (write_data >> (7 - x)) & 0x01, gpreg); |
14362 | |
14363 | S24C16_read_ack(np, ack_data, gpreg, gpcntl); |
14364 | } |
14365 | |
14366 | /* |
14367 | * READ a byte from the NVRAM and then send an ACK to say we have got it, |
14368 | * GPIO0 must already be set as an input |
14369 | */ |
14370 | static void __init |
14371 | S24C16_read_byte(ncr_slot *np, u_char *read_data, u_char ack_data,
14372 | u_char *gpreg, u_char *gpcntl)
14373 | { |
14374 | int x; |
14375 | u_char read_bit;
14376 | |
14377 | *read_data = 0; |
14378 | for (x = 0; x < 8; x++) { |
14379 | S24C16_do_bit(np, &read_bit, 1, gpreg); |
14380 | *read_data |= ((read_bit & 0x01) << (7 - x)); |
14381 | } |
14382 | |
14383 | S24C16_write_ack(np, ack_data, gpreg, gpcntl); |
14384 | } |
14385 | |
14386 | /* |
14387 | * Read 'len' bytes starting at 'offset'. |
14388 | */ |
14389 | static int __init |
14390 | sym_read_S24C16_nvram (ncr_slot *np, int offset, u_char *data, int len)
14391 | { |
14392 | u_char gpcntl, gpreg;
14393 | u_char old_gpcntl, old_gpreg;
14394 | u_char ack_data;
14395 | int retv = 1; |
14396 | int x; |
14397 | |
14398 | /* save current state of GPCNTL and GPREG */ |
14399 | old_gpreg = INB (nc_gpreg);
14400 | old_gpcntl = INB (nc_gpcntl);
14401 | gpcntl = old_gpcntl & 0xfc; |
14402 | |
14403 | /* set up GPREG & GPCNTL to put GPIO0 and GPIO1 into a known state */
14404 | OUTB (nc_gpreg, old_gpreg);
14405 | OUTB (nc_gpcntl, gpcntl);
14406 | |
14407 | /* this is to set NVRAM into a known state with GPIO0/1 both low */ |
14408 | gpreg = old_gpreg; |
14409 | S24C16_set_bit(np, 0, &gpreg, CLR_CLK); |
14410 | S24C16_set_bit(np, 0, &gpreg, CLR_BIT); |
14411 | |
14412 | /* now set NVRAM inactive with GPIO0/1 both high */ |
14413 | S24C16_stop(np, &gpreg); |
14414 | |
14415 | /* activate NVRAM */ |
14416 | S24C16_start(np, &gpreg); |
14417 | |
14418 | /* write device code and random address MSB */ |
14419 | S24C16_write_byte(np, &ack_data, |
14420 | 0xa0 | ((offset >> 7) & 0x0e), &gpreg, &gpcntl); |
14421 | if (ack_data & 0x01) |
14422 | goto out; |
14423 | |
14424 | /* write random address LSB */ |
14425 | S24C16_write_byte(np, &ack_data, |
14426 | offset & 0xff, &gpreg, &gpcntl); |
14427 | if (ack_data & 0x01) |
14428 | goto out; |
14429 | |
14430 | /* regenerate START state to set up for reading */ |
14431 | S24C16_start(np, &gpreg); |
14432 | |
14433 | /* rewrite device code and address MSB with read bit set (lsb = 0x01) */ |
14434 | S24C16_write_byte(np, &ack_data, |
14435 | 0xa1 | ((offset >> 7) & 0x0e), &gpreg, &gpcntl); |
14436 | if (ack_data & 0x01) |
14437 | goto out; |
14438 | |
14439 | /* now set up GPIO0 for inputting data */ |
14440 | gpcntl |= 0x01; |
14441 | OUTB (nc_gpcntl, gpcntl);
14442 | |
14443 | /* input all requested data - only part of total NVRAM */ |
14444 | for (x = 0; x < len; x++) |
14445 | S24C16_read_byte(np, &data[x], (x == (len-1)), &gpreg, &gpcntl); |
14446 | |
14447 | /* finally put NVRAM back in inactive mode */ |
14448 | gpcntl &= 0xfe; |
14449 | OUTB (nc_gpcntl, gpcntl);
14450 | S24C16_stop(np, &gpreg); |
14451 | retv = 0; |
14452 | out: |
14453 | /* return GPIO0/1 to original states after having accessed NVRAM */ |
14454 | OUTB (nc_gpcntl, old_gpcntl);
14455 | OUTB (nc_gpreg, old_gpreg);
14456 | |
14457 | return retv; |
14458 | } |
14459 | |
14460 | #undef SET_BIT |
14461 | #undef CLR_BIT |
14462 | #undef SET_CLK |
14463 | #undef CLR_CLK |
14464 | |
14465 | /* |
14466 | * Try reading Symbios NVRAM. |
14467 | * Return 0 if OK. |
14468 | */ |
14469 | static int __init sym_read_Symbios_nvram (ncr_slot *np, Symbios_nvram *nvram) |
14470 | { |
14471 | static u_char Symbios_trailer[6] = {0xfe, 0xfe, 0, 0, 0, 0};
14472 | u_char *data = (u_char *) nvram;
14473 | int len = sizeof(*nvram);
14474 | u_short csum;
14475 | int x; |
14476 | |
14477 | /* probe the 24c16 and read the SYMBIOS 24c16 area */ |
14478 | if (sym_read_S24C16_nvram (np, SYMBIOS_NVRAM_ADDRESS, data, len))
14479 | return 1; |
14480 | |
14481 | /* check valid NVRAM signature, verify byte count and checksum */ |
14482 | if (nvram->type != 0 || |
14483 | memcmp(nvram->trailer, Symbios_trailer, 6) ||
14484 | nvram->byte_count != len - 12) |
14485 | return 1; |
14486 | |
14487 | /* verify checksum */ |
14488 | for (x = 6, csum = 0; x < len - 6; x++) |
14489 | csum += data[x]; |
14490 | if (csum != nvram->checksum) |
14491 | return 1; |
14492 | |
14493 | return 0; |
14494 | } |
14495 | |
14496 | /* |
14497 | * 93C46 EEPROM reading. |
14498 | * |
14499 | * GPIO0 - data in
14500 | * GPIO1 - data out |
14501 | * GPIO2 - clock |
14502 | * GPIO4 - chip select |
14503 | * |
14504 | * Used by Tekram. |
14505 | */ |
14506 | |
14507 | /* |
14508 | * Pulse clock bit in GPIO0 |
14509 | */ |
14510 | static void __init T93C46_Clk(ncr_slot *np, u_char *gpreg)
14511 | {
14512 | OUTB (nc_gpreg, *gpreg | 0x04);
14513 | UDELAY (2);
14514 | OUTB (nc_gpreg, *gpreg);
14515 | } |
14516 | |
14517 | /* |
14518 | * Read bit from NVRAM |
14519 | */ |
14520 | static void __init T93C46_Read_Bit(ncr_slot *np, u_char *read_bit, u_char *gpreg)
14521 | {
14522 | UDELAY (2);
14523 | T93C46_Clk(np, gpreg);
14524 | *read_bit = INB (nc_gpreg);
14525 | } |
14526 | |
14527 | /* |
14528 | * Write bit to GPIO0 |
14529 | */ |
14530 | static void __init T93C46_Write_Bit(ncr_slot *np, u_char write_bit, u_char *gpreg)
14531 | { |
14532 | if (write_bit & 0x01) |
14533 | *gpreg |= 0x02; |
14534 | else |
14535 | *gpreg &= 0xfd; |
14536 | |
14537 | *gpreg |= 0x10; |
14538 | |
14539 | OUTB (nc_gpreg, *gpreg);
14540 | UDELAY (2); |
14541 | |
14542 | T93C46_Clk(np, gpreg); |
14543 | } |
14544 | |
14545 | /* |
14546 | * Send STOP condition to NVRAM - puts NVRAM to sleep... ZZZzzz!! |
14547 | */ |
14548 | static void __init T93C46_Stop(ncr_slot *np, u_char *gpreg)
14549 | {
14550 | *gpreg &= 0xef;
14551 | OUTB (nc_gpreg, *gpreg);
14552 | UDELAY (2); |
14553 | |
14554 | T93C46_Clk(np, gpreg); |
14555 | } |
14556 | |
14557 | /* |
14558 | * Send read command and address to NVRAM |
14559 | */ |
14560 | static void __init |
14561 | T93C46_Send_Command(ncr_slot *np, u_short write_data,
14562 | u_char *read_bit, u_char *gpreg)
14563 | { |
14564 | int x; |
14565 | |
14566 | /* send 9 bits, start bit (1), command (2), address (6) */ |
14567 | for (x = 0; x < 9; x++) |
14568 | T93C46_Write_Bit(np, (u_char) (write_data >> (8 - x)), gpreg);
14569 | |
14570 | *read_bit = INB (nc_gpreg);
14571 | } |
14572 | |
14573 | /* |
14574 | * READ 2 bytes from the NVRAM |
14575 | */ |
14576 | static void __init |
14577 | T93C46_Read_Word(ncr_slot *np, u_short *nvram_data, u_char *gpreg)
14578 | {
14579 | int x;
14580 | u_char read_bit;
14581 | |
14582 | *nvram_data = 0; |
14583 | for (x = 0; x < 16; x++) { |
14584 | T93C46_Read_Bit(np, &read_bit, gpreg); |
14585 | |
14586 | if (read_bit & 0x01) |
14587 | *nvram_data |= (0x01 << (15 - x)); |
14588 | else |
14589 | *nvram_data &= ~(0x01 << (15 - x)); |
14590 | } |
14591 | } |
14592 | |
14593 | /* |
14594 | * Read Tekram NvRAM data. |
14595 | */ |
14596 | static int __init |
14597 | T93C46_Read_Data(ncr_slot *np, u_short *data, int len, u_char *gpreg)
14598 | {
14599 | u_char read_bit;
14600 | int x; |
14601 | |
14602 | for (x = 0; x < len; x++) { |
14603 | |
14604 | /* output read command and address */ |
14605 | T93C46_Send_Command(np, 0x180 | x, &read_bit, gpreg); |
14606 | if (read_bit & 0x01) |
14607 | return 1; /* Bad */ |
14608 | T93C46_Read_Word(np, &data[x], gpreg); |
14609 | T93C46_Stop(np, gpreg); |
14610 | } |
14611 | |
14612 | return 0; |
14613 | } |
14614 | |
14615 | /* |
14616 | * Try reading 93C46 Tekram NVRAM. |
14617 | */ |
14618 | static int __init |
14619 | sym_read_T93C46_nvram (ncr_slot *np, Tekram_nvram *nvram) |
14620 | { |
14621 | u_char gpcntl, gpreg;
14622 | u_char old_gpcntl, old_gpreg;
14623 | int retv = 1; |
14624 | |
14625 | /* save current state of GPCNTL and GPREG */ |
14626 | old_gpreg = INB (nc_gpreg);
14627 | old_gpcntl = INB (nc_gpcntl);
14628 | |
14629 | /* set up GPREG & GPCNTL to put GPIO0/1/2/4 into a known state: 0 in,
14630 | 1/2/4 out */
14631 | gpreg = old_gpreg & 0xe9; |
14632 | OUTB (nc_gpreg, gpreg);
14633 | gpcntl = (old_gpcntl & 0xe9) | 0x09;
14634 | OUTB (nc_gpcntl, gpcntl);
14635 | |
14636 | /* input all of NVRAM, 64 words */ |
14637 | retv = T93C46_Read_Data(np, (u_short *) nvram,
14638 | sizeof(*nvram) / sizeof(short), &gpreg); |
14639 | |
14640 | /* return GPIO0/1/2/4 to original states after having accessed NVRAM */ |
14641 | OUTB (nc_gpcntl, old_gpcntl);
14642 | OUTB (nc_gpreg, old_gpreg);
14643 | |
14644 | return retv; |
14645 | } |
14646 | |
14647 | /* |
14648 | * Try reading Tekram NVRAM. |
14649 | * Return 0 if OK. |
14650 | */ |
14651 | static int __init |
14652 | sym_read_Tekram_nvram (ncr_slot *np, u_short device_id, Tekram_nvram *nvram)
14653 | {
14654 | u_char *data = (u_char *) nvram;
14655 | int len = sizeof(*nvram);
14656 | u_short csum;
14657 | int x; |
14658 | |
14659 | switch (device_id) { |
14660 | case PCI_DEVICE_ID_NCR_53C885:
14661 | case PCI_DEVICE_ID_NCR_53C895:
14662 | case PCI_DEVICE_ID_NCR_53C896:
14663 | x = sym_read_S24C16_nvram(np, TEKRAM_24C16_NVRAM_ADDRESS,
14664 | data, len);
14665 | break;
14666 | case PCI_DEVICE_ID_NCR_53C875:
14667 | x = sym_read_S24C16_nvram(np, TEKRAM_24C16_NVRAM_ADDRESS,
14668 | data, len);
14668 | data, len); |
14669 | if (!x) |
14670 | break; |
14671 | default: |
14672 | x = sym_read_T93C46_nvram(np, nvram); |
14673 | break; |
14674 | } |
14675 | if (x) |
14676 | return 1; |
14677 | |
14678 | /* verify checksum */ |
14679 | for (x = 0, csum = 0; x < len - 1; x += 2) |
14680 | csum += data[x] + (data[x+1] << 8); |
14681 | if (csum != 0x1234) |
14682 | return 1; |
14683 | |
14684 | return 0; |
14685 | } |
14686 | |
14687 | #endif /* SCSI_NCR_NVRAM_SUPPORT */ |
14688 | |
14689 | /* |
14690 | ** Module stuff |
14691 | */ |
14692 | |
14693 | #ifdef MODULE |
14694 | Scsi_Host_Template driver_template = SYM53C8XX; |
14695 | #include "scsi_module.c" |
14696 | #endif |