File: obj-scan-build/../linux/src/drivers/scsi/scsi.c
Location: line 549, column 3
Description: Access to field 'host' results in a dereference of a null pointer (loaded from variable 'SDpnt')
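
The path behind this report is visible in the listing below: scan_scsis_single() (line 534) allocates a replacement Scsi_Device near line 791 and stores it through *SDpnt2; when scsi_init_malloc() fails it only prints "scsi: scan_scsis_single: Cannot malloc" and can still return 1, so the LUN loop in scan_scsis() (line 488) calls scan_scsis_single() again with SDpnt == NULL, and lines 548-549 (memset(SDpnt, ...) and SDpnt->host = shpnt) dereference the null pointer. A minimal guard is sketched here for illustration only, under the assumption that returning 0 (the "do not probe further LUNs" convention documented at line 531) is an acceptable way to stop the scan; it is not part of the analyzed source:

    /* Hypothetical guard in scan_scsis_single(), right after the
     * re-allocation near line 791: bail out instead of handing a NULL
     * Scsi_Device back to the caller's LUN loop.
     */
    SDpnt = (Scsi_Device *) scsi_init_malloc (sizeof (Scsi_Device), GFP_ATOMIC);
    *SDpnt2 = SDpnt;
    if (!SDpnt) {
      printk ("scsi: scan_scsis_single: Cannot malloc\n");
      return 0;   /* caller treats 0 as "don't ask further for lun != 0" */
    }
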
1 | /* | |||
2 | * scsi.c Copyright (C) 1992 Drew Eckhardt | |||
3 | * Copyright (C) 1993, 1994, 1995 Eric Youngdale | |||
4 | * | |||
5 | * generic mid-level SCSI driver | |||
6 | * Initial versions: Drew Eckhardt | |||
7 | * Subsequent revisions: Eric Youngdale | |||
8 | * | |||
9 | * <drew@colorado.edu> | |||
10 | * | |||
11 | * Bug correction thanks go to : | |||
12 | * Rik Faith <faith@cs.unc.edu> | |||
13 | * Tommy Thorn <tthorn> | |||
14 | * Thomas Wuensche <tw@fgb1.fgb.mw.tu-muenchen.de> | |||
15 | * | |||
16 | * Modified by Eric Youngdale eric@aib.com to | |||
17 | * add scatter-gather, multiple outstanding request, and other | |||
18 | * enhancements. | |||
19 | * | |||
20 | * Native multichannel, wide scsi, /proc/scsi and hot plugging | |||
21 | * support added by Michael Neuffer <mike@i-connect.net> | |||
22 | * | |||
23 | * Added request_module("scsi_hostadapter") for kerneld: | |||
24 | * (Put an "alias scsi_hostadapter your_hostadapter" in /etc/conf.modules) | |||
25 | * Bjorn Ekwall <bj0rn@blox.se> | |||
26 | * | |||
27 | * Major improvements to the timeout, abort, and reset processing, | |||
28 | * as well as performance modifications for large queue depths by | |||
29 | * Leonard N. Zubkoff <lnz@dandelion.com> | |||
30 | */ | |||
31 | ||||
32 | /* | |||
33 | * Don't import our own symbols, as this would severely mess up our | |||
34 | * symbol tables. | |||
35 | */ | |||
36 | #define _SCSI_SYMS_VER_ | |||
37 | ||||
38 | #include <linux/config.h> | |||
39 | #include <linux/module.h> | |||
40 | ||||
41 | #include <linux/sched.h> | |||
42 | #include <linux/timer.h> | |||
43 | #include <linux/string.h> | |||
44 | #include <linux/malloc.h> | |||
45 | #include <linux/ioport.h> | |||
46 | #include <linux/kernel.h> | |||
47 | #include <linux/stat.h> | |||
48 | #include <linux/blk.h> | |||
49 | #include <linux/interrupt.h> | |||
50 | #include <linux/delay.h> | |||
51 | ||||
52 | #include <asm/system.h> | |||
53 | #include <asm/irq.h> | |||
54 | #include <asm/dma.h> | |||
55 | ||||
56 | #include "scsi.h" | |||
57 | #include "hosts.h" | |||
58 | #include "constants.h" | |||
59 | ||||
60 | #ifdef CONFIG_KERNELD | |||
61 | #include <linux/kerneld.h> | |||
62 | #endif | |||
63 | ||||
64 | #undef USE_STATIC_SCSI_MEMORY | |||
65 | ||||
66 | /* | |||
67 | static const char RCSid[] = "$Header: cvs/gnumach/linux/src/drivers/scsi/Attic/scsi.c,v 1.1 1999/04/26 05:54:57 tb Exp $"; | |||
68 | */ | |||
69 | ||||
70 | ||||
71 | /* Command groups 3 and 4 are reserved and should never be used. */ | |||
72 | const unsigned char scsi_command_size[8] = { 6, 10, 10, 12, 12, 12, 10, 10 }; | |||
73 | ||||
74 | #define INTERNAL_ERROR (panic ("Internal error in file %s, line %d.\n", __FILE__, __LINE__))
75 | ||||
76 | /* | |||
77 | * PAGE_SIZE must be a multiple of the sector size (512). True | |||
78 | * for all reasonably recent architectures (even the VAX...). | |||
79 | */ | |||
80 | #define SECTOR_SIZE 512
81 | #define SECTORS_PER_PAGE (PAGE_SIZE/SECTOR_SIZE)
82 |
83 | #if SECTORS_PER_PAGE <= 8
84 | typedef unsigned char FreeSectorBitmap;
85 | #elif SECTORS_PER_PAGE <= 32
86 | typedef unsigned int FreeSectorBitmap; | |||
87 | #else | |||
88 | # error You lose. | |||
89 | #endif | |||
90 | ||||
91 | static void scsi_done (Scsi_Cmnd *SCpnt); | |||
92 | static int update_timeout (Scsi_Cmnd *, int); | |||
93 | static void print_inquiry(unsigned char *data); | |||
94 | static void scsi_times_out (Scsi_Cmnd * SCpnt); | |||
95 | static int scan_scsis_single (int channel,int dev,int lun,int * max_scsi_dev , | |||
96 | int * sparse_lun, Scsi_Device ** SDpnt, Scsi_Cmnd * SCpnt, | |||
97 | struct Scsi_Host *shpnt, char * scsi_result); | |||
98 | void scsi_build_commandblocks(Scsi_Device * SDpnt); | |||
99 | ||||
100 | #ifdef CONFIG_MODULES | |||
101 | extern struct symbol_table scsi_symbol_table; | |||
102 | #endif | |||
103 | ||||
104 | static FreeSectorBitmap * dma_malloc_freelist = NULL;
105 | static int scsi_need_isa_bounce_buffers;
106 | static unsigned int dma_sectors = 0;
107 | unsigned int dma_free_sectors = 0;
108 | unsigned int need_isa_buffer = 0;
109 | static unsigned char ** dma_malloc_pages = NULL;
110 |
111 | static int time_start;
112 | static int time_elapsed;
113 | static volatile struct Scsi_Host * host_active = NULL;
114 | #define SCSI_BLOCK(HOST) ((HOST->block && host_active && HOST != host_active) \
115 | || (HOST->can_queue && HOST->host_busy >= HOST->can_queue))
116 |
117 | const char *const scsi_device_types[MAX_SCSI_DEVICE_CODE] =
118 | { | |||
119 | "Direct-Access ", | |||
120 | "Sequential-Access", | |||
121 | "Printer ", | |||
122 | "Processor ", | |||
123 | "WORM ", | |||
124 | "CD-ROM ", | |||
125 | "Scanner ", | |||
126 | "Optical Device ", | |||
127 | "Medium Changer ", | |||
128 | "Communications " | |||
129 | }; | |||
130 | ||||
131 | ||||
132 | /* | |||
133 | * global variables : | |||
134 | * scsi_devices an array of these specifying the address for each | |||
135 | * (host, id, LUN) | |||
136 | */ | |||
137 | ||||
138 | Scsi_Device * scsi_devices = NULL;
139 | ||||
140 | /* Process ID of SCSI commands */ | |||
141 | unsigned long scsi_pid = 0; | |||
142 | ||||
143 | static unsigned long serial_number = 0; | |||
144 | ||||
145 | static unsigned char generic_sense[6] = {REQUEST_SENSE, 0,0,0, 255, 0};
146 | static void resize_dma_pool(void); | |||
147 | ||||
148 | /* This variable is merely a hook so that we can debug the kernel with gdb. */ | |||
149 | Scsi_Cmnd * last_cmnd = NULL;
150 | ||||
151 | /* This is the pointer to the /proc/scsi code. | |||
152 | * It is only initialized to !=0 if the scsi code is present | |||
153 | */ | |||
154 | #if CONFIG_PROC_FS
155 | extern int (* dispatch_scsi_info_ptr)(int ino, char *buffer, char **start, | |||
156 | off_t offset, int length, int inout); | |||
157 | extern int dispatch_scsi_info(int ino, char *buffer, char **start, | |||
158 | off_t offset, int length, int inout); | |||
159 | ||||
160 | struct proc_dir_entry proc_scsi_scsi = { | |||
161 | PROC_SCSI_SCSI, 4, "scsi", | |||
162 | S_IFREG | S_IRUGO | S_IWUSR, 1, 0, 0, 0,
163 | NULL,
164 | NULL, NULL,
165 | NULL, NULL, NULL
166 | }; | |||
167 | #endif | |||
168 | ||||
169 | /* | |||
170 | * This is the number of clock ticks we should wait before we time out | |||
171 | * and abort the command. This is for where the scsi.c module generates | |||
172 | * the command, not where it originates from a higher level, in which | |||
173 | * case the timeout is specified there. | |||
174 | * | |||
175 | * ABORT_TIMEOUT and RESET_TIMEOUT are the timeouts for RESET and ABORT | |||
176 | * respectively. | |||
177 | */ | |||
178 | ||||
179 | #ifdef DEBUG_TIMEOUT | |||
180 | static void scsi_dump_status(void); | |||
181 | #endif | |||
182 | ||||
183 | ||||
184 | #ifdef DEBUG | |||
185 | #define SCSI_TIMEOUT (5*HZ)
186 | #else
187 | #define SCSI_TIMEOUT (2*HZ)
188 | #endif
189 |
190 | #ifdef DEBUG
191 | #define SENSE_TIMEOUT SCSI_TIMEOUT
192 | #define ABORT_TIMEOUT SCSI_TIMEOUT
193 | #define RESET_TIMEOUT SCSI_TIMEOUT
194 | #else
195 | #define SENSE_TIMEOUT (5*HZ/10)
196 | #define RESET_TIMEOUT (5*HZ/10)
197 | #define ABORT_TIMEOUT (5*HZ/10)
198 | #endif
199 |
200 | #define MIN_RESET_DELAY (2*HZ)
201 |
202 | /* Do not call reset on error if we just did a reset within 15 sec. */
203 | #define MIN_RESET_PERIOD (15*HZ)
204 | ||||
205 | /* The following devices are known not to tolerate a lun != 0 scan for | |||
206 | * one reason or another. Some will respond to all luns, others will | |||
207 | * lock up. | |||
208 | */ | |||
209 | ||||
210 | #define BLIST_NOLUN     0x01
211 | #define BLIST_FORCELUN  0x02
212 | #define BLIST_BORKEN    0x04
213 | #define BLIST_KEY       0x08
214 | #define BLIST_SINGLELUN 0x10
215 | #define BLIST_NOTQ      0x20
216 | #define BLIST_SPARSELUN 0x40
217 | #define BLIST_MAX5LUN   0x80
218 | ||||
219 | struct dev_info{ | |||
220 | const char * vendor; | |||
221 | const char * model; | |||
222 | const char * revision; /* Latest revision known to be bad. Not used yet */ | |||
223 | unsigned flags; | |||
224 | }; | |||
225 | ||||
226 | /* | |||
227 | * This is what was previously known as the blacklist. The concept | |||
228 | * has been expanded so that we can specify other types of things we | |||
229 | * need to be aware of. | |||
230 | */ | |||
231 | static struct dev_info device_list[] = | |||
232 | { | |||
233 | {"TEAC","CD-R55S","1.0H", BLIST_NOLUN0x01}, /* Locks up if polled for lun != 0 */ | |||
234 | {"CHINON","CD-ROM CDS-431","H42", BLIST_NOLUN0x01}, /* Locks up if polled for lun != 0 */ | |||
235 | {"CHINON","CD-ROM CDS-535","Q14", BLIST_NOLUN0x01}, /* Locks up if polled for lun != 0 */ | |||
236 | {"DENON","DRD-25X","V", BLIST_NOLUN0x01}, /* Locks up if probed for lun != 0 */ | |||
237 | {"HITACHI","DK312C","CM81", BLIST_NOLUN0x01}, /* Responds to all lun - dtg */ | |||
238 | {"HITACHI","DK314C","CR21" , BLIST_NOLUN0x01}, /* responds to all lun */ | |||
239 | {"IMS", "CDD521/10","2.06", BLIST_NOLUN0x01}, /* Locks-up when LUN>0 polled. */ | |||
240 | {"MAXTOR","XT-3280","PR02", BLIST_NOLUN0x01}, /* Locks-up when LUN>0 polled. */ | |||
241 | {"MAXTOR","XT-4380S","B3C", BLIST_NOLUN0x01}, /* Locks-up when LUN>0 polled. */ | |||
242 | {"MAXTOR","MXT-1240S","I1.2", BLIST_NOLUN0x01}, /* Locks up when LUN>0 polled */ | |||
243 | {"MAXTOR","XT-4170S","B5A", BLIST_NOLUN0x01}, /* Locks-up sometimes when LUN>0 polled. */ | |||
244 | {"MAXTOR","XT-8760S","B7B", BLIST_NOLUN0x01}, /* guess what? */ | |||
245 | {"MEDIAVIS","RENO CD-ROMX2A","2.03",BLIST_NOLUN0x01},/*Responds to all lun */ | |||
246 | {"MICROP", "4110", "*", BLIST_NOTQ0x20}, /* Buggy Tagged Queuing */ | |||
247 | {"NEC","CD-ROM DRIVE:841","1.0", BLIST_NOLUN0x01}, /* Locks-up when LUN>0 polled. */ | |||
248 | {"RODIME","RO3000S","2.33", BLIST_NOLUN0x01}, /* Locks up if polled for lun != 0 */ | |||
249 | {"SANYO", "CRD-250S", "1.20", BLIST_NOLUN0x01}, /* causes failed REQUEST SENSE on lun 1 | |||
250 | * for aha152x controller, which causes | |||
251 | * SCSI code to reset bus.*/ | |||
252 | {"SEAGATE", "ST157N", "\004|j", BLIST_NOLUN0x01}, /* causes failed REQUEST SENSE on lun 1 | |||
253 | * for aha152x controller, which causes | |||
254 | * SCSI code to reset bus.*/ | |||
255 | {"SEAGATE", "ST296","921", BLIST_NOLUN0x01}, /* Responds to all lun */ | |||
256 | {"SEAGATE","ST1581","6538",BLIST_NOLUN0x01}, /* Responds to all lun */ | |||
257 | {"SONY","CD-ROM CDU-541","4.3d", BLIST_NOLUN0x01}, | |||
258 | {"SONY","CD-ROM CDU-55S","1.0i", BLIST_NOLUN0x01}, | |||
259 | {"SONY","CD-ROM CDU-561","1.7x", BLIST_NOLUN0x01}, | |||
260 | {"TANDBERG","TDC 3600","U07", BLIST_NOLUN0x01}, /* Locks up if polled for lun != 0 */ | |||
261 | {"TEAC","CD-ROM","1.06", BLIST_NOLUN0x01}, /* causes failed REQUEST SENSE on lun 1 | |||
262 | * for seagate controller, which causes | |||
263 | * SCSI code to reset bus.*/ | |||
264 | {"TEXEL","CD-ROM","1.06", BLIST_NOLUN0x01}, /* causes failed REQUEST SENSE on lun 1 | |||
265 | * for seagate controller, which causes | |||
266 | * SCSI code to reset bus.*/ | |||
267 | {"QUANTUM","LPS525S","3110", BLIST_NOLUN0x01}, /* Locks sometimes if polled for lun != 0 */ | |||
268 | {"QUANTUM","PD1225S","3110", BLIST_NOLUN0x01}, /* Locks sometimes if polled for lun != 0 */ | |||
269 | {"MEDIAVIS","CDR-H93MV","1.31", BLIST_NOLUN0x01}, /* Locks up if polled for lun != 0 */ | |||
270 | {"SANKYO", "CP525","6.64", BLIST_NOLUN0x01}, /* causes failed REQ SENSE, extra reset */ | |||
271 | {"HP", "C1750A", "3226", BLIST_NOLUN0x01}, /* scanjet iic */ | |||
272 | {"HP", "C1790A", "", BLIST_NOLUN0x01}, /* scanjet iip */ | |||
273 | {"HP", "C2500A", "", BLIST_NOLUN0x01}, /* scanjet iicx */ | |||
274 | ||||
275 | /* | |||
276 | * Other types of devices that have special flags. | |||
277 | */ | |||
278 | {"SONY","CD-ROM CDU-8001","*", BLIST_BORKEN0x04}, | |||
279 | {"TEXEL","CD-ROM","1.06", BLIST_BORKEN0x04}, | |||
280 | {"IOMEGA","Io20S *F","*", BLIST_KEY0x08}, | |||
281 | {"INSITE","Floptical F*8I","*", BLIST_KEY0x08}, | |||
282 | {"INSITE","I325VM","*", BLIST_KEY0x08}, | |||
283 | {"NRC","MBR-7","*", BLIST_FORCELUN0x02 | BLIST_SINGLELUN0x10}, | |||
284 | {"NRC","MBR-7.4","*", BLIST_FORCELUN0x02 | BLIST_SINGLELUN0x10}, | |||
285 | {"REGAL","CDC-4X","*", BLIST_MAX5LUN0x80 | BLIST_SINGLELUN0x10}, | |||
286 | {"NAKAMICH","MJ-4.8S","*", BLIST_FORCELUN0x02 | BLIST_SINGLELUN0x10}, | |||
287 | {"NAKAMICH","MJ-5.16S","*", BLIST_FORCELUN0x02 | BLIST_SINGLELUN0x10}, | |||
288 | {"PIONEER","CD-ROM DRM-600","*", BLIST_FORCELUN0x02 | BLIST_SINGLELUN0x10}, | |||
289 | {"PIONEER","CD-ROM DRM-602X","*", BLIST_FORCELUN0x02 | BLIST_SINGLELUN0x10}, | |||
290 | {"PIONEER","CD-ROM DRM-604X","*", BLIST_FORCELUN0x02 | BLIST_SINGLELUN0x10}, | |||
291 | {"EMULEX","MD21/S2 ESDI","*", BLIST_SINGLELUN0x10}, | |||
292 | {"CANON","IPUBJD","*", BLIST_SPARSELUN0x40}, | |||
293 | {"MATSHITA","PD","*", BLIST_FORCELUN0x02 | BLIST_SINGLELUN0x10}, | |||
294 | {"YAMAHA","CDR100","1.00", BLIST_NOLUN0x01}, /* Locks up if polled for lun != 0 */ | |||
295 | {"YAMAHA","CDR102","1.00", BLIST_NOLUN0x01}, /* Locks up if polled for lun != 0 */ | |||
296 | {"nCipher","Fastness Crypto","*", BLIST_FORCELUN0x02}, | |||
297 | /* | |||
298 | * Must be at end of list... | |||
299 | */ | |||
300 | {NULL, NULL, NULL}
301 | }; | |||
302 | ||||
303 | static int get_device_flags(unsigned char * response_data){ | |||
304 | int i = 0; | |||
305 | unsigned char * pnt; | |||
306 | for(i=0; 1; i++){ | |||
307 | if(device_list[i].vendor == NULL) return 0;
308 | pnt = &response_data[8];
309 | while(*pnt && *pnt == ' ') pnt++;
310 | if(memcmp(device_list[i].vendor, pnt,
311 | strlen(device_list[i].vendor))) continue;
312 | pnt = &response_data[16];
313 | while(*pnt && *pnt == ' ') pnt++;
314 | if(memcmp(device_list[i].model, pnt,
315 | strlen(device_list[i].model))) continue;
316 | return device_list[i].flags; | |||
317 | } | |||
318 | return 0; | |||
319 | } | |||
320 | ||||
321 | void scsi_make_blocked_list(void) { | |||
322 | int block_count = 0, index; | |||
323 | unsigned long flags; | |||
324 | struct Scsi_Host * sh[128], * shpnt; | |||
325 | ||||
326 | /* | |||
327 | * Create a circular linked list from the scsi hosts which have | |||
328 | * the "wish_block" field in the Scsi_Host structure set. | |||
329 | * The blocked list should include all the scsi hosts using ISA DMA. | |||
330 | * In some systems, using two dma channels simultaneously causes | |||
331 | * unpredictable results. | |||
332 | * Among the scsi hosts in the blocked list, only one host at a time | |||
333 | * is allowed to have active commands queued. The transition from | |||
334 | * one active host to the next one is allowed only when host_busy == 0 | |||
335 | * for the active host (which implies host_busy == 0 for all the hosts | |||
336 | * in the list). Moreover for block devices the transition to a new | |||
337 | * active host is allowed only when a request is completed, since a | |||
338 | * block device request can be divided into multiple scsi commands | |||
339 | * (when there are few sg lists or clustering is disabled). | |||
340 | * | |||
341 | * (DB, 4 Feb 1995) | |||
342 | */ | |||
343 | ||||
344 | save_flags(flags);
345 | cli();
346 | host_active = NULL;
347 | ||||
348 | for(shpnt=scsi_hostlist; shpnt; shpnt = shpnt->next) { | |||
349 | ||||
350 | #if 0 | |||
351 | /* | |||
352 | * Is this is a candidate for the blocked list? | |||
353 | * Useful to put into the blocked list all the hosts whose driver | |||
354 | * does not know about the host->block feature. | |||
355 | */ | |||
356 | if (shpnt->unchecked_isa_dma) shpnt->wish_block = 1; | |||
357 | #endif | |||
358 | ||||
359 | if (shpnt->wish_block) sh[block_count++] = shpnt; | |||
360 | } | |||
361 | ||||
362 | if (block_count == 1) sh[0]->block = NULL;
363 | ||||
364 | else if (block_count > 1) { | |||
365 | ||||
366 | for(index = 0; index < block_count - 1; index++) { | |||
367 | sh[index]->block = sh[index + 1]; | |||
368 | printk("scsi%d : added to blocked host list.\n", | |||
369 | sh[index]->host_no); | |||
370 | } | |||
371 | ||||
372 | sh[block_count - 1]->block = sh[0]; | |||
373 | printk("scsi%d : added to blocked host list.\n", | |||
374 | sh[index]->host_no); | |||
375 | } | |||
376 | ||||
377 | restore_flags(flags);
378 | } | |||
379 | ||||
380 | static void scan_scsis_done (Scsi_Cmnd * SCpnt) | |||
381 | { | |||
382 | ||||
383 | #ifdef DEBUG | |||
384 | printk ("scan_scsis_done(%p, %06x)\n", SCpnt->host, SCpnt->result); | |||
385 | #endif | |||
386 | SCpnt->request.rq_status = RQ_SCSI_DONE;
387 |
388 | if (SCpnt->request.sem != NULL)
389 | up(SCpnt->request.sem); | |||
390 | } | |||
391 | ||||
392 | #ifdef CONFIG_SCSI_MULTI_LUN | |||
393 | static int max_scsi_luns = 8; | |||
394 | #else | |||
395 | static int max_scsi_luns = 1; | |||
396 | #endif | |||
397 | ||||
398 | void scsi_luns_setup(char *str, int *ints) { | |||
399 | if (ints[0] != 1) | |||
400 | printk("scsi_luns_setup : usage max_scsi_luns=n (n should be between 1 and 8)\n"); | |||
401 | else | |||
402 | max_scsi_luns = ints[1]; | |||
403 | } | |||
404 | ||||
405 | /* | |||
406 | * Detecting SCSI devices : | |||
407 | * We scan all present host adapter's busses, from ID 0 to ID (max_id). | |||
408 | * We use the INQUIRY command, determine device type, and pass the ID / | |||
409 | * lun address of all sequential devices to the tape driver, all random | |||
410 | * devices to the disk driver. | |||
411 | */ | |||
412 | static void scan_scsis (struct Scsi_Host *shpnt, unchar hardcoded, | |||
413 | unchar hchannel, unchar hid, unchar hlun) | |||
414 | { | |||
415 | int dev, lun, channel; | |||
416 | unsigned char scsi_result0[256]; | |||
417 | unsigned char *scsi_result; | |||
418 | Scsi_Device *SDpnt; | |||
419 | int max_dev_lun, sparse_lun; | |||
420 | Scsi_Cmnd *SCpnt; | |||
421 | ||||
422 | SCpnt = (Scsi_Cmnd *) scsi_init_malloc (sizeof (Scsi_Cmnd), GFP_ATOMIC | GFP_DMA);
423 | SDpnt = (Scsi_Device *) scsi_init_malloc (sizeof (Scsi_Device), GFP_ATOMIC);
424 | memset (SCpnt, 0, sizeof (Scsi_Cmnd));
425 |
426 |
427 | /* Make sure we have something that is valid for DMA purposes */
428 | scsi_result = ( ( !shpnt->unchecked_isa_dma )
429 | ? &scsi_result0[0] : scsi_init_malloc (512, GFP_DMA));
430 |
431 | if (scsi_result == NULL) {
432 | printk ("Unable to obtain scsi_result buffer\n"); | |||
433 | goto leave; | |||
434 | } | |||
435 | ||||
436 | /* We must chain ourself in the host_queue, so commands can time out */ | |||
437 | if(shpnt->host_queue) | |||
438 | shpnt->host_queue->prev = SCpnt; | |||
439 | SCpnt->next = shpnt->host_queue; | |||
440 | SCpnt->prev = NULL;
441 | shpnt->host_queue = SCpnt; | |||
442 | ||||
443 | ||||
444 | if (hardcoded == 1) { | |||
445 | Scsi_Device *oldSDpnt=SDpnt; | |||
446 | struct Scsi_Device_Template * sdtpnt; | |||
447 | channel = hchannel; | |||
448 | if(channel > shpnt->max_channel) goto leave; | |||
449 | dev = hid; | |||
450 | if(dev >= shpnt->max_id) goto leave; | |||
451 | lun = hlun; | |||
452 | if(lun >= shpnt->max_lun) goto leave; | |||
453 | scan_scsis_single (channel, dev, lun, &max_dev_lun, &sparse_lun, | |||
454 | &SDpnt, SCpnt, shpnt, scsi_result); | |||
455 | if(SDpnt!=oldSDpnt) { | |||
456 | ||||
457 | /* it could happen the blockdevice hasn't yet been inited */ | |||
458 | for(sdtpnt = scsi_devicelist; sdtpnt; sdtpnt = sdtpnt->next) | |||
459 | if(sdtpnt->init && sdtpnt->dev_noticed) (*sdtpnt->init)(); | |||
460 | ||||
461 | oldSDpnt->scsi_request_fn = NULL;
462 | for(sdtpnt = scsi_devicelist; sdtpnt; sdtpnt = sdtpnt->next) | |||
463 | if(sdtpnt->attach) { | |||
464 | (*sdtpnt->attach)(oldSDpnt); | |||
465 | if(oldSDpnt->attached) scsi_build_commandblocks(oldSDpnt);} | |||
466 | resize_dma_pool(); | |||
467 | ||||
468 | for(sdtpnt = scsi_devicelist; sdtpnt; sdtpnt = sdtpnt->next) { | |||
469 | if(sdtpnt->finish && sdtpnt->nr_dev) | |||
470 | {(*sdtpnt->finish)();} | |||
471 | } | |||
472 | } | |||
473 | ||||
474 | } | |||
475 | else { | |||
476 | for (channel = 0; channel <= shpnt->max_channel; channel++) { | |||
477 | for (dev = 0; dev < shpnt->max_id; ++dev) { | |||
478 | if (shpnt->this_id != dev) { | |||
479 | ||||
480 | /* | |||
481 | * We need the for so our continue, etc. work fine. We put this in | |||
482 | * a variable so that we can override it during the scan if we | |||
483 | * detect a device *KNOWN* to have multiple logical units. | |||
484 | */ | |||
485 | max_dev_lun = (max_scsi_luns < shpnt->max_lun ? | |||
486 | max_scsi_luns : shpnt->max_lun); | |||
487 | sparse_lun = 0; | |||
488 | for (lun = 0; lun < max_dev_lun; ++lun) { | |||
489 | if (!scan_scsis_single (channel, dev, lun, &max_dev_lun, | |||
490 | &sparse_lun, &SDpnt, SCpnt, shpnt, | |||
491 | scsi_result) | |||
492 | && !sparse_lun) | |||
493 | break; /* break means don't probe further for luns!=0 */ | |||
494 | } /* for lun ends */ | |||
495 | } /* if this_id != id ends */ | |||
496 | } /* for dev ends */ | |||
497 | } /* for channel ends */ | |||
498 | } /* if/else hardcoded */ | |||
499 | ||||
500 | leave: | |||
501 | ||||
502 | {/* Unchain SCpnt from host_queue */ | |||
503 | Scsi_Cmnd *prev, *next, *hqptr; | |||
504 | for(hqptr = shpnt->host_queue; hqptr != SCpnt; hqptr = hqptr->next) ; | |||
505 | if(hqptr) { | |||
506 | prev = hqptr->prev; | |||
507 | next = hqptr->next; | |||
508 | if(prev) | |||
509 | prev->next = next; | |||
510 | else | |||
511 | shpnt->host_queue = next; | |||
512 | if(next) next->prev = prev; | |||
513 | } | |||
514 | } | |||
515 | ||||
516 | /* Last device block does not exist. Free memory. */ | |||
517 | if (SDpnt != NULL)
518 | scsi_init_free ((char *) SDpnt, sizeof (Scsi_Device));
519 |
520 | if (SCpnt != NULL)
521 | scsi_init_free ((char *) SCpnt, sizeof (Scsi_Cmnd));
522 |
523 | /* If we allocated a buffer so we could do DMA, free it now */
524 | if (scsi_result != &scsi_result0[0] && scsi_result != NULL)
525 | scsi_init_free (scsi_result, 512); | |||
526 | ||||
527 | } | |||
528 | ||||
529 | /* | |||
530 | * The worker for scan_scsis. | |||
531 | * Returning 0 means Please don't ask further for lun!=0, 1 means OK go on. | |||
532 | * Global variables used : scsi_devices(linked list) | |||
533 | */ | |||
534 | int scan_scsis_single (int channel, int dev, int lun, int *max_dev_lun, | |||
535 | int *sparse_lun, Scsi_Device **SDpnt2, Scsi_Cmnd * SCpnt, | |||
536 | struct Scsi_Host * shpnt, char *scsi_result) | |||
537 | { | |||
538 | unsigned char scsi_cmd[12]; | |||
539 | struct Scsi_Device_Template *sdtpnt; | |||
540 | Scsi_Device * SDtail, *SDpnt=*SDpnt2; | |||
541 | int bflags, type=-1; | |||
542 | ||||
543 | SDtail = scsi_devices; | |||
544 | if (scsi_devices) | |||
545 | while (SDtail->next) | |||
546 | SDtail = SDtail->next; | |||
547 | ||||
548 | memset (SDpnt, 0, sizeof (Scsi_Device));
549 | SDpnt->host = shpnt;
550 | SDpnt->id = dev;
551 | SDpnt->lun = lun; | |||
552 | SDpnt->channel = channel; | |||
553 | ||||
554 | /* Some low level driver could use device->type (DB) */ | |||
555 | SDpnt->type = -1; | |||
556 | ||||
557 | /* | |||
558 | * Assume that the device will have handshaking problems, and then fix this | |||
559 | * field later if it turns out it doesn't | |||
560 | */ | |||
561 | SDpnt->borken = 1; | |||
562 | SDpnt->was_reset = 0; | |||
563 | SDpnt->expecting_cc_ua = 0; | |||
564 | ||||
565 | scsi_cmd[0] = TEST_UNIT_READY;
566 | scsi_cmd[1] = lun << 5; | |||
567 | scsi_cmd[2] = scsi_cmd[3] = scsi_cmd[4] = scsi_cmd[5] = 0; | |||
568 | ||||
569 | SCpnt->host = SDpnt->host; | |||
570 | SCpnt->device = SDpnt; | |||
571 | SCpnt->target = SDpnt->id; | |||
572 | SCpnt->lun = SDpnt->lun; | |||
573 | SCpnt->channel = SDpnt->channel; | |||
574 | { | |||
575 | struct semaphore sem = MUTEX_LOCKED;
576 | SCpnt->request.sem = &sem;
577 | SCpnt->request.rq_status = RQ_SCSI_BUSY;
578 | scsi_do_cmd (SCpnt, (void *) scsi_cmd,
579 | (void *) scsi_result,
580 | 256, scan_scsis_done, SCSI_TIMEOUT + 4 * HZ, 5);
581 | down (&sem); | |||
582 | } | |||
583 | ||||
584 | #if defined(DEBUG) || defined(DEBUG_INIT) | |||
585 | printk ("scsi: scan_scsis_single id %d lun %d. Return code 0x%08x\n", | |||
586 | dev, lun, SCpnt->result); | |||
587 | print_driverbyte(SCpnt->result); print_hostbyte(SCpnt->result); | |||
588 | printk("\n"); | |||
589 | #endif | |||
590 | ||||
591 | if (SCpnt->result) { | |||
592 | if (((driver_byte (SCpnt->result) & DRIVER_SENSE) ||
593 | (status_byte (SCpnt->result) & CHECK_CONDITION)) &&
594 | ((SCpnt->sense_buffer[0] & 0x70) >> 4) == 7) {
595 | if (((SCpnt->sense_buffer[2] & 0xf) != NOT_READY) &&
596 | ((SCpnt->sense_buffer[2] & 0xf) != UNIT_ATTENTION) &&
597 | ((SCpnt->sense_buffer[2] & 0xf) != ILLEGAL_REQUEST || lun > 0))
598 | return 1; | |||
599 | } | |||
600 | else | |||
601 | return 0; | |||
602 | } | |||
603 | ||||
604 | #if defined (DEBUG) || defined(DEBUG_INIT) | |||
605 | printk ("scsi: performing INQUIRY\n"); | |||
606 | #endif | |||
607 | /* | |||
608 | * Build an INQUIRY command block. | |||
609 | */ | |||
610 | scsi_cmd[0] = INQUIRY;
611 | scsi_cmd[1] = (lun << 5) & 0xe0; | |||
612 | scsi_cmd[2] = 0; | |||
613 | scsi_cmd[3] = 0; | |||
614 | scsi_cmd[4] = 255; | |||
615 | scsi_cmd[5] = 0; | |||
616 | SCpnt->cmd_len = 0; | |||
617 | { | |||
618 | struct semaphore sem = MUTEX_LOCKED;
619 | SCpnt->request.sem = &sem;
620 | SCpnt->request.rq_status = RQ_SCSI_BUSY;
621 | scsi_do_cmd (SCpnt, (void *) scsi_cmd,
622 | (void *) scsi_result,
623 | 256, scan_scsis_done, SCSI_TIMEOUT, 3);
624 | down (&sem); | |||
625 | } | |||
626 | ||||
627 | #if defined(DEBUG) || defined(DEBUG_INIT) | |||
628 | printk ("scsi: INQUIRY %s with code 0x%x\n", | |||
629 | SCpnt->result ? "failed" : "successful", SCpnt->result); | |||
630 | #endif | |||
631 | ||||
632 | if (SCpnt->result) | |||
633 | return 0; /* assume no peripheral if any sort of error */ | |||
634 | ||||
635 | /* | |||
636 | * Check the peripheral qualifier field - this tells us whether LUNS | |||
637 | * are supported here or not. | |||
638 | */ | |||
639 | if( (scsi_result[0] >> 5) == 3 ) | |||
640 | { | |||
641 | return 0; /* assume no peripheral if any sort of error */ | |||
642 | } | |||
643 | ||||
644 | /* | |||
645 | * It would seem some TOSHIBA CDROM gets things wrong | |||
646 | */ | |||
647 | if (!strncmp (scsi_result + 8, "TOSHIBA", 7) && | |||
648 | !strncmp (scsi_result + 16, "CD-ROM", 6) && | |||
649 | scsi_result[0] == TYPE_DISK) {
650 | scsi_result[0] = TYPE_ROM;
651 | scsi_result[1] |= 0x80; /* removable */ | |||
652 | } | |||
653 | ||||
654 | if (!strncmp (scsi_result + 8, "NEC", 3)) { | |||
655 | if (!strncmp (scsi_result + 16, "CD-ROM DRIVE:84 ", 16) || | |||
656 | !strncmp (scsi_result + 16, "CD-ROM DRIVE:25", 15)) | |||
657 | SDpnt->manufacturer = SCSI_MAN_NEC_OLDCDR;
658 | else
659 | SDpnt->manufacturer = SCSI_MAN_NEC;
660 | }
661 | else if (!strncmp (scsi_result + 8, "TOSHIBA", 7))
662 | SDpnt->manufacturer = SCSI_MAN_TOSHIBA;
663 | else if (!strncmp (scsi_result + 8, "SONY", 4))
664 | SDpnt->manufacturer = SCSI_MAN_SONY;
665 | else if (!strncmp (scsi_result + 8, "PIONEER", 7))
666 | SDpnt->manufacturer = SCSI_MAN_PIONEER;
667 | else
668 | SDpnt->manufacturer = SCSI_MAN_UNKNOWN;
669 | ||||
670 | memcpy (SDpnt->vendor, scsi_result + 8, 8);
671 | memcpy (SDpnt->model, scsi_result + 16, 16);
672 | memcpy (SDpnt->rev, scsi_result + 32, 4);
673 | ||||
674 | SDpnt->removable = (0x80 & scsi_result[1]) >> 7; | |||
675 | SDpnt->lockable = SDpnt->removable; | |||
676 | SDpnt->changed = 0; | |||
677 | SDpnt->access_count = 0; | |||
678 | SDpnt->busy = 0; | |||
679 | SDpnt->has_cmdblocks = 0; | |||
680 | /* | |||
681 | * Currently, all sequential devices are assumed to be tapes, all random | |||
682 | * devices disk, with the appropriate read only flags set for ROM / WORM | |||
683 | * treated as RO. | |||
684 | */ | |||
685 | switch (type = (scsi_result[0] & 0x1f)) { | |||
686 | case TYPE_TAPE:
687 | case TYPE_DISK:
688 | case TYPE_MOD:
689 | case TYPE_PROCESSOR:
690 | case TYPE_SCANNER:
691 | case TYPE_MEDIUM_CHANGER:
692 | SDpnt->writeable = 1;
693 | break;
694 | case TYPE_WORM:
695 | case TYPE_ROM:
696 | SDpnt->writeable = 0; | |||
697 | break; | |||
698 | default: | |||
699 | printk ("scsi: unknown type %d\n", type); | |||
700 | } | |||
701 | ||||
702 | SDpnt->single_lun = 0; | |||
703 | SDpnt->soft_reset = | |||
704 | (scsi_result[7] & 1) && ((scsi_result[3] & 7) == 2); | |||
705 | SDpnt->random = (type == TYPE_TAPE) ? 0 : 1;
706 | SDpnt->type = (type & 0x1f); | |||
707 | ||||
708 | print_inquiry (scsi_result); | |||
709 | ||||
710 | for (sdtpnt = scsi_devicelist; sdtpnt; | |||
711 | sdtpnt = sdtpnt->next) | |||
712 | if (sdtpnt->detect) | |||
713 | SDpnt->attached += | |||
714 | (*sdtpnt->detect) (SDpnt); | |||
715 | ||||
716 | SDpnt->scsi_level = scsi_result[2] & 0x07; | |||
717 | if (SDpnt->scsi_level >= 2 || | |||
718 | (SDpnt->scsi_level == 1 && | |||
719 | (scsi_result[3] & 0x0f) == 1)) | |||
720 | SDpnt->scsi_level++; | |||
721 | ||||
722 | /* | |||
723 | * Accommodate drivers that want to sleep when they should be in a polling | |||
724 | * loop. | |||
725 | */ | |||
726 | SDpnt->disconnect = 0; | |||
727 | ||||
728 | /* | |||
729 | * Get any flags for this device. | |||
730 | */ | |||
731 | bflags = get_device_flags (scsi_result); | |||
732 | ||||
733 | /* | |||
734 | * Set the tagged_queue flag for SCSI-II devices that purport to support | |||
735 | * tagged queuing in the INQUIRY data. | |||
736 | */ | |||
737 | SDpnt->tagged_queue = 0; | |||
738 | if ((SDpnt->scsi_level >= SCSI_2) &&
739 | (scsi_result[7] & 2) &&
740 | !(bflags & BLIST_NOTQ)) {
741 | SDpnt->tagged_supported = 1; | |||
742 | SDpnt->current_tag = 0; | |||
743 | } | |||
744 | ||||
745 | /* | |||
746 | * Some revisions of the Texel CD ROM drives have handshaking problems when | |||
747 | * used with the Seagate controllers. Before we know what type of device | |||
748 | * we're talking to, we assume it's borken and then change it here if it | |||
749 | * turns out that it isn't a TEXEL drive. | |||
750 | */ | |||
751 | if ((bflags & BLIST_BORKEN) == 0)
752 | SDpnt->borken = 0; | |||
753 | ||||
754 | /* | |||
755 | * If we want to only allow I/O to one of the luns attached to this device | |||
756 | * at a time, then we set this flag. | |||
757 | */ | |||
758 | if (bflags & BLIST_SINGLELUN)
759 | SDpnt->single_lun = 1; | |||
760 | ||||
761 | /* | |||
762 | * These devices need this "key" to unlock the devices so we can use it | |||
763 | */ | |||
764 | if ((bflags & BLIST_KEY) != 0) {
765 | printk ("Unlocked floptical drive.\n");
766 | SDpnt->lockable = 0;
767 | scsi_cmd[0] = MODE_SENSE;
768 | scsi_cmd[1] = (lun << 5) & 0xe0;
769 | scsi_cmd[2] = 0x2e;
770 | scsi_cmd[3] = 0;
771 | scsi_cmd[4] = 0x2a;
772 | scsi_cmd[5] = 0;
773 | SCpnt->cmd_len = 0;
774 | {
775 | struct semaphore sem = MUTEX_LOCKED;
776 | SCpnt->request.rq_status = RQ_SCSI_BUSY;
777 | SCpnt->request.sem = &sem;
778 | scsi_do_cmd (SCpnt, (void *) scsi_cmd,
779 | (void *) scsi_result, 0x2a,
780 | scan_scsis_done, SCSI_TIMEOUT, 3);
781 | down (&sem); | |||
782 | } | |||
783 | } | |||
784 | /* Add this device to the linked list at the end */ | |||
785 | if (SDtail) | |||
786 | SDtail->next = SDpnt; | |||
787 | else | |||
788 | scsi_devices = SDpnt; | |||
789 | SDtail = SDpnt; | |||
790 | ||||
791 | SDpnt = (Scsi_Device *) scsi_init_malloc (sizeof (Scsi_Device), GFP_ATOMIC);
792 | *SDpnt2=SDpnt; | |||
793 | if (!SDpnt) | |||
794 | printk ("scsi: scan_scsis_single: Cannot malloc\n"); | |||
795 | ||||
796 | ||||
797 | /* | |||
798 | * Some scsi devices cannot be polled for lun != 0 due to firmware bugs | |||
799 | */ | |||
800 | if (bflags & BLIST_NOLUN)
801 | return 0; /* break; */ | |||
802 | ||||
803 | /* | |||
804 | * If this device is known to support sparse multiple units, override the | |||
805 | * other settings, and scan all of them. | |||
806 | */ | |||
807 | if (bflags & BLIST_SPARSELUN) {
808 | *max_dev_lun = 8; | |||
809 | *sparse_lun = 1; | |||
810 | return 1; | |||
811 | } | |||
812 | ||||
813 | /* | |||
814 | * If this device is known to support multiple units, override the other | |||
815 | * settings, and scan all of them. | |||
816 | */ | |||
817 | if (bflags & BLIST_FORCELUN) {
818 | *max_dev_lun = 8; | |||
819 | return 1; | |||
820 | } | |||
821 | ||||
822 | /* | |||
823 | * REGAL CDC-4X: avoid hang after LUN 4 | |||
824 | */ | |||
825 | if (bflags & BLIST_MAX5LUN) {
826 | *max_dev_lun = 5; | |||
827 | return 1; | |||
828 | } | |||
829 | ||||
830 | /* | |||
831 | * We assume the device can't handle lun!=0 if: - it reports scsi-0 (ANSI | |||
832 | * SCSI Revision 0) (old drives like MAXTOR XT-3280) or - it reports scsi-1 | |||
833 | * (ANSI SCSI Revision 1) and Response Data Format 0 | |||
834 | */ | |||
835 | if (((scsi_result[2] & 0x07) == 0) | |||
836 | || | |||
837 | ((scsi_result[2] & 0x07) == 1 && | |||
838 | (scsi_result[3] & 0x0f) == 0)) | |||
839 | return 0; | |||
840 | return 1; | |||
841 | } | |||
842 | ||||
843 | /* | |||
844 | * Flag bits for the internal_timeout array | |||
845 | */ | |||
846 | #define NORMAL_TIMEOUT 0
847 | #define IN_ABORT 1
848 | #define IN_RESET 2
849 | #define IN_RESET2 4
850 | #define IN_RESET3 8
851 | ||||
852 | /* | |||
853 | * This is our time out function, called when the timer expires for a | |||
854 | * given host adapter. It will attempt to abort the currently executing | |||
855 | * command, that failing perform a kernel panic. | |||
856 | */ | |||
857 | ||||
858 | static void scsi_times_out (Scsi_Cmnd * SCpnt) | |||
859 | { | |||
860 | ||||
861 | switch (SCpnt->internal_timeout & (IN_ABORT | IN_RESET | IN_RESET2 | IN_RESET3))
862 | {
863 | case NORMAL_TIMEOUT:
864 | { | |||
865 | #ifdef DEBUG_TIMEOUT | |||
866 | scsi_dump_status(); | |||
867 | #endif | |||
868 | } | |||
869 | ||||
870 | if (!scsi_abort (SCpnt, DID_TIME_OUT))
871 | return;
872 | case IN_ABORT:
873 | printk("SCSI host %d abort (pid %ld) timed out - resetting\n",
874 | SCpnt->host->host_no, SCpnt->pid);
875 | if (!scsi_reset (SCpnt, SCSI_RESET_ASYNCHRONOUS))
876 | return;
877 | case IN_RESET:
878 | case (IN_ABORT | IN_RESET):
879 | /* This might be controversial, but if there is a bus hang, | |||
880 | * you might conceivably want the machine up and running | |||
881 | * esp if you have an ide disk. | |||
882 | */ | |||
883 | printk("SCSI host %d channel %d reset (pid %ld) timed out - " | |||
884 | "trying harder\n", | |||
885 | SCpnt->host->host_no, SCpnt->channel, SCpnt->pid); | |||
886 | SCpnt->internal_timeout &= ~IN_RESET;
887 | SCpnt->internal_timeout |= IN_RESET2;
888 | scsi_reset (SCpnt,
889 | SCSI_RESET_ASYNCHRONOUS | SCSI_RESET_SUGGEST_BUS_RESET);
890 | return;
891 | case IN_RESET2:
892 | case (IN_ABORT | IN_RESET2):
893 | /* Obviously the bus reset didn't work. | |||
894 | * Let's try even harder and call for an HBA reset. | |||
895 | * Maybe the HBA itself crashed and this will shake it loose. | |||
896 | */ | |||
897 | printk("SCSI host %d reset (pid %ld) timed out - trying to shake it loose\n", | |||
898 | SCpnt->host->host_no, SCpnt->pid); | |||
899 | SCpnt->internal_timeout &= ~(IN_RESET | IN_RESET2);
900 | SCpnt->internal_timeout |= IN_RESET3;
901 | scsi_reset (SCpnt,
902 | SCSI_RESET_ASYNCHRONOUS | SCSI_RESET_SUGGEST_HOST_RESET);
903 | return; | |||
904 | ||||
905 | default: | |||
906 | printk("SCSI host %d reset (pid %ld) timed out again -\n", | |||
907 | SCpnt->host->host_no, SCpnt->pid); | |||
908 | printk("probably an unrecoverable SCSI bus or device hang.\n"); | |||
909 | return; | |||
910 | ||||
911 | } | |||
912 | ||||
913 | } | |||
914 | ||||
915 | ||||
916 | /* This function takes a quick look at a request, and decides if it | |||
917 | * can be queued now, or if there would be a stall while waiting for | |||
918 | * something else to finish. This routine assumes that interrupts are | |||
919 | * turned off when entering the routine. It is the responsibility | |||
920 | * of the calling code to ensure that this is the case. | |||
921 | */ | |||
922 | ||||
923 | Scsi_Cmnd * request_queueable (struct request * req, Scsi_Device * device) | |||
924 | { | |||
925 | Scsi_Cmnd * SCpnt = NULL((void *) 0); | |||
926 | int tablesize; | |||
927 | Scsi_Cmnd * found = NULL((void *) 0); | |||
928 | struct buffer_head * bh, *bhp; | |||
929 | ||||
930 | if (!device) | |||
931 | panic ("No device passed to request_queueable().\n"); | |||
932 | ||||
933 | if (req && req->rq_status == RQ_INACTIVE(-1)) | |||
934 | panic("Inactive in request_queueable"); | |||
935 | ||||
936 | /* | |||
937 | * Look for a free command block. If we have been instructed not to queue | |||
938 | * multiple commands to multi-lun devices, then check to see what else is | |||
939 | * going for this device first. | |||
940 | */ | |||
941 | ||||
942 | if (!device->single_lun) { | |||
943 | SCpnt = device->device_queue; | |||
944 | while(SCpnt){ | |||
945 | if(SCpnt->request.rq_status == RQ_INACTIVE(-1)) break; | |||
946 | SCpnt = SCpnt->device_next; | |||
947 | } | |||
948 | } else { | |||
949 | SCpnt = device->host->host_queue; | |||
950 | while(SCpnt){ | |||
951 | if(SCpnt->channel == device->channel | |||
952 | && SCpnt->target == device->id) { | |||
953 | if (SCpnt->lun == device->lun) { | |||
954 | if(found == NULL((void *) 0) | |||
955 | && SCpnt->request.rq_status == RQ_INACTIVE(-1)) | |||
956 | { | |||
957 | found=SCpnt; | |||
958 | } | |||
959 | } | |||
960 | if(SCpnt->request.rq_status != RQ_INACTIVE(-1)) { | |||
961 | /* | |||
962 | * I think that we should really limit things to one | |||
963 | * outstanding command per device - this is what tends | |||
964 | * to trip up buggy firmware. | |||
965 | */ | |||
966 | return NULL((void *) 0); | |||
967 | } | |||
968 | } | |||
969 | SCpnt = SCpnt->next; | |||
970 | } | |||
971 | SCpnt = found; | |||
972 | } | |||
973 | ||||
974 | if (!SCpnt) return NULL((void *) 0); | |||
975 | ||||
976 | if (SCSI_BLOCK(device->host)) return NULL;
977 | ||||
978 | if (req) { | |||
979 | memcpy(&SCpnt->request, req, sizeof(struct request));
980 | tablesize = device->host->sg_tablesize; | |||
981 | bhp = bh = req->bh; | |||
982 | if(!tablesize) bh = NULL((void *) 0); | |||
983 | /* Take a quick look through the table to see how big it is. | |||
984 | * We already have our copy of req, so we can mess with that | |||
985 | * if we want to. | |||
986 | */ | |||
987 | while(req->nr_sectors && bh){ | |||
988 | bhp = bhp->b_reqnext; | |||
989 | if(!bhp || !CONTIGUOUS_BUFFERS(bh,bhp)) tablesize--;
990 | req->nr_sectors -= bh->b_size >> 9; | |||
991 | req->sector += bh->b_size >> 9; | |||
992 | if(!tablesize) break; | |||
993 | bh = bhp; | |||
994 | } | |||
995 | if(req->nr_sectors && bh && bh->b_reqnext){ /* Any leftovers? */ | |||
996 | SCpnt->request.bhtail = bh; | |||
997 | req->bh = bh->b_reqnext; /* Divide request */ | |||
998 | bh->b_reqnext = NULL((void *) 0); | |||
999 | bh = req->bh; | |||
1000 | ||||
1001 | /* Now reset things so that req looks OK */ | |||
1002 | SCpnt->request.nr_sectors -= req->nr_sectors; | |||
1003 | req->current_nr_sectors = bh->b_size >> 9; | |||
1004 | req->buffer = bh->b_data; | |||
1005 | SCpnt->request.sem = NULL((void *) 0); /* Wait until whole thing done */ | |||
1006 | } else { | |||
1007 | req->rq_status = RQ_INACTIVE(-1); | |||
1008 | wake_up(&wait_for_request); | |||
1009 | } | |||
1010 | } else { | |||
1011 | SCpnt->request.rq_status = RQ_SCSI_BUSY0xffff; /* Busy, but no request */ | |||
1012 | SCpnt->request.sem = NULL((void *) 0); /* And no one is waiting for the device | |||
1013 | * either */ | |||
1014 | } | |||
1015 | ||||
1016 | SCpnt->use_sg = 0; /* Reset the scatter-gather flag */ | |||
1017 | SCpnt->old_use_sg = 0; | |||
1018 | SCpnt->transfersize = 0; | |||
1019 | SCpnt->underflow = 0; | |||
1020 | SCpnt->cmd_len = 0; | |||
1021 | ||||
1022 | /* Since not everyone seems to set the device info correctly | |||
1023 | * before Scsi_Cmnd gets send out to scsi_do_command, we do it here. | |||
1024 | */ | |||
1025 | SCpnt->channel = device->channel; | |||
1026 | SCpnt->lun = device->lun; | |||
1027 | SCpnt->target = device->id; | |||
1028 | ||||
1029 | return SCpnt; | |||
1030 | } | |||
1031 | ||||
1032 | /* This function returns a structure pointer that will be valid for | |||
1033 | * the device. The wait parameter tells us whether we should wait for | |||
1034 | * the unit to become free or not. We are also able to tell this routine | |||
1035 | * not to return a descriptor if the host is unable to accept any more | |||
1036 | * commands for the time being. We need to keep in mind that there is no | |||
1037 | * guarantee that the host remain not busy. Keep in mind the | |||
1038 | * request_queueable function also knows the internal allocation scheme | |||
1039 | * of the packets for each device | |||
1040 | */ | |||
1041 | ||||
1042 | Scsi_Cmnd * allocate_device (struct request ** reqp, Scsi_Device * device, | |||
1043 | int wait) | |||
1044 | { | |||
1045 | kdev_t dev; | |||
1046 | struct request * req = NULL((void *) 0); | |||
1047 | int tablesize; | |||
1048 | unsigned long flags; | |||
1049 | struct buffer_head * bh, *bhp; | |||
1050 | struct Scsi_Host * host; | |||
1051 | Scsi_Cmnd * SCpnt = NULL((void *) 0); | |||
1052 | Scsi_Cmnd * SCwait = NULL((void *) 0); | |||
1053 | Scsi_Cmnd * found = NULL((void *) 0); | |||
1054 | ||||
1055 | if (!device) | |||
1056 | panic ("No device passed to allocate_device().\n"); | |||
1057 | ||||
1058 | if (reqp) req = *reqp; | |||
1059 | ||||
1060 | /* See if this request has already been queued by an interrupt routine */ | |||
1061 | if (req) { | |||
1062 | if(req->rq_status == RQ_INACTIVE(-1)) return NULL((void *) 0); | |||
1063 | dev = req->rq_dev; | |||
1064 | } else | |||
1065 | dev = 0; /* unused */ | |||
1066 | ||||
1067 | host = device->host; | |||
1068 | ||||
1069 | if (intr_count && SCSI_BLOCK(host)) return NULL;
1070 | ||||
1071 | while (1==1){ | |||
1072 | if (!device->single_lun) { | |||
1073 | SCpnt = device->device_queue; | |||
1074 | while(SCpnt){ | |||
1075 | SCwait = SCpnt; | |||
1076 | if(SCpnt->request.rq_status == RQ_INACTIVE(-1)) break; | |||
1077 | SCpnt = SCpnt->device_next; | |||
1078 | } | |||
1079 | } else { | |||
1080 | SCpnt = device->host->host_queue; | |||
1081 | while(SCpnt){ | |||
1082 | if(SCpnt->channel == device->channel | |||
1083 | && SCpnt->target == device->id) { | |||
1084 | if (SCpnt->lun == device->lun) { | |||
1085 | SCwait = SCpnt; | |||
1086 | if(found == NULL((void *) 0) | |||
1087 | && SCpnt->request.rq_status == RQ_INACTIVE(-1)) | |||
1088 | { | |||
1089 | found=SCpnt; | |||
1090 | } | |||
1091 | } | |||
1092 | if(SCpnt->request.rq_status != RQ_INACTIVE(-1)) { | |||
1093 | /* | |||
1094 | * I think that we should really limit things to one | |||
1095 | * outstanding command per device - this is what tends | |||
1096 | * to trip up buggy firmware. | |||
1097 | */ | |||
1098 | found = NULL((void *) 0); | |||
1099 | break; | |||
1100 | } | |||
1101 | } | |||
1102 | SCpnt = SCpnt->next; | |||
1103 | } | |||
1104 | SCpnt = found; | |||
1105 | } | |||
1106 | ||||
1107 | save_flags(flags);
1108 | cli();
1109 | /* See if this request has already been queued by an interrupt routine | |||
1110 | */ | |||
1111 | if (req && (req->rq_status == RQ_INACTIVE(-1) || req->rq_dev != dev)) { | |||
1112 | restore_flags(flags);
1113 | return NULL((void *) 0); | |||
1114 | } | |||
1115 | if (!SCpnt || SCpnt->request.rq_status != RQ_INACTIVE(-1)) /* Might have changed */ | |||
1116 | { | |||
1117 | #if 1 /* NEW CODE */ | |||
1118 | if (wait && SCwait && SCwait->request.rq_status != RQ_INACTIVE(-1)){ | |||
1119 | sleep_on(&device->device_wait); | |||
1120 | restore_flags(flags);
1121 | } else { | |||
1122 | restore_flags(flags);
1123 | if (!wait) return NULL((void *) 0); | |||
1124 | if (!SCwait) { | |||
1125 | printk("Attempt to allocate device channel %d," | |||
1126 | " target %d, lun %d\n", device->channel, | |||
1127 | device->id, device->lun); | |||
1128 | panic("No device found in allocate_device\n"); | |||
1129 | } | |||
1130 | } | |||
1131 | #else /* ORIGINAL CODE */ | |||
1132 | restore_flags(flags);
1133 | if(!wait) return NULL((void *) 0); | |||
1134 | if (!SCwait) { | |||
1135 | printk("Attempt to allocate device channel %d, target" | |||
1136 | " %d, lun %d\n", device->channel, device->id, | |||
1137 | device->lun); | |||
1138 | panic("No device found in allocate_device\n"); | |||
1139 | } | |||
1140 | SCSI_SLEEP(&device->device_wait,
1141 | (SCwait->request.rq_status != RQ_INACTIVE));
1142 | #endif | |||
1143 | } else { | |||
1144 | if (req) { | |||
1145 | memcpy(&SCpnt->request, req, sizeof(struct request));
1146 | tablesize = device->host->sg_tablesize; | |||
1147 | bhp = bh = req->bh; | |||
1148 | if(!tablesize) bh = NULL((void *) 0); | |||
1149 | /* Take a quick look through the table to see how big it is. | |||
1150 | * We already have our copy of req, so we can mess with that | |||
1151 | * if we want to. | |||
1152 | */ | |||
1153 | while(req->nr_sectors && bh){ | |||
1154 | bhp = bhp->b_reqnext; | |||
1155 | if(!bhp || !CONTIGUOUS_BUFFERS(bh,bhp)) tablesize--;
1156 | req->nr_sectors -= bh->b_size >> 9; | |||
1157 | req->sector += bh->b_size >> 9; | |||
1158 | if(!tablesize) break; | |||
1159 | bh = bhp; | |||
1160 | } | |||
1161 | if(req->nr_sectors && bh && bh->b_reqnext){/* Any leftovers? */ | |||
1162 | SCpnt->request.bhtail = bh; | |||
1163 | req->bh = bh->b_reqnext; /* Divide request */ | |||
1164 | bh->b_reqnext = NULL((void *) 0); | |||
1165 | bh = req->bh; | |||
1166 | /* Now reset things so that req looks OK */ | |||
1167 | SCpnt->request.nr_sectors -= req->nr_sectors; | |||
1168 | req->current_nr_sectors = bh->b_size >> 9; | |||
1169 | req->buffer = bh->b_data; | |||
1170 | SCpnt->request.sem = NULL((void *) 0); /* Wait until whole thing done*/ | |||
1171 | } | |||
1172 | else | |||
1173 | { | |||
1174 | req->rq_status = RQ_INACTIVE(-1); | |||
1175 | *reqp = req->next; | |||
1176 | wake_up(&wait_for_request); | |||
1177 | } | |||
1178 | } else { | |||
1179 | SCpnt->request.rq_status = RQ_SCSI_BUSY0xffff; | |||
1180 | SCpnt->request.sem = NULL((void *) 0); /* And no one is waiting for this | |||
1181 | * to complete */ | |||
1182 | } | |||
1183 | restore_flags(flags);
1184 | break; | |||
1185 | } | |||
1186 | } | |||
1187 | ||||
1188 | SCpnt->use_sg = 0; /* Reset the scatter-gather flag */ | |||
1189 | SCpnt->old_use_sg = 0; | |||
1190 | SCpnt->transfersize = 0; /* No default transfer size */ | |||
1191 | SCpnt->cmd_len = 0; | |||
1192 | ||||
1193 | SCpnt->underflow = 0; /* Do not flag underflow conditions */ | |||
1194 | ||||
1195 | /* Since not everyone seems to set the device info correctly | |||
1196 | * before Scsi_Cmnd gets send out to scsi_do_command, we do it here. | |||
1197 | */ | |||
1198 | SCpnt->channel = device->channel; | |||
1199 | SCpnt->lun = device->lun; | |||
1200 | SCpnt->target = device->id; | |||
1201 | ||||
1202 | return SCpnt; | |||
1203 | } | |||
1204 | ||||
1205 | /* | |||
1206 | * This is inline because we have stack problems if we recurse too deeply.
1207 | */ | |||
1208 | ||||
1209 | inline void internal_cmnd (Scsi_Cmnd * SCpnt)
1210 | { | |||
1211 | unsigned long flags, timeout; | |||
1212 | struct Scsi_Host * host; | |||
1213 | #ifdef DEBUG_DELAY | |||
1214 | unsigned long clock; | |||
1215 | #endif | |||
1216 | ||||
1217 | #if DEBUG | |||
1218 | unsigned long *ret = 0; | |||
1219 | #ifdef __mips__ | |||
1220 | __asm__ __volatile__ ("move\t%0,$31":"=r"(ret)); | |||
1221 | #else | |||
1222 | ret = __builtin_return_address(0); | |||
1223 | #endif | |||
1224 | #endif | |||
1225 | ||||
1226 | host = SCpnt->host; | |||
1227 | ||||
1228 | save_flags(flags);
1229 | cli();
1230 | /* Assign a unique nonzero serial_number. */ | |||
1231 | if (++serial_number == 0) serial_number = 1; | |||
1232 | SCpnt->serial_number = serial_number; | |||
1233 | ||||
1234 | /* | |||
1235 | * We will wait MIN_RESET_DELAY clock ticks after the last reset so | |||
1236 | * we can avoid the drive not being ready. | |||
1237 | */ | |||
1238 | timeout = host->last_reset + MIN_RESET_DELAY;
1239 | if (jiffies < timeout) { | |||
1240 | int ticks_remaining = timeout - jiffies; | |||
1241 | /* | |||
1242 | * NOTE: This may be executed from within an interrupt | |||
1243 | * handler! This is bad, but for now, it'll do. The irq | |||
1244 | * level of the interrupt handler has been masked out by the | |||
1245 | * platform dependent interrupt handling code already, so the | |||
1246 | * sti() here will not cause another call to the SCSI host's | |||
1247 | * interrupt handler (assuming there is one irq-level per | |||
1248 | * host). | |||
1249 | */ | |||
1250 | sti();
1251 | while (--ticks_remaining >= 0) udelay(1000000/HZ);
1252 | host->last_reset = jiffies - MIN_RESET_DELAY;
1253 | } | |||
1254 | restore_flags(flags);
1255 | ||||
1256 | update_timeout(SCpnt, SCpnt->timeout_per_command); | |||
1257 | ||||
1258 | /* | |||
1259 | * We will use a queued command if possible, otherwise we will emulate the | |||
1260 | * queuing and calling of completion function ourselves. | |||
1261 | */ | |||
1262 | #ifdef DEBUG | |||
1263 | printk("internal_cmnd (host = %d, channel = %d, target = %d, " | |||
1264 | "command = %p, buffer = %p, \nbufflen = %d, done = %p)\n", | |||
1265 | SCpnt->host->host_no, SCpnt->channel, SCpnt->target, SCpnt->cmnd, | |||
1266 | SCpnt->buffer, SCpnt->bufflen, SCpnt->done); | |||
1267 | #endif | |||
1268 | ||||
1269 | if (host->can_queue) | |||
1270 | { | |||
1271 | #ifdef DEBUG | |||
1272 | printk("queuecommand : routine at %p\n", | |||
1273 | host->hostt->queuecommand); | |||
1274 | #endif | |||
1275 | /* This locking tries to prevent all sorts of races between | |||
1276 | * queuecommand and the interrupt code. In effect, | |||
1277 | * we are only allowed to be in queuecommand once at | |||
1278 | * any given time, and we can only be in the interrupt | |||
1279 | * handler and the queuecommand function at the same time | |||
1280 | * when queuecommand is called while servicing the | |||
1281 | * interrupt. | |||
1282 | */ | |||
1283 | ||||
1284 | if(!intr_count && SCpnt->host->irq) | |||
1285 | disable_irq(SCpnt->host->irq); | |||
1286 | ||||
1287 | host->hostt->queuecommand (SCpnt, scsi_done); | |||
1288 | ||||
1289 | if(!intr_count && SCpnt->host->irq) | |||
1290 | enable_irq(SCpnt->host->irq); | |||
1291 | } | |||
1292 | else | |||
1293 | { | |||
1294 | int temp; | |||
1295 | ||||
1296 | #ifdef DEBUG | |||
1297 | printk("command() : routine at %p\n", host->hostt->command); | |||
1298 | #endif | |||
1299 | temp = host->hostt->command (SCpnt); | |||
1300 | SCpnt->result = temp; | |||
1301 | #ifdef DEBUG_DELAY | |||
1302 | clock = jiffies + 4 * HZ100; | |||
1303 | while (jiffies < clock) barrier()__asm__ __volatile__("": : :"memory"); | |||
1304 | printk("done(host = %d, result = %04x) : routine at %p\n", | |||
1305 | host->host_no, temp, host->hostt->command); | |||
1306 | #endif | |||
1307 | scsi_done(SCpnt); | |||
1308 | } | |||
1309 | #ifdef DEBUG | |||
1310 | printk("leaving internal_cmnd()\n"); | |||
1311 | #endif | |||
1312 | } | |||
1313 | ||||
1314 | static void scsi_request_sense (Scsi_Cmnd * SCpnt) | |||
1315 | { | |||
1316 | unsigned long flags; | |||
1317 | ||||
1318 | save_flags(flags)__asm__ __volatile__("pushf ; pop %0" : "=r" (flags): :"memory" ); | |||
1319 | cli()__asm__ __volatile__ ("cli": : :"memory"); | |||
1320 | SCpnt->flags |= WAS_SENSE0x04 | ASKED_FOR_SENSE0x20; | |||
1321 | update_timeout(SCpnt, SENSE_TIMEOUT(5*100/10)); | |||
1322 | restore_flags(flags)__asm__ __volatile__("push %0 ; popf": :"g" (flags):"memory"); | |||
1323 | ||||
1324 | ||||
1325 | memcpy ((void *) SCpnt->cmnd , (void *) generic_sense,(__builtin_constant_p(sizeof(generic_sense)) ? __constant_memcpy (((void *) SCpnt->cmnd),((void *) generic_sense),(sizeof(generic_sense ))) : __memcpy(((void *) SCpnt->cmnd),((void *) generic_sense ),(sizeof(generic_sense)))) | |||
1326 | sizeof(generic_sense))(__builtin_constant_p(sizeof(generic_sense)) ? __constant_memcpy (((void *) SCpnt->cmnd),((void *) generic_sense),(sizeof(generic_sense ))) : __memcpy(((void *) SCpnt->cmnd),((void *) generic_sense ),(sizeof(generic_sense)))); | |||
1327 | ||||
1328 | SCpnt->cmnd[1] = SCpnt->lun << 5; | |||
1329 | SCpnt->cmnd[4] = sizeof(SCpnt->sense_buffer); | |||
1330 | ||||
1331 | SCpnt->request_buffer = &SCpnt->sense_buffer; | |||
1332 | SCpnt->request_bufflen = sizeof(SCpnt->sense_buffer); | |||
1333 | SCpnt->use_sg = 0; | |||
1334 | SCpnt->cmd_len = COMMAND_SIZE(SCpnt->cmnd[0])scsi_command_size[((SCpnt->cmnd[0]) >> 5) & 7]; | |||
1335 | internal_cmnd (SCpnt); | |||
1336 | } | |||
1337 | ||||
1338 | ||||
1339 | ||||
1340 | /* | |||
1341 | * scsi_do_cmd sends all the commands out to the low-level driver. It | |||
1342 |  * handles the specifics required for each low-level driver - i.e. queued | |||
1343 |  * or non-queued. It also prevents conflicts when different high-level | |||
1344 | * drivers go for the same host at the same time. | |||
1345 | */ | |||
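/*
 * Added illustration (not part of the original driver): a minimal sketch of
 * how an upper-level driver might issue a TEST UNIT READY through
 * scsi_do_cmd().  The 1 second timeout and 3 retries are assumed values
 * chosen for the example; the command block is padded to the 12 bytes that
 * scsi_do_cmd() copies.
 */
#if 0
static void example_test_unit_ready (Scsi_Cmnd * SCpnt, void (*done)(Scsi_Cmnd *))
{
    unsigned char cmd[12];

    memset (cmd, 0, sizeof (cmd));
    cmd[0] = TEST_UNIT_READY;
    cmd[1] = SCpnt->lun << 5;		/* SCSI-2 style LUN in byte 1 */

    /* No data phase, so buffer is NULL and bufflen is 0. */
    scsi_do_cmd (SCpnt, (void *) cmd, NULL, 0, done, 1 * HZ, 3);
}
#endif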
1346 | ||||
1347 | void scsi_do_cmd (Scsi_Cmnd * SCpnt, const void *cmnd , | |||
1348 | void *buffer, unsigned bufflen, void (*done)(Scsi_Cmnd *), | |||
1349 | int timeout, int retries) | |||
1350 | { | |||
1351 | unsigned long flags; | |||
1352 | struct Scsi_Host * host = SCpnt->host; | |||
1353 | ||||
1354 | #ifdef DEBUG | |||
1355 | { | |||
1356 | int i; | |||
1357 | int target = SCpnt->target; | |||
1358 | printk ("scsi_do_cmd (host = %d, channel = %d target = %d, " | |||
1359 | "buffer =%p, bufflen = %d, done = %p, timeout = %d, " | |||
1360 | "retries = %d)\n" | |||
1361 | "command : " , host->host_no, SCpnt->channel, target, buffer, | |||
1362 | bufflen, done, timeout, retries); | |||
1363 | for (i = 0; i < 10; ++i) | |||
1364 | printk ("%02x ", ((unsigned char *) cmnd)[i]); | |||
1365 | printk("\n"); | |||
1366 | } | |||
1367 | #endif | |||
1368 | ||||
1369 | if (!host) | |||
1370 | { | |||
1371 | panic ("Invalid or not present host.\n"); | |||
1372 | } | |||
1373 | ||||
1374 | ||||
1375 | /* | |||
1376 | * We must prevent reentrancy to the lowlevel host driver. This prevents | |||
1377 | * it - we enter a loop until the host we want to talk to is not busy. | |||
1378 | * Race conditions are prevented, as interrupts are disabled in between the | |||
1379 | * time we check for the host being not busy, and the time we mark it busy | |||
1380 | * ourselves. | |||
1381 | */ | |||
1382 | ||||
1383 | save_flags(flags)__asm__ __volatile__("pushf ; pop %0" : "=r" (flags): :"memory" ); | |||
1384 | cli()__asm__ __volatile__ ("cli": : :"memory"); | |||
1385 | SCpnt->pid = scsi_pid++; | |||
1386 | ||||
1387 | while (SCSI_BLOCK(host)((host->block && host_active && host != host_active ) || (host->can_queue && host->host_busy >= host ->can_queue))) { | |||
1388 | restore_flags(flags)__asm__ __volatile__("push %0 ; popf": :"g" (flags):"memory"); | |||
1389 | SCSI_SLEEP(&host->host_wait, SCSI_BLOCK(host)){ if (((host->block && host_active && host != host_active) || (host->can_queue && host->host_busy >= host->can_queue))) { struct wait_queue wait = { ((void *) 0), ((void *) 0)}; add_wait_queue(&host->host_wait , &wait); for(;;) { if (((host->block && host_active && host != host_active) || (host->can_queue && host->host_busy >= host->can_queue))) { if (intr_count ) panic("scsi: trying to call schedule() in interrupt" ", file %s, line %d.\n" , "../linux/src/drivers/scsi/scsi.c", 1389); schedule(); } else break; } remove_wait_queue(&host->host_wait, &wait ); }; }; | |||
1390 | cli()__asm__ __volatile__ ("cli": : :"memory"); | |||
1391 | } | |||
1392 | ||||
1393 | if (host->block) host_active = host; | |||
1394 | ||||
1395 | host->host_busy++; | |||
1396 | restore_flags(flags)__asm__ __volatile__("push %0 ; popf": :"g" (flags):"memory"); | |||
1397 | ||||
1398 | /* | |||
1399 | * Our own function scsi_done (which marks the host as not busy, disables | |||
1400 |    * the timeout counter, etc.) will be called either by us or by the | |||
1401 |    * scsi_hosts[host].queuecommand() function; it must in turn call | |||
1402 |    * the completion function for the high-level driver. | |||
1403 | */ | |||
1404 | ||||
1405 | memcpy ((void *) SCpnt->data_cmnd , (const void *) cmnd, 12)(__builtin_constant_p(12) ? __constant_memcpy(((void *) SCpnt ->data_cmnd),((const void *) cmnd),(12)) : __memcpy(((void *) SCpnt->data_cmnd),((const void *) cmnd),(12))); | |||
1406 | #if 0 | |||
1407 | SCpnt->host = host; | |||
1408 | SCpnt->channel = channel; | |||
1409 | SCpnt->target = target; | |||
1410 | SCpnt->lun = (SCpnt->data_cmnd[1] >> 5); | |||
1411 | #endif | |||
1412 | SCpnt->reset_chain = NULL((void *) 0); | |||
1413 | SCpnt->serial_number = 0; | |||
1414 | SCpnt->bufflen = bufflen; | |||
1415 | SCpnt->buffer = buffer; | |||
1416 | SCpnt->flags = 0; | |||
1417 | SCpnt->retries = 0; | |||
1418 | SCpnt->allowed = retries; | |||
1419 | SCpnt->done = done; | |||
1420 | SCpnt->timeout_per_command = timeout; | |||
1421 | ||||
1422 | memcpy ((void *) SCpnt->cmnd , (const void *) cmnd, 12)(__builtin_constant_p(12) ? __constant_memcpy(((void *) SCpnt ->cmnd),((const void *) cmnd),(12)) : __memcpy(((void *) SCpnt ->cmnd),((const void *) cmnd),(12))); | |||
1423 | /* Zero the sense buffer. Some host adapters automatically request | |||
1424 | * sense on error. 0 is not a valid sense code. | |||
1425 | */ | |||
1426 | memset ((void *) SCpnt->sense_buffer, 0, sizeof SCpnt->sense_buffer)(__builtin_constant_p(0) ? (__builtin_constant_p((sizeof SCpnt ->sense_buffer)) ? __constant_c_and_count_memset((((void * ) SCpnt->sense_buffer)),((0x01010101UL*(unsigned char)(0)) ),((sizeof SCpnt->sense_buffer))) : __constant_c_memset((( (void *) SCpnt->sense_buffer)),((0x01010101UL*(unsigned char )(0))),((sizeof SCpnt->sense_buffer)))) : (__builtin_constant_p ((sizeof SCpnt->sense_buffer)) ? __memset_generic(((((void *) SCpnt->sense_buffer))),(((0))),(((sizeof SCpnt->sense_buffer )))) : __memset_generic((((void *) SCpnt->sense_buffer)),( (0)),((sizeof SCpnt->sense_buffer))))); | |||
1427 | SCpnt->request_buffer = buffer; | |||
1428 | SCpnt->request_bufflen = bufflen; | |||
1429 | SCpnt->old_use_sg = SCpnt->use_sg; | |||
1430 | if (SCpnt->cmd_len == 0) | |||
1431 | SCpnt->cmd_len = COMMAND_SIZE(SCpnt->cmnd[0])scsi_command_size[((SCpnt->cmnd[0]) >> 5) & 7]; | |||
1432 | SCpnt->old_cmd_len = SCpnt->cmd_len; | |||
1433 | ||||
1434 | /* Start the timer ticking. */ | |||
1435 | ||||
1436 | SCpnt->internal_timeout = NORMAL_TIMEOUT0; | |||
1437 | SCpnt->abort_reason = 0; | |||
1438 | internal_cmnd (SCpnt); | |||
1439 | ||||
1440 | #ifdef DEBUG | |||
1441 | printk ("Leaving scsi_do_cmd()\n"); | |||
1442 | #endif | |||
1443 | } | |||
1444 | ||||
1445 | static int check_sense (Scsi_Cmnd * SCpnt) | |||
1446 | { | |||
1447 | /* If there is no sense information, request it. If we have already | |||
1448 | * requested it, there is no point in asking again - the firmware must | |||
1449 | * be confused. | |||
1450 | */ | |||
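    /*
     * Added note: sense_buffer[0] & 0x70 isolates the sense response code;
     * only 0x7x (a current or deferred error in the fixed sense-data format)
     * is something this routine knows how to parse, hence the check below.
     */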
1451 | if (((SCpnt->sense_buffer[0] & 0x70) >> 4) != 7) { | |||
1452 | if(!(SCpnt->flags & ASKED_FOR_SENSE0x20)) | |||
1453 | return SUGGEST_SENSE0x80; | |||
1454 | else | |||
1455 | return SUGGEST_RETRY0x10; | |||
1456 | } | |||
1457 | ||||
1458 | SCpnt->flags &= ~ASKED_FOR_SENSE0x20; | |||
1459 | ||||
1460 | #ifdef DEBUG_INIT | |||
1461 | printk("scsi%d, channel%d : ", SCpnt->host->host_no, SCpnt->channel); | |||
1462 | print_sense("", SCpnt); | |||
1463 | printk("\n"); | |||
1464 | #endif | |||
1465 | if (SCpnt->sense_buffer[2] & 0xe0) | |||
1466 | return SUGGEST_ABORT0x20; | |||
1467 | ||||
1468 | switch (SCpnt->sense_buffer[2] & 0xf) | |||
1469 | { | |||
1470 | case NO_SENSE0x00: | |||
1471 | return 0; | |||
1472 | case RECOVERED_ERROR0x01: | |||
1473 | return SUGGEST_IS_OK0xff; | |||
1474 | ||||
1475 | case ABORTED_COMMAND0x0b: | |||
1476 | return SUGGEST_RETRY0x10; | |||
1477 | case NOT_READY0x02: | |||
1478 | case UNIT_ATTENTION0x06: | |||
1479 | /* | |||
1480 | * If we are expecting a CC/UA because of a bus reset that we | |||
1481 | * performed, treat this just as a retry. Otherwise this is | |||
1482 | * information that we should pass up to the upper-level driver | |||
1483 | * so that we can deal with it there. | |||
1484 | */ | |||
1485 | if( SCpnt->device->expecting_cc_ua ) | |||
1486 | { | |||
1487 | SCpnt->device->expecting_cc_ua = 0; | |||
1488 | return SUGGEST_RETRY0x10; | |||
1489 | } | |||
1490 | return SUGGEST_ABORT0x20; | |||
1491 | ||||
1492 | /* these three are not supported */ | |||
1493 | case COPY_ABORTED0x0a: | |||
1494 | case VOLUME_OVERFLOW0x0d: | |||
1495 | case MISCOMPARE0x0e: | |||
1496 | ||||
1497 | case MEDIUM_ERROR0x03: | |||
1498 | return SUGGEST_REMAP0x30; | |||
1499 | case BLANK_CHECK0x08: | |||
1500 | case DATA_PROTECT0x07: | |||
1501 | case HARDWARE_ERROR0x04: | |||
1502 | case ILLEGAL_REQUEST0x05: | |||
1503 | default: | |||
1504 | return SUGGEST_ABORT0x20; | |||
1505 | } | |||
1506 | } | |||
1507 | ||||
1508 | /* This function is the mid-level interrupt routine, which decides how | |||
1509 | * to handle error conditions. Each invocation of this function must | |||
1510 | * do one and *only* one of the following: | |||
1511 | * | |||
1512 |  * (1) Call SCpnt->done. This is done for fatal errors and | |||
1513 | * normal completion, and indicates that the handling for this | |||
1514 | * request is complete. | |||
1515 | * (2) Call internal_cmnd to requeue the command. This will result in | |||
1516 | * scsi_done being called again when the retry is complete. | |||
1517 | * (3) Call scsi_request_sense. This asks the host adapter/drive for | |||
1518 | * more information about the error condition. When the information | |||
1519 | * is available, scsi_done will be called again. | |||
1520 | * (4) Call reset(). This is sort of a last resort, and the idea is that | |||
1521 | * this may kick things loose and get the drive working again. reset() | |||
1522 | * automatically calls scsi_request_sense, and thus scsi_done will be | |||
1523 | * called again once the reset is complete. | |||
1524 | * | |||
1525 | * If none of the above actions are taken, the drive in question | |||
1526 | * will hang. If more than one of the above actions are taken by | |||
1527 | * scsi_done, then unpredictable behavior will result. | |||
1528 | */ | |||
1529 | static void scsi_done (Scsi_Cmnd * SCpnt) | |||
1530 | { | |||
1531 | int status=0; | |||
1532 | int exit=0; | |||
1533 | int checked; | |||
1534 | int oldto; | |||
1535 | struct Scsi_Host * host = SCpnt->host; | |||
1536 | int result = SCpnt->result; | |||
1537 | SCpnt->serial_number = 0; | |||
1538 | oldto = update_timeout(SCpnt, 0); | |||
1539 | ||||
1540 | #ifdef DEBUG_TIMEOUT | |||
1541 | if(result) printk("Non-zero result in scsi_done %x %d:%d\n", | |||
1542 | result, SCpnt->target, SCpnt->lun); | |||
1543 | #endif | |||
1544 | ||||
1545 |     /* If we requested an abort (and we got it), then fix up the return | |||
1546 |      * status to say why. | |||
1547 | */ | |||
1548 | if(host_byte(result)(((result) >> 16) & 0xff) == DID_ABORT0x05 && SCpnt->abort_reason) | |||
1549 | SCpnt->result = result = (result & 0xff00ffff) | | |||
1550 | (SCpnt->abort_reason << 16); | |||
1551 | ||||
1552 | ||||
1553 | #define FINISHED 0 | |||
1554 | #define MAYREDO 1 | |||
1555 | #define REDO 3 | |||
1556 | #define PENDING 4 | |||
1557 | ||||
1558 | #ifdef DEBUG | |||
1559 | printk("In scsi_done(host = %d, result = %06x)\n", host->host_no, result); | |||
1560 | #endif | |||
1561 | ||||
1562 | if(SCpnt->flags & WAS_SENSE0x04) | |||
1563 | { | |||
1564 | SCpnt->use_sg = SCpnt->old_use_sg; | |||
1565 | SCpnt->cmd_len = SCpnt->old_cmd_len; | |||
1566 | } | |||
1567 | ||||
1568 | switch (host_byte(result)(((result) >> 16) & 0xff)) | |||
1569 | { | |||
1570 | case DID_OK0x00: | |||
1571 | if (status_byte(result)(((result) >> 1) & 0x1f) && (SCpnt->flags & WAS_SENSE0x04)) | |||
1572 | /* Failed to obtain sense information */ | |||
1573 | { | |||
1574 | SCpnt->flags &= ~WAS_SENSE0x04; | |||
1575 | #if 0 /* This cannot possibly be correct. */ | |||
1576 | SCpnt->internal_timeout &= ~SENSE_TIMEOUT(5*100/10); | |||
1577 | #endif | |||
1578 | ||||
1579 | if (!(SCpnt->flags & WAS_RESET0x01)) | |||
1580 | { | |||
1581 | printk("scsi%d : channel %d target %d lun %d request sense" | |||
1582 | " failed, performing reset.\n", | |||
1583 | SCpnt->host->host_no, SCpnt->channel, SCpnt->target, | |||
1584 | SCpnt->lun); | |||
1585 | scsi_reset(SCpnt, SCSI_RESET_SYNCHRONOUS0x01); | |||
1586 | return; | |||
1587 | } | |||
1588 | else | |||
1589 | { | |||
1590 | exit = (DRIVER_HARD0x07 | SUGGEST_ABORT0x20); | |||
1591 | status = FINISHED; | |||
1592 | } | |||
1593 | } | |||
1594 | else switch(msg_byte(result)(((result) >> 8) & 0xff)) | |||
1595 | { | |||
1596 | case COMMAND_COMPLETE0x00: | |||
1597 | switch (status_byte(result)(((result) >> 1) & 0x1f)) | |||
1598 | { | |||
1599 | case GOOD0x00: | |||
1600 | if (SCpnt->flags & WAS_SENSE0x04) | |||
1601 | { | |||
1602 | #ifdef DEBUG | |||
1603 | printk ("In scsi_done, GOOD status, COMMAND COMPLETE, " | |||
1604 | "parsing sense information.\n"); | |||
1605 | #endif | |||
1606 | SCpnt->flags &= ~WAS_SENSE0x04; | |||
1607 | #if 0 /* This cannot possibly be correct. */ | |||
1608 | SCpnt->internal_timeout &= ~SENSE_TIMEOUT(5*100/10); | |||
1609 | #endif | |||
1610 | ||||
1611 | switch (checked = check_sense(SCpnt)) | |||
1612 | { | |||
1613 | case SUGGEST_SENSE0x80: | |||
1614 | case 0: | |||
1615 | #ifdef DEBUG | |||
1616 | printk("NO SENSE. status = REDO\n"); | |||
1617 | #endif | |||
1618 | update_timeout(SCpnt, oldto); | |||
1619 | status = REDO; | |||
1620 | break; | |||
1621 | case SUGGEST_IS_OK0xff: | |||
1622 | break; | |||
1623 | case SUGGEST_REMAP0x30: | |||
1624 | #ifdef DEBUG | |||
1625 | printk("SENSE SUGGEST REMAP - status = FINISHED\n"); | |||
1626 | #endif | |||
1627 | status = FINISHED; | |||
1628 | exit = DRIVER_SENSE0x08 | SUGGEST_ABORT0x20; | |||
1629 | break; | |||
1630 | case SUGGEST_RETRY0x10: | |||
1631 | #ifdef DEBUG | |||
1632 | printk("SENSE SUGGEST RETRY - status = MAYREDO\n"); | |||
1633 | #endif | |||
1634 | status = MAYREDO; | |||
1635 | exit = DRIVER_SENSE0x08 | SUGGEST_RETRY0x10; | |||
1636 | break; | |||
1637 | case SUGGEST_ABORT0x20: | |||
1638 | #ifdef DEBUG | |||
1639 | printk("SENSE SUGGEST ABORT - status = FINISHED"); | |||
1640 | #endif | |||
1641 | status = FINISHED; | |||
1642 | exit = DRIVER_SENSE0x08 | SUGGEST_ABORT0x20; | |||
1643 | break; | |||
1644 | default: | |||
1645 | printk ("Internal error %s %d \n", __FILE__"../linux/src/drivers/scsi/scsi.c", | |||
1646 | __LINE__1646); | |||
1647 | } | |||
1648 | } /* end WAS_SENSE */ | |||
1649 | else | |||
1650 | { | |||
1651 | #ifdef DEBUG | |||
1652 | printk("COMMAND COMPLETE message returned, " | |||
1653 | "status = FINISHED. \n"); | |||
1654 | #endif | |||
1655 | exit = DRIVER_OK0x00; | |||
1656 | status = FINISHED; | |||
1657 | } | |||
1658 | break; | |||
1659 | ||||
1660 | case CHECK_CONDITION0x01: | |||
1661 | case COMMAND_TERMINATED0x11: | |||
1662 | switch (check_sense(SCpnt)) | |||
1663 | { | |||
1664 | case 0: | |||
1665 | update_timeout(SCpnt, oldto); | |||
1666 | status = REDO; | |||
1667 | break; | |||
1668 | case SUGGEST_REMAP0x30: | |||
1669 | status = FINISHED; | |||
1670 | exit = DRIVER_SENSE0x08 | SUGGEST_ABORT0x20; | |||
1671 | break; | |||
1672 | case SUGGEST_RETRY0x10: | |||
1673 | status = MAYREDO; | |||
1674 | exit = DRIVER_SENSE0x08 | SUGGEST_RETRY0x10; | |||
1675 | break; | |||
1676 | case SUGGEST_ABORT0x20: | |||
1677 | status = FINISHED; | |||
1678 | exit = DRIVER_SENSE0x08 | SUGGEST_ABORT0x20; | |||
1679 | break; | |||
1680 | case SUGGEST_SENSE0x80: | |||
1681 | scsi_request_sense (SCpnt); | |||
1682 | status = PENDING; | |||
1683 | break; | |||
1684 | } | |||
1685 | break; | |||
1686 | ||||
1687 | case CONDITION_GOOD0x02: | |||
1688 | case INTERMEDIATE_GOOD0x08: | |||
1689 | case INTERMEDIATE_C_GOOD0x0a: | |||
1690 | break; | |||
1691 | ||||
1692 | case BUSY0x04: | |||
1693 | case QUEUE_FULL0x14: | |||
1694 | update_timeout(SCpnt, oldto); | |||
1695 | status = REDO; | |||
1696 | break; | |||
1697 | ||||
1698 | case RESERVATION_CONFLICT0x0c: | |||
1699 | printk("scsi%d, channel %d : RESERVATION CONFLICT performing" | |||
1700 | " reset.\n", SCpnt->host->host_no, SCpnt->channel); | |||
1701 | scsi_reset(SCpnt, SCSI_RESET_SYNCHRONOUS0x01); | |||
1702 | return; | |||
1703 | #if 0 | |||
1704 | exit = DRIVER_SOFT0x02 | SUGGEST_ABORT0x20; | |||
1705 | status = MAYREDO; | |||
1706 | break; | |||
1707 | #endif | |||
1708 | default: | |||
1709 | printk ("Internal error %s %d \n" | |||
1710 | "status byte = %d \n", __FILE__"../linux/src/drivers/scsi/scsi.c", | |||
1711 | __LINE__1711, status_byte(result)(((result) >> 1) & 0x1f)); | |||
1712 | ||||
1713 | } | |||
1714 | break; | |||
1715 | default: | |||
1716 | panic("scsi: unsupported message byte %d received\n", | |||
1717 | msg_byte(result)(((result) >> 8) & 0xff)); | |||
1718 | } | |||
1719 | break; | |||
1720 | case DID_TIME_OUT0x03: | |||
1721 | #ifdef DEBUG | |||
1722 | printk("Host returned DID_TIME_OUT - "); | |||
1723 | #endif | |||
1724 | ||||
1725 | if (SCpnt->flags & WAS_TIMEDOUT0x02) | |||
1726 | { | |||
1727 | #ifdef DEBUG | |||
1728 | printk("Aborting\n"); | |||
1729 | #endif | |||
1730 | /* | |||
1731 | 	  Allow TEST_UNIT_READY and INQUIRY commands to time out early | |||
1732 | without causing resets. All other commands should be retried. | |||
1733 | */ | |||
1734 | if (SCpnt->cmnd[0] != TEST_UNIT_READY0x00 && | |||
1735 | SCpnt->cmnd[0] != INQUIRY0x12) | |||
1736 | status = MAYREDO; | |||
1737 | exit = (DRIVER_TIMEOUT0x06 | SUGGEST_ABORT0x20); | |||
1738 | } | |||
1739 | else | |||
1740 | { | |||
1741 | #ifdef DEBUG | |||
1742 | printk ("Retrying.\n"); | |||
1743 | #endif | |||
1744 | SCpnt->flags |= WAS_TIMEDOUT0x02; | |||
1745 | SCpnt->internal_timeout &= ~IN_ABORT1; | |||
1746 | status = REDO; | |||
1747 | } | |||
1748 | break; | |||
1749 | case DID_BUS_BUSY0x02: | |||
1750 | case DID_PARITY0x06: | |||
1751 | status = REDO; | |||
1752 | break; | |||
1753 | case DID_NO_CONNECT0x01: | |||
1754 | #ifdef DEBUG | |||
1755 | printk("Couldn't connect.\n"); | |||
1756 | #endif | |||
1757 | exit = (DRIVER_HARD0x07 | SUGGEST_ABORT0x20); | |||
1758 | break; | |||
1759 | case DID_ERROR0x07: | |||
1760 | status = MAYREDO; | |||
1761 | exit = (DRIVER_HARD0x07 | SUGGEST_ABORT0x20); | |||
1762 | break; | |||
1763 | case DID_BAD_TARGET0x04: | |||
1764 | case DID_ABORT0x05: | |||
1765 | exit = (DRIVER_INVALID0x05 | SUGGEST_ABORT0x20); | |||
1766 | break; | |||
1767 | case DID_RESET0x08: | |||
1768 | if (SCpnt->flags & IS_RESETTING0x08) | |||
1769 | { | |||
1770 | SCpnt->flags &= ~IS_RESETTING0x08; | |||
1771 | status = REDO; | |||
1772 | break; | |||
1773 | } | |||
1774 | ||||
1775 | if(msg_byte(result)(((result) >> 8) & 0xff) == GOOD0x00 && | |||
1776 | status_byte(result)(((result) >> 1) & 0x1f) == CHECK_CONDITION0x01) { | |||
1777 | switch (check_sense(SCpnt)) { | |||
1778 | case 0: | |||
1779 | update_timeout(SCpnt, oldto); | |||
1780 | status = REDO; | |||
1781 | break; | |||
1782 | case SUGGEST_REMAP0x30: | |||
1783 | case SUGGEST_RETRY0x10: | |||
1784 | status = MAYREDO; | |||
1785 | exit = DRIVER_SENSE0x08 | SUGGEST_RETRY0x10; | |||
1786 | break; | |||
1787 | case SUGGEST_ABORT0x20: | |||
1788 | status = FINISHED; | |||
1789 | exit = DRIVER_SENSE0x08 | SUGGEST_ABORT0x20; | |||
1790 | break; | |||
1791 | case SUGGEST_SENSE0x80: | |||
1792 | scsi_request_sense (SCpnt); | |||
1793 | status = PENDING; | |||
1794 | break; | |||
1795 | } | |||
1796 | } else { | |||
1797 | status=REDO; | |||
1798 | exit = SUGGEST_RETRY0x10; | |||
1799 | } | |||
1800 | break; | |||
1801 | default : | |||
1802 | exit = (DRIVER_ERROR0x04 | SUGGEST_DIE0x40); | |||
1803 | } | |||
1804 | ||||
1805 | switch (status) | |||
1806 | { | |||
1807 | case FINISHED: | |||
1808 | case PENDING: | |||
1809 | break; | |||
1810 | case MAYREDO: | |||
1811 | #ifdef DEBUG | |||
1812 | printk("In MAYREDO, allowing %d retries, have %d\n", | |||
1813 | SCpnt->allowed, SCpnt->retries); | |||
1814 | #endif | |||
1815 | if ((++SCpnt->retries) < SCpnt->allowed) | |||
1816 | { | |||
1817 | if ((SCpnt->retries >= (SCpnt->allowed >> 1)) | |||
1818 | && !(SCpnt->host->last_reset > 0 && | |||
1819 | jiffies < SCpnt->host->last_reset + MIN_RESET_PERIOD(15*100)) | |||
1820 | && !(SCpnt->flags & WAS_RESET0x01)) | |||
1821 | { | |||
1822 | printk("scsi%d channel %d : resetting for second half of retries.\n", | |||
1823 | SCpnt->host->host_no, SCpnt->channel); | |||
1824 | scsi_reset(SCpnt, SCSI_RESET_SYNCHRONOUS0x01); | |||
1825 | break; | |||
1826 | } | |||
1827 | ||||
1828 | } | |||
1829 | else | |||
1830 | { | |||
1831 | status = FINISHED; | |||
1832 | break; | |||
1833 | } | |||
1834 | /* fall through to REDO */ | |||
1835 | ||||
1836 | case REDO: | |||
1837 | ||||
1838 | if (SCpnt->flags & WAS_SENSE0x04) | |||
1839 | scsi_request_sense(SCpnt); | |||
1840 | else | |||
1841 | { | |||
1842 | memcpy ((void *) SCpnt->cmnd,(__builtin_constant_p(sizeof(SCpnt->data_cmnd)) ? __constant_memcpy (((void *) SCpnt->cmnd),((void*) SCpnt->data_cmnd),(sizeof (SCpnt->data_cmnd))) : __memcpy(((void *) SCpnt->cmnd), ((void*) SCpnt->data_cmnd),(sizeof(SCpnt->data_cmnd)))) | |||
1843 | (void*) SCpnt->data_cmnd,(__builtin_constant_p(sizeof(SCpnt->data_cmnd)) ? __constant_memcpy (((void *) SCpnt->cmnd),((void*) SCpnt->data_cmnd),(sizeof (SCpnt->data_cmnd))) : __memcpy(((void *) SCpnt->cmnd), ((void*) SCpnt->data_cmnd),(sizeof(SCpnt->data_cmnd)))) | |||
1844 | sizeof(SCpnt->data_cmnd))(__builtin_constant_p(sizeof(SCpnt->data_cmnd)) ? __constant_memcpy (((void *) SCpnt->cmnd),((void*) SCpnt->data_cmnd),(sizeof (SCpnt->data_cmnd))) : __memcpy(((void *) SCpnt->cmnd), ((void*) SCpnt->data_cmnd),(sizeof(SCpnt->data_cmnd)))); | |||
1845 | SCpnt->request_buffer = SCpnt->buffer; | |||
1846 | SCpnt->request_bufflen = SCpnt->bufflen; | |||
1847 | SCpnt->use_sg = SCpnt->old_use_sg; | |||
1848 | SCpnt->cmd_len = SCpnt->old_cmd_len; | |||
1849 | internal_cmnd (SCpnt); | |||
1850 | } | |||
1851 | break; | |||
1852 | default: | |||
1853 | INTERNAL_ERROR(panic ("Internal error in file %s, line %d.\n", "../linux/src/drivers/scsi/scsi.c" , 1853)); | |||
1854 | } | |||
1855 | ||||
1856 | if (status == FINISHED) { | |||
1857 | #ifdef DEBUG | |||
1858 | printk("Calling done function - at address %p\n", SCpnt->done); | |||
1859 | #endif | |||
1860 | host->host_busy--; /* Indicate that we are free */ | |||
1861 | ||||
1862 | if (host->block && host->host_busy == 0) { | |||
1863 | host_active = NULL((void *) 0); | |||
1864 | ||||
1865 | /* For block devices "wake_up" is done in end_scsi_request */ | |||
1866 | if (MAJOR(SCpnt->request.rq_dev)((SCpnt->request.rq_dev) >> 8) != SCSI_DISK_MAJOR8 && | |||
1867 | MAJOR(SCpnt->request.rq_dev)((SCpnt->request.rq_dev) >> 8) != SCSI_CDROM_MAJOR11) { | |||
1868 | struct Scsi_Host * next; | |||
1869 | ||||
1870 | for (next = host->block; next != host; next = next->block) | |||
1871 | wake_up(&next->host_wait); | |||
1872 | } | |||
1873 | ||||
1874 | } | |||
1875 | ||||
1876 | wake_up(&host->host_wait); | |||
1877 | SCpnt->result = result | ((exit & 0xff) << 24); | |||
1878 | SCpnt->use_sg = SCpnt->old_use_sg; | |||
1879 | SCpnt->cmd_len = SCpnt->old_cmd_len; | |||
1880 | SCpnt->done (SCpnt); | |||
1881 | } | |||
1882 | ||||
1883 | #undef FINISHED | |||
1884 | #undef REDO | |||
1885 | #undef MAYREDO | |||
1886 | #undef PENDING | |||
1887 | } | |||
1888 | ||||
1889 | /* | |||
1890 | * The scsi_abort function interfaces with the abort() function of the host | |||
1891 | * we are aborting, and causes the current command to not complete. The | |||
1892 | * caller should deal with any error messages or status returned on the | |||
1893 | * next call. | |||
1894 | * | |||
1895 | * This will not be called reentrantly for a given host. | |||
1896 | */ | |||
1897 | ||||
1898 | /* | |||
1899 |  * Since we were nice guys and specified that abort() and reset() | |||
1900 |  * may be non-reentrant, the internal_timeout flags are used to | |||
1901 |  * enforce this. | |||
1902 | */ | |||
1903 | ||||
1904 | ||||
1905 | int scsi_abort (Scsi_Cmnd * SCpnt, int why) | |||
1906 | { | |||
1907 | int oldto; | |||
1908 | unsigned long flags; | |||
1909 | struct Scsi_Host * host = SCpnt->host; | |||
1910 | ||||
1911 | while(1) | |||
1912 | { | |||
1913 | save_flags(flags)__asm__ __volatile__("pushf ; pop %0" : "=r" (flags): :"memory" ); | |||
1914 | cli()__asm__ __volatile__ ("cli": : :"memory"); | |||
1915 | ||||
1916 | /* | |||
1917 | * Protect against races here. If the command is done, or we are | |||
1918 |      * on a different command, forget it. | |||
1919 | */ | |||
1920 | if (SCpnt->serial_number != SCpnt->serial_number_at_timeout) { | |||
1921 | restore_flags(flags)__asm__ __volatile__("push %0 ; popf": :"g" (flags):"memory"); | |||
1922 | return 0; | |||
1923 | } | |||
1924 | ||||
1925 | if (SCpnt->internal_timeout & IN_ABORT1) | |||
1926 | { | |||
1927 | restore_flags(flags)__asm__ __volatile__("push %0 ; popf": :"g" (flags):"memory"); | |||
1928 | while (SCpnt->internal_timeout & IN_ABORT1) | |||
1929 | barrier()__asm__ __volatile__("": : :"memory"); | |||
1930 | } | |||
1931 | else | |||
1932 | { | |||
1933 | SCpnt->internal_timeout |= IN_ABORT1; | |||
1934 | oldto = update_timeout(SCpnt, ABORT_TIMEOUT(5*100/10)); | |||
1935 | ||||
1936 | if ((SCpnt->flags & IS_RESETTING0x08) && SCpnt->device->soft_reset) { | |||
1937 | /* OK, this command must have died when we did the | |||
1938 | * reset. The device itself must have lied. | |||
1939 | */ | |||
1940 | printk("Stale command on %d %d:%d appears to have died when" | |||
1941 | " the bus was reset\n", | |||
1942 | SCpnt->channel, SCpnt->target, SCpnt->lun); | |||
1943 | } | |||
1944 | ||||
1945 | restore_flags(flags)__asm__ __volatile__("push %0 ; popf": :"g" (flags):"memory"); | |||
1946 | if (!host->host_busy) { | |||
1947 | SCpnt->internal_timeout &= ~IN_ABORT1; | |||
1948 | update_timeout(SCpnt, oldto); | |||
1949 | return 0; | |||
1950 | } | |||
1951 | printk("scsi : aborting command due to timeout : pid %lu, scsi%d," | |||
1952 | " channel %d, id %d, lun %d ", | |||
1953 | SCpnt->pid, SCpnt->host->host_no, (int) SCpnt->channel, | |||
1954 | (int) SCpnt->target, (int) SCpnt->lun); | |||
1955 | print_command (SCpnt->cmnd); | |||
1956 | if (SCpnt->serial_number != SCpnt->serial_number_at_timeout) | |||
1957 | return 0; | |||
1958 | SCpnt->abort_reason = why; | |||
1959 | switch(host->hostt->abort(SCpnt)) { | |||
1960 | /* We do not know how to abort. Try waiting another | |||
1961 | * time increment and see if this helps. Set the | |||
1962 | 	   * WAS_TIMEDOUT flag so we do not try this twice. | |||
1963 | */ | |||
1964 | case SCSI_ABORT_BUSY3: /* Tough call - returning 1 from | |||
1965 | * this is too severe | |||
1966 | */ | |||
1967 | case SCSI_ABORT_SNOOZE0: | |||
1968 | if(why == DID_TIME_OUT0x03) { | |||
1969 | save_flags(flags)__asm__ __volatile__("pushf ; pop %0" : "=r" (flags): :"memory" ); | |||
1970 | cli()__asm__ __volatile__ ("cli": : :"memory"); | |||
1971 | SCpnt->internal_timeout &= ~IN_ABORT1; | |||
1972 | if(SCpnt->flags & WAS_TIMEDOUT0x02) { | |||
1973 | restore_flags(flags)__asm__ __volatile__("push %0 ; popf": :"g" (flags):"memory"); | |||
1974 | return 1; /* Indicate we cannot handle this. | |||
1975 | * We drop down into the reset handler | |||
1976 | * and try again | |||
1977 | */ | |||
1978 | } else { | |||
1979 | SCpnt->flags |= WAS_TIMEDOUT0x02; | |||
1980 | oldto = SCpnt->timeout_per_command; | |||
1981 | update_timeout(SCpnt, oldto); | |||
1982 | } | |||
1983 | restore_flags(flags)__asm__ __volatile__("push %0 ; popf": :"g" (flags):"memory"); | |||
1984 | } | |||
1985 | return 0; | |||
1986 | case SCSI_ABORT_PENDING2: | |||
1987 | if(why != DID_TIME_OUT0x03) { | |||
1988 | save_flags(flags)__asm__ __volatile__("pushf ; pop %0" : "=r" (flags): :"memory" ); | |||
1989 | cli()__asm__ __volatile__ ("cli": : :"memory"); | |||
1990 | update_timeout(SCpnt, oldto); | |||
1991 | restore_flags(flags)__asm__ __volatile__("push %0 ; popf": :"g" (flags):"memory"); | |||
1992 | } | |||
1993 | return 0; | |||
1994 | case SCSI_ABORT_SUCCESS1: | |||
1995 | /* We should have already aborted this one. No | |||
1996 | * need to adjust timeout | |||
1997 | */ | |||
1998 | SCpnt->internal_timeout &= ~IN_ABORT1; | |||
1999 | return 0; | |||
2000 | case SCSI_ABORT_NOT_RUNNING4: | |||
2001 | SCpnt->internal_timeout &= ~IN_ABORT1; | |||
2002 | update_timeout(SCpnt, 0); | |||
2003 | return 0; | |||
2004 | case SCSI_ABORT_ERROR5: | |||
2005 | default: | |||
2006 | SCpnt->internal_timeout &= ~IN_ABORT1; | |||
2007 | return 1; | |||
2008 | } | |||
2009 | } | |||
2010 | } | |||
2011 | } | |||
2012 | ||||
2013 | ||||
2014 | /* Mark a single SCSI Device as having been reset. */ | |||
2015 | ||||
2016 | static inlineinline __attribute__((always_inline)) void scsi_mark_device_reset(Scsi_Device *Device) | |||
2017 | { | |||
2018 | Device->was_reset = 1; | |||
2019 | Device->expecting_cc_ua = 1; | |||
2020 | } | |||
2021 | ||||
2022 | ||||
2023 | /* Mark all SCSI Devices on a specific Host as having been reset. */ | |||
2024 | ||||
2025 | void scsi_mark_host_reset(struct Scsi_Host *Host) | |||
2026 | { | |||
2027 | Scsi_Cmnd *SCpnt; | |||
2028 | for (SCpnt = Host->host_queue; SCpnt; SCpnt = SCpnt->next) | |||
2029 | scsi_mark_device_reset(SCpnt->device); | |||
2030 | } | |||
2031 | ||||
2032 | ||||
2033 | /* Mark all SCSI Devices on a specific Host Bus as having been reset. */ | |||
2034 | ||||
2035 | void scsi_mark_bus_reset(struct Scsi_Host *Host, int channel) | |||
2036 | { | |||
2037 | Scsi_Cmnd *SCpnt; | |||
2038 | for (SCpnt = Host->host_queue; SCpnt; SCpnt = SCpnt->next) | |||
2039 | if (SCpnt->channel == channel) | |||
2040 | scsi_mark_device_reset(SCpnt->device); | |||
2041 | } | |||
2042 | ||||
2043 | ||||
2044 | int scsi_reset (Scsi_Cmnd * SCpnt, unsigned int reset_flags) | |||
2045 | { | |||
2046 | int temp; | |||
2047 | unsigned long flags; | |||
2048 | Scsi_Cmnd * SCpnt1; | |||
2049 | struct Scsi_Host * host = SCpnt->host; | |||
2050 | ||||
2051 | printk("SCSI bus is being reset for host %d channel %d.\n", | |||
2052 | host->host_no, SCpnt->channel); | |||
2053 | ||||
2054 | #if 0 | |||
2055 | /* | |||
2056 | * First of all, we need to make a recommendation to the low-level | |||
2057 | * driver as to whether a BUS_DEVICE_RESET should be performed, | |||
2058 | * or whether we should do a full BUS_RESET. There is no simple | |||
2059 | * algorithm here - we basically use a series of heuristics | |||
2060 | * to determine what we should do. | |||
2061 | */ | |||
2062 | SCpnt->host->suggest_bus_reset = FALSE0; | |||
2063 | ||||
2064 | /* | |||
2065 | * First see if all of the active devices on the bus have | |||
2066 | * been jammed up so that we are attempting resets. If so, | |||
2067 | * then suggest a bus reset. Forcing a bus reset could | |||
2068 | * result in some race conditions, but no more than | |||
2069 | * you would usually get with timeouts. We will cross | |||
2070 | * that bridge when we come to it. | |||
2071 | * | |||
2072 | * This is actually a pretty bad idea, since a sequence of | |||
2073 | * commands will often timeout together and this will cause a | |||
2074 | * Bus Device Reset followed immediately by a SCSI Bus Reset. | |||
2075 | * If all of the active devices really are jammed up, the | |||
2076 | * Bus Device Reset will quickly timeout and scsi_times_out | |||
2077 | * will follow up with a SCSI Bus Reset anyway. | |||
2078 | */ | |||
2079 | SCpnt1 = host->host_queue; | |||
2080 | while(SCpnt1) { | |||
2081 | if( SCpnt1->request.rq_status != RQ_INACTIVE(-1) | |||
2082 | && (SCpnt1->flags & (WAS_RESET0x01 | IS_RESETTING0x08)) == 0 ) | |||
2083 | break; | |||
2084 | SCpnt1 = SCpnt1->next; | |||
2085 | } | |||
2086 | if( SCpnt1 == NULL((void *) 0) ) { | |||
2087 | reset_flags |= SCSI_RESET_SUGGEST_BUS_RESET0x04; | |||
2088 | } | |||
2089 | ||||
2090 | /* | |||
2091 | * If the code that called us is suggesting a hard reset, then | |||
2092 | * definitely request it. This usually occurs because a | |||
2093 | * BUS_DEVICE_RESET times out. | |||
2094 | * | |||
2095 | * Passing reset_flags along takes care of this automatically. | |||
2096 | */ | |||
2097 | if( reset_flags & SCSI_RESET_SUGGEST_BUS_RESET0x04 ) { | |||
2098 | SCpnt->host->suggest_bus_reset = TRUE1; | |||
2099 | } | |||
2100 | #endif | |||
2101 | ||||
2102 | while (1) { | |||
2103 | save_flags(flags)__asm__ __volatile__("pushf ; pop %0" : "=r" (flags): :"memory" ); | |||
2104 | cli()__asm__ __volatile__ ("cli": : :"memory"); | |||
2105 | ||||
2106 | /* | |||
2107 | * Protect against races here. If the command is done, or we are | |||
2108 |      * on a different command, forget it. | |||
2109 | */ | |||
2110 | if (reset_flags & SCSI_RESET_ASYNCHRONOUS0x02) | |||
2111 | if (SCpnt->serial_number != SCpnt->serial_number_at_timeout) { | |||
2112 | restore_flags(flags)__asm__ __volatile__("push %0 ; popf": :"g" (flags):"memory"); | |||
2113 | return 0; | |||
2114 | } | |||
2115 | ||||
2116 | if (SCpnt->internal_timeout & IN_RESET2) | |||
2117 | { | |||
2118 | restore_flags(flags)__asm__ __volatile__("push %0 ; popf": :"g" (flags):"memory"); | |||
2119 | while (SCpnt->internal_timeout & IN_RESET2) | |||
2120 | barrier()__asm__ __volatile__("": : :"memory"); | |||
2121 | } | |||
2122 | else | |||
2123 | { | |||
2124 | SCpnt->internal_timeout |= IN_RESET2; | |||
2125 | update_timeout(SCpnt, RESET_TIMEOUT(5*100/10)); | |||
2126 | ||||
2127 | if (host->host_busy) | |||
2128 | { | |||
2129 | restore_flags(flags)__asm__ __volatile__("push %0 ; popf": :"g" (flags):"memory"); | |||
2130 | SCpnt1 = host->host_queue; | |||
2131 | while(SCpnt1) { | |||
2132 | if (SCpnt1->request.rq_status != RQ_INACTIVE(-1)) { | |||
2133 | #if 0 | |||
2134 | if (!(SCpnt1->flags & IS_RESETTING0x08) && | |||
2135 | !(SCpnt1->internal_timeout & IN_ABORT1)) | |||
2136 | scsi_abort(SCpnt1, DID_RESET0x08); | |||
2137 | #endif | |||
2138 | SCpnt1->flags |= (WAS_RESET0x01 | IS_RESETTING0x08); | |||
2139 | } | |||
2140 | SCpnt1 = SCpnt1->next; | |||
2141 | } | |||
2142 | ||||
2143 | host->last_reset = jiffies; | |||
2144 | temp = host->hostt->reset(SCpnt, reset_flags); | |||
2145 | /* | |||
2146 | This test allows the driver to introduce an additional bus | |||
2147 | settle time delay by setting last_reset up to 20 seconds in | |||
2148 | the future. In the normal case where the driver does not | |||
2149 | modify last_reset, it must be assumed that the actual bus | |||
2150 | reset occurred immediately prior to the return to this code, | |||
2151 | and so last_reset must be updated to the current time, so | |||
2152 | that the delay in internal_cmnd will guarantee at least a | |||
2153 | MIN_RESET_DELAY bus settle time. | |||
2154 | */ | |||
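	      /*
	       * Added illustration: a low-level driver's reset() handler
	       * could ask for a longer settle time by moving last_reset
	       * into the future before returning, for example (hypothetical
	       * value):
	       *     SCpnt->host->last_reset = jiffies + 10 * HZ;
	       * The clamp below leaves any value up to 20 seconds ahead of
	       * jiffies untouched.
	       */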
2155 | if ((host->last_reset < jiffies) || | |||
2156 | (host->last_reset > (jiffies + 20 * HZ100))) | |||
2157 | host->last_reset = jiffies; | |||
2158 | } | |||
2159 | else | |||
2160 | { | |||
2161 | if (!host->block) host->host_busy++; | |||
2162 | restore_flags(flags)__asm__ __volatile__("push %0 ; popf": :"g" (flags):"memory"); | |||
2163 | host->last_reset = jiffies; | |||
2164 | SCpnt->flags |= (WAS_RESET0x01 | IS_RESETTING0x08); | |||
2165 | temp = host->hostt->reset(SCpnt, reset_flags); | |||
2166 | if ((host->last_reset < jiffies) || | |||
2167 | (host->last_reset > (jiffies + 20 * HZ100))) | |||
2168 | host->last_reset = jiffies; | |||
2169 | if (!host->block) host->host_busy--; | |||
2170 | } | |||
2171 | ||||
2172 | #ifdef DEBUG | |||
2173 | printk("scsi reset function returned %d\n", temp); | |||
2174 | #endif | |||
2175 | ||||
2176 | /* | |||
2177 | * Now figure out what we need to do, based upon | |||
2178 | * what the low level driver said that it did. | |||
2179 | * If the result is SCSI_RESET_SUCCESS, SCSI_RESET_PENDING, | |||
2180 | * or SCSI_RESET_WAKEUP, then the low level driver did a | |||
2181 | * bus device reset or bus reset, so we should go through | |||
2182 | * and mark one or all of the devices on that bus | |||
2183 | * as having been reset. | |||
2184 | */ | |||
2185 | switch(temp & SCSI_RESET_ACTION0xff) { | |||
2186 | case SCSI_RESET_SUCCESS2: | |||
2187 | if (temp & SCSI_RESET_HOST_RESET0x200) | |||
2188 | scsi_mark_host_reset(host); | |||
2189 | else if (temp & SCSI_RESET_BUS_RESET0x100) | |||
2190 | scsi_mark_bus_reset(host, SCpnt->channel); | |||
2191 | else scsi_mark_device_reset(SCpnt->device); | |||
2192 | save_flags(flags)__asm__ __volatile__("pushf ; pop %0" : "=r" (flags): :"memory" ); | |||
2193 | cli()__asm__ __volatile__ ("cli": : :"memory"); | |||
2194 | SCpnt->internal_timeout &= ~(IN_RESET2|IN_RESET24|IN_RESET38); | |||
2195 | restore_flags(flags)__asm__ __volatile__("push %0 ; popf": :"g" (flags):"memory"); | |||
2196 | return 0; | |||
2197 | case SCSI_RESET_PENDING3: | |||
2198 | if (temp & SCSI_RESET_HOST_RESET0x200) | |||
2199 | scsi_mark_host_reset(host); | |||
2200 | else if (temp & SCSI_RESET_BUS_RESET0x100) | |||
2201 | scsi_mark_bus_reset(host, SCpnt->channel); | |||
2202 | else scsi_mark_device_reset(SCpnt->device); | |||
2203 | case SCSI_RESET_NOT_RUNNING5: | |||
2204 | return 0; | |||
2205 | case SCSI_RESET_PUNT1: | |||
2206 | SCpnt->internal_timeout &= ~(IN_RESET2|IN_RESET24|IN_RESET38); | |||
2207 | scsi_request_sense (SCpnt); | |||
2208 | return 0; | |||
2209 | case SCSI_RESET_WAKEUP4: | |||
2210 | if (temp & SCSI_RESET_HOST_RESET0x200) | |||
2211 | scsi_mark_host_reset(host); | |||
2212 | else if (temp & SCSI_RESET_BUS_RESET0x100) | |||
2213 | scsi_mark_bus_reset(host, SCpnt->channel); | |||
2214 | else scsi_mark_device_reset(SCpnt->device); | |||
2215 | SCpnt->internal_timeout &= ~(IN_RESET2|IN_RESET24|IN_RESET38); | |||
2216 | scsi_request_sense (SCpnt); | |||
2217 | /* | |||
2218 | * If a bus reset was performed, we | |||
2219 | * need to wake up each and every command | |||
2220 | 	       * that was active on that bus; if it was an HBA | |||
2221 | 	       * reset, all active commands on all channels. | |||
2222 | */ | |||
2223 | if( temp & SCSI_RESET_HOST_RESET0x200 ) | |||
2224 | { | |||
2225 | SCpnt1 = host->host_queue; | |||
2226 | while(SCpnt1) { | |||
2227 | if (SCpnt1->request.rq_status != RQ_INACTIVE(-1) | |||
2228 | && SCpnt1 != SCpnt) | |||
2229 | scsi_request_sense (SCpnt1); | |||
2230 | SCpnt1 = SCpnt1->next; | |||
2231 | } | |||
2232 | } else if( temp & SCSI_RESET_BUS_RESET0x100 ) { | |||
2233 | SCpnt1 = host->host_queue; | |||
2234 | while(SCpnt1) { | |||
2235 | if(SCpnt1->request.rq_status != RQ_INACTIVE(-1) | |||
2236 | && SCpnt1 != SCpnt | |||
2237 | && SCpnt1->channel == SCpnt->channel) | |||
2238 | 		      scsi_request_sense (SCpnt1); /* was SCpnt: sense each affected command, not the original again */ | |||
2239 | SCpnt1 = SCpnt1->next; | |||
2240 | } | |||
2241 | } | |||
2242 | return 0; | |||
2243 | case SCSI_RESET_SNOOZE0: | |||
2244 | /* In this case, we set the timeout field to 0 | |||
2245 | * so that this command does not time out any more, | |||
2246 | * and we return 1 so that we get a message on the | |||
2247 | * screen. | |||
2248 | */ | |||
2249 | save_flags(flags)__asm__ __volatile__("pushf ; pop %0" : "=r" (flags): :"memory" ); | |||
2250 | cli()__asm__ __volatile__ ("cli": : :"memory"); | |||
2251 | SCpnt->internal_timeout &= ~(IN_RESET2|IN_RESET24|IN_RESET38); | |||
2252 | update_timeout(SCpnt, 0); | |||
2253 | restore_flags(flags)__asm__ __volatile__("push %0 ; popf": :"g" (flags):"memory"); | |||
2254 | /* If you snooze, you lose... */ | |||
2255 | case SCSI_RESET_ERROR6: | |||
2256 | default: | |||
2257 | return 1; | |||
2258 | } | |||
2259 | ||||
2260 | return temp; | |||
2261 | } | |||
2262 | } | |||
2263 | } | |||
2264 | ||||
2265 | ||||
2266 | static void scsi_main_timeout(void) | |||
2267 | { | |||
2268 | /* | |||
2269 | * We must not enter update_timeout with a timeout condition still pending. | |||
2270 | */ | |||
2271 | ||||
2272 | int timed_out; | |||
2273 | unsigned long flags; | |||
2274 | struct Scsi_Host * host; | |||
2275 | Scsi_Cmnd * SCpnt = NULL((void *) 0); | |||
2276 | ||||
2277 | save_flags(flags)__asm__ __volatile__("pushf ; pop %0" : "=r" (flags): :"memory" ); | |||
2278 | cli()__asm__ __volatile__ ("cli": : :"memory"); | |||
2279 | ||||
2280 | update_timeout(NULL((void *) 0), 0); | |||
2281 | ||||
2282 | /* | |||
2283 | * Find all timers such that they have 0 or negative (shouldn't happen) | |||
2284 | * time remaining on them. | |||
2285 | */ | |||
2286 | timed_out = 0; | |||
2287 | for (host = scsi_hostlist; host; host = host->next) { | |||
2288 | for (SCpnt = host->host_queue; SCpnt; SCpnt = SCpnt->next) | |||
2289 | if (SCpnt->timeout == -1) | |||
2290 | { | |||
2291 | SCpnt->timeout = 0; | |||
2292 | SCpnt->serial_number_at_timeout = SCpnt->serial_number; | |||
2293 | ++timed_out; | |||
2294 | } | |||
2295 | } | |||
2296 | if (timed_out > 0) { | |||
2297 | for (host = scsi_hostlist; host; host = host->next) { | |||
2298 | for (SCpnt = host->host_queue; SCpnt; SCpnt = SCpnt->next) | |||
2299 | if (SCpnt->serial_number_at_timeout > 0 && | |||
2300 | SCpnt->serial_number_at_timeout == SCpnt->serial_number) | |||
2301 | { | |||
2302 | restore_flags(flags)__asm__ __volatile__("push %0 ; popf": :"g" (flags):"memory"); | |||
2303 | scsi_times_out(SCpnt); | |||
2304 | SCpnt->serial_number_at_timeout = 0; | |||
2305 | cli()__asm__ __volatile__ ("cli": : :"memory"); | |||
2306 | } | |||
2307 | } | |||
2308 | } | |||
2309 | restore_flags(flags)__asm__ __volatile__("push %0 ; popf": :"g" (flags):"memory"); | |||
2310 | } | |||
2311 | ||||
2312 | /* | |||
2313 | * The strategy is to cause the timer code to call scsi_times_out() | |||
2314 | * when the soonest timeout is pending. | |||
2315 |  * The arguments are used when we are queueing a new command: we do not | |||
2316 |  * want to subtract the time already used from that command's timeout, | |||
2317 |  * but we do want to take its value into account when setting the timer. | |||
2318 | */ | |||
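/*
 * Added worked example (hypothetical numbers): suppose command A currently
 * has 300 ticks recorded, 100 ticks have elapsed since the last update, and
 * command B is being queued with a 500 tick timeout.  update_timeout(B, 500)
 * subtracts the 100 used ticks from A (leaving 200), stores 500 for B
 * without any subtraction, and re-arms the SCSI timer to fire in
 * least = 200 ticks.
 */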
2319 | ||||
2320 | static int update_timeout(Scsi_Cmnd * SCset, int timeout) | |||
2321 | { | |||
2322 | unsigned int least, used; | |||
2323 | unsigned int oldto; | |||
2324 | unsigned long flags; | |||
2325 | struct Scsi_Host * host; | |||
2326 | Scsi_Cmnd * SCpnt = NULL((void *) 0); | |||
2327 | ||||
2328 | save_flags(flags)__asm__ __volatile__("pushf ; pop %0" : "=r" (flags): :"memory" ); | |||
2329 | cli()__asm__ __volatile__ ("cli": : :"memory"); | |||
2330 | ||||
2331 | oldto = 0; | |||
2332 | ||||
2333 | /* | |||
2334 | * This routine can be a performance bottleneck under high loads, since | |||
2335 | * it is called twice per SCSI operation: once when internal_cmnd is | |||
2336 | * called, and again when scsi_done completes the command. To limit | |||
2337 | * the load this routine can cause, we shortcut processing if no clock | |||
2338 | * ticks have occurred since the last time it was called. | |||
2339 | */ | |||
2340 | ||||
2341 | if (jiffies == time_start && timer_table[SCSI_TIMER18].expires > 0) { | |||
2342 | if(SCset){ | |||
2343 | oldto = SCset->timeout; | |||
2344 | SCset->timeout = timeout; | |||
2345 | if (timeout > 0 && | |||
2346 | jiffies + timeout < timer_table[SCSI_TIMER18].expires) | |||
2347 | timer_table[SCSI_TIMER18].expires = jiffies + timeout; | |||
2348 | } | |||
2349 | restore_flags(flags)__asm__ __volatile__("push %0 ; popf": :"g" (flags):"memory"); | |||
2350 | return oldto; | |||
2351 | } | |||
2352 | ||||
2353 | /* | |||
2354 | * Figure out how much time has passed since the last time the timeouts | |||
2355 | * were updated | |||
2356 | */ | |||
2357 | used = (time_start) ? (jiffies - time_start) : 0; | |||
2358 | ||||
2359 | /* | |||
2360 |  * Find out what is due to time out soonest, and adjust all timeouts for | |||
2361 | * the amount of time that has passed since the last time we called | |||
2362 | * update_timeout. | |||
2363 | */ | |||
2364 | ||||
2365 | oldto = 0; | |||
2366 | ||||
2367 | if(SCset){ | |||
2368 | oldto = SCset->timeout - used; | |||
2369 | SCset->timeout = timeout; | |||
2370 | } | |||
2371 | ||||
2372 | least = 0xffffffff; | |||
2373 | ||||
2374 | for(host = scsi_hostlist; host; host = host->next) | |||
2375 | for(SCpnt = host->host_queue; SCpnt; SCpnt = SCpnt->next) | |||
2376 | if (SCpnt->timeout > 0) { | |||
2377 | if (SCpnt != SCset) | |||
2378 | SCpnt->timeout -= used; | |||
2379 | if(SCpnt->timeout <= 0) SCpnt->timeout = -1; | |||
2380 | if(SCpnt->timeout > 0 && SCpnt->timeout < least) | |||
2381 | least = SCpnt->timeout; | |||
2382 | } | |||
2383 | ||||
2384 | /* | |||
2385 | * If something is due to timeout again, then we will set the next timeout | |||
2386 | * interrupt to occur. Otherwise, timeouts are disabled. | |||
2387 | */ | |||
2388 | ||||
2389 | if (least != 0xffffffff) | |||
2390 | { | |||
2391 | time_start = jiffies; | |||
2392 | timer_table[SCSI_TIMER18].expires = (time_elapsed = least) + jiffies; | |||
2393 | timer_active |= 1 << SCSI_TIMER18; | |||
2394 | } | |||
2395 | else | |||
2396 | { | |||
2397 | timer_table[SCSI_TIMER18].expires = time_start = time_elapsed = 0; | |||
2398 | timer_active &= ~(1 << SCSI_TIMER18); | |||
2399 | } | |||
2400 | restore_flags(flags)__asm__ __volatile__("push %0 ; popf": :"g" (flags):"memory"); | |||
2401 | return oldto; | |||
2402 | } | |||
2403 | ||||
2404 | #ifdef CONFIG_MODULES | |||
2405 | static int scsi_register_host(Scsi_Host_Template *); | |||
2406 | static void scsi_unregister_host(Scsi_Host_Template *); | |||
2407 | #endif | |||
2408 | ||||
2409 | void *scsi_malloc(unsigned int len) | |||
2410 | { | |||
2411 | unsigned int nbits, mask; | |||
2412 | unsigned long flags; | |||
2413 | int i, j; | |||
2414 | if(len % SECTOR_SIZE512 != 0 || len > PAGE_SIZE(1 << 12)) | |||
2415 | return NULL((void *) 0); | |||
2416 | ||||
2417 | save_flags(flags)__asm__ __volatile__("pushf ; pop %0" : "=r" (flags): :"memory" ); | |||
2418 | cli()__asm__ __volatile__ ("cli": : :"memory"); | |||
2419 | nbits = len >> 9; | |||
2420 | mask = (1 << nbits) - 1; | |||
2421 | ||||
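  /*
   * Added note: dma_malloc_freelist[i] is a per-page bitmap with one bit per
   * 512-byte sector, so a request for len bytes needs a run of
   * nbits = len >> 9 clear bits; the loop below slides the nbits-wide mask
   * across every page looking for such a run.
   */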
2422 | for(i=0;i < dma_sectors / SECTORS_PER_PAGE((1 << 12)/512); i++) | |||
2423 | for(j=0; j<=SECTORS_PER_PAGE((1 << 12)/512) - nbits; j++){ | |||
2424 | if ((dma_malloc_freelist[i] & (mask << j)) == 0){ | |||
2425 | dma_malloc_freelist[i] |= (mask << j); | |||
2426 | restore_flags(flags)__asm__ __volatile__("push %0 ; popf": :"g" (flags):"memory"); | |||
2427 | dma_free_sectors -= nbits; | |||
2428 | #ifdef DEBUG | |||
2429 | printk("SMalloc: %d %p\n",len, dma_malloc_pages[i] + (j << 9)); | |||
2430 | #endif | |||
2431 | return (void *) ((unsigned long) dma_malloc_pages[i] + (j << 9)); | |||
2432 | } | |||
2433 | } | |||
2434 | restore_flags(flags)__asm__ __volatile__("push %0 ; popf": :"g" (flags):"memory"); | |||
2435 | return NULL((void *) 0); /* Nope. No more */ | |||
2436 | } | |||
2437 | ||||
2438 | int scsi_free(void *obj, unsigned int len) | |||
2439 | { | |||
2440 | unsigned int page, sector, nbits, mask; | |||
2441 | unsigned long flags; | |||
2442 | ||||
2443 | #ifdef DEBUG | |||
2444 | unsigned long ret = 0; | |||
2445 | ||||
2446 | #ifdef __mips__ | |||
2447 | __asm__ __volatile__ ("move\t%0,$31":"=r"(ret)); | |||
2448 | #else | |||
2449 | ret = __builtin_return_address(0); | |||
2450 | #endif | |||
2451 | printk("scsi_free %p %d\n",obj, len); | |||
2452 | #endif | |||
2453 | ||||
2454 | for (page = 0; page < dma_sectors / SECTORS_PER_PAGE((1 << 12)/512); page++) { | |||
2455 | unsigned long page_addr = (unsigned long) dma_malloc_pages[page]; | |||
2456 | if ((unsigned long) obj >= page_addr && | |||
2457 | (unsigned long) obj < page_addr + PAGE_SIZE(1 << 12)) | |||
2458 | { | |||
2459 | sector = (((unsigned long) obj) - page_addr) >> 9; | |||
2460 | ||||
2461 | nbits = len >> 9; | |||
2462 | mask = (1 << nbits) - 1; | |||
2463 | ||||
2464 | if ((mask << sector) >= (1 << SECTORS_PER_PAGE((1 << 12)/512))) | |||
2465 | panic ("scsi_free:Bad memory alignment"); | |||
2466 | ||||
2467 | save_flags(flags)__asm__ __volatile__("pushf ; pop %0" : "=r" (flags): :"memory" ); | |||
2468 | cli()__asm__ __volatile__ ("cli": : :"memory"); | |||
2469 | if((dma_malloc_freelist[page] & | |||
2470 | (mask << sector)) != (mask<<sector)){ | |||
2471 | #ifdef DEBUG | |||
2472 | printk("scsi_free(obj=%p, len=%d) called from %08lx\n", | |||
2473 | obj, len, ret); | |||
2474 | #endif | |||
2475 | panic("scsi_free:Trying to free unused memory"); | |||
2476 | } | |||
2477 | dma_free_sectors += nbits; | |||
2478 | dma_malloc_freelist[page] &= ~(mask << sector); | |||
2479 | restore_flags(flags)__asm__ __volatile__("push %0 ; popf": :"g" (flags):"memory"); | |||
2480 | return 0; | |||
2481 | } | |||
2482 | } | |||
2483 | panic("scsi_free:Bad offset"); | |||
2484 | } | |||
2485 | ||||
2486 | ||||
2487 | int scsi_loadable_module_flag; /* Set after we scan builtin drivers */ | |||
2488 | ||||
2489 | void * scsi_init_malloc(unsigned int size, int priority) | |||
2490 | { | |||
2491 | void * retval; | |||
2492 | ||||
2493 | /* | |||
2494 | * For buffers used by the DMA pool, we assume page aligned | |||
2495 | * structures. | |||
2496 | */ | |||
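  /*
   * Added note: the loop below rounds the request up to a power-of-two
   * number of pages, e.g. a hypothetical size of 3 * PAGE_SIZE yields
   * order = 2, i.e. a 4 page allocation.
   */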
2497 | if ((size % PAGE_SIZE(1 << 12)) == 0) { | |||
2498 | int order, a_size; | |||
2499 | for (order = 0, a_size = PAGE_SIZE(1 << 12); | |||
2500 | a_size < size; order++, a_size <<= 1) | |||
2501 | ; | |||
2502 | retval = (void *) __get_dma_pages(priority & GFP_LEVEL_MASK,__get_free_pages((priority & 0xf),(order),1) | |||
2503 | order)__get_free_pages((priority & 0xf),(order),1); | |||
2504 | } else | |||
2505 | retval = kmalloclinux_kmalloc(size, priority); | |||
2506 | ||||
2507 | if (retval) | |||
2508 | memset(retval, 0, size)(__builtin_constant_p(0) ? (__builtin_constant_p((size)) ? __constant_c_and_count_memset (((retval)),((0x01010101UL*(unsigned char)(0))),((size))) : __constant_c_memset (((retval)),((0x01010101UL*(unsigned char)(0))),((size)))) : ( __builtin_constant_p((size)) ? __memset_generic((((retval))), (((0))),(((size)))) : __memset_generic(((retval)),((0)),((size ))))); | |||
2509 | return retval; | |||
2510 | } | |||
2511 | ||||
2512 | ||||
2513 | void scsi_init_free(char * ptr, unsigned int size) | |||
2514 | { | |||
2515 | /* | |||
2516 | * We need this special code here because the DMA pool assumes | |||
2517 | * page aligned data. Besides, it is wasteful to allocate | |||
2518 | * page sized chunks with kmalloc. | |||
2519 | */ | |||
2520 | if ((size % PAGE_SIZE(1 << 12)) == 0) { | |||
2521 | int order, a_size; | |||
2522 | ||||
2523 | for (order = 0, a_size = PAGE_SIZE(1 << 12); | |||
2524 | a_size < size; order++, a_size <<= 1) | |||
2525 | ; | |||
2526 | free_pages((unsigned long)ptr, order); | |||
2527 | } else | |||
2528 | kfreelinux_kfree(ptr); | |||
2529 | } | |||
2530 | ||||
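/*
 * Added summary: allocate queue_depth command blocks for this device
 * (defaulting to the host's cmd_per_lun), mark each one RQ_INACTIVE, and
 * link it onto both the host-wide queue and the device's own queue.
 */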
2531 | void scsi_build_commandblocks(Scsi_Device * SDpnt) | |||
2532 | { | |||
2533 | struct Scsi_Host *host = SDpnt->host; | |||
2534 | int j; | |||
2535 | Scsi_Cmnd * SCpnt; | |||
2536 | ||||
2537 | if (SDpnt->queue_depth == 0) | |||
2538 | SDpnt->queue_depth = host->cmd_per_lun; | |||
2539 | SDpnt->device_queue = NULL((void *) 0); | |||
2540 | ||||
2541 | for(j=0;j<SDpnt->queue_depth;j++){ | |||
2542 | SCpnt = (Scsi_Cmnd *) | |||
2543 | scsi_init_malloc(sizeof(Scsi_Cmnd), | |||
2544 | GFP_ATOMIC0x01 | | |||
2545 | (host->unchecked_isa_dma ? GFP_DMA0x80 : 0)); | |||
2546 | SCpnt->host = host; | |||
2547 | SCpnt->device = SDpnt; | |||
2548 | SCpnt->target = SDpnt->id; | |||
2549 | SCpnt->lun = SDpnt->lun; | |||
2550 | SCpnt->channel = SDpnt->channel; | |||
2551 | SCpnt->request.rq_status = RQ_INACTIVE(-1); | |||
2552 | SCpnt->use_sg = 0; | |||
2553 | SCpnt->old_use_sg = 0; | |||
2554 | SCpnt->old_cmd_len = 0; | |||
2555 | SCpnt->timeout = 0; | |||
2556 | SCpnt->underflow = 0; | |||
2557 | SCpnt->transfersize = 0; | |||
2558 | SCpnt->serial_number = 0; | |||
2559 | SCpnt->serial_number_at_timeout = 0; | |||
2560 | SCpnt->host_scribble = NULL((void *) 0); | |||
2561 | if(host->host_queue) | |||
2562 | host->host_queue->prev = SCpnt; | |||
2563 | SCpnt->next = host->host_queue; | |||
2564 | SCpnt->prev = NULL((void *) 0); | |||
2565 | host->host_queue = SCpnt; | |||
2566 | SCpnt->device_next = SDpnt->device_queue; | |||
2567 | SDpnt->device_queue = SCpnt; | |||
2568 | } | |||
2569 | SDpnt->has_cmdblocks = 1; | |||
2570 | } | |||
2571 | ||||
2572 | /* | |||
2573 | * scsi_dev_init() is our initialization routine, which in turn calls host | |||
2574 | * initialization, bus scanning, and sd/st initialization routines. | |||
2575 | */ | |||
2576 | ||||
2577 | int scsi_dev_init(void) | |||
2578 | { | |||
2579 | Scsi_Device * SDpnt; | |||
2580 | struct Scsi_Host * shpnt; | |||
2581 | struct Scsi_Device_Template * sdtpnt; | |||
2582 | #ifdef FOO_ON_YOU | |||
2583 | return; | |||
2584 | #endif | |||
2585 | ||||
2586 | /* Yes we're here... */ | |||
2587 | #if CONFIG_PROC_FS1 | |||
2588 | dispatch_scsi_info_ptr = dispatch_scsi_info; | |||
2589 | #endif | |||
2590 | ||||
2591 | /* Init a few things so we can "malloc" memory. */ | |||
2592 | scsi_loadable_module_flag = 0; | |||
2593 | ||||
2594 | timer_table[SCSI_TIMER18].fn = scsi_main_timeout; | |||
2595 | timer_table[SCSI_TIMER18].expires = 0; | |||
2596 | ||||
2597 | #ifdef CONFIG_MODULES | |||
2598 | register_symtab(&scsi_symbol_table); | |||
2599 | #endif | |||
2600 | ||||
2601 | /* Register the /proc/scsi/scsi entry */ | |||
2602 | #if CONFIG_PROC_FS1 | |||
2603 | proc_scsi_register(0, &proc_scsi_scsi); | |||
2604 | #endif | |||
2605 | ||||
2606 | /* initialize all hosts */ | |||
2607 | scsi_init(); | |||
2608 | ||||
2609 | scsi_devices = (Scsi_Device *) NULL((void *) 0); | |||
2610 | ||||
2611 | for (shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next) { | |||
2612 | scan_scsis(shpnt,0,0,0,0); /* scan for scsi devices */ | |||
2613 | if (shpnt->select_queue_depths != NULL((void *) 0)) | |||
2614 | (shpnt->select_queue_depths)(shpnt, scsi_devices); | |||
2615 | } | |||
2616 | ||||
2617 | printk("scsi : detected "); | |||
2618 | for (sdtpnt = scsi_devicelist; sdtpnt; sdtpnt = sdtpnt->next) | |||
2619 | if (sdtpnt->dev_noticed && sdtpnt->name) | |||
2620 | printk("%d SCSI %s%s ", sdtpnt->dev_noticed, sdtpnt->name, | |||
2621 | (sdtpnt->dev_noticed != 1) ? "s" : ""); | |||
2622 | printk("total.\n"); | |||
2623 | ||||
2624 | for(sdtpnt = scsi_devicelist; sdtpnt; sdtpnt = sdtpnt->next) | |||
2625 | if(sdtpnt->init && sdtpnt->dev_noticed) (*sdtpnt->init)(); | |||
2626 | ||||
2627 | for (SDpnt=scsi_devices; SDpnt; SDpnt = SDpnt->next) { | |||
2628 | SDpnt->scsi_request_fn = NULL((void *) 0); | |||
2629 | for(sdtpnt = scsi_devicelist; sdtpnt; sdtpnt = sdtpnt->next) | |||
2630 | if(sdtpnt->attach) (*sdtpnt->attach)(SDpnt); | |||
2631 | if(SDpnt->attached) scsi_build_commandblocks(SDpnt); | |||
2632 | } | |||
2633 | ||||
2634 | ||||
2635 | /* | |||
2636 | * This should build the DMA pool. | |||
2637 | */ | |||
2638 | resize_dma_pool(); | |||
2639 | ||||
2640 | /* | |||
2641 | * OK, now we finish the initialization by doing spin-up, read | |||
2642 | * capacity, etc, etc | |||
2643 | */ | |||
2644 | for(sdtpnt = scsi_devicelist; sdtpnt; sdtpnt = sdtpnt->next) | |||
2645 | if(sdtpnt->finish && sdtpnt->nr_dev) | |||
2646 | (*sdtpnt->finish)(); | |||
2647 | ||||
2648 | scsi_loadable_module_flag = 1; | |||
2649 | ||||
2650 | return 0; | |||
2651 | } | |||
2652 | ||||
2653 | static void print_inquiry(unsigned char *data) | |||
2654 | { | |||
2655 | int i; | |||
2656 | ||||
2657 | printk(" Vendor: "); | |||
2658 | for (i = 8; i < 16; i++) | |||
2659 | { | |||
2660 | if (data[i] >= 0x20 && i < data[4] + 5) | |||
2661 | printk("%c", data[i]); | |||
2662 | else | |||
2663 | printk(" "); | |||
2664 | } | |||
2665 | ||||
2666 | printk(" Model: "); | |||
2667 | for (i = 16; i < 32; i++) | |||
2668 | { | |||
2669 | if (data[i] >= 0x20 && i < data[4] + 5) | |||
2670 | printk("%c", data[i]); | |||
2671 | else | |||
2672 | printk(" "); | |||
2673 | } | |||
2674 | ||||
2675 | printk(" Rev: "); | |||
2676 | for (i = 32; i < 36; i++) | |||
2677 | { | |||
2678 | if (data[i] >= 0x20 && i < data[4] + 5) | |||
2679 | printk("%c", data[i]); | |||
2680 | else | |||
2681 | printk(" "); | |||
2682 | } | |||
2683 | ||||
2684 | printk("\n"); | |||
2685 | ||||
2686 | i = data[0] & 0x1f; | |||
2687 | ||||
2688 | printk(" Type: %s ", | |||
2689 | i < MAX_SCSI_DEVICE_CODE ? scsi_device_types[i] : "Unknown " ); | |||
2690 | printk(" ANSI SCSI revision: %02x", data[2] & 0x07); | |||
2691 | if ((data[2] & 0x07) == 1 && (data[3] & 0x0f) == 1) | |||
2692 | printk(" CCS\n"); | |||
2693 | else | |||
2694 | printk("\n"); | |||
2695 | } | |||
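/*
 * Editor's sketch (illustration only, not part of the driver): print_inquiry()
 * above assumes the standard INQUIRY data layout -- byte 0 carries the
 * peripheral device type in its low five bits, byte 2 the ANSI version,
 * byte 4 the additional length, bytes 8-15 the vendor id, bytes 16-31 the
 * product id and bytes 32-35 the revision.  A minimal, hypothetical helper
 * that pulls one of those fixed-width fields out of the header, using the
 * same validity test as the loops above:
 */
#if 0 /* illustrative only */
static void inquiry_field(const unsigned char *data, int first, int last,
                          char *out)
{
    int i;
    for (i = first; i <= last; i++)
        /* replace unprintable or past-the-end bytes with spaces */
        *out++ = (data[i] >= 0x20 && i < data[4] + 5) ? data[i] : ' ';
    *out = '\0';
}
/* e.g. inquiry_field(data, 8, 15, vendor) extracts the vendor string */
#endif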
2696 | ||||
2697 | ||||
2698 | #ifdef CONFIG_PROC_FS | |||
2699 | int scsi_proc_info(char *buffer, char **start, off_t offset, int length, | |||
2700 | int hostno, int inout) | |||
2701 | { | |||
2702 | Scsi_Cmnd *SCpnt; | |||
2703 | struct Scsi_Device_Template *SDTpnt; | |||
2704 | Scsi_Device *scd, *scd_h = NULL; | |||
2705 | struct Scsi_Host *HBA_ptr; | |||
2706 | char *p; | |||
2707 | int host, channel, id, lun; | |||
2708 | int size, len = 0; | |||
2709 | off_t begin = 0; | |||
2710 | off_t pos = 0; | |||
2711 | ||||
2712 | scd = scsi_devices; | |||
2713 | HBA_ptr = scsi_hostlist; | |||
2714 | ||||
2715 | if(inout == 0) { | |||
2716 | size = sprintf(buffer+len,"Attached devices: %s\n", (scd)?"":"none"); | |||
2717 | len += size; | |||
2718 | pos = begin + len; | |||
2719 | while (HBA_ptr) { | |||
2720 | #if 0 | |||
2721 | size += sprintf(buffer+len,"scsi%2d: %s\n", (int) HBA_ptr->host_no, | |||
2722 | HBA_ptr->hostt->procname); | |||
2723 | len += size; | |||
2724 | pos = begin + len; | |||
2725 | #endif | |||
2726 | scd = scsi_devices; | |||
2727 | while (scd) { | |||
2728 | if (scd->host == HBA_ptr) { | |||
2729 | proc_print_scsidevice(scd, buffer, &size, len); | |||
2730 | len += size; | |||
2731 | pos = begin + len; | |||
2732 | ||||
2733 | if (pos < offset) { | |||
2734 | len = 0; | |||
2735 | begin = pos; | |||
2736 | } | |||
2737 | if (pos > offset + length) | |||
2738 | goto stop_output; | |||
2739 | } | |||
2740 | scd = scd->next; | |||
2741 | } | |||
2742 | HBA_ptr = HBA_ptr->next; | |||
2743 | } | |||
2744 | ||||
2745 | stop_output: | |||
2746 | *start=buffer+(offset-begin); /* Start of wanted data */ | |||
2747 | len-=(offset-begin); /* Start slop */ | |||
2748 | if(len>length) | |||
2749 | len = length; /* Ending slop */ | |||
2750 | return (len); | |||
2751 | } | |||
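/*
 * Editor's note (worked example with made-up numbers): the begin/pos/len
 * bookkeeping above is the usual /proc "get_info" windowing.  For a read of
 * length=400 at offset=1000, every chunk that still ends before the window
 * (pos < offset) is logically discarded by resetting len to 0 and begin to
 * pos; generation stops once pos passes offset + length; and the epilogue
 * points *start at buffer + (offset - begin) and clips len to at most
 * `length' bytes, so the caller gets exactly the requested slice.
 */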
2752 | ||||
2753 | if(!buffer || length < 25 || strncmp("scsi", buffer, 4)) | |||
2754 | return(-EINVAL); | |||
2755 | ||||
2756 | /* | |||
2757 | * Usage: echo "scsi add-single-device 0 1 2 3" >/proc/scsi/scsi | |||
2758 | * with "0 1 2 3" replaced by your "Host Channel Id Lun". | |||
2759 | * Consider this feature BETA. | |||
2760 | * CAUTION: This is not for hotplugging your peripherals. SCSI | |||
2761 | * was not designed for this, and you could damage your | |||
2762 | * hardware! | |||
2763 | * It may, however, be safe to switch on an already | |||
2764 | * connected device; even then there is no guarantee that | |||
2765 | * doing so will not corrupt an ongoing data transfer. | |||
2766 | */ | |||
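/*
 * Editor's note (worked example, hypothetical numbers): for
 * "scsi add-single-device 1 0 5 0" the branch below starts parsing at
 * buffer + 23, i.e. just past "scsi add-single-device ", and the four
 * simple_strtoul() calls give host=1, channel=0, id=5, lun=0.  Each call
 * leaves p on the separator it stopped at, so the next one parses from p+1.
 */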
2767 | if(!strncmp("add-single-device", buffer + 5, 17)) { | |||
2768 | p = buffer + 23; | |||
2769 | ||||
2770 | host = simple_strtoul(p, &p, 0); | |||
2771 | channel = simple_strtoul(p+1, &p, 0); | |||
2772 | id = simple_strtoul(p+1, &p, 0); | |||
2773 | lun = simple_strtoul(p+1, &p, 0); | |||
2774 | ||||
2775 | printk("scsi singledevice %d %d %d %d\n", host, channel, | |||
2776 | id, lun); | |||
2777 | ||||
2778 | while(scd && (scd->host->host_no != host | |||
2779 | || scd->channel != channel | |||
2780 | || scd->id != id | |||
2781 | || scd->lun != lun)) { | |||
2782 | scd = scd->next; | |||
2783 | } | |||
2784 | if(scd) | |||
2785 | return(-ENOSYS); /* We do not yet support unplugging */ | |||
2786 | while(HBA_ptr && HBA_ptr->host_no != host) | |||
2787 | HBA_ptr = HBA_ptr->next; | |||
2788 | ||||
2789 | if(!HBA_ptr) | |||
2790 | return(-ENXIO); | |||
2791 | ||||
2792 | scan_scsis (HBA_ptr, 1, channel, id, lun); | |||
2793 | return(length); | |||
2794 | ||||
2795 | } | |||
2796 | ||||
2797 | /* | |||
2798 | * Usage: echo "scsi remove-single-device 0 1 2 3" >/proc/scsi/scsi | |||
2799 | * with "0 1 2 3" replaced by your "Host Channel Id Lun". | |||
2800 | * | |||
2801 | * Consider this feature pre-BETA. | |||
2802 | * | |||
2803 | * CAUTION: This is not for hotplugging your peripherals. SCSI | |||
2804 | * was not designed for this, and you could damage your | |||
2805 | * hardware and thoroughly confuse the SCSI subsystem. | |||
2806 | * | |||
2807 | */ | |||
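/*
 * Editor's note (summary of the branch below, no new behaviour): for
 * "scsi remove-single-device 1 0 5 0" the code walks scsi_devices looking
 * for host 1 / channel 0 / id 5 / lun 0, returns -ENODEV if no such device
 * exists and -EBUSY if it is still open, asks every upper-level template to
 * detach, and only when nothing remains attached does it unlink and free the
 * device's Scsi_Cmnd blocks and its Scsi_Device structure.
 */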
2808 | else if(!strncmp("remove-single-device", buffer + 5, 20)) { | |||
2809 | p = buffer + 26; | |||
2810 | ||||
2811 | host = simple_strtoul(p, &p, 0); | |||
2812 | channel = simple_strtoul(p+1, &p, 0); | |||
2813 | id = simple_strtoul(p+1, &p, 0); | |||
2814 | lun = simple_strtoul(p+1, &p, 0); | |||
2815 | ||||
2816 | while(scd != NULL) { | |||
2817 | if(scd->host->host_no == host | |||
2818 | && scd->channel == channel | |||
2819 | && scd->id == id | |||
2820 | && scd->lun == lun){ | |||
2821 | break; | |||
2822 | } | |||
2823 | scd_h = scd; | |||
2824 | scd = scd->next; | |||
2825 | } | |||
2826 | ||||
2827 | if(scd == NULL) | |||
2828 | return(-ENODEV); /* there is no such device attached */ | |||
2829 | ||||
2830 | if(scd->access_count) | |||
2831 | return(-EBUSY); | |||
2832 | ||||
2833 | SDTpnt = scsi_devicelist; | |||
2834 | while(SDTpnt != NULL) { | |||
2835 | if(SDTpnt->detach) (*SDTpnt->detach)(scd); | |||
2836 | SDTpnt = SDTpnt->next; | |||
2837 | } | |||
2838 | ||||
2839 | if(scd->attached == 0) { | |||
2840 | /* | |||
2841 | * Nobody is using this device any more. | |||
2842 | * Free all of the command structures. | |||
2843 | */ | |||
2844 | for(SCpnt=scd->host->host_queue; SCpnt; SCpnt = SCpnt->next){ | |||
2845 | if(SCpnt->device == scd) { | |||
2846 | if(SCpnt->prev != NULL) | |||
2847 | SCpnt->prev->next = SCpnt->next; | |||
2848 | if(SCpnt->next != NULL) | |||
2849 | SCpnt->next->prev = SCpnt->prev; | |||
2850 | if(SCpnt == scd->host->host_queue) | |||
2851 | scd->host->host_queue = SCpnt->next; | |||
2852 | scsi_init_free((char *) SCpnt, sizeof(*SCpnt)); | |||
2853 | } | |||
2854 | } | |||
2855 | /* Now we can remove the device structure */ | |||
2856 | if(scd_h != NULL) { | |||
2857 | scd_h->next = scd->next; | |||
2858 | } else if (scsi_devices == scd) { | |||
2859 | /* We had a hit on the first entry of the device list */ | |||
2860 | scsi_devices = scd->next; | |||
2861 | } | |||
2862 | scsi_init_free((char *) scd, sizeof(Scsi_Device)); | |||
2863 | } else { | |||
2864 | return(-EBUSY); | |||
2865 | } | |||
2866 | return(0); | |||
2867 | } | |||
2868 | return(-EINVAL); | |||
2869 | } | |||
2870 | #endif | |||
2871 | ||||
2872 | /* | |||
2873 | * Go through the device list and recompute the most appropriate size | |||
2874 | * for the dma pool. Then grab more memory (as required). | |||
2875 | */ | |||
2876 | static void resize_dma_pool(void) | |||
2877 | { | |||
2878 | int i; | |||
2879 | unsigned long size; | |||
2880 | struct Scsi_Host * shpnt; | |||
2881 | struct Scsi_Host * host = NULL; | |||
2882 | Scsi_Device * SDpnt; | |||
2883 | unsigned long flags; | |||
2884 | FreeSectorBitmap * new_dma_malloc_freelist = NULL; | |||
2885 | unsigned int new_dma_sectors = 0; | |||
2886 | unsigned int new_need_isa_buffer = 0; | |||
2887 | unsigned char ** new_dma_malloc_pages = NULL; | |||
2888 | ||||
2889 | if( !scsi_devices ) | |||
2890 | { | |||
2891 | /* | |||
2892 | * Free up the DMA pool. | |||
2893 | */ | |||
2894 | if( dma_free_sectors != dma_sectors ) | |||
2895 | panic("SCSI DMA pool memory leak %d %d\n",dma_free_sectors,dma_sectors); | |||
2896 | ||||
2897 | for(i=0; i < dma_sectors / SECTORS_PER_PAGE; i++) | |||
2898 | scsi_init_free(dma_malloc_pages[i], PAGE_SIZE); | |||
2899 | if (dma_malloc_pages) | |||
2900 | scsi_init_free((char *) dma_malloc_pages, | |||
2901 | (dma_sectors / SECTORS_PER_PAGE)*sizeof(*dma_malloc_pages)); | |||
2902 | dma_malloc_pages = NULL; | |||
2903 | if (dma_malloc_freelist) | |||
2904 | scsi_init_free((char *) dma_malloc_freelist, | |||
2905 | (dma_sectors / SECTORS_PER_PAGE)*sizeof(*dma_malloc_freelist)); | |||
2906 | dma_malloc_freelist = NULL; | |||
2907 | dma_sectors = 0; | |||
2908 | dma_free_sectors = 0; | |||
2909 | return; | |||
2910 | } | |||
2911 | /* Next, check to see if we need to extend the DMA buffer pool */ | |||
2912 | ||||
2913 | new_dma_sectors = 2*SECTORS_PER_PAGE; /* Base value we use */ | |||
2914 | ||||
2915 | if (high_memory-1 > ISA_DMA_THRESHOLD) | |||
2916 | scsi_need_isa_bounce_buffers = 1; | |||
2917 | else | |||
2918 | scsi_need_isa_bounce_buffers = 0; | |||
2919 | ||||
2920 | if (scsi_devicelist) | |||
2921 | for(shpnt=scsi_hostlist; shpnt; shpnt = shpnt->next) | |||
2922 | new_dma_sectors += SECTORS_PER_PAGE; /* Increment for each host */ | |||
2923 | ||||
2924 | for (SDpnt=scsi_devices; SDpnt; SDpnt = SDpnt->next) { | |||
2925 | host = SDpnt->host; | |||
2926 | ||||
2927 | /* | |||
2928 | * sd and sr drivers allocate scatterlists. | |||
2929 | * sr drivers may allocate for each command 1x2048 or 2x1024 extra | |||
2930 | * buffers for 2k sector size and 1k fs. | |||
2931 | * sg driver allocates buffers < 4k. | |||
2932 | * st driver does not need buffers from the dma pool. | |||
2933 | * estimate 4k buffer/command for devices of unknown type (should panic). | |||
2934 | */ | |||
2935 | if (SDpnt->type == TYPE_WORM || SDpnt->type == TYPE_ROM || | |||
2936 | SDpnt->type == TYPE_DISK || SDpnt->type == TYPE_MOD) { | |||
2937 | new_dma_sectors += ((host->sg_tablesize * | |||
2938 | sizeof(struct scatterlist) + 511) >> 9) * | |||
2939 | SDpnt->queue_depth; | |||
2940 | if (SDpnt->type == TYPE_WORM || SDpnt->type == TYPE_ROM) | |||
2941 | new_dma_sectors += (2048 >> 9) * SDpnt->queue_depth; | |||
2942 | } | |||
2943 | else if (SDpnt->type == TYPE_SCANNER || | |||
2944 | SDpnt->type == TYPE_PROCESSOR || | |||
2945 | SDpnt->type == TYPE_MEDIUM_CHANGER) { | |||
2946 | new_dma_sectors += (4096 >> 9) * SDpnt->queue_depth; | |||
2947 | } | |||
2948 | else { | |||
2949 | if (SDpnt->type != TYPE_TAPE) { | |||
2950 | printk("resize_dma_pool: unknown device type %d\n", SDpnt->type); | |||
2951 | new_dma_sectors += (4096 >> 9) * SDpnt->queue_depth; | |||
2952 | } | |||
2953 | } | |||
2954 | ||||
2955 | if(host->unchecked_isa_dma && | |||
2956 | scsi_need_isa_bounce_buffers && | |||
2957 | SDpnt->type != TYPE_TAPE) { | |||
2958 | new_dma_sectors += (PAGE_SIZE >> 9) * host->sg_tablesize * | |||
2959 | SDpnt->queue_depth; | |||
2960 | new_need_isa_buffer++; | |||
2961 | } | |||
2962 | } | |||
2963 | ||||
2964 | #ifdef DEBUG_INIT | |||
2965 | printk("resize_dma_pool: needed dma sectors = %d\n", new_dma_sectors); | |||
2966 | #endif | |||
2967 | ||||
2968 | /* Round up to a multiple of 16 sectors; this also limits the DMA pool to 32MB: */ | |||
2969 | new_dma_sectors = (new_dma_sectors + 15) & 0xfff0; | |||
2970 | ||||
2971 | /* | |||
2972 | * We never shrink the buffers - this leads to | |||
2973 | * race conditions that I would rather not even think | |||
2974 | * about right now. | |||
2975 | */ | |||
2976 | if( new_dma_sectors < dma_sectors ) | |||
2977 | new_dma_sectors = dma_sectors; | |||
2978 | ||||
2979 | if (new_dma_sectors) | |||
2980 | { | |||
2981 | size = (new_dma_sectors / SECTORS_PER_PAGE)*sizeof(FreeSectorBitmap); | |||
2982 | new_dma_malloc_freelist = (FreeSectorBitmap *) scsi_init_malloc(size, GFP_ATOMIC); | |||
2983 | memset(new_dma_malloc_freelist, 0, size); | |||
2984 | ||||
2985 | size = (new_dma_sectors / SECTORS_PER_PAGE)*sizeof(*new_dma_malloc_pages); | |||
2986 | new_dma_malloc_pages = (unsigned char **) scsi_init_malloc(size, GFP_ATOMIC); | |||
2987 | memset(new_dma_malloc_pages, 0, size); | |||
2988 | } | |||
2989 | ||||
2990 | /* | |||
2991 | * If we need more buffers, expand the list. | |||
2992 | */ | |||
2993 | if( new_dma_sectors > dma_sectors ) { | |||
2994 | for(i=dma_sectors / SECTORS_PER_PAGE; i< new_dma_sectors / SECTORS_PER_PAGE; i++) | |||
2995 | new_dma_malloc_pages[i] = (unsigned char *) | |||
2996 | scsi_init_malloc(PAGE_SIZE, GFP_ATOMIC | GFP_DMA); | |||
2997 | } | |||
2998 | ||||
2999 | /* When we modify the actual DMA list, we need to | |||
3000 | * protect things | |||
3001 | */ | |||
3002 | save_flags(flags); | |||
3003 | cli(); | |||
3004 | if (dma_malloc_freelist) | |||
3005 | { | |||
3006 | size = (dma_sectors / SECTORS_PER_PAGE)*sizeof(FreeSectorBitmap); | |||
3007 | memcpy(new_dma_malloc_freelist, dma_malloc_freelist, size); | |||
3008 | scsi_init_free((char *) dma_malloc_freelist, size); | |||
3009 | } | |||
3010 | dma_malloc_freelist = new_dma_malloc_freelist; | |||
3011 | ||||
3012 | if (dma_malloc_pages) | |||
3013 | { | |||
3014 | size = (dma_sectors / SECTORS_PER_PAGE)*sizeof(*dma_malloc_pages); | |||
3015 | memcpy(new_dma_malloc_pages, dma_malloc_pages, size); | |||
3016 | scsi_init_free((char *) dma_malloc_pages, size); | |||
3017 | } | |||
3018 | ||||
3019 | dma_free_sectors += new_dma_sectors - dma_sectors; | |||
3020 | dma_malloc_pages = new_dma_malloc_pages; | |||
3021 | dma_sectors = new_dma_sectors; | |||
3022 | need_isa_buffer = new_need_isa_buffer; | |||
3023 | restore_flags(flags); | |||
3024 | ||||
3025 | #ifdef DEBUG_INIT | |||
3026 | printk("resize_dma_pool: dma free sectors = %d\n", dma_free_sectors); | |||
3027 | printk("resize_dma_pool: dma sectors = %d\n", dma_sectors); | |||
3028 | printk("resize_dma_pool: need isa buffers = %d\n", need_isa_buffer); | |||
3029 | #endif | |||
3030 | } | |||
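/*
 * Editor's note (worked example, assumed numbers): with 4096-byte pages,
 * SECTORS_PER_PAGE is 8, so the base allocation above is 2 * 8 = 16 sectors,
 * plus (when an upper-level driver is registered) 8 more per host.  A single
 * disk on a host with, say, sg_tablesize = 16 and queue_depth = 2 then adds
 * ((16 * sizeof(struct scatterlist) + 511) >> 9) * 2 sectors for its
 * scatterlists.  The final step, (n + 15) & 0xfff0, rounds the total up to a
 * multiple of 16 sectors (8 KB) and, as the comment in the code notes, keeps
 * the pool below roughly 32 MB (0xfff0 sectors of 512 bytes).
 */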
3031 | ||||
3032 | #ifdef CONFIG_MODULES /* a big #ifdef block... */ | |||
3033 | ||||
3034 | /* | |||
3035 | * This entry point should be called by a loadable module if it is trying | |||
3036 | * to add a low level scsi driver to the system. | |||
3037 | */ | |||
3038 | static int scsi_register_host(Scsi_Host_Template * tpnt) | |||
3039 | { | |||
3040 | int pcount; | |||
3041 | struct Scsi_Host * shpnt; | |||
3042 | Scsi_Device * SDpnt; | |||
3043 | struct Scsi_Device_Template * sdtpnt; | |||
3044 | const char * name; | |||
3045 | ||||
3046 | if (tpnt->next || !tpnt->detect) return 1;/* Must be already loaded, or | |||
3047 | * no detect routine available | |||
3048 | */ | |||
3049 | pcount = next_scsi_host; | |||
3050 | if ((tpnt->present = tpnt->detect(tpnt))) | |||
3051 | { | |||
3052 | if(pcount == next_scsi_host) { | |||
3053 | if(tpnt->present > 1) { | |||
3054 | printk("Failure to register low-level scsi driver"); | |||
3055 | scsi_unregister_host(tpnt); | |||
3056 | return 1; | |||
3057 | } | |||
3058 | /* The low-level driver did not register a host itself. We | |||
3059 | * can do this now. | |||
3060 | */ | |||
3061 | scsi_register(tpnt,0); | |||
3062 | } | |||
3063 | tpnt->next = scsi_hosts; /* Add to the linked list */ | |||
3064 | scsi_hosts = tpnt; | |||
3065 | ||||
3066 | /* Add the new driver to /proc/scsi */ | |||
3067 | #if CONFIG_PROC_FS | |||
3068 | build_proc_dir_entries(tpnt); | |||
3069 | #endif | |||
3070 | ||||
3071 | for(shpnt=scsi_hostlist; shpnt; shpnt = shpnt->next) | |||
3072 | if(shpnt->hostt == tpnt) | |||
3073 | { | |||
3074 | if(tpnt->info) | |||
3075 | name = tpnt->info(shpnt); | |||
3076 | else | |||
3077 | name = tpnt->name; | |||
3078 | printk ("scsi%d : %s\n", /* And print a little message */ | |||
3079 | shpnt->host_no, name); | |||
3080 | } | |||
3081 | ||||
3082 | printk ("scsi : %d host%s.\n", next_scsi_host, | |||
3083 | (next_scsi_host == 1) ? "" : "s"); | |||
3084 | ||||
3085 | scsi_make_blocked_list(); | |||
3086 | ||||
3087 | /* The next step is to call scan_scsis here. This generates the | |||
3088 | * Scsi_Devices entries | |||
3089 | */ | |||
3090 | ||||
3091 | for(shpnt=scsi_hostlist; shpnt; shpnt = shpnt->next) | |||
3092 | if(shpnt->hostt == tpnt) { | |||
3093 | scan_scsis(shpnt,0,0,0,0); | |||
3094 | if (shpnt->select_queue_depths != NULL) | |||
3095 | (shpnt->select_queue_depths)(shpnt, scsi_devices); | |||
3096 | } | |||
3097 | ||||
3098 | for(sdtpnt = scsi_devicelist; sdtpnt; sdtpnt = sdtpnt->next) | |||
3099 | if(sdtpnt->init && sdtpnt->dev_noticed) (*sdtpnt->init)(); | |||
3100 | ||||
3101 | /* Next we create the Scsi_Cmnd structures for this host */ | |||
3102 | ||||
3103 | for(SDpnt = scsi_devices; SDpnt; SDpnt = SDpnt->next) | |||
3104 | if(SDpnt->host->hostt == tpnt) | |||
3105 | { | |||
3106 | for(sdtpnt = scsi_devicelist; sdtpnt; sdtpnt = sdtpnt->next) | |||
3107 | if(sdtpnt->attach) (*sdtpnt->attach)(SDpnt); | |||
3108 | if(SDpnt->attached) scsi_build_commandblocks(SDpnt); | |||
3109 | } | |||
3110 | ||||
3111 | /* | |||
3112 | * Now that we have all of the devices, resize the DMA pool, | |||
3113 | * as required. */ | |||
3114 | resize_dma_pool(); | |||
3115 | ||||
3116 | ||||
3117 | /* This does any final handling that is required. */ | |||
3118 | for(sdtpnt = scsi_devicelist; sdtpnt; sdtpnt = sdtpnt->next) | |||
3119 | if(sdtpnt->finish && sdtpnt->nr_dev) | |||
3120 | (*sdtpnt->finish)(); | |||
3121 | } | |||
3122 | ||||
3123 | #if defined(USE_STATIC_SCSI_MEMORY) | |||
3124 | printk ("SCSI memory: total %ldKb, used %ldKb, free %ldKb.\n", | |||
3125 | (scsi_memory_upper_value - scsi_memory_lower_value) / 1024, | |||
3126 | (scsi_init_memory_start - scsi_memory_lower_value) / 1024, | |||
3127 | (scsi_memory_upper_value - scsi_init_memory_start) / 1024); | |||
3128 | #endif | |||
3129 | ||||
3130 | MOD_INC_USE_COUNT; | |||
3131 | return 0; | |||
3132 | } | |||
3133 | ||||
3134 | /* | |||
3135 | * Similarly, this entry point should be called by a loadable module if it | |||
3136 | * is trying to remove a low level scsi driver from the system. | |||
3137 | */ | |||
3138 | static void scsi_unregister_host(Scsi_Host_Template * tpnt) | |||
3139 | { | |||
3140 | Scsi_Host_Template * SHT, *SHTp; | |||
3141 | Scsi_Device *sdpnt, * sdppnt, * sdpnt1; | |||
3142 | Scsi_Cmnd * SCpnt; | |||
3143 | unsigned long flags; | |||
3144 | struct Scsi_Device_Template * sdtpnt; | |||
3145 | struct Scsi_Host * shpnt, *sh1; | |||
3146 | int pcount; | |||
3147 | ||||
3148 | /* First verify that this host adapter is completely free with no pending | |||
3149 | * commands */ | |||
3150 | ||||
3151 | for(sdpnt = scsi_devices; sdpnt; sdpnt = sdpnt->next) | |||
3152 | if(sdpnt->host->hostt == tpnt && sdpnt->host->hostt->usage_count | |||
3153 | && *sdpnt->host->hostt->usage_count) return; | |||
3154 | ||||
3155 | for(shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next) | |||
3156 | { | |||
3157 | if (shpnt->hostt != tpnt) continue; | |||
3158 | for(SCpnt = shpnt->host_queue; SCpnt; SCpnt = SCpnt->next) | |||
3159 | { | |||
3160 | save_flags(flags); | |||
3161 | cli(); | |||
3162 | if(SCpnt->request.rq_status != RQ_INACTIVE) { | |||
3163 | restore_flags(flags); | |||
3164 | for(SCpnt = shpnt->host_queue; SCpnt; SCpnt = SCpnt->next) | |||
3165 | if(SCpnt->request.rq_status == RQ_SCSI_DISCONNECTING) | |||
3166 | SCpnt->request.rq_status = RQ_INACTIVE; | |||
3167 | printk("Device busy???\n"); | |||
3168 | return; | |||
3169 | } | |||
3170 | SCpnt->request.rq_status = RQ_SCSI_DISCONNECTING; /* Mark as busy */ | |||
3171 | restore_flags(flags); | |||
3172 | } | |||
3173 | } | |||
3174 | /* Next we detach the high level drivers from the Scsi_Device structures */ | |||
3175 | ||||
3176 | for(sdpnt = scsi_devices; sdpnt; sdpnt = sdpnt->next) | |||
3177 | if(sdpnt->host->hostt == tpnt) | |||
3178 | { | |||
3179 | for(sdtpnt = scsi_devicelist; sdtpnt; sdtpnt = sdtpnt->next) | |||
3180 | if(sdtpnt->detach) (*sdtpnt->detach)(sdpnt); | |||
3181 | /* If something still attached, punt */ | |||
3182 | if (sdpnt->attached) { | |||
3183 | printk("Attached usage count = %d\n", sdpnt->attached); | |||
3184 | return; | |||
3185 | } | |||
3186 | } | |||
3187 | ||||
3188 | /* Next we free up the Scsi_Cmnd structures for this host */ | |||
3189 | ||||
3190 | for(sdpnt = scsi_devices; sdpnt; sdpnt = sdpnt->next) | |||
3191 | if(sdpnt->host->hostt == tpnt) | |||
3192 | while (sdpnt->host->host_queue) { | |||
3193 | SCpnt = sdpnt->host->host_queue->next; | |||
3194 | scsi_init_free((char *) sdpnt->host->host_queue, sizeof(Scsi_Cmnd)); | |||
3195 | sdpnt->host->host_queue = SCpnt; | |||
3196 | if (SCpnt) SCpnt->prev = NULL; | |||
3197 | sdpnt->has_cmdblocks = 0; | |||
3198 | } | |||
3199 | ||||
3200 | /* Next free up the Scsi_Device structures for this host */ | |||
3201 | ||||
3202 | sdppnt = NULL; | |||
3203 | for(sdpnt = scsi_devices; sdpnt; sdpnt = sdpnt1) | |||
3204 | { | |||
3205 | sdpnt1 = sdpnt->next; | |||
3206 | if (sdpnt->host->hostt == tpnt) { | |||
3207 | if (sdppnt) | |||
3208 | sdppnt->next = sdpnt->next; | |||
3209 | else | |||
3210 | scsi_devices = sdpnt->next; | |||
3211 | scsi_init_free((char *) sdpnt, sizeof (Scsi_Device)); | |||
3212 | } else | |||
3213 | sdppnt = sdpnt; | |||
3214 | } | |||
3215 | ||||
3216 | /* Next we go through and remove the instances of the individual hosts | |||
3217 | * that were detected */ | |||
3218 | ||||
3219 | shpnt = scsi_hostlist; | |||
3220 | while(shpnt) { | |||
3221 | sh1 = shpnt->next; | |||
3222 | if(shpnt->hostt == tpnt) { | |||
3223 | if(shpnt->loaded_as_module) { | |||
3224 | pcount = next_scsi_host; | |||
3225 | /* Remove the /proc/scsi directory entry */ | |||
3226 | #if CONFIG_PROC_FS | |||
3227 | proc_scsi_unregister(tpnt->proc_dir, | |||
3228 | shpnt->host_no + PROC_SCSI_FILE); | |||
3229 | #endif | |||
3230 | if(tpnt->release) | |||
3231 | (*tpnt->release)(shpnt); | |||
3232 | else { | |||
3233 | /* This is the default case for the release function. | |||
3234 | * It should do the right thing for most correctly | |||
3235 | * written host adapters. | |||
3236 | */ | |||
3237 | if (shpnt->irq) free_irq(shpnt->irq, NULL); | |||
3238 | if (shpnt->dma_channel != 0xff) free_dma(shpnt->dma_channel); | |||
3239 | if (shpnt->io_port && shpnt->n_io_port) | |||
3240 | release_region(shpnt->io_port, shpnt->n_io_port); | |||
3241 | } | |||
3242 | if(pcount == next_scsi_host) scsi_unregister(shpnt); | |||
3243 | tpnt->present--; | |||
3244 | } | |||
3245 | } | |||
3246 | shpnt = sh1; | |||
3247 | } | |||
3248 | ||||
3249 | /* | |||
3250 | * If there are absolutely no more hosts left, it is safe | |||
3251 | * to completely nuke the DMA pool. The resize operation will | |||
3252 | * do the right thing and free everything. | |||
3253 | */ | |||
3254 | if( !scsi_devices ) | |||
3255 | resize_dma_pool(); | |||
3256 | ||||
3257 | printk ("scsi : %d host%s.\n", next_scsi_host, | |||
3258 | (next_scsi_host == 1) ? "" : "s"); | |||
3259 | ||||
3260 | #if defined(USE_STATIC_SCSI_MEMORY) | |||
3261 | printk ("SCSI memory: total %ldKb, used %ldKb, free %ldKb.\n", | |||
3262 | (scsi_memory_upper_value - scsi_memory_lower_value) / 1024, | |||
3263 | (scsi_init_memory_start - scsi_memory_lower_value) / 1024, | |||
3264 | (scsi_memory_upper_value - scsi_init_memory_start) / 1024); | |||
3265 | #endif | |||
3266 | ||||
3267 | scsi_make_blocked_list(); | |||
3268 | ||||
3269 | /* There were some hosts that were loaded at boot time, so we cannot | |||
3270 | do any more than this */ | |||
3271 | if (tpnt->present) return; | |||
3272 | ||||
3273 | /* OK, this is the very last step. Remove this host adapter from the | |||
3274 | linked list. */ | |||
3275 | for(SHTp=NULL, SHT=scsi_hosts; SHT; SHTp=SHT, SHT=SHT->next) | |||
3276 | if(SHT == tpnt) { | |||
3277 | if(SHTp) | |||
3278 | SHTp->next = SHT->next; | |||
3279 | else | |||
3280 | scsi_hosts = SHT->next; | |||
3281 | SHT->next = NULL; | |||
3282 | break; | |||
3283 | } | |||
3284 | ||||
3285 | /* Remove this driver's /proc/scsi directory entries */ | |||
3286 | #if CONFIG_PROC_FS | |||
3287 | proc_scsi_unregister(tpnt->proc_dir, tpnt->proc_dir->low_ino); | |||
3288 | #endif | |||
3289 | MOD_DEC_USE_COUNT; | |||
3290 | } | |||
3291 | ||||
3292 | /* | |||
3293 | * This entry point should be called by a loadable module if it is trying | |||
3294 | * to add a high level scsi driver to the system. | |||
3295 | */ | |||
3296 | static int scsi_register_device_module(struct Scsi_Device_Template * tpnt) | |||
3297 | { | |||
3298 | Scsi_Device * SDpnt; | |||
3299 | ||||
3300 | if (tpnt->next) return 1; | |||
3301 | ||||
3302 | scsi_register_device(tpnt); | |||
3303 | /* | |||
3304 | * First scan the devices that we know about, and see if we notice them. | |||
3305 | */ | |||
3306 | ||||
3307 | for(SDpnt = scsi_devices; SDpnt; SDpnt = SDpnt->next) | |||
3308 | if(tpnt->detect) SDpnt->attached += (*tpnt->detect)(SDpnt); | |||
3309 | ||||
3310 | /* | |||
3311 | * If any of the devices would match this driver, then perform the | |||
3312 | * init function. | |||
3313 | */ | |||
3314 | if(tpnt->init && tpnt->dev_noticed) | |||
3315 | if ((*tpnt->init)()) return 1; | |||
3316 | ||||
3317 | /* | |||
3318 | * Now actually connect the devices to the new driver. | |||
3319 | */ | |||
3320 | for(SDpnt = scsi_devices; SDpnt; SDpnt = SDpnt->next) | |||
3321 | { | |||
3322 | if(tpnt->attach) (*tpnt->attach)(SDpnt); | |||
3323 | /* | |||
3324 | * If this driver attached to the device and the device does | |||
3325 | * not yet have command blocks, allocate them now. | |||
3326 | */ | |||
3327 | if(SDpnt->attached && SDpnt->has_cmdblocks == 0) | |||
3328 | scsi_build_commandblocks(SDpnt); | |||
3329 | } | |||
3330 | ||||
3331 | /* | |||
3332 | * This does any final handling that is required. | |||
3333 | */ | |||
3334 | if(tpnt->finish && tpnt->nr_dev) (*tpnt->finish)(); | |||
3335 | MOD_INC_USE_COUNT; | |||
3336 | return 0; | |||
3337 | } | |||
3338 | ||||
3339 | static int scsi_unregister_device(struct Scsi_Device_Template * tpnt) | |||
3340 | { | |||
3341 | Scsi_Device * SDpnt; | |||
3342 | Scsi_Cmnd * SCpnt; | |||
3343 | struct Scsi_Device_Template * spnt; | |||
3344 | struct Scsi_Device_Template * prev_spnt; | |||
3345 | ||||
3346 | /* | |||
3347 | * If we are busy, this is not going to fly. | |||
3348 | */ | |||
3349 | if( *tpnt->usage_count != 0) return 0; | |||
3350 | /* | |||
3351 | * Next, detach the devices from the driver. | |||
3352 | */ | |||
3353 | ||||
3354 | for(SDpnt = scsi_devices; SDpnt; SDpnt = SDpnt->next) | |||
3355 | { | |||
3356 | if(tpnt->detach) (*tpnt->detach)(SDpnt); | |||
3357 | if(SDpnt->attached == 0) | |||
3358 | { | |||
3359 | /* | |||
3360 | * Nobody is using this device any more. Free all of the | |||
3361 | * command structures. | |||
3362 | */ | |||
3363 | for(SCpnt = SDpnt->host->host_queue; SCpnt; SCpnt = SCpnt->next) | |||
3364 | { | |||
3365 | if(SCpnt->device == SDpnt) | |||
3366 | { | |||
3367 | if(SCpnt->prev != NULL) | |||
3368 | SCpnt->prev->next = SCpnt->next; | |||
3369 | if(SCpnt->next != NULL) | |||
3370 | SCpnt->next->prev = SCpnt->prev; | |||
3371 | if(SCpnt == SDpnt->host->host_queue) | |||
3372 | SDpnt->host->host_queue = SCpnt->next; | |||
3373 | scsi_init_free((char *) SCpnt, sizeof(*SCpnt)); | |||
3374 | } | |||
3375 | } | |||
3376 | SDpnt->has_cmdblocks = 0; | |||
3377 | } | |||
3378 | } | |||
3379 | /* | |||
3380 | * Extract the template from the linked list. | |||
3381 | */ | |||
3382 | spnt = scsi_devicelist; | |||
3383 | prev_spnt = NULL; | |||
3384 | while(spnt != tpnt) | |||
3385 | { | |||
3386 | prev_spnt = spnt; | |||
3387 | spnt = spnt->next; | |||
3388 | } | |||
3389 | if(prev_spnt == NULL) | |||
3390 | scsi_devicelist = tpnt->next; | |||
3391 | else | |||
3392 | prev_spnt->next = spnt->next; | |||
3393 | ||||
3394 | MOD_DEC_USE_COUNT; | |||
3395 | /* | |||
3396 | * Final cleanup for the driver is done in the driver sources in the | |||
3397 | * cleanup function. | |||
3398 | */ | |||
3399 | return 0; | |||
3400 | } | |||
3401 | ||||
3402 | ||||
3403 | int scsi_register_module(int module_type, void * ptr) | |||
3404 | { | |||
3405 | switch(module_type){ | |||
3406 | case MODULE_SCSI_HA: | |||
3407 | return scsi_register_host((Scsi_Host_Template *) ptr); | |||
3408 | ||||
3409 | /* Load upper level device handler of some kind */ | |||
3410 | case MODULE_SCSI_DEV: | |||
3411 | #ifdef CONFIG_KERNELD | |||
3412 | if (scsi_hosts == NULL) | |||
3413 | request_module("scsi_hostadapter"); | |||
3414 | #endif | |||
3415 | return scsi_register_device_module((struct Scsi_Device_Template *) ptr); | |||
3416 | /* The rest of these are not yet implemented */ | |||
3417 | ||||
3418 | /* Load constants.o */ | |||
3419 | case MODULE_SCSI_CONST: | |||
3420 | ||||
3421 | /* Load specialized ioctl handler for some device. Intended for | |||
3422 | * cdroms that have non-SCSI2 audio command sets. */ | |||
3423 | case MODULE_SCSI_IOCTL: | |||
3424 | ||||
3425 | default: | |||
3426 | return 1; | |||
3427 | } | |||
3428 | } | |||
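/*
 * Editor's sketch (hypothetical example, not taken from this file): a
 * loadable low-level host adapter driver would typically pass its
 * Scsi_Host_Template to scsi_register_module() from its init_module() and
 * undo that in cleanup_module().  The template initializer name
 * MY_DRIVER_TEMPLATE below is made up for illustration.
 */
#if 0 /* illustrative only */
static Scsi_Host_Template my_driver_template = MY_DRIVER_TEMPLATE;

int init_module(void)
{
    /* detect(), bus scan and attach all happen inside scsi_register_host() */
    return scsi_register_module(MODULE_SCSI_HA, &my_driver_template);
}

void cleanup_module(void)
{
    scsi_unregister_module(MODULE_SCSI_HA, &my_driver_template);
}
#endif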
3429 | ||||
3430 | void scsi_unregister_module(int module_type, void * ptr) | |||
3431 | { | |||
3432 | switch(module_type) { | |||
3433 | case MODULE_SCSI_HA: | |||
3434 | scsi_unregister_host((Scsi_Host_Template *) ptr); | |||
3435 | break; | |||
3436 | case MODULE_SCSI_DEV: | |||
3437 | scsi_unregister_device((struct Scsi_Device_Template *) ptr); | |||
3438 | break; | |||
3439 | /* The rest of these are not yet implemented. */ | |||
3440 | case MODULE_SCSI_CONST: | |||
3441 | case MODULE_SCSI_IOCTL: | |||
3442 | break; | |||
3443 | default: break; | |||
3444 | } | |||
3445 | return; | |||
3446 | } | |||
3447 | ||||
3448 | #endif /* CONFIG_MODULES */ | |||
3449 | ||||
3450 | #ifdef DEBUG_TIMEOUT | |||
3451 | static void | |||
3452 | scsi_dump_status(void) | |||
3453 | { | |||
3454 | int i; | |||
3455 | struct Scsi_Host * shpnt; | |||
3456 | Scsi_Cmnd * SCpnt; | |||
3457 | printk("Dump of scsi parameters:\n"); | |||
3458 | i = 0; | |||
3459 | for(shpnt = scsi_hostlist; shpnt; shpnt = shpnt->next) | |||
3460 | for(SCpnt=shpnt->host_queue; SCpnt; SCpnt = SCpnt->next) | |||
3461 | { | |||
3462 | /* (0) 0:0:0:0 (802 123434 8 8 0) (3 3 2) (%d %d %d) %d %x */ | |||
3463 | printk("(%d) %d:%d:%d:%d (%s %ld %ld %ld %d) (%d %d %x) (%d %d %d) %x %x %x\n", | |||
3464 | i++, SCpnt->host->host_no, | |||
3465 | SCpnt->channel, | |||
3466 | SCpnt->target, | |||
3467 | SCpnt->lun, | |||
3468 | kdevname(SCpnt->request.rq_dev), | |||
3469 | SCpnt->request.sector, | |||
3470 | SCpnt->request.nr_sectors, | |||
3471 | SCpnt->request.current_nr_sectors, | |||
3472 | SCpnt->use_sg, | |||
3473 | SCpnt->retries, | |||
3474 | SCpnt->allowed, | |||
3475 | SCpnt->flags, | |||
3476 | SCpnt->timeout_per_command, | |||
3477 | SCpnt->timeout, | |||
3478 | SCpnt->internal_timeout, | |||
3479 | SCpnt->cmnd[0], | |||
3480 | SCpnt->sense_buffer[2], | |||
3481 | SCpnt->result); | |||
3482 | } | |||
3483 | printk("wait_for_request = %p\n", wait_for_request); | |||
3484 | /* Now dump the request lists for each block device */ | |||
3485 | printk("Dump of pending block device requests\n"); | |||
3486 | for(i=0; i<MAX_BLKDEV; i++) | |||
3487 | if(blk_dev[i].current_request) | |||
3488 | { | |||
3489 | struct request * req; | |||
3490 | printk("%d: ", i); | |||
3491 | req = blk_dev[i].current_request; | |||
3492 | while(req) { | |||
3493 | printk("(%s %d %ld %ld %ld) ", | |||
3494 | kdevname(req->rq_dev), | |||
3495 | req->cmd, | |||
3496 | req->sector, | |||
3497 | req->nr_sectors, | |||
3498 | req->current_nr_sectors); | |||
3499 | req = req->next; | |||
3500 | } | |||
3501 | printk("\n"); | |||
3502 | } | |||
3503 | } | |||
3504 | #endif | |||
3505 | ||||
3506 | #ifdef MODULE | |||
3507 | ||||
3508 | int init_module(void) { | |||
3509 | unsigned long size; | |||
3510 | ||||
3511 | /* | |||
3512 | * This makes /proc/scsi visible. | |||
3513 | */ | |||
3514 | #if CONFIG_PROC_FS | |||
3515 | dispatch_scsi_info_ptr = dispatch_scsi_info; | |||
3516 | #endif | |||
3517 | ||||
3518 | timer_table[SCSI_TIMER].fn = scsi_main_timeout; | |||
3519 | timer_table[SCSI_TIMER].expires = 0; | |||
3520 | register_symtab(&scsi_symbol_table); | |||
3521 | scsi_loadable_module_flag = 1; | |||
3522 | ||||
3523 | /* Register the /proc/scsi/scsi entry */ | |||
3524 | #if CONFIG_PROC_FS | |||
3525 | proc_scsi_register(0, &proc_scsi_scsi); | |||
3526 | #endif | |||
3527 | ||||
3528 | ||||
3529 | dma_sectors = PAGE_SIZE / SECTOR_SIZE; | |||
3530 | dma_free_sectors= dma_sectors; | |||
3531 | /* | |||
3532 | * Set up a minimal DMA buffer list - this will be used during scan_scsis | |||
3533 | * in some cases. | |||
3534 | */ | |||
3535 | ||||
3536 | /* One bit per sector to indicate free/busy */ | |||
3537 | size = (dma_sectors / SECTORS_PER_PAGE)*sizeof(FreeSectorBitmap); | |||
3538 | dma_malloc_freelist = (unsigned char *) scsi_init_malloc(size, GFP_ATOMIC); | |||
3539 | memset(dma_malloc_freelist, 0, size); | |||
3540 | ||||
3541 | /* One pointer per page for the page list */ | |||
3542 | dma_malloc_pages = (unsigned char **) | |||
3543 | scsi_init_malloc((dma_sectors / SECTORS_PER_PAGE)*sizeof(*dma_malloc_pages), GFP_ATOMIC); | |||
3544 | dma_malloc_pages[0] = (unsigned char *) | |||
3545 | scsi_init_malloc(PAGE_SIZE, GFP_ATOMIC | GFP_DMA); | |||
3546 | return 0; | |||
3547 | } | |||
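/*
 * Editor's note (worked arithmetic for init_module() above): with 4096-byte
 * pages and 512-byte sectors the module starts out with PAGE_SIZE /
 * SECTOR_SIZE = 8 DMA sectors -- exactly one page, allocated GFP_ATOMIC |
 * GFP_DMA and tracked by a single free-sector bitmap entry.  The pool is
 * grown later by resize_dma_pool() once hosts and devices are registered.
 */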
3548 | ||||
3549 | void cleanup_module( void) | |||
3550 | { | |||
3551 | #if CONFIG_PROC_FS | |||
3552 | proc_scsi_unregister(0, PROC_SCSI_SCSI); | |||
3553 | ||||
3554 | /* No, we're not here anymore. Don't show the /proc/scsi files. */ | |||
3555 | dispatch_scsi_info_ptr = 0L; | |||
3556 | #endif | |||
3557 | ||||
3558 | /* | |||
3559 | * Free up the DMA pool. | |||
3560 | */ | |||
3561 | resize_dma_pool(); | |||
3562 | ||||
3563 | timer_table[SCSI_TIMER].fn = NULL; | |||
3564 | timer_table[SCSI_TIMER].expires = 0; | |||
3565 | } | |||
3566 | #endif /* MODULE */ | |||
3567 | ||||
3568 | /* | |||
3569 | * Overrides for Emacs so that we follow Linus's tabbing style. | |||
3570 | * Emacs will notice this stuff at the end of the file and automatically | |||
3571 | * adjust the settings for this buffer only. This must remain at the end | |||
3572 | * of the file. | |||
3573 | * --------------------------------------------------------------------------- | |||
3574 | * Local variables: | |||
3575 | * c-indent-level: 4 | |||
3576 | * c-brace-imaginary-offset: 0 | |||
3577 | * c-brace-offset: -4 | |||
3578 | * c-argdecl-indent: 4 | |||
3579 | * c-label-offset: -4 | |||
3580 | * c-continued-statement-offset: 4 | |||
3581 | * c-continued-brace-offset: 0 | |||
3582 | * indent-tabs-mode: nil | |||
3583 | * tab-width: 8 | |||
3584 | * End: | |||
3585 | */ |