| File: | obj-scan-build/../linux/src/drivers/scsi/sd.c |
| Location: | line 1551, column 9 |
| Description: | Value stored to 'devi' during its initialization is never read |
| 1 | /* |
| 2 | * sd.c Copyright (C) 1992 Drew Eckhardt |
| 3 | * Copyright (C) 1993, 1994, 1995 Eric Youngdale |
| 4 | * |
| 5 | * Linux scsi disk driver |
| 6 | * Initial versions: Drew Eckhardt |
| 7 | * Subsequent revisions: Eric Youngdale |
| 8 | * |
| 9 | * <drew@colorado.edu> |
| 10 | * |
| 11 | * Modified by Eric Youngdale ericy@cais.com to |
| 12 | * add scatter-gather, multiple outstanding request, and other |
| 13 | * enhancements. |
| 14 | * |
| 15 | * Modified by Eric Youngdale eric@aib.com to support loadable |
| 16 | * low-level scsi drivers. |
| 17 | */ |
| 18 | |
| 19 | #include <linux/module.h> |
| 20 | #ifdef MODULE |
| 21 | /* |
| 22 | * This is a variable in scsi.c that is set when we are processing something |
| 23 | * after boot time. By definition, this is true when we are a loadable module |
| 24 | * ourselves. |
| 25 | */ |
| 26 | #define MODULE_FLAGscsi_loadable_module_flag 1 |
| 27 | #else |
| 28 | #define MODULE_FLAGscsi_loadable_module_flag scsi_loadable_module_flag |
| 29 | #endif /* MODULE */ |
| 30 | |
| 31 | #include <linux/fs.h> |
| 32 | #include <linux/kernel.h> |
| 33 | #include <linux/sched.h> |
| 34 | #include <linux/mm.h> |
| 35 | #include <linux/string.h> |
| 36 | #include <linux/errno.h> |
| 37 | #include <linux/interrupt.h> |
| 38 | |
| 39 | #include <asm/system.h> |
| 40 | |
| 41 | #define MAJOR_NR8 SCSI_DISK_MAJOR8 |
| 42 | #include <linux/blk.h> |
| 43 | #include "scsi.h" |
| 44 | #include "hosts.h" |
| 45 | #include "sd.h" |
| 46 | #include <scsi/scsi_ioctl.h> |
| 47 | #include "constants.h" |
| 48 | |
| 49 | #include <linux/genhd.h> |
| 50 | |
| 51 | /* |
| 52 | * static const char RCSid[] = "$Header:"; |
| 53 | */ |
| 54 | |
| 55 | #define MAX_RETRIES5 5 |
| 56 | |
| 57 | /* |
| 58 | * Time out in seconds for disks and Magneto-opticals (which are slower). |
| 59 | */ |
| 60 | |
| 61 | #define SD_TIMEOUT(20 * 100) (20 * HZ100) |
| 62 | #define SD_MOD_TIMEOUT(25 * 100) (25 * HZ100) |
| 63 | |
| 64 | #define CLUSTERABLE_DEVICE(SC)(SC->host->use_clustering && SC->device-> type != 0x07) (SC->host->use_clustering && \ |
| 65 | SC->device->type != TYPE_MOD0x07) |
| 66 | |
| 67 | struct hd_struct * sd; |
| 68 | |
| 69 | Scsi_Disk * rscsi_disks = NULL((void *) 0); |
| 70 | static int * sd_sizes; |
| 71 | static int * sd_blocksizes; |
| 72 | static int * sd_hardsizes; /* Hardware sector size */ |
| 73 | |
| 74 | extern int sd_ioctl(struct inode *, struct file *, unsigned int, unsigned long); |
| 75 | |
| 76 | static int check_scsidisk_media_change(kdev_t); |
| 77 | static int fop_revalidate_scsidisk(kdev_t); |
| 78 | |
| 79 | static int sd_init_onedisk(int); |
| 80 | |
| 81 | static void requeue_sd_request (Scsi_Cmnd * SCpnt); |
| 82 | |
| 83 | static int sd_init(void); |
| 84 | static void sd_finish(void); |
| 85 | static int sd_attach(Scsi_Device *); |
| 86 | static int sd_detect(Scsi_Device *); |
| 87 | static void sd_detach(Scsi_Device *); |
| 88 | |
| 89 | struct Scsi_Device_Template sd_template = |
| 90 | { NULL((void *) 0), "disk", "sd", NULL((void *) 0), TYPE_DISK0x00, |
| 91 | SCSI_DISK_MAJOR8, 0, 0, 0, 1, |
| 92 | sd_detect, sd_init, |
| 93 | sd_finish, sd_attach, sd_detach |
| 94 | }; |
| 95 | |
| 96 | static int sd_open(struct inode * inode, struct file * filp) |
| 97 | { |
| 98 | int target; |
| 99 | target = DEVICE_NR(inode->i_rdev)(((inode->i_rdev) & ((1<<8) - 1)) >> 4); |
| 100 | |
| 101 | if(target >= sd_template.dev_max || !rscsi_disks[target].device) |
| 102 | return -ENXIO6; /* No such device */ |
| 103 | |
| 104 | /* |
| 105 | * Make sure that only one process can do a check_change_disk at one time. |
| 106 | * This is also used to lock out further access when the partition table |
| 107 | * is being re-read. |
| 108 | */ |
| 109 | |
| 110 | while (rscsi_disks[target].device->busy) |
| 111 | barrier()__asm__ __volatile__("": : :"memory"); |
| 112 | if(rscsi_disks[target].device->removable) { |
| 113 | check_disk_change(inode->i_rdev); |
| 114 | |
| 115 | /* |
| 116 | * If the drive is empty, just let the open fail. |
| 117 | */ |
| 118 | if ( !rscsi_disks[target].ready ) |
| 119 | return -ENXIO6; |
| 120 | |
| 121 | /* |
| 122 | * Similarly, if the device has the write protect tab set, |
| 123 | * have the open fail if the user expects to be able to write |
| 124 | * to the thing. |
| 125 | */ |
| 126 | if ( (rscsi_disks[target].write_prot) && (filp->f_mode & 2) ) |
| 127 | return -EROFS30; |
| 128 | } |
| 129 | |
| 130 | /* |
| 131 | * See if we are requesting a non-existent partition. Do this |
| 132 | * after checking for disk change. |
| 133 | */ |
| 134 | if(sd_sizes[MINOR(inode->i_rdev)((inode->i_rdev) & ((1<<8) - 1))] == 0) |
| 135 | return -ENXIO6; |
| 136 | |
| 137 | if(rscsi_disks[target].device->removable) |
| 138 | if(!rscsi_disks[target].device->access_count) |
| 139 | sd_ioctl(inode, NULL((void *) 0), SCSI_IOCTL_DOORLOCK0x5380, 0); |
| 140 | |
| 141 | rscsi_disks[target].device->access_count++; |
| 142 | if (rscsi_disks[target].device->host->hostt->usage_count) |
| 143 | (*rscsi_disks[target].device->host->hostt->usage_count)++; |
| 144 | if(sd_template.usage_count) (*sd_template.usage_count)++; |
| 145 | return 0; |
| 146 | } |
| 147 | |
| 148 | static void sd_release(struct inode * inode, struct file * file) |
| 149 | { |
| 150 | int target; |
| 151 | fsync_dev(inode->i_rdev); |
| 152 | |
| 153 | target = DEVICE_NR(inode->i_rdev)(((inode->i_rdev) & ((1<<8) - 1)) >> 4); |
| 154 | |
| 155 | rscsi_disks[target].device->access_count--; |
| 156 | if (rscsi_disks[target].device->host->hostt->usage_count) |
| 157 | (*rscsi_disks[target].device->host->hostt->usage_count)--; |
| 158 | if(sd_template.usage_count) (*sd_template.usage_count)--; |
| 159 | |
| 160 | if(rscsi_disks[target].device->removable) { |
| 161 | if(!rscsi_disks[target].device->access_count) |
| 162 | sd_ioctl(inode, NULL((void *) 0), SCSI_IOCTL_DOORUNLOCK0x5381, 0); |
| 163 | } |
| 164 | } |
| 165 | |
| 166 | static void sd_geninit(struct gendisk *); |
| 167 | |
| 168 | static struct file_operations sd_fops = { |
| 169 | NULL((void *) 0), /* lseek - default */ |
| 170 | block_read, /* read - general block-dev read */ |
| 171 | block_write, /* write - general block-dev write */ |
| 172 | NULL((void *) 0), /* readdir - bad */ |
| 173 | NULL((void *) 0), /* select */ |
| 174 | sd_ioctl, /* ioctl */ |
| 175 | NULL((void *) 0), /* mmap */ |
| 176 | sd_open, /* open code */ |
| 177 | sd_release, /* release */ |
| 178 | block_fsync, /* fsync */ |
| 179 | NULL((void *) 0), /* fasync */ |
| 180 | check_scsidisk_media_change, /* Disk change */ |
| 181 | fop_revalidate_scsidisk /* revalidate */ |
| 182 | }; |
| 183 | |
| 184 | static struct gendisk sd_gendisk = { |
| 185 | MAJOR_NR8, /* Major number */ |
| 186 | "sd", /* Major name */ |
| 187 | 4, /* Bits to shift to get real from partition */ |
| 188 | 1 << 4, /* Number of partitions per real */ |
| 189 | 0, /* maximum number of real */ |
| 190 | sd_geninit, /* init function */ |
| 191 | NULL((void *) 0), /* hd struct */ |
| 192 | NULL((void *) 0), /* block sizes */ |
| 193 | 0, /* number */ |
| 194 | NULL((void *) 0), /* internal */ |
| 195 | NULL((void *) 0) /* next */ |
| 196 | }; |
| 197 | |
| 198 | static void sd_geninit (struct gendisk *ignored) |
| 199 | { |
| 200 | int i; |
| 201 | |
| 202 | for (i = 0; i < sd_template.dev_max; ++i) |
| 203 | if(rscsi_disks[i].device) |
| 204 | sd[i << 4].nr_sects = rscsi_disks[i].capacity; |
| 205 | #if 0 |
| 206 | /* No longer needed - we keep track of this as we attach/detach */ |
| 207 | sd_gendisk.nr_real = sd_template.dev_max; |
| 208 | #endif |
| 209 | } |
| 210 | |
| 211 | /* |
| 212 | * rw_intr is the interrupt routine for the device driver. |
| 213 | * It will be notified on the end of a SCSI read / write, and |
| 214 | * will take one of several actions based on success or failure. |
| 215 | */ |
| 216 | |
| 217 | static void rw_intr (Scsi_Cmnd *SCpnt) |
| 218 | { |
| 219 | int result = SCpnt->result; |
| 220 | int this_count = SCpnt->bufflen >> 9; |
| 221 | int good_sectors = (result == 0 ? this_count : 0); |
| 222 | int block_sectors = 1; |
| 223 | |
| 224 | #ifdef DEBUG |
| 225 | printk("sd%c : rw_intr(%d, %d)\n", 'a' + MINOR(SCpnt->request.rq_dev)((SCpnt->request.rq_dev) & ((1<<8) - 1)), |
| 226 | SCpnt->host->host_no, result); |
| 227 | #endif |
| 228 | |
| 229 | /* |
| 230 | Handle MEDIUM ERRORs that indicate partial success. Since this is a |
| 231 | relatively rare error condition, no care is taken to avoid unnecessary |
| 232 | additional work such as memcpy's that could be avoided. |
| 233 | */ |
| 234 | |
| 235 | if (driver_byte(result)(((result) >> 24) & 0xff) != 0 && /* An error occurred */ |
| 236 | SCpnt->sense_buffer[0] == 0xF0 && /* Sense data is valid */ |
| 237 | SCpnt->sense_buffer[2] == MEDIUM_ERROR0x03) |
| 238 | { |
| 239 | long error_sector = (SCpnt->sense_buffer[3] << 24) | |
| 240 | (SCpnt->sense_buffer[4] << 16) | |
| 241 | (SCpnt->sense_buffer[5] << 8) | |
| 242 | SCpnt->sense_buffer[6]; |
| 243 | int sector_size = |
| 244 | rscsi_disks[DEVICE_NR(SCpnt->request.rq_dev)(((SCpnt->request.rq_dev) & ((1<<8) - 1)) >> 4)].sector_size; |
| 245 | if (SCpnt->request.bh != NULL((void *) 0)) |
| 246 | block_sectors = SCpnt->request.bh->b_size >> 9; |
| 247 | if (sector_size == 1024) |
| 248 | { |
| 249 | error_sector <<= 1; |
| 250 | if (block_sectors < 2) block_sectors = 2; |
| 251 | } |
| 252 | else if (sector_size == 256) |
| 253 | error_sector >>= 1; |
| 254 | error_sector -= sd[MINOR(SCpnt->request.rq_dev)((SCpnt->request.rq_dev) & ((1<<8) - 1))].start_sect; |
| 255 | error_sector &= ~ (block_sectors - 1); |
| 256 | good_sectors = error_sector - SCpnt->request.sector; |
| 257 | if (good_sectors < 0 || good_sectors >= this_count) |
| 258 | good_sectors = 0; |
| 259 | } |
| 260 | |
| 261 | /* |
| 262 | * Handle RECOVERED ERRORs that indicate success after recovery action |
| 263 | * by the target device. |
| 264 | */ |
| 265 | |
| 266 | if (SCpnt->sense_buffer[0] == 0xF0 && /* Sense data is valid */ |
| 267 | SCpnt->sense_buffer[2] == RECOVERED_ERROR0x01) |
| 268 | { |
| 269 | printk("scsidisk recovered I/O error: dev %s, sector %lu, absolute sector %lu\n", |
| 270 | kdevname(SCpnt->request.rq_dev), SCpnt->request.sector, |
| 271 | SCpnt->request.sector + sd[MINOR(SCpnt->request.rq_dev)((SCpnt->request.rq_dev) & ((1<<8) - 1))].start_sect); |
| 272 | good_sectors = this_count; |
| 273 | result = 0; |
| 274 | } |
| 275 | |
| 276 | /* |
| 277 | * First case : we assume that the command succeeded. One of two things |
| 278 | * will happen here. Either we will be finished, or there will be more |
| 279 | * sectors that we were unable to read last time. |
| 280 | */ |
| 281 | |
| 282 | if (good_sectors > 0) { |
| 283 | |
| 284 | #ifdef DEBUG |
| 285 | printk("sd%c : %d sectors remain.\n", 'a' + MINOR(SCpnt->request.rq_dev)((SCpnt->request.rq_dev) & ((1<<8) - 1)), |
| 286 | SCpnt->request.nr_sectors); |
| 287 | printk("use_sg is %d\n ",SCpnt->use_sg); |
| 288 | #endif |
| 289 | if (SCpnt->use_sg) { |
| 290 | struct scatterlist * sgpnt; |
| 291 | int i; |
| 292 | sgpnt = (struct scatterlist *) SCpnt->buffer; |
| 293 | for(i=0; i<SCpnt->use_sg; i++) { |
| 294 | #ifdef DEBUG |
| 295 | printk(":%x %x %d\n",sgpnt[i].alt_address, sgpnt[i].address, |
| 296 | sgpnt[i].length); |
| 297 | #endif |
| 298 | if (sgpnt[i].alt_address) { |
| 299 | if (SCpnt->request.cmd == READ0) |
| 300 | memcpy(sgpnt[i].alt_address, sgpnt[i].address,(__builtin_constant_p(sgpnt[i].length) ? __constant_memcpy((sgpnt [i].alt_address),(sgpnt[i].address),(sgpnt[i].length)) : __memcpy ((sgpnt[i].alt_address),(sgpnt[i].address),(sgpnt[i].length)) ) |
| 301 | sgpnt[i].length)(__builtin_constant_p(sgpnt[i].length) ? __constant_memcpy((sgpnt [i].alt_address),(sgpnt[i].address),(sgpnt[i].length)) : __memcpy ((sgpnt[i].alt_address),(sgpnt[i].address),(sgpnt[i].length)) ); |
| 302 | scsi_free(sgpnt[i].address, sgpnt[i].length); |
| 303 | } |
| 304 | } |
| 305 | |
| 306 | /* Free list of scatter-gather pointers */ |
| 307 | scsi_free(SCpnt->buffer, SCpnt->sglist_len); |
| 308 | } else { |
| 309 | if (SCpnt->buffer != SCpnt->request.buffer) { |
| 310 | #ifdef DEBUG |
| 311 | printk("nosg: %x %x %d\n",SCpnt->request.buffer, SCpnt->buffer, |
| 312 | SCpnt->bufflen); |
| 313 | #endif |
| 314 | if (SCpnt->request.cmd == READ0) |
| 315 | memcpy(SCpnt->request.buffer, SCpnt->buffer,(__builtin_constant_p(SCpnt->bufflen) ? __constant_memcpy( (SCpnt->request.buffer),(SCpnt->buffer),(SCpnt->bufflen )) : __memcpy((SCpnt->request.buffer),(SCpnt->buffer),( SCpnt->bufflen))) |
| 316 | SCpnt->bufflen)(__builtin_constant_p(SCpnt->bufflen) ? __constant_memcpy( (SCpnt->request.buffer),(SCpnt->buffer),(SCpnt->bufflen )) : __memcpy((SCpnt->request.buffer),(SCpnt->buffer),( SCpnt->bufflen))); |
| 317 | scsi_free(SCpnt->buffer, SCpnt->bufflen); |
| 318 | } |
| 319 | } |
| 320 | /* |
| 321 | * If multiple sectors are requested in one buffer, then |
| 322 | * they will have been finished off by the first command. |
| 323 | * If not, then we have a multi-buffer command. |
| 324 | */ |
| 325 | if (SCpnt->request.nr_sectors > this_count) |
| 326 | { |
| 327 | SCpnt->request.errors = 0; |
| 328 | |
| 329 | if (!SCpnt->request.bh) |
| 330 | { |
| 331 | #ifdef DEBUG |
| 332 | printk("sd%c : handling page request, no buffer\n", |
| 333 | 'a' + MINOR(SCpnt->request.rq_dev)((SCpnt->request.rq_dev) & ((1<<8) - 1))); |
| 334 | #endif |
| 335 | /* |
| 336 | * The SCpnt->request.nr_sectors field is always done in |
| 337 | * 512 byte sectors, even if this really isn't the case. |
| 338 | */ |
| 339 | panic("sd.c: linked page request (%lx %x)", |
| 340 | SCpnt->request.sector, this_count); |
| 341 | } |
| 342 | } |
| 343 | SCpnt = end_scsi_request(SCpnt, 1, good_sectors); |
| 344 | if (result == 0) |
| 345 | { |
| 346 | requeue_sd_request(SCpnt); |
| 347 | return; |
| 348 | } |
| 349 | } |
| 350 | |
| 351 | if (good_sectors == 0) { |
| 352 | |
| 353 | /* Free up any indirection buffers we allocated for DMA purposes. */ |
| 354 | if (SCpnt->use_sg) { |
| 355 | struct scatterlist * sgpnt; |
| 356 | int i; |
| 357 | sgpnt = (struct scatterlist *) SCpnt->buffer; |
| 358 | for(i=0; i<SCpnt->use_sg; i++) { |
| 359 | #ifdef DEBUG |
| 360 | printk("err: %x %x %d\n",SCpnt->request.buffer, SCpnt->buffer, |
| 361 | SCpnt->bufflen); |
| 362 | #endif |
| 363 | if (sgpnt[i].alt_address) { |
| 364 | scsi_free(sgpnt[i].address, sgpnt[i].length); |
| 365 | } |
| 366 | } |
| 367 | scsi_free(SCpnt->buffer, SCpnt->sglist_len); /* Free list of scatter-gather pointers */ |
| 368 | } else { |
| 369 | #ifdef DEBUG |
| 370 | printk("nosgerr: %x %x %d\n",SCpnt->request.buffer, SCpnt->buffer, |
| 371 | SCpnt->bufflen); |
| 372 | #endif |
| 373 | if (SCpnt->buffer != SCpnt->request.buffer) |
| 374 | scsi_free(SCpnt->buffer, SCpnt->bufflen); |
| 375 | } |
| 376 | } |
| 377 | |
| 378 | /* |
| 379 | * Now, if we were good little boys and girls, Santa left us a request |
| 380 | * sense buffer. We can extract information from this, so we |
| 381 | * can choose a block to remap, etc. |
| 382 | */ |
| 383 | |
| 384 | if (driver_byte(result)(((result) >> 24) & 0xff) != 0) { |
| 385 | if (suggestion(result)((((result) >> 24) & 0xff) & 0xf0) == SUGGEST_REMAP0x30) { |
| 386 | #ifdef REMAP |
| 387 | /* |
| 388 | * Not yet implemented. A read will fail after being remapped, |
| 389 | * a write will call the strategy routine again. |
| 390 | */ |
| 391 | if rscsi_disks[DEVICE_NR(SCpnt->request.rq_dev)(((SCpnt->request.rq_dev) & ((1<<8) - 1)) >> 4)].remap |
| 392 | { |
| 393 | result = 0; |
| 394 | } |
| 395 | else |
| 396 | #endif |
| 397 | } |
| 398 | |
| 399 | if ((SCpnt->sense_buffer[0] & 0x7f) == 0x70) { |
| 400 | if ((SCpnt->sense_buffer[2] & 0xf) == UNIT_ATTENTION0x06) { |
| 401 | if(rscsi_disks[DEVICE_NR(SCpnt->request.rq_dev)(((SCpnt->request.rq_dev) & ((1<<8) - 1)) >> 4)].device->removable) { |
| 402 | /* detected disc change. set a bit and quietly refuse |
| 403 | * further access. |
| 404 | */ |
| 405 | rscsi_disks[DEVICE_NR(SCpnt->request.rq_dev)(((SCpnt->request.rq_dev) & ((1<<8) - 1)) >> 4)].device->changed = 1; |
| 406 | SCpnt = end_scsi_request(SCpnt, 0, this_count); |
| 407 | requeue_sd_request(SCpnt); |
| 408 | return; |
| 409 | } |
| 410 | else |
| 411 | { |
| 412 | /* |
| 413 | * Must have been a power glitch, or a bus reset. |
| 414 | * Could not have been a media change, so we just retry |
| 415 | * the request and see what happens. |
| 416 | */ |
| 417 | requeue_sd_request(SCpnt); |
| 418 | return; |
| 419 | } |
| 420 | } |
| 421 | } |
| 422 | |
| 423 | |
| 424 | /* If we had an ILLEGAL REQUEST returned, then we may have |
| 425 | * performed an unsupported command. The only thing this should be |
| 426 | * would be a ten byte read where only a six byte read was supported. |
| 427 | * Also, on a system where READ CAPACITY failed, we have read past |
| 428 | * the end of the disk. |
| 429 | */ |
| 430 | |
| 431 | if (SCpnt->sense_buffer[2] == ILLEGAL_REQUEST0x05) { |
| 432 | if (rscsi_disks[DEVICE_NR(SCpnt->request.rq_dev)(((SCpnt->request.rq_dev) & ((1<<8) - 1)) >> 4)].ten) { |
| 433 | rscsi_disks[DEVICE_NR(SCpnt->request.rq_dev)(((SCpnt->request.rq_dev) & ((1<<8) - 1)) >> 4)].ten = 0; |
| 434 | requeue_sd_request(SCpnt); |
| 435 | result = 0; |
| 436 | } else { |
| 437 | /* ???? */ |
| 438 | } |
| 439 | } |
| 440 | |
| 441 | if (SCpnt->sense_buffer[2] == MEDIUM_ERROR0x03) { |
| 442 | printk("scsi%d: MEDIUM ERROR on channel %d, id %d, lun %d, CDB: ", |
| 443 | SCpnt->host->host_no, (int) SCpnt->channel, |
| 444 | (int) SCpnt->target, (int) SCpnt->lun); |
| 445 | print_command(SCpnt->cmnd); |
| 446 | print_sense("sd", SCpnt); |
| 447 | SCpnt = end_scsi_request(SCpnt, 0, block_sectors); |
| 448 | requeue_sd_request(SCpnt); |
| 449 | return; |
| 450 | } |
| 451 | } /* driver byte != 0 */ |
| 452 | if (result) { |
| 453 | printk("SCSI disk error : host %d channel %d id %d lun %d return code = %x\n", |
| 454 | rscsi_disks[DEVICE_NR(SCpnt->request.rq_dev)(((SCpnt->request.rq_dev) & ((1<<8) - 1)) >> 4)].device->host->host_no, |
| 455 | rscsi_disks[DEVICE_NR(SCpnt->request.rq_dev)(((SCpnt->request.rq_dev) & ((1<<8) - 1)) >> 4)].device->channel, |
| 456 | rscsi_disks[DEVICE_NR(SCpnt->request.rq_dev)(((SCpnt->request.rq_dev) & ((1<<8) - 1)) >> 4)].device->id, |
| 457 | rscsi_disks[DEVICE_NR(SCpnt->request.rq_dev)(((SCpnt->request.rq_dev) & ((1<<8) - 1)) >> 4)].device->lun, result); |
| 458 | |
| 459 | if (driver_byte(result)(((result) >> 24) & 0xff) & DRIVER_SENSE0x08) |
| 460 | print_sense("sd", SCpnt); |
| 461 | SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.current_nr_sectors); |
| 462 | requeue_sd_request(SCpnt); |
| 463 | return; |
| 464 | } |
| 465 | } |
| 466 | |
| 467 | /* |
| 468 | * requeue_sd_request() is the request handler function for the sd driver. |
| 469 | * Its function in life is to take block device requests, and translate |
| 470 | * them to SCSI commands. |
| 471 | */ |
| 472 | |
| 473 | static void do_sd_request (void) |
| 474 | { |
| 475 | Scsi_Cmnd * SCpnt = NULL((void *) 0); |
| 476 | Scsi_Device * SDev; |
| 477 | struct request * req = NULL((void *) 0); |
| 478 | unsigned long flags; |
| 479 | int flag = 0; |
| 480 | |
| 481 | save_flags(flags)__asm__ __volatile__("pushf ; pop %0" : "=r" (flags): :"memory" ); |
| 482 | while (1==1){ |
| 483 | cli()__asm__ __volatile__ ("cli": : :"memory"); |
| 484 | if (CURRENT(blk_dev[8].current_request) != NULL((void *) 0) && CURRENT(blk_dev[8].current_request)->rq_status == RQ_INACTIVE(-1)) { |
| 485 | restore_flags(flags)__asm__ __volatile__("push %0 ; popf": :"g" (flags):"memory"); |
| 486 | return; |
| 487 | } |
| 488 | |
| 489 | INIT_SCSI_REQUESTif (!(blk_dev[8].current_request)) { (do_sd = (((void *) 0))) ; __asm__ __volatile__("push %0 ; popf": :"g" (flags):"memory" ); return; } if ((((blk_dev[8].current_request)->rq_dev) >> 8) != 8) panic("scsidisk" ": request list destroyed"); if (( blk_dev[8].current_request)->bh) { if (!buffer_locked((blk_dev [8].current_request)->bh)) panic("scsidisk" ": block not locked" ); }; |
| 490 | SDev = rscsi_disks[DEVICE_NR(CURRENT->rq_dev)((((blk_dev[8].current_request)->rq_dev) & ((1<< 8) - 1)) >> 4)].device; |
| 491 | |
| 492 | /* |
| 493 | * I am not sure where the best place to do this is. We need |
| 494 | * to hook in a place where we are likely to come if in user |
| 495 | * space. |
| 496 | */ |
| 497 | if( SDev->was_reset ) |
| 498 | { |
| 499 | /* |
| 500 | * We need to relock the door, but we might |
| 501 | * be in an interrupt handler. Only do this |
| 502 | * from user space, since we do not want to |
| 503 | * sleep from an interrupt. |
| 504 | */ |
| 505 | if( SDev->removable && !intr_count ) |
| 506 | { |
| 507 | scsi_ioctl(SDev, SCSI_IOCTL_DOORLOCK0x5380, 0); |
| 508 | /* scsi_ioctl may allow CURRENT to change, so start over. */ |
| 509 | SDev->was_reset = 0; |
| 510 | continue; |
| 511 | } |
| 512 | SDev->was_reset = 0; |
| 513 | } |
| 514 | |
| 515 | /* We have to be careful here. allocate_device will get a free pointer, |
| 516 | * but there is no guarantee that it is queueable. In normal usage, |
| 517 | * we want to call this, because other types of devices may have the |
| 518 | * host all tied up, and we want to make sure that we have at least |
| 519 | * one request pending for this type of device. We can also come |
| 520 | * through here while servicing an interrupt, because of the need to |
| 521 | * start another command. If we call allocate_device more than once, |
| 522 | * then the system can wedge if the command is not queueable. The |
| 523 | * request_queueable function is safe because it checks to make sure |
| 524 | * that the host is able to take another command before it returns |
| 525 | * a pointer. |
| 526 | */ |
| 527 | |
| 528 | if (flag++ == 0) |
| 529 | SCpnt = allocate_device(&CURRENT(blk_dev[8].current_request), |
| 530 | rscsi_disks[DEVICE_NR(CURRENT->rq_dev)((((blk_dev[8].current_request)->rq_dev) & ((1<< 8) - 1)) >> 4)].device, 0); |
| 531 | else SCpnt = NULL((void *) 0); |
| 532 | |
| 533 | /* |
| 534 | * The following restore_flags leads to latency problems. FIXME. |
| 535 | * Using a "sti()" gets rid of the latency problems but causes |
| 536 | * race conditions and crashes. |
| 537 | */ |
| 538 | restore_flags(flags)__asm__ __volatile__("push %0 ; popf": :"g" (flags):"memory"); |
| 539 | |
| 540 | /* This is a performance enhancement. We dig down into the request |
| 541 | * list and try to find a queueable request (i.e. device not busy, |
| 542 | * and host able to accept another command. If we find one, then we |
| 543 | * queue it. This can make a big difference on systems with more than |
| 544 | * one disk drive. We want to have the interrupts off when monkeying |
| 545 | * with the request list, because otherwise the kernel might try to |
| 546 | * slip in a request in between somewhere. |
| 547 | */ |
| 548 | |
| 549 | if (!SCpnt && sd_template.nr_dev > 1){ |
| 550 | struct request *req1; |
| 551 | req1 = NULL((void *) 0); |
| 552 | cli()__asm__ __volatile__ ("cli": : :"memory"); |
| 553 | req = CURRENT(blk_dev[8].current_request); |
| 554 | while(req){ |
| 555 | SCpnt = request_queueable(req, |
| 556 | rscsi_disks[DEVICE_NR(req->rq_dev)(((req->rq_dev) & ((1<<8) - 1)) >> 4)].device); |
| 557 | if(SCpnt) break; |
| 558 | req1 = req; |
| 559 | req = req->next; |
| 560 | } |
| 561 | if (SCpnt && req->rq_status == RQ_INACTIVE(-1)) { |
| 562 | if (req == CURRENT(blk_dev[8].current_request)) |
| 563 | CURRENT(blk_dev[8].current_request) = CURRENT(blk_dev[8].current_request)->next; |
| 564 | else |
| 565 | req1->next = req->next; |
| 566 | } |
| 567 | restore_flags(flags)__asm__ __volatile__("push %0 ; popf": :"g" (flags):"memory"); |
| 568 | } |
| 569 | |
| 570 | if (!SCpnt) return; /* Could not find anything to do */ |
| 571 | |
| 572 | /* Queue command */ |
| 573 | requeue_sd_request(SCpnt); |
| 574 | } /* While */ |
| 575 | } |
| 576 | |
| 577 | static void requeue_sd_request (Scsi_Cmnd * SCpnt) |
| 578 | { |
| 579 | int dev, devm, block, this_count; |
| 580 | unsigned char cmd[10]; |
| 581 | int bounce_size, contiguous; |
| 582 | int max_sg; |
| 583 | struct buffer_head * bh, *bhp; |
| 584 | char * buff, *bounce_buffer; |
| 585 | |
| 586 | repeat: |
| 587 | |
| 588 | if(!SCpnt || SCpnt->request.rq_status == RQ_INACTIVE(-1)) { |
| 589 | do_sd_request(); |
| 590 | return; |
| 591 | } |
| 592 | |
| 593 | devm = MINOR(SCpnt->request.rq_dev)((SCpnt->request.rq_dev) & ((1<<8) - 1)); |
| 594 | dev = DEVICE_NR(SCpnt->request.rq_dev)(((SCpnt->request.rq_dev) & ((1<<8) - 1)) >> 4); |
| 595 | |
| 596 | block = SCpnt->request.sector; |
| 597 | this_count = 0; |
| 598 | |
| 599 | #ifdef DEBUG |
| 600 | printk("Doing sd request, dev = %d, block = %d\n", devm, block); |
| 601 | #endif |
| 602 | |
| 603 | if (devm >= (sd_template.dev_max << 4) || |
| 604 | !rscsi_disks[dev].device || |
| 605 | block + SCpnt->request.nr_sectors > sd[devm].nr_sects) |
| 606 | { |
| 607 | SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.nr_sectors); |
| 608 | goto repeat; |
| 609 | } |
| 610 | |
| 611 | block += sd[devm].start_sect; |
| 612 | |
| 613 | if (rscsi_disks[dev].device->changed) |
| 614 | { |
| 615 | /* |
| 616 | * quietly refuse to do anything to a changed disc until the changed |
| 617 | * bit has been reset |
| 618 | */ |
| 619 | /* printk("SCSI disk has been changed. Prohibiting further I/O.\n"); */ |
| 620 | SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.nr_sectors); |
| 621 | goto repeat; |
| 622 | } |
| 623 | |
| 624 | #ifdef DEBUG |
| 625 | printk("sd%c : real dev = /dev/sd%c, block = %d\n", |
| 626 | 'a' + devm, dev, block); |
| 627 | #endif |
| 628 | |
| 629 | /* |
| 630 | * If we have a 1K hardware sectorsize, prevent access to single |
| 631 | * 512 byte sectors. In theory we could handle this - in fact |
| 632 | * the scsi cdrom driver must be able to handle this because |
| 633 | * we typically use 1K blocksizes, and cdroms typically have |
| 634 | * 2K hardware sectorsizes. Of course, things are simpler |
| 635 | * with the cdrom, since it is read-only. For performance |
| 636 | * reasons, the filesystems should be able to handle this |
| 637 | * and not force the scsi disk driver to use bounce buffers |
| 638 | * for this. |
| 639 | */ |
| 640 | if (rscsi_disks[dev].sector_size == 1024) |
| 641 | if((block & 1) || (SCpnt->request.nr_sectors & 1)) { |
| 642 | printk("sd.c:Bad block number requested"); |
| 643 | SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.nr_sectors); |
| 644 | goto repeat; |
| 645 | } |
| 646 | |
| 647 | switch (SCpnt->request.cmd) |
| 648 | { |
| 649 | case WRITE1 : |
| 650 | if (!rscsi_disks[dev].device->writeable) |
| 651 | { |
| 652 | SCpnt = end_scsi_request(SCpnt, 0, SCpnt->request.nr_sectors); |
| 653 | goto repeat; |
| 654 | } |
| 655 | cmd[0] = WRITE_60x0a; |
| 656 | break; |
| 657 | case READ0 : |
| 658 | cmd[0] = READ_60x08; |
| 659 | break; |
| 660 | default : |
| 661 | panic ("Unknown sd command %d\n", SCpnt->request.cmd); |
| 662 | } |
| 663 | |
| 664 | SCpnt->this_count = 0; |
| 665 | |
| 666 | /* If the host adapter can deal with very large scatter-gather |
| 667 | * requests, it is a waste of time to cluster |
| 668 | */ |
| 669 | contiguous = (!CLUSTERABLE_DEVICE(SCpnt)(SCpnt->host->use_clustering && SCpnt->device ->type != 0x07) ? 0 :1); |
| 670 | bounce_buffer = NULL((void *) 0); |
| 671 | bounce_size = (SCpnt->request.nr_sectors << 9); |
| 672 | |
| 673 | /* First see if we need a bounce buffer for this request. If we do, make |
| 674 | * sure that we can allocate a buffer. Do not waste space by allocating |
| 675 | * a bounce buffer if we are straddling the 16Mb line |
| 676 | */ |
| 677 | if (contiguous && SCpnt->request.bh && |
| 678 | ((long) SCpnt->request.bh->b_data) |
| 679 | + (SCpnt->request.nr_sectors << 9) - 1 > ISA_DMA_THRESHOLD(0x00ffffff) |
| 680 | && SCpnt->host->unchecked_isa_dma) { |
| 681 | if(((long) SCpnt->request.bh->b_data) > ISA_DMA_THRESHOLD(0x00ffffff)) |
| 682 | bounce_buffer = (char *) scsi_malloc(bounce_size); |
| 683 | if(!bounce_buffer) contiguous = 0; |
| 684 | } |
| 685 | |
| 686 | if(contiguous && SCpnt->request.bh && SCpnt->request.bh->b_reqnext) |
| 687 | for(bh = SCpnt->request.bh, bhp = bh->b_reqnext; bhp; bh = bhp, |
| 688 | bhp = bhp->b_reqnext) { |
| 689 | if(!CONTIGUOUS_BUFFERS(bh,bhp)((bh->b_data+bh->b_size) == bhp->b_data)) { |
| 690 | if(bounce_buffer) scsi_free(bounce_buffer, bounce_size); |
| 691 | contiguous = 0; |
| 692 | break; |
| 693 | } |
| 694 | } |
| 695 | if (!SCpnt->request.bh || contiguous) { |
| 696 | |
| 697 | /* case of page request (i.e. raw device), or unlinked buffer */ |
| 698 | this_count = SCpnt->request.nr_sectors; |
| 699 | buff = SCpnt->request.buffer; |
| 700 | SCpnt->use_sg = 0; |
| 701 | |
| 702 | } else if (SCpnt->host->sg_tablesize == 0 || |
| 703 | (need_isa_buffer && dma_free_sectors <= 10)) { |
| 704 | |
| 705 | /* Case of host adapter that cannot scatter-gather. We also |
| 706 | * come here if we are running low on DMA buffer memory. We set |
| 707 | * a threshold higher than that we would need for this request so |
| 708 | * we leave room for other requests. Even though we would not need |
| 709 | * it all, we need to be conservative, because if we run low enough |
| 710 | * we have no choice but to panic. |
| 711 | */ |
| 712 | if (SCpnt->host->sg_tablesize != 0 && |
| 713 | need_isa_buffer && |
| 714 | dma_free_sectors <= 10) |
| 715 | printk("Warning: SCSI DMA buffer space running low. Using non scatter-gather I/O.\n"); |
| 716 | |
| 717 | this_count = SCpnt->request.current_nr_sectors; |
| 718 | buff = SCpnt->request.buffer; |
| 719 | SCpnt->use_sg = 0; |
| 720 | |
| 721 | } else { |
| 722 | |
| 723 | /* Scatter-gather capable host adapter */ |
| 724 | struct scatterlist * sgpnt; |
| 725 | int count, this_count_max; |
| 726 | int counted; |
| 727 | |
| 728 | bh = SCpnt->request.bh; |
| 729 | this_count = 0; |
| 730 | this_count_max = (rscsi_disks[dev].ten ? 0xffff : 0xff); |
| 731 | count = 0; |
| 732 | bhp = NULL((void *) 0); |
| 733 | while(bh) { |
| 734 | if ((this_count + (bh->b_size >> 9)) > this_count_max) break; |
| 735 | if(!bhp || !CONTIGUOUS_BUFFERS(bhp,bh)((bhp->b_data+bhp->b_size) == bh->b_data) || |
| 736 | !CLUSTERABLE_DEVICE(SCpnt)(SCpnt->host->use_clustering && SCpnt->device ->type != 0x07) || |
| 737 | (SCpnt->host->unchecked_isa_dma && |
| 738 | ((unsigned long) bh->b_data-1) == ISA_DMA_THRESHOLD(0x00ffffff))) { |
| 739 | if (count < SCpnt->host->sg_tablesize) count++; |
| 740 | else break; |
| 741 | } |
| 742 | this_count += (bh->b_size >> 9); |
| 743 | bhp = bh; |
| 744 | bh = bh->b_reqnext; |
| 745 | } |
| 746 | #if 0 |
| 747 | if(SCpnt->host->unchecked_isa_dma && |
| 748 | ((unsigned int) SCpnt->request.bh->b_data-1) == ISA_DMA_THRESHOLD(0x00ffffff)) count--; |
| 749 | #endif |
| 750 | SCpnt->use_sg = count; /* Number of chains */ |
| 751 | /* scsi_malloc can only allocate in chunks of 512 bytes */ |
| 752 | count = (SCpnt->use_sg * sizeof(struct scatterlist) + 511) & ~511; |
| 753 | |
| 754 | SCpnt->sglist_len = count; |
| 755 | max_sg = count / sizeof(struct scatterlist); |
| 756 | if(SCpnt->host->sg_tablesize < max_sg) |
| 757 | max_sg = SCpnt->host->sg_tablesize; |
| 758 | sgpnt = (struct scatterlist * ) scsi_malloc(count); |
| 759 | if (!sgpnt) { |
| 760 | printk("Warning - running *really* short on DMA buffers\n"); |
| 761 | SCpnt->use_sg = 0; /* No memory left - bail out */ |
| 762 | this_count = SCpnt->request.current_nr_sectors; |
| 763 | buff = SCpnt->request.buffer; |
| 764 | } else { |
| 765 | memset(sgpnt, 0, count)(__builtin_constant_p(0) ? (__builtin_constant_p((count)) ? __constant_c_and_count_memset (((sgpnt)),((0x01010101UL*(unsigned char)(0))),((count))) : __constant_c_memset (((sgpnt)),((0x01010101UL*(unsigned char)(0))),((count)))) : ( __builtin_constant_p((count)) ? __memset_generic((((sgpnt))), (((0))),(((count)))) : __memset_generic(((sgpnt)),((0)),((count ))))); /* Zero so it is easy to fill, but only |
| 766 | * if memory is available |
| 767 | */ |
| 768 | buff = (char *) sgpnt; |
| 769 | counted = 0; |
| 770 | for(count = 0, bh = SCpnt->request.bh, bhp = bh->b_reqnext; |
| 771 | count < SCpnt->use_sg && bh; |
| 772 | count++, bh = bhp) { |
| 773 | |
| 774 | bhp = bh->b_reqnext; |
| 775 | |
| 776 | if(!sgpnt[count].address) sgpnt[count].address = bh->b_data; |
| 777 | sgpnt[count].length += bh->b_size; |
| 778 | counted += bh->b_size >> 9; |
| 779 | |
| 780 | if (((long) sgpnt[count].address) + sgpnt[count].length - 1 > |
| 781 | ISA_DMA_THRESHOLD(0x00ffffff) && (SCpnt->host->unchecked_isa_dma) && |
| 782 | !sgpnt[count].alt_address) { |
| 783 | sgpnt[count].alt_address = sgpnt[count].address; |
| 784 | /* We try to avoid exhausting the DMA pool, since it is |
| 785 | * easier to control usage here. In other places we might |
| 786 | * have a more pressing need, and we would be screwed if |
| 787 | * we ran out */ |
| 788 | if(dma_free_sectors < (sgpnt[count].length >> 9) + 10) { |
| 789 | sgpnt[count].address = NULL((void *) 0); |
| 790 | } else { |
| 791 | sgpnt[count].address = |
| 792 | (char *) scsi_malloc(sgpnt[count].length); |
| 793 | } |
| 794 | /* If we start running low on DMA buffers, we abort the |
| 795 | * scatter-gather operation, and free all of the memory |
| 796 | * we have allocated. We want to ensure that all scsi |
| 797 | * operations are able to do at least a non-scatter/gather |
| 798 | * operation */ |
| 799 | if(sgpnt[count].address == NULL((void *) 0)){ /* Out of dma memory */ |
| 800 | #if 0 |
| 801 | printk("Warning: Running low on SCSI DMA buffers"); |
| 802 | /* Try switching back to a non s-g operation. */ |
| 803 | while(--count >= 0){ |
| 804 | if(sgpnt[count].alt_address) |
| 805 | scsi_free(sgpnt[count].address, |
| 806 | sgpnt[count].length); |
| 807 | } |
| 808 | this_count = SCpnt->request.current_nr_sectors; |
| 809 | buff = SCpnt->request.buffer; |
| 810 | SCpnt->use_sg = 0; |
| 811 | scsi_free(sgpnt, SCpnt->sglist_len); |
| 812 | #endif |
| 813 | SCpnt->use_sg = count; |
| 814 | this_count = counted -= bh->b_size >> 9; |
| 815 | break; |
| 816 | } |
| 817 | } |
| 818 | |
| 819 | /* Only cluster buffers if we know that we can supply DMA |
| 820 | * buffers large enough to satisfy the request. Do not cluster |
| 821 | * a new request if this would mean that we suddenly need to |
| 822 | * start using DMA bounce buffers */ |
| 823 | if(bhp && CONTIGUOUS_BUFFERS(bh,bhp)((bh->b_data+bh->b_size) == bhp->b_data) |
| 824 | && CLUSTERABLE_DEVICE(SCpnt)(SCpnt->host->use_clustering && SCpnt->device ->type != 0x07)) { |
| 825 | char * tmp; |
| 826 | |
| 827 | if (((long) sgpnt[count].address) + sgpnt[count].length + |
| 828 | bhp->b_size - 1 > ISA_DMA_THRESHOLD(0x00ffffff) && |
| 829 | (SCpnt->host->unchecked_isa_dma) && |
| 830 | !sgpnt[count].alt_address) continue; |
| 831 | |
| 832 | if(!sgpnt[count].alt_address) {count--; continue; } |
| 833 | if(dma_free_sectors > 10) |
| 834 | tmp = (char *) scsi_malloc(sgpnt[count].length |
| 835 | + bhp->b_size); |
| 836 | else { |
| 837 | tmp = NULL((void *) 0); |
| 838 | max_sg = SCpnt->use_sg; |
| 839 | } |
| 840 | if(tmp){ |
| 841 | scsi_free(sgpnt[count].address, sgpnt[count].length); |
| 842 | sgpnt[count].address = tmp; |
| 843 | count--; |
| 844 | continue; |
| 845 | } |
| 846 | |
| 847 | /* If we are allowed another sg chain, then increment |
| 848 | * counter so we can insert it. Otherwise we will end |
| 849 | up truncating */ |
| 850 | |
| 851 | if (SCpnt->use_sg < max_sg) SCpnt->use_sg++; |
| 852 | } /* contiguous buffers */ |
| 853 | } /* for loop */ |
| 854 | |
| 855 | /* This is actually how many we are going to transfer */ |
| 856 | this_count = counted; |
| 857 | |
| 858 | if(count < SCpnt->use_sg || SCpnt->use_sg |
| 859 | > SCpnt->host->sg_tablesize){ |
| 860 | bh = SCpnt->request.bh; |
| 861 | printk("Use sg, count %d %x %d\n", |
| 862 | SCpnt->use_sg, count, dma_free_sectors); |
| 863 | printk("maxsg = %x, counted = %d this_count = %d\n", |
| 864 | max_sg, counted, this_count); |
| 865 | while(bh){ |
| 866 | printk("[%p %lx] ", bh->b_data, bh->b_size); |
| 867 | bh = bh->b_reqnext; |
| 868 | } |
| 869 | if(SCpnt->use_sg < 16) |
| 870 | for(count=0; count<SCpnt->use_sg; count++) |
| 871 | printk("{%d:%p %p %d} ", count, |
| 872 | sgpnt[count].address, |
| 873 | sgpnt[count].alt_address, |
| 874 | sgpnt[count].length); |
| 875 | panic("Ooops"); |
| 876 | } |
| 877 | |
| 878 | if (SCpnt->request.cmd == WRITE1) |
| 879 | for(count=0; count<SCpnt->use_sg; count++) |
| 880 | if(sgpnt[count].alt_address) |
| 881 | memcpy(sgpnt[count].address, sgpnt[count].alt_address,(__builtin_constant_p(sgpnt[count].length) ? __constant_memcpy ((sgpnt[count].address),(sgpnt[count].alt_address),(sgpnt[count ].length)) : __memcpy((sgpnt[count].address),(sgpnt[count].alt_address ),(sgpnt[count].length))) |
| 882 | sgpnt[count].length)(__builtin_constant_p(sgpnt[count].length) ? __constant_memcpy ((sgpnt[count].address),(sgpnt[count].alt_address),(sgpnt[count ].length)) : __memcpy((sgpnt[count].address),(sgpnt[count].alt_address ),(sgpnt[count].length))); |
| 883 | } /* Able to malloc sgpnt */ |
| 884 | } /* Host adapter capable of scatter-gather */ |
| 885 | |
| 886 | /* Now handle the possibility of DMA to addresses > 16Mb */ |
| 887 | |
| 888 | if(SCpnt->use_sg == 0){ |
| 889 | if (((long) buff) + (this_count << 9) - 1 > ISA_DMA_THRESHOLD(0x00ffffff) && |
| 890 | (SCpnt->host->unchecked_isa_dma)) { |
| 891 | if(bounce_buffer) |
| 892 | buff = bounce_buffer; |
| 893 | else |
| 894 | buff = (char *) scsi_malloc(this_count << 9); |
| 895 | if(buff == NULL((void *) 0)) { /* Try backing off a bit if we are low on mem*/ |
| 896 | this_count = SCpnt->request.current_nr_sectors; |
| 897 | buff = (char *) scsi_malloc(this_count << 9); |
| 898 | if(!buff) panic("Ran out of DMA buffers."); |
| 899 | } |
| 900 | if (SCpnt->request.cmd == WRITE1) |
| 901 | memcpy(buff, (char *)SCpnt->request.buffer, this_count << 9)(__builtin_constant_p(this_count << 9) ? __constant_memcpy ((buff),((char *)SCpnt->request.buffer),(this_count << 9)) : __memcpy((buff),((char *)SCpnt->request.buffer),(this_count << 9))); |
| 902 | } |
| 903 | } |
| 904 | #ifdef DEBUG |
| 905 | printk("sd%c : %s %d/%d 512 byte blocks.\n", |
| 906 | 'a' + devm, |
| 907 | (SCpnt->request.cmd == WRITE1) ? "writing" : "reading", |
| 908 | this_count, SCpnt->request.nr_sectors); |
| 909 | #endif |
| 910 | |
| 911 | cmd[1] = (SCpnt->lun << 5) & 0xe0; |
| 912 | |
| 913 | if (rscsi_disks[dev].sector_size == 1024){ |
| 914 | if(block & 1) panic("sd.c:Bad block number requested"); |
| 915 | if(this_count & 1) panic("sd.c:Bad block number requested"); |
| 916 | block = block >> 1; |
| 917 | this_count = this_count >> 1; |
| 918 | } |
| 919 | |
| 920 | if (rscsi_disks[dev].sector_size == 256){ |
| 921 | block = block << 1; |
| 922 | this_count = this_count << 1; |
| 923 | } |
| 924 | |
| 925 | if (((this_count > 0xff) || (block > 0x1fffff)) && rscsi_disks[dev].ten) |
| 926 | { |
| 927 | if (this_count > 0xffff) |
| 928 | this_count = 0xffff; |
| 929 | |
| 930 | cmd[0] += READ_100x28 - READ_60x08 ; |
| 931 | cmd[2] = (unsigned char) (block >> 24) & 0xff; |
| 932 | cmd[3] = (unsigned char) (block >> 16) & 0xff; |
| 933 | cmd[4] = (unsigned char) (block >> 8) & 0xff; |
| 934 | cmd[5] = (unsigned char) block & 0xff; |
| 935 | cmd[6] = cmd[9] = 0; |
| 936 | cmd[7] = (unsigned char) (this_count >> 8) & 0xff; |
| 937 | cmd[8] = (unsigned char) this_count & 0xff; |
| 938 | } |
| 939 | else |
| 940 | { |
| 941 | if (this_count > 0xff) |
| 942 | this_count = 0xff; |
| 943 | |
| 944 | cmd[1] |= (unsigned char) ((block >> 16) & 0x1f); |
| 945 | cmd[2] = (unsigned char) ((block >> 8) & 0xff); |
| 946 | cmd[3] = (unsigned char) block & 0xff; |
| 947 | cmd[4] = (unsigned char) this_count; |
| 948 | cmd[5] = 0; |
| 949 | } |
| 950 | |
| 951 | /* |
| 952 | * We shouldn't disconnect in the middle of a sector, so with a dumb |
| 953 | * host adapter, it's safe to assume that we can at least transfer |
| 954 | * this many bytes between each connect / disconnect. |
| 955 | */ |
| 956 | |
| 957 | SCpnt->transfersize = rscsi_disks[dev].sector_size; |
| 958 | SCpnt->underflow = this_count << 9; |
| 959 | scsi_do_cmd (SCpnt, (void *) cmd, buff, |
| 960 | this_count * rscsi_disks[dev].sector_size, |
| 961 | rw_intr, |
| 962 | (SCpnt->device->type == TYPE_DISK0x00 ? |
| 963 | SD_TIMEOUT(20 * 100) : SD_MOD_TIMEOUT(25 * 100)), |
| 964 | MAX_RETRIES5); |
| 965 | } |
| 966 | |
| 967 | static int check_scsidisk_media_change(kdev_t full_dev){ |
| 968 | int retval; |
| 969 | int target; |
| 970 | struct inode inode; |
| 971 | int flag = 0; |
| 972 | |
| 973 | target = DEVICE_NR(full_dev)(((full_dev) & ((1<<8) - 1)) >> 4); |
| 974 | |
| 975 | if (target >= sd_template.dev_max || |
| 976 | !rscsi_disks[target].device) { |
| 977 | printk("SCSI disk request error: invalid device.\n"); |
| 978 | return 0; |
| 979 | } |
| 980 | |
| 981 | if(!rscsi_disks[target].device->removable) return 0; |
| 982 | |
| 983 | inode.i_rdev = full_dev; /* This is all we really need here */ |
| 984 | |
| 985 | /* Using Start/Stop enables differentiation between drive with |
| 986 | * no cartridge loaded - NOT READY, drive with changed cartridge - |
| 987 | * UNIT ATTENTION, or with same cartridge - GOOD STATUS. |
| 988 | * This also handles drives that auto spin down. eg iomega jaz 1GB |
| 989 | * as this will spin up the drive. |
| 990 | */ |
| 991 | retval = sd_ioctl(&inode, NULL((void *) 0), SCSI_IOCTL_START_UNIT5, 0); |
| 992 | |
| 993 | if(retval){ /* Unable to test, unit probably not ready. This usually |
| 994 | * means there is no disc in the drive. Mark as changed, |
| 995 | * and we will figure it out later once the drive is |
| 996 | * available again. */ |
| 997 | |
| 998 | rscsi_disks[target].ready = 0; |
| 999 | rscsi_disks[target].device->changed = 1; |
| 1000 | return 1; /* This will force a flush, if called from |
| 1001 | * check_disk_change */ |
| 1002 | } |
| 1003 | |
| 1004 | /* |
| 1005 | * for removable scsi disk ( FLOPTICAL ) we have to recognise the |
| 1006 | * presence of disk in the drive. This is kept in the Scsi_Disk |
| 1007 | * struct and tested at open ! Daniel Roche ( dan@lectra.fr ) |
| 1008 | */ |
| 1009 | |
| 1010 | rscsi_disks[target].ready = 1; /* FLOPTICAL */ |
| 1011 | |
| 1012 | retval = rscsi_disks[target].device->changed; |
| 1013 | if(!flag) rscsi_disks[target].device->changed = 0; |
| 1014 | return retval; |
| 1015 | } |
| 1016 | |
| 1017 | static void sd_init_done (Scsi_Cmnd * SCpnt) |
| 1018 | { |
| 1019 | struct request * req; |
| 1020 | |
| 1021 | req = &SCpnt->request; |
| 1022 | req->rq_status = RQ_SCSI_DONE0xfffe; /* Busy, but indicate request done */ |
| 1023 | |
| 1024 | if (req->sem != NULL((void *) 0)) { |
| 1025 | up(req->sem); |
| 1026 | } |
| 1027 | } |
| 1028 | |
/*
 * Probe one attached SCSI disk (index i into rscsi_disks[]) and fill in
 * its capacity, sector size and write-protect state.
 *
 * Sequence (all commands are issued synchronously: a locked semaphore is
 * attached to the command's request, sd_init_done() ups it on completion,
 * and we sleep in down()):
 *   1. TEST UNIT READY, retried while UNIT ATTENTION is reported;
 *   2. for non-removable drives answering NOT READY, a START STOP spin-up
 *      followed by polling for up to ~100 seconds;
 *   3. READ CAPACITY (up to 3 attempts); on failure, 512-byte sectors and
 *      a 1 GB capacity are assumed rather than dropping the disk;
 *   4. for removable disks that are ready, MODE SENSE page 1 to read the
 *      write-protect bit.
 *
 * Returns the disk index probed (i).  A disk with an unsupported
 * non-removable sector size is deleted from the table before returning.
 */
static int sd_init_onedisk(int i)
{
    unsigned char cmd[10];
    unsigned char *buffer;
    unsigned long spintime;
    int the_result, retries;
    Scsi_Cmnd * SCpnt;

    /* We need to retry the READ_CAPACITY because a UNIT_ATTENTION is
     * considered a fatal error, and many devices report such an error
     * just after a scsi bus reset.
     */

    /* NOTE(review): both results are used unchecked below — presumably
     * boot/attach-time allocation is assumed to succeed; confirm. */
    SCpnt = allocate_device(NULL((void *) 0), rscsi_disks[i].device, 1);
    buffer = (unsigned char *) scsi_malloc(512);

    spintime = 0;

    /* Spin up drives, as required.  Only do this at boot time */
    /* Spinup needs to be done for module loads too. */
    do{
	retries = 0;
	while(retries < 3)
	{
	    /* TEST UNIT READY; retried while the unit reports the
	     * (transient) UNIT ATTENTION condition. */
	    cmd[0] = TEST_UNIT_READY0x00;
	    cmd[1] = (rscsi_disks[i].device->lun << 5) & 0xe0;
	    memset ((void *) &cmd[2], 0, 8)(__builtin_constant_p(0) ? (__builtin_constant_p((8)) ? __constant_c_and_count_memset((((void *) &cmd[2])),((0x01010101UL*(unsigned char)(0))),((8))) : __constant_c_memset((((void *) &cmd[2])),((0x01010101UL*(unsigned char)(0))),((8)))) : (__builtin_constant_p((8)) ? __memset_generic(((((void *) &cmd[2]))),(((0))),(((8)))) : __memset_generic((((void *) &cmd[2])),((0)),((8)))));
	    SCpnt->cmd_len = 0;
	    SCpnt->sense_buffer[0] = 0;
	    SCpnt->sense_buffer[2] = 0;

	    {
		struct semaphore sem = MUTEX_LOCKED((struct semaphore) { 0, 0, 0, ((void *) 0) });
		/* Mark as really busy again */
		SCpnt->request.rq_status = RQ_SCSI_BUSY0xffff;
		SCpnt->request.sem = &sem;
		scsi_do_cmd (SCpnt,
			     (void *) cmd, (void *) buffer,
			     512, sd_init_done, SD_TIMEOUT(20 * 100),
			     MAX_RETRIES5);
		down(&sem);
	    }

	    the_result = SCpnt->result;
	    retries++;
	    if( the_result == 0
	       || SCpnt->sense_buffer[2] != UNIT_ATTENTION0x06)
		break;
	}

	/* Look for non-removable devices that return NOT_READY.
	 * Issue command to spin up drive for these cases. */
	if(the_result && !rscsi_disks[i].device->removable &&
	   SCpnt->sense_buffer[2] == NOT_READY0x02) {
	    unsigned long time1;
	    if(!spintime){
#ifdef MACH1
		printk( "sd%d: Spinning up disk...", i);
#else
		printk( "sd%c: Spinning up disk...", 'a' + i );
#endif
		cmd[0] = START_STOP0x1b;
		cmd[1] = (rscsi_disks[i].device->lun << 5) & 0xe0;
		cmd[1] |= 1;  /* Return immediately */
		memset ((void *) &cmd[2], 0, 8)(__builtin_constant_p(0) ? (__builtin_constant_p((8)) ? __constant_c_and_count_memset((((void *) &cmd[2])),((0x01010101UL*(unsigned char)(0))),((8))) : __constant_c_memset((((void *) &cmd[2])),((0x01010101UL*(unsigned char)(0))),((8)))) : (__builtin_constant_p((8)) ? __memset_generic(((((void *) &cmd[2]))),(((0))),(((8)))) : __memset_generic((((void *) &cmd[2])),((0)),((8)))));
		cmd[4] = 1;  /* Start spin cycle */
		SCpnt->cmd_len = 0;
		SCpnt->sense_buffer[0] = 0;
		SCpnt->sense_buffer[2] = 0;

		{
		    struct semaphore sem = MUTEX_LOCKED((struct semaphore) { 0, 0, 0, ((void *) 0) });
		    /* Mark as really busy again */
		    SCpnt->request.rq_status = RQ_SCSI_BUSY0xffff;
		    SCpnt->request.sem = &sem;
		    scsi_do_cmd (SCpnt,
				 (void *) cmd, (void *) buffer,
				 512, sd_init_done, SD_TIMEOUT(20 * 100),
				 MAX_RETRIES5);
		    down(&sem);
		}

		spintime = jiffies;	/* remember when spin-up started */
	    }

	    time1 = jiffies + HZ100;
	    /* Busy-wait one second between readiness polls (boot-time code). */
	    while(jiffies < time1); /* Wait 1 second for next try */
	    printk( "." );
	}
	/* Keep polling until ready or ~100 seconds after spin-up began. */
    } while(the_result && spintime && spintime+100*HZ100 > jiffies);
    if (spintime) {
	if (the_result)
	    printk( "not responding...\n" );
	else
	    printk( "ready\n" );
    }

    /* READ CAPACITY: up to 3 attempts, 8-byte response into buffer. */
    retries = 3;
    do {
	cmd[0] = READ_CAPACITY0x25;
	cmd[1] = (rscsi_disks[i].device->lun << 5) & 0xe0;
	memset ((void *) &cmd[2], 0, 8)(__builtin_constant_p(0) ? (__builtin_constant_p((8)) ? __constant_c_and_count_memset((((void *) &cmd[2])),((0x01010101UL*(unsigned char)(0))),((8))) : __constant_c_memset((((void *) &cmd[2])),((0x01010101UL*(unsigned char)(0))),((8)))) : (__builtin_constant_p((8)) ? __memset_generic(((((void *) &cmd[2]))),(((0))),(((8)))) : __memset_generic((((void *) &cmd[2])),((0)),((8)))));
	memset ((void *) buffer, 0, 8)(__builtin_constant_p(0) ? (__builtin_constant_p((8)) ? __constant_c_and_count_memset((((void *) buffer)),((0x01010101UL*(unsigned char)(0))),((8))) : __constant_c_memset((((void *) buffer)),((0x01010101UL*(unsigned char)(0))),((8)))) : (__builtin_constant_p((8)) ? __memset_generic(((((void *) buffer))),(((0))),(((8)))) : __memset_generic((((void *) buffer)),((0)),((8)))));
	SCpnt->cmd_len = 0;
	SCpnt->sense_buffer[0] = 0;
	SCpnt->sense_buffer[2] = 0;

	{
	    struct semaphore sem = MUTEX_LOCKED((struct semaphore) { 0, 0, 0, ((void *) 0) });
	    /* Mark as really busy again */
	    SCpnt->request.rq_status = RQ_SCSI_BUSY0xffff;
	    SCpnt->request.sem = &sem;
	    scsi_do_cmd (SCpnt,
			 (void *) cmd, (void *) buffer,
			 8, sd_init_done, SD_TIMEOUT(20 * 100),
			 MAX_RETRIES5);
	    down(&sem);	/* sleep until it is ready */
	}

	the_result = SCpnt->result;
	retries--;

    } while(the_result && retries);

    SCpnt->request.rq_status = RQ_INACTIVE(-1);  /* Mark as not busy */

    wake_up(&SCpnt->device->device_wait);

    /* Wake up a process waiting for device */

    /*
     * The SCSI standard says:
     * "READ CAPACITY is necessary for self configuring software"
     *  While not mandatory, support of READ CAPACITY is strongly encouraged.
     *  We used to die if we couldn't successfully do a READ CAPACITY.
     *  But, now we go on about our way.  The side effects of this are
     *
     *  1. We can't know block size with certainty. I have said "512 bytes
     *     is it" as this is most common.
     *
     *  2. Recovery from when some one attempts to read past the end of the
     *     raw device will be slower.
     */

    if (the_result)
    {
#ifdef MACH1
	printk ("sd%d : READ CAPACITY failed.\n"
		"sd%d : status = %x, message = %02x, host = %d, driver = %02x \n",
		i, i,
#else
	printk ("sd%c : READ CAPACITY failed.\n"
		"sd%c : status = %x, message = %02x, host = %d, driver = %02x \n",
		'a' + i, 'a' + i,
#endif
		status_byte(the_result)(((the_result) >> 1) & 0x1f),
		msg_byte(the_result)(((the_result) >> 8) & 0xff),
		host_byte(the_result)(((the_result) >> 16) & 0xff),
		driver_byte(the_result)(((the_result) >> 24) & 0xff)
		);
	if (driver_byte(the_result)(((the_result) >> 24) & 0xff) & DRIVER_SENSE0x08)
#ifdef MACH1
	    printk("sd%d : extended sense code = %1x \n",
		   i, SCpnt->sense_buffer[2] & 0xf);
#else
	    printk("sd%c : extended sense code = %1x \n",
		   'a' + i, SCpnt->sense_buffer[2] & 0xf);
#endif
	else
#ifdef MACH1
	    printk("sd%d : sense not available. \n", i);
#else
	    printk("sd%c : sense not available. \n", 'a' + i);
#endif

#ifdef MACH1
	printk("sd%d : block size assumed to be 512 bytes, disk size 1GB. \n",
	       i);
#else
	printk("sd%c : block size assumed to be 512 bytes, disk size 1GB. \n",
	       'a' + i);
#endif
	/* Fallback geometry: 0x1fffff 512-byte sectors == ~1 GB. */
	rscsi_disks[i].capacity = 0x1fffff;
	rscsi_disks[i].sector_size = 512;

	/* Set dirty bit for removable devices if not ready - sometimes drives
	 * will not report this properly. */
	if(rscsi_disks[i].device->removable &&
	   SCpnt->sense_buffer[2] == NOT_READY0x02)
	    rscsi_disks[i].device->changed = 1;

    }
    else
    {
	/*
	 * FLOPTICAL , if read_capa is ok , drive is assumed to be ready
	 */
	rscsi_disks[i].ready = 1;

	/* READ CAPACITY returns the last block address (big-endian),
	 * hence the +1 to get a block count. */
	rscsi_disks[i].capacity = 1 + ((buffer[0] << 24) |
				       (buffer[1] << 16) |
				       (buffer[2] << 8) |
				       buffer[3]);

	rscsi_disks[i].sector_size = (buffer[4] << 24) |
	    (buffer[5] << 16) | (buffer[6] << 8) | buffer[7];

	if (rscsi_disks[i].sector_size == 0) {
	    rscsi_disks[i].sector_size = 512;
#ifdef MACH1
	    printk("sd%d : sector size 0 reported, assuming 512.\n", i);
#else
	    printk("sd%c : sector size 0 reported, assuming 512.\n", 'a' + i);
#endif
	}


	if (rscsi_disks[i].sector_size != 512 &&
	    rscsi_disks[i].sector_size != 1024 &&
	    rscsi_disks[i].sector_size != 256)
	{
#ifdef MACH1
	    printk ("sd%d : unsupported sector size %d.\n",
		    i, rscsi_disks[i].sector_size);
#else
	    printk ("sd%c : unsupported sector size %d.\n",
		    'a' + i, rscsi_disks[i].sector_size);
#endif
	    if(rscsi_disks[i].device->removable){
		rscsi_disks[i].capacity = 0;
	    } else {
		/* Non-removable with an unusable sector size: drop it. */
		printk ("scsi : deleting disk entry.\n");
		rscsi_disks[i].device = NULL((void *) 0);
		sd_template.nr_dev--;
		sd_gendisk.nr_real--;
		return i;
	    }
	}
	{
	    /*
	     * The msdos fs needs to know the hardware sector size
	     * So I have created this table. See ll_rw_blk.c
	     * Jacques Gelinas (Jacques@solucorp.qc.ca)
	     */
	    int m, mb;
	    int sz_quot, sz_rem;
	    int hard_sector = rscsi_disks[i].sector_size;
	    /* There are 16 minors allocated for each major device */
	    for (m=i<<4; m<((i+1)<<4); m++){
		sd_hardsizes[m] = hard_sector;
	    }
	    mb = rscsi_disks[i].capacity / 1024 * hard_sector / 1024;
	    /* sz = div(m/100, 10);  this seems to not be in the libr */
	    m = (mb + 50) / 100;
	    sz_quot = m / 10;
	    sz_rem = m - (10 * sz_quot);
#ifdef MACH1
	    printk ("SCSI device sd%d: hdwr sector= %d bytes."
		    " Sectors= %d [%d MB] [%d.%1d GB]\n",
		    i, hard_sector, rscsi_disks[i].capacity,
		    mb, sz_quot, sz_rem);
#else
	    printk ("SCSI device sd%c: hdwr sector= %d bytes."
		    " Sectors= %d [%d MB] [%d.%1d GB]\n",
		    i+'a', hard_sector, rscsi_disks[i].capacity,
		    mb, sz_quot, sz_rem);
#endif
	}
	/* Internally the driver always counts in 512-byte sectors. */
	if(rscsi_disks[i].sector_size == 1024)
	    rscsi_disks[i].capacity <<= 1;  /* Change into 512 byte sectors */
	if(rscsi_disks[i].sector_size == 256)
	    rscsi_disks[i].capacity >>= 1;  /* Change into 512 byte sectors */
    }


    /*
     * Unless otherwise specified, this is not write protected.
     */
    rscsi_disks[i].write_prot = 0;
    if ( rscsi_disks[i].device->removable && rscsi_disks[i].ready ) {
	/* FLOPTICAL */

	/*
	 * for removable scsi disk ( FLOPTICAL ) we have to recognise
	 * the Write Protect Flag. This flag is kept in the Scsi_Disk struct
	 * and tested at open !
	 * Daniel Roche ( dan@lectra.fr )
	 */

	memset ((void *) &cmd[0], 0, 8)(__builtin_constant_p(0) ? (__builtin_constant_p((8)) ? __constant_c_and_count_memset((((void *) &cmd[0])),((0x01010101UL*(unsigned char)(0))),((8))) : __constant_c_memset((((void *) &cmd[0])),((0x01010101UL*(unsigned char)(0))),((8)))) : (__builtin_constant_p((8)) ? __memset_generic(((((void *) &cmd[0]))),(((0))),(((8)))) : __memset_generic((((void *) &cmd[0])),((0)),((8)))));
	cmd[0] = MODE_SENSE0x1a;
	cmd[1] = (rscsi_disks[i].device->lun << 5) & 0xe0;
	cmd[2] = 1;	/* page code 1 ?? */
	cmd[4] = 12;
	SCpnt->cmd_len = 0;
	SCpnt->sense_buffer[0] = 0;
	SCpnt->sense_buffer[2] = 0;

	/* same code as READCAPA !! */
	{
	    struct semaphore sem = MUTEX_LOCKED((struct semaphore) { 0, 0, 0, ((void *) 0) });
	    SCpnt->request.rq_status = RQ_SCSI_BUSY0xffff;  /* Mark as really busy again */
	    SCpnt->request.sem = &sem;
	    scsi_do_cmd (SCpnt,
			 (void *) cmd, (void *) buffer,
			 512, sd_init_done, SD_TIMEOUT(20 * 100),
			 MAX_RETRIES5);
	    down(&sem);
	}

	the_result = SCpnt->result;
	SCpnt->request.rq_status = RQ_INACTIVE(-1);  /* Mark as not busy */
	wake_up(&SCpnt->device->device_wait);

	if ( the_result ) {
	    /* Fail safe: treat an unanswerable query as write-protected. */
#ifdef MACH1
	    printk ("sd%d: test WP failed, assume Write Protected\n",i);
#else
	    printk ("sd%c: test WP failed, assume Write Protected\n",i+'a');
#endif
	    rscsi_disks[i].write_prot = 1;
	} else {
	    /* Bit 7 of the mode-parameter header byte 2 is the WP bit. */
	    rscsi_disks[i].write_prot = ((buffer[2] & 0x80) != 0);
#ifdef MACH1
	    printk ("sd%d: Write Protect is %s\n",i,
		    rscsi_disks[i].write_prot ? "on" : "off");
#else
	    printk ("sd%c: Write Protect is %s\n",i+'a',
		    rscsi_disks[i].write_prot ? "on" : "off");
#endif
	}

    }	/* check for write protect */

    rscsi_disks[i].ten = 1;	/* allow 10-byte READ/WRITE commands */
    rscsi_disks[i].remap = 1;
    scsi_free(buffer, 512);
    return i;
}
| 1368 | |
| 1369 | /* |
| 1370 | * The sd_init() function looks at all SCSI drives present, determines |
| 1371 | * their size, and reads partition table entries for them. |
| 1372 | */ |
| 1373 | |
/* Non-zero once the sd major number has been registered via
 * register_blkdev(); keeps repeated sd_init() calls from registering twice. */
static int sd_registered = 0;
| 1375 | |
/*
 * Register the sd block major and allocate the global per-disk tables
 * (rscsi_disks, sd_sizes, sd_blocksizes, sd_hardsizes, sd partition
 * array) sized for dev_noticed disks plus SD_EXTRA_DEVS2 spares, then
 * wire them into the gendisk structure and the block layer's
 * blksize_size/hardsect_size tables.
 *
 * Returns 0 on success (or when there is nothing to do / tables already
 * exist), 1 if the major number could not be registered.
 */
static int sd_init()
{
    int i;

    if (sd_template.dev_noticed == 0) return 0;

    if(!sd_registered) {
	if (register_blkdev(MAJOR_NR8,"sd",&sd_fops)) {
	    printk("Unable to get major %d for SCSI disk\n",MAJOR_NR8);
	    return 1;
	}
	sd_registered++;
    }

    /* We do not support attaching loadable devices yet. */
    if(rscsi_disks) return 0;

    sd_template.dev_max = sd_template.dev_noticed + SD_EXTRA_DEVS2;

    /* NOTE(review): the scsi_init_malloc() results below are used
     * unchecked — presumably init-time allocation is assumed to
     * succeed; confirm. */
    rscsi_disks = (Scsi_Disk *)
	scsi_init_malloc(sd_template.dev_max * sizeof(Scsi_Disk), GFP_ATOMIC0x01);
    memset(rscsi_disks, 0, sd_template.dev_max * sizeof(Scsi_Disk))(__builtin_constant_p(0) ? (__builtin_constant_p((sd_template.dev_max * sizeof(Scsi_Disk))) ? __constant_c_and_count_memset(((rscsi_disks)),((0x01010101UL*(unsigned char)(0))),((sd_template.dev_max * sizeof(Scsi_Disk)))) : __constant_c_memset(((rscsi_disks)),((0x01010101UL*(unsigned char)(0))),((sd_template.dev_max * sizeof(Scsi_Disk))))) : (__builtin_constant_p((sd_template.dev_max * sizeof(Scsi_Disk))) ? __memset_generic((((rscsi_disks))),(((0))),(((sd_template.dev_max * sizeof(Scsi_Disk))))) : __memset_generic(((rscsi_disks)),((0)),((sd_template.dev_max * sizeof(Scsi_Disk))))));

    /* All per-minor tables are 16 entries per disk (dev_max << 4):
     * one minor per partition. */
    sd_sizes = (int *) scsi_init_malloc((sd_template.dev_max << 4) *
					sizeof(int), GFP_ATOMIC0x01);
    memset(sd_sizes, 0, (sd_template.dev_max << 4) * sizeof(int))(__builtin_constant_p(0) ? (__builtin_constant_p(((sd_template.dev_max << 4) * sizeof(int))) ? __constant_c_and_count_memset(((sd_sizes)),((0x01010101UL*(unsigned char)(0))),(((sd_template.dev_max << 4) * sizeof(int)))) : __constant_c_memset(((sd_sizes)),((0x01010101UL*(unsigned char)(0))),(((sd_template.dev_max << 4) * sizeof(int))))) : (__builtin_constant_p(((sd_template.dev_max << 4) * sizeof(int))) ? __memset_generic((((sd_sizes))),(((0))),((((sd_template.dev_max << 4) * sizeof(int))))) : __memset_generic(((sd_sizes)),((0)),(((sd_template.dev_max << 4) * sizeof(int))))));

    sd_blocksizes = (int *) scsi_init_malloc((sd_template.dev_max << 4) *
					     sizeof(int), GFP_ATOMIC0x01);

    sd_hardsizes = (int *) scsi_init_malloc((sd_template.dev_max << 4) *
					    sizeof(int), GFP_ATOMIC0x01);

    /* Defaults: 1024-byte logical block size, 512-byte hardware sector;
     * sd_init_onedisk() overwrites the hardware size per disk later. */
    for(i=0;i<(sd_template.dev_max << 4);i++){
	sd_blocksizes[i] = 1024;
	sd_hardsizes[i] = 512;
    }
    blksize_size[MAJOR_NR8] = sd_blocksizes;
    hardsect_size[MAJOR_NR8] = sd_hardsizes;
    sd = (struct hd_struct *) scsi_init_malloc((sd_template.dev_max << 4) *
					       sizeof(struct hd_struct),
					       GFP_ATOMIC0x01);


    sd_gendisk.max_nr = sd_template.dev_max;
    sd_gendisk.part = sd;
    sd_gendisk.sizes = sd_sizes;
    sd_gendisk.real_devices = (void *) rscsi_disks;
    return 0;
}
| 1426 | |
/*
 * Final stage of driver initialization: install the request function,
 * splice sd_gendisk into the global gendisk list (if not already there),
 * probe every attached-but-unsized disk, and pick a read-ahead value.
 */
static void sd_finish(void)
{
    struct gendisk *gendisk;
    int i;

    blk_dev[MAJOR_NR8].request_fn = DEVICE_REQUESTdo_sd_request;

    /* Add sd_gendisk to the head of gendisk_head unless a previous
     * call already linked it in. */
    for (gendisk = gendisk_head; gendisk != NULL((void *) 0); gendisk = gendisk->next)
	if (gendisk == &sd_gendisk)
	    break;
    if (gendisk == NULL((void *) 0))
    {
	sd_gendisk.next = gendisk_head;
	gendisk_head = &sd_gendisk;
    }

    /* Size every attached disk that has no capacity yet.  When loaded
     * as a module and the partition table was never read, go through
     * revalidate_scsidisk() (which reaches sd_init_onedisk via
     * MAYBE_REINIT); otherwise probe directly.  sd_init_onedisk()
     * returns the index it probed, so the assignment to i is benign. */
    for (i = 0; i < sd_template.dev_max; ++i)
	if (!rscsi_disks[i].capacity &&
	    rscsi_disks[i].device)
	{
	    if (MODULE_FLAGscsi_loadable_module_flag
		&& !rscsi_disks[i].has_part_table) {
		sd_sizes[i << 4] = rscsi_disks[i].capacity;
		/* revalidate does sd_init_onedisk via MAYBE_REINIT*/
		revalidate_scsidisk(MKDEV(MAJOR_NR, i << 4)(((8) << 8) | (i << 4)), 0);
	    }
	    else
		i=sd_init_onedisk(i);
	    rscsi_disks[i].has_part_table = 1;
	}

    /* If our host adapter is capable of scatter-gather, then we increase
     * the read-ahead to 16 blocks (32 sectors).  If not, we use
     * a two block (4 sector) read ahead.
     */
    if(rscsi_disks[0].device && rscsi_disks[0].device->host->sg_tablesize)
	read_ahead[MAJOR_NR8] = 120;	/* 120 sector read-ahead */
    else
	read_ahead[MAJOR_NR8] = 4;	/* 4 sector read-ahead */

    return;
}
| 1469 | |
| 1470 | static int sd_detect(Scsi_Device * SDp){ |
| 1471 | if(SDp->type != TYPE_DISK0x00 && SDp->type != TYPE_MOD0x07) return 0; |
| 1472 | |
| 1473 | #ifdef MACH1 |
| 1474 | printk("Detected scsi %sdisk sd%d at scsi%d, channel %d, id %d, lun %d\n", |
| 1475 | SDp->removable ? "removable " : "", |
| 1476 | sd_template.dev_noticed++, |
| 1477 | SDp->host->host_no, SDp->channel, SDp->id, SDp->lun); |
| 1478 | #else |
| 1479 | printk("Detected scsi %sdisk sd%c at scsi%d, channel %d, id %d, lun %d\n", |
| 1480 | SDp->removable ? "removable " : "", |
| 1481 | 'a'+ (sd_template.dev_noticed++), |
| 1482 | SDp->host->host_no, SDp->channel, SDp->id, SDp->lun); |
| 1483 | #endif |
| 1484 | |
| 1485 | return 1; |
| 1486 | } |
| 1487 | |
| 1488 | static int sd_attach(Scsi_Device * SDp){ |
| 1489 | Scsi_Disk * dpnt; |
| 1490 | int i; |
| 1491 | |
| 1492 | if(SDp->type != TYPE_DISK0x00 && SDp->type != TYPE_MOD0x07) return 0; |
| 1493 | |
| 1494 | if(sd_template.nr_dev >= sd_template.dev_max) { |
| 1495 | SDp->attached--; |
| 1496 | return 1; |
| 1497 | } |
| 1498 | |
| 1499 | for(dpnt = rscsi_disks, i=0; i<sd_template.dev_max; i++, dpnt++) |
| 1500 | if(!dpnt->device) break; |
| 1501 | |
| 1502 | if(i >= sd_template.dev_max) panic ("scsi_devices corrupt (sd)"); |
| 1503 | |
| 1504 | SDp->scsi_request_fn = do_sd_request; |
| 1505 | rscsi_disks[i].device = SDp; |
| 1506 | rscsi_disks[i].has_part_table = 0; |
| 1507 | sd_template.nr_dev++; |
| 1508 | sd_gendisk.nr_real++; |
| 1509 | return 0; |
| 1510 | } |
| 1511 | |
| | /* Shorthands used by revalidate_scsidisk() below; "target" is the |
| | * disk index derived from the device's minor number. */ |
| 1512 | #define DEVICE_BUSYrscsi_disks[target].device->busy rscsi_disks[target].device->busy |
| 1513 | #define USAGErscsi_disks[target].device->access_count rscsi_disks[target].device->access_count |
| 1514 | #define CAPACITYrscsi_disks[target].capacity rscsi_disks[target].capacity |
| 1515 | #define MAYBE_REINITsd_init_onedisk(target) sd_init_onedisk(target) |
| 1516 | #define GENDISK_STRUCTsd_gendisk sd_gendisk |
| 1517 | |
| 1518 | /* This routine is called to flush all partitions and partition tables |
| 1519 | * for a changed scsi disk, and then re-read the new partition table. |
| 1520 | * If we are revalidating a disk because of a media change, then we |
| 1521 | * enter with usage == 0. If we are using an ioctl, we automatically have |
| 1522 | * usage == 1 (we need an open channel to use an ioctl :-), so this |
| 1523 | * is our limit. |
| 1524 | */ |
| 1525 | int revalidate_scsidisk(kdev_t dev, int maxusage){ |
| 1526 | int target; |
| 1527 | struct gendisk * gdev; |
| 1528 | unsigned long flags; |
| 1529 | int max_p; |
| 1530 | int start; |
| 1531 | int i; |
| 1532 | |
| 1533 | target = DEVICE_NR(dev)(((dev) & ((1<<8) - 1)) >> 4); |
| 1534 | gdev = &GENDISK_STRUCTsd_gendisk; |
| 1535 | |
| | /* Interrupts off while we test-and-set the per-device busy flag. */ |
| 1536 | save_flags(flags)__asm__ __volatile__("pushf ; pop %0" : "=r" (flags): :"memory" ); |
| 1537 | cli()__asm__ __volatile__ ("cli": : :"memory"); |
| | /* Refuse while a command is in flight or the device is open more |
| | * times than the caller allows (see the header comment above). */ |
| 1538 | if (DEVICE_BUSYrscsi_disks[target].device->busy || USAGErscsi_disks[target].device->access_count > maxusage) { |
| 1539 | restore_flags(flags)__asm__ __volatile__("push %0 ; popf": :"g" (flags):"memory"); |
| 1540 | printk("Device busy for revalidation (usage=%d)\n", USAGErscsi_disks[target].device->access_count); |
| 1541 | return -EBUSY16; |
| 1542 | } |
| 1543 | DEVICE_BUSYrscsi_disks[target].device->busy = 1; |
| 1544 | restore_flags(flags)__asm__ __volatile__("push %0 ; popf": :"g" (flags):"memory"); |
| 1545 | |
| 1546 | max_p = gdev->max_p; |
| 1547 | start = target << gdev->minor_shift; |
| 1548 | |
| | /* Flush and forget every partition of this disk so the partition |
| | * table can be re-read from scratch. */ |
| 1549 | for (i=max_p - 1; i >=0 ; i--) { |
| 1550 | int minor = start+i; |
| 1551 | kdev_t devi = MKDEV(MAJOR_NR, minor)(((8) << 8) | (minor)); |
| Value stored to 'devi' during its initialization is never read | |
| | /* NOTE(review): the analyzer flags 'devi' as a dead store, yet it |
| | * is passed to sync_dev/invalidate_inodes/invalidate_buffers just |
| | * below. The finding can only be true if those calls expand to |
| | * no-ops in this build configuration -- confirm against the |
| | * generated preprocessor output before removing anything. */ |
| 1552 | sync_dev(devi); |
| 1553 | invalidate_inodes(devi); |
| 1554 | invalidate_buffers(devi); |
| 1555 | gdev->part[minor].start_sect = 0; |
| 1556 | gdev->part[minor].nr_sects = 0; |
| 1557 | /* |
| 1558 | * Reset the blocksize for everything so that we can read |
| 1559 | * the partition table. |
| 1560 | */ |
| 1561 | blksize_size[MAJOR_NR8][minor] = 1024; |
| 1562 | } |
| 1563 | |
| | /* Re-probe the disk (capacity, sector size) before re-reading the |
| | * partition table. */ |
| 1564 | #ifdef MAYBE_REINITsd_init_onedisk(target) |
| 1565 | MAYBE_REINITsd_init_onedisk(target); |
| 1566 | #endif |
| 1567 | |
| 1568 | gdev->part[start].nr_sects = CAPACITYrscsi_disks[target].capacity; |
| 1569 | resetup_one_dev(gdev, target); |
| 1570 | |
| 1571 | DEVICE_BUSYrscsi_disks[target].device->busy = 0; |
| 1572 | return 0; |
| 1573 | } |
| 1574 | |
| | /* File-operations wrapper for revalidate_scsidisk(); maxusage == 0 |
| | * is the media-change path (no open references permitted -- see the |
| | * comment above revalidate_scsidisk). */ |
| 1575 | static int fop_revalidate_scsidisk(kdev_t dev){ |
| 1576 | return revalidate_scsidisk(dev, 0); |
| 1577 | } |
| 1578 | |
| 1579 | |
| | /* sd_detach -- undo sd_attach for a departing device: flush and |
| | * invalidate every partition, zero its partition-table entries and |
| | * sizes, and release the rscsi_disks[] slot. Silently returns if |
| | * the device is not found in the table. */ |
| 1580 | static void sd_detach(Scsi_Device * SDp) |
| 1581 | { |
| 1582 | Scsi_Disk * dpnt; |
| 1583 | int i; |
| 1584 | int max_p; |
| 1585 | int start; |
| 1586 | |
| 1587 | for(dpnt = rscsi_disks, i=0; i<sd_template.dev_max; i++, dpnt++) |
| 1588 | if(dpnt->device == SDp) { |
| 1589 | |
| 1590 | /* If we are disconnecting a disk driver, sync and invalidate |
| 1591 | * everything */ |
| 1592 | max_p = sd_gendisk.max_p; |
| 1593 | start = i << sd_gendisk.minor_shift; |
| 1594 | |
| | /* NOTE: 'i' is reused here as the partition index, clobbering the |
| | * outer loop counter; this is safe only because the function |
| | * returns as soon as this match has been processed. */ |
| 1595 | for (i=max_p - 1; i >=0 ; i--) { |
| 1596 | int minor = start+i; |
| 1597 | kdev_t devi = MKDEV(MAJOR_NR, minor)(((8) << 8) | (minor)); |
| 1598 | sync_dev(devi); |
| 1599 | invalidate_inodes(devi); |
| 1600 | invalidate_buffers(devi); |
| 1601 | sd_gendisk.part[minor].start_sect = 0; |
| 1602 | sd_gendisk.part[minor].nr_sects = 0; |
| 1603 | sd_sizes[minor] = 0; |
| 1604 | } |
| 1605 | |
| | /* Free the slot and drop all the counts sd_attach/sd_detect bumped. */ |
| 1606 | dpnt->has_part_table = 0; |
| 1607 | dpnt->device = NULL((void *) 0); |
| 1608 | dpnt->capacity = 0; |
| 1609 | SDp->attached--; |
| 1610 | sd_template.dev_noticed--; |
| 1611 | sd_template.nr_dev--; |
| 1612 | sd_gendisk.nr_real--; |
| 1613 | return; |
| 1614 | } |
| 1615 | return; |
| 1616 | } |
| 1617 | |
| 1618 | #ifdef MODULE |
| 1619 | |
| | /* Loadable-module entry point: register this driver template with the |
| | * SCSI midlayer, exporting our module use count so the core can track |
| | * references. Returns the midlayer's registration status. */ |
| 1620 | int init_module(void) { |
| 1621 | sd_template.usage_count = &mod_use_count_; |
| 1622 | return scsi_register_module(MODULE_SCSI_DEV4, &sd_template); |
| 1623 | } |
| 1624 | |
| | /* Loadable-module exit point: unregister from the SCSI midlayer and |
| | * the block layer, free every table sd_init allocated, unlink |
| | * sd_gendisk from the global gendisk chain, and clear the per-major |
| | * block-layer hooks. |
| | * |
| | * Fix: the gendisk-chain walk previously had no NULL guard -- if |
| | * sd_gendisk were ever absent from the chain the loop would |
| | * dereference a NULL 'next' pointer, and the "not in disk chain" |
| | * diagnostic below was unreachable. The walk now also stops at the |
| | * end of the list, making the diagnostic path reachable. */ |
| 1625 | void cleanup_module( void) |
| 1626 | { |
| 1627 | struct gendisk * prev_sdgd; |
| 1628 | struct gendisk * sdgd; |
| 1629 | |
| 1630 | scsi_unregister_module(MODULE_SCSI_DEV4, &sd_template); |
| 1631 | unregister_blkdev(SCSI_DISK_MAJOR8, "sd"); |
| 1632 | sd_registered--; |
| 1633 | if( rscsi_disks != NULL((void *) 0) ) |
| 1634 | { |
| | /* Release the main disk table and the per-minor size tables. */ |
| 1635 | scsi_init_free((char *) rscsi_disks, |
| 1636 | (sd_template.dev_noticed + SD_EXTRA_DEVS2) |
| 1637 | * sizeof(Scsi_Disk)); |
| 1638 | |
| 1639 | scsi_init_free((char *) sd_sizes, sd_template.dev_max * sizeof(int)); |
| 1640 | scsi_init_free((char *) sd_blocksizes, sd_template.dev_max * sizeof(int)); |
| 1641 | scsi_init_free((char *) sd_hardsizes, sd_template.dev_max * sizeof(int)); |
| 1642 | scsi_init_free((char *) sd, |
| 1643 | (sd_template.dev_max << 4) * sizeof(struct hd_struct)); |
| 1644 | /* |
| 1645 | * Now remove sd_gendisk from the linked list |
| 1646 | */ |
| 1647 | sdgd = gendisk_head; |
| 1648 | prev_sdgd = NULL((void *) 0); |
| | /* Stop at the end of the chain as well, so a missing sd_gendisk is |
| | * reported instead of crashing on a NULL dereference. */ |
| 1649 | while(sdgd != NULL((void *) 0) && sdgd != &sd_gendisk) |
| 1650 | { |
| 1651 | prev_sdgd = sdgd; |
| 1652 | sdgd = sdgd->next; |
| 1653 | } |
| 1654 | |
| 1655 | if(sdgd != &sd_gendisk) |
| 1656 | printk("sd_gendisk not in disk chain.\n"); |
| 1657 | else { |
| 1658 | if(prev_sdgd != NULL((void *) 0)) |
| 1659 | prev_sdgd->next = sdgd->next; |
| 1660 | else |
| 1661 | gendisk_head = sdgd->next; |
| 1662 | } |
| 1663 | } |
| 1664 | |
| | /* Detach from the block layer so the major can be reused. */ |
| 1665 | blksize_size[MAJOR_NR8] = NULL((void *) 0); |
| 1666 | blk_dev[MAJOR_NR8].request_fn = NULL((void *) 0); |
| 1667 | blk_size[MAJOR_NR8] = NULL((void *) 0); |
| 1668 | hardsect_size[MAJOR_NR8] = NULL((void *) 0); |
| 1669 | read_ahead[MAJOR_NR8] = 0; |
| 1670 | sd_template.dev_max = 0; |
| 1671 | }
| 1672 | #endif /* MODULE */ |
| 1673 | |
| 1674 | /* |
| 1675 | * Overrides for Emacs so that we almost follow Linus's tabbing style. |
| 1676 | * Emacs will notice this stuff at the end of the file and automatically |
| 1677 | * adjust the settings for this buffer only. This must remain at the end |
| 1678 | * of the file. |
| 1679 | * --------------------------------------------------------------------------- |
| 1680 | * Local variables: |
| 1681 | * c-indent-level: 4 |
| 1682 | * c-brace-imaginary-offset: 0 |
| 1683 | * c-brace-offset: -4 |
| 1684 | * c-argdecl-indent: 4 |
| 1685 | * c-label-offset: -4 |
| 1686 | * c-continued-statement-offset: 4 |
| 1687 | * c-continued-brace-offset: 0 |
| 1688 | * indent-tabs-mode: nil |
| 1689 | * tab-width: 8 |
| 1690 | * End: |
| 1691 | */ |