Diffstat (limited to 'windhoek/ide')
-rw-r--r--   windhoek/ide/ide-cd.h              131
-rw-r--r--   windhoek/ide/ide-disk.c            750
-rw-r--r--   windhoek/ide/ide-disk.h             30
-rw-r--r--   windhoek/ide/ide-dma.c             507
-rw-r--r--   windhoek/ide/ide-floppy.h           39
-rw-r--r--   windhoek/ide/ide-gd.c              409
-rw-r--r--   windhoek/ide/ide-gd.h               44
-rw-r--r--   windhoek/ide/ide-generic.c         206
-rw-r--r--   windhoek/ide/ide-io.c             1228
-rw-r--r--   windhoek/ide/ide-iops.c           1215
-rw-r--r--   windhoek/ide/ide-lib.c             423
-rw-r--r--   windhoek/ide/ide-park.c            124
-rw-r--r--   windhoek/ide/ide-pci-generic.c     195
-rw-r--r--   windhoek/ide/ide-pio-blacklist.c    94
-rw-r--r--   windhoek/ide/ide-pm.c              239
-rw-r--r--   windhoek/ide/ide-pnp.c             107
-rw-r--r--   windhoek/ide/ide-probe.c          1739
-rw-r--r--   windhoek/ide/ide-proc.c            701
-rw-r--r--   windhoek/ide/ide-sysfs.c           125
-rw-r--r--   windhoek/ide/ide-taskfile.c        695
-rw-r--r--   windhoek/ide/ide.c                 554
-rw-r--r--   windhoek/ide/local.h                11
-rw-r--r--   windhoek/ide/piix.c                482
-rw-r--r--   windhoek/ide/setup-pci.c           694
24 files changed, 10742 insertions, 0 deletions
diff --git a/windhoek/ide/ide-cd.h b/windhoek/ide/ide-cd.h
new file mode 100644
index 00000000..c878bfcf
--- /dev/null
+++ b/windhoek/ide/ide-cd.h
@@ -0,0 +1,131 @@
+/*
+ * Copyright (C) 1996-98 Erik Andersen
+ * Copyright (C) 1998-2000 Jens Axboe
+ */
+#ifndef _IDE_CD_H
+#define _IDE_CD_H
+
+#include <linux/cdrom.h>
+#include <asm/byteorder.h>
+
+#define IDECD_DEBUG_LOG 0
+
+#if IDECD_DEBUG_LOG
+#define ide_debug_log(lvl, fmt, args...) __ide_debug_log(lvl, fmt, args)
+#else
+#define ide_debug_log(lvl, fmt, args...) do {} while (0)
+#endif
+
+#define ATAPI_WAIT_WRITE_BUSY (10 * HZ)
+
+/************************************************************************/
+
+#define SECTOR_BITS 9
+#ifndef SECTOR_SIZE
+#define SECTOR_SIZE (1 << SECTOR_BITS)
+#endif
+#define SECTORS_PER_FRAME (CD_FRAMESIZE >> SECTOR_BITS)
+#define SECTOR_BUFFER_SIZE (CD_FRAMESIZE * 32)
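+/* With CD_FRAMESIZE == 2048, that is 4 sectors per frame and a 64 KiB buffer. */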
+
+/* Capabilities Page size including 8 bytes of Mode Page Header */
+#define ATAPI_CAPABILITIES_PAGE_SIZE (8 + 20)
+#define ATAPI_CAPABILITIES_PAGE_PAD_SIZE 4
+
+/* Structure of a MSF cdrom address. */
+struct atapi_msf {
+ u8 reserved;
+ u8 minute;
+ u8 second;
+ u8 frame;
+};
+
+/* Space to hold the disk TOC. */
+#define MAX_TRACKS 99
+struct atapi_toc_header {
+ unsigned short toc_length;
+ u8 first_track;
+ u8 last_track;
+};
+
+struct atapi_toc_entry {
+ u8 reserved1;
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u8 adr : 4;
+ u8 control : 4;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u8 control : 4;
+ u8 adr : 4;
+#else
+#error "Please fix <asm/byteorder.h>"
+#endif
+ u8 track;
+ u8 reserved2;
+ union {
+ unsigned lba;
+ struct atapi_msf msf;
+ } addr;
+};
+
+struct atapi_toc {
+ int last_session_lba;
+ int xa_flag;
+ unsigned long capacity;
+ struct atapi_toc_header hdr;
+ struct atapi_toc_entry ent[MAX_TRACKS+1];
+ /* One extra for the leadout. */
+};
+
+/* Extra per-device info for cdrom drives. */
+struct cdrom_info {
+ ide_drive_t *drive;
+ struct ide_driver *driver;
+ struct gendisk *disk;
+ struct device dev;
+
+ /* Buffer for table of contents. NULL if we haven't allocated
+ a TOC buffer for this device yet. */
+
+ struct atapi_toc *toc;
+
+ /* The result of the last successful request sense command
+ on this device. */
+ struct request_sense sense_data;
+
+ struct request request_sense_request;
+
+ u8 max_speed; /* Max speed of the drive. */
+ u8 current_speed; /* Current speed of the drive. */
+
+ /* Per-device info needed by cdrom.c generic driver. */
+ struct cdrom_device_info devinfo;
+
+ unsigned long write_timeout;
+};
+
+/* ide-cd_verbose.c */
+void ide_cd_log_error(const char *, struct request *, struct request_sense *);
+
+/* ide-cd.c functions used by ide-cd_ioctl.c */
+int ide_cd_queue_pc(ide_drive_t *, const unsigned char *, int, void *,
+ unsigned *, struct request_sense *, int, unsigned int);
+int ide_cd_read_toc(ide_drive_t *, struct request_sense *);
+int ide_cdrom_get_capabilities(ide_drive_t *, u8 *);
+void ide_cdrom_update_speed(ide_drive_t *, u8 *);
+int cdrom_check_status(ide_drive_t *, struct request_sense *);
+
+/* ide-cd_ioctl.c */
+int ide_cdrom_open_real(struct cdrom_device_info *, int);
+void ide_cdrom_release_real(struct cdrom_device_info *);
+int ide_cdrom_drive_status(struct cdrom_device_info *, int);
+int ide_cdrom_check_media_change_real(struct cdrom_device_info *, int);
+int ide_cdrom_tray_move(struct cdrom_device_info *, int);
+int ide_cdrom_lock_door(struct cdrom_device_info *, int);
+int ide_cdrom_select_speed(struct cdrom_device_info *, int);
+int ide_cdrom_get_last_session(struct cdrom_device_info *,
+ struct cdrom_multisession *);
+int ide_cdrom_get_mcn(struct cdrom_device_info *, struct cdrom_mcn *);
+int ide_cdrom_reset(struct cdrom_device_info *cdi);
+int ide_cdrom_audio_ioctl(struct cdrom_device_info *, unsigned int, void *);
+int ide_cdrom_packet(struct cdrom_device_info *, struct packet_command *);
+
+#endif /* _IDE_CD_H */
diff --git a/windhoek/ide/ide-disk.c b/windhoek/ide/ide-disk.c
new file mode 100644
index 00000000..24654e93
--- /dev/null
+++ b/windhoek/ide/ide-disk.c
@@ -0,0 +1,750 @@
+/*
+ * Copyright (C) 1994-1998 Linus Torvalds & authors (see below)
+ * Copyright (C) 1998-2002 Linux ATA Development
+ * Andre Hedrick <andre@linux-ide.org>
+ * Copyright (C) 2003 Red Hat
+ * Copyright (C) 2003-2005, 2007 Bartlomiej Zolnierkiewicz
+ */
+
+/*
+ * Mostly written by Mark Lord <mlord@pobox.com>
+ * and Gadi Oxman <gadio@netvision.net.il>
+ * and Andre Hedrick <andre@linux-ide.org>
+ *
+ * This is the IDE/ATA disk driver, as evolved from hd.c and ide.c.
+ */
+
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/kernel.h>
+#include <linux/timer.h>
+#include <linux/mm.h>
+#include <linux/interrupt.h>
+#include <linux/major.h>
+#include <linux/errno.h>
+#include <linux/genhd.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/mutex.h>
+#include <linux/leds.h>
+#include <linux/ide.h>
+#include <linux/hdreg.h>
+
+#include <asm/byteorder.h>
+#include <asm/irq.h>
+#include <asm/uaccess.h>
+#include <asm/io.h>
+#include <asm/div64.h>
+
+#include "ide-disk.h"
+
+static const u8 ide_rw_cmds[] = {
+ ATA_CMD_READ_MULTI,
+ ATA_CMD_WRITE_MULTI,
+ ATA_CMD_READ_MULTI_EXT,
+ ATA_CMD_WRITE_MULTI_EXT,
+ ATA_CMD_PIO_READ,
+ ATA_CMD_PIO_WRITE,
+ ATA_CMD_PIO_READ_EXT,
+ ATA_CMD_PIO_WRITE_EXT,
+ ATA_CMD_READ,
+ ATA_CMD_WRITE,
+ ATA_CMD_READ_EXT,
+ ATA_CMD_WRITE_EXT,
+};
+
+static const u8 ide_data_phases[] = {
+ TASKFILE_MULTI_IN,
+ TASKFILE_MULTI_OUT,
+ TASKFILE_IN,
+ TASKFILE_OUT,
+ TASKFILE_IN_DMA,
+ TASKFILE_OUT_DMA,
+};
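+
+/*
+ * ide_tf_set_cmd() below picks a base index into ide_rw_cmds[] (0 = multi-
+ * sector PIO, 4 = single-sector PIO, 8 = DMA), then adds 2 for LBA48 and 1
+ * for writes; index / 2 + write selects the entry in ide_data_phases[].
+ */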
+
+static void ide_tf_set_cmd(ide_drive_t *drive, ide_task_t *task, u8 dma)
+{
+ u8 index, lba48, write;
+
+ lba48 = (task->tf_flags & IDE_TFLAG_LBA48) ? 2 : 0;
+ write = (task->tf_flags & IDE_TFLAG_WRITE) ? 1 : 0;
+
+ if (dma)
+ index = 8;
+ else
+ index = drive->mult_count ? 0 : 4;
+
+ task->tf.command = ide_rw_cmds[index + lba48 + write];
+
+ if (dma)
+ index = 8; /* fixup index */
+
+ task->data_phase = ide_data_phases[index / 2 + write];
+}
+
+/*
+ * __ide_do_rw_disk() issues READ and WRITE commands to a disk,
+ * using LBA if supported, or CHS otherwise, to address sectors.
+ */
+static ide_startstop_t __ide_do_rw_disk(ide_drive_t *drive, struct request *rq,
+ sector_t block)
+{
+ ide_hwif_t *hwif = drive->hwif;
+ u16 nsectors = (u16)rq->nr_sectors;
+ u8 lba48 = !!(drive->dev_flags & IDE_DFLAG_LBA48);
+ u8 dma = !!(drive->dev_flags & IDE_DFLAG_USING_DMA);
+ ide_task_t task;
+ struct ide_taskfile *tf = &task.tf;
+ ide_startstop_t rc;
+
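+	/*
+	 * Hosts that cannot do LBA48 DMA fall back to PIO for sectors beyond
+	 * the 28-bit boundary, and otherwise stay on DMA in LBA28 mode.
+	 */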
+ if ((hwif->host_flags & IDE_HFLAG_NO_LBA48_DMA) && lba48 && dma) {
+ if (block + rq->nr_sectors > 1ULL << 28)
+ dma = 0;
+ else
+ lba48 = 0;
+ }
+
+ if (!dma) {
+ ide_init_sg_cmd(drive, rq);
+ ide_map_sg(drive, rq);
+ }
+
+ memset(&task, 0, sizeof(task));
+ task.tf_flags = IDE_TFLAG_TF | IDE_TFLAG_DEVICE;
+
+ if (drive->dev_flags & IDE_DFLAG_LBA) {
+ if (lba48) {
+ pr_debug("%s: LBA=0x%012llx\n", drive->name,
+ (unsigned long long)block);
+
+ tf->hob_nsect = (nsectors >> 8) & 0xff;
+ tf->hob_lbal = (u8)(block >> 24);
+ if (sizeof(block) != 4) {
+ tf->hob_lbam = (u8)((u64)block >> 32);
+ tf->hob_lbah = (u8)((u64)block >> 40);
+ }
+
+ tf->nsect = nsectors & 0xff;
+ tf->lbal = (u8) block;
+ tf->lbam = (u8)(block >> 8);
+ tf->lbah = (u8)(block >> 16);
+
+ task.tf_flags |= (IDE_TFLAG_LBA48 | IDE_TFLAG_HOB);
+ } else {
+ tf->nsect = nsectors & 0xff;
+ tf->lbal = block;
+ tf->lbam = block >>= 8;
+ tf->lbah = block >>= 8;
+ tf->device = (block >> 8) & 0xf;
+ }
+
+ tf->device |= ATA_LBA;
+ } else {
+ unsigned int sect, head, cyl, track;
+
+ track = (int)block / drive->sect;
+ sect = (int)block % drive->sect + 1;
+ head = track % drive->head;
+ cyl = track / drive->head;
+
+ pr_debug("%s: CHS=%u/%u/%u\n", drive->name, cyl, head, sect);
+
+ tf->nsect = nsectors & 0xff;
+ tf->lbal = sect;
+ tf->lbam = cyl;
+ tf->lbah = cyl >> 8;
+ tf->device = head;
+ }
+
+ if (rq_data_dir(rq))
+ task.tf_flags |= IDE_TFLAG_WRITE;
+
+ ide_tf_set_cmd(drive, &task, dma);
+ if (!dma)
+ hwif->data_phase = task.data_phase;
+ task.rq = rq;
+
+ rc = do_rw_taskfile(drive, &task);
+
+ if (rc == ide_stopped && dma) {
+ /* fallback to PIO */
+ task.tf_flags |= IDE_TFLAG_DMA_PIO_FALLBACK;
+ ide_tf_set_cmd(drive, &task, 0);
+ hwif->data_phase = task.data_phase;
+ ide_init_sg_cmd(drive, rq);
+ rc = do_rw_taskfile(drive, &task);
+ }
+
+ return rc;
+}
+
+/*
+ * 268435455 == 137439 MB or 28bit limit
+ * 320173056 == 163929 MB or 48bit addressing
+ * 1073741822 == 549756 MB or 48bit addressing fake drive
+ */
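+/*
+ * (With 512-byte sectors: 2^28 - 1 = 268435455 sectors * 512 bytes
+ * ~= 137439 MB, which is where the 28-bit limit figure above comes from.)
+ */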
+
+static ide_startstop_t ide_do_rw_disk(ide_drive_t *drive, struct request *rq,
+ sector_t block)
+{
+ ide_hwif_t *hwif = drive->hwif;
+
+ BUG_ON(drive->dev_flags & IDE_DFLAG_BLOCKED);
+
+ if (!blk_fs_request(rq)) {
+ blk_dump_rq_flags(rq, "ide_do_rw_disk - bad command");
+ ide_end_request(drive, 0, 0);
+ return ide_stopped;
+ }
+
+ ledtrig_ide_activity();
+
+ pr_debug("%s: %sing: block=%llu, sectors=%lu, buffer=0x%08lx\n",
+ drive->name, rq_data_dir(rq) == READ ? "read" : "writ",
+ (unsigned long long)block, rq->nr_sectors,
+ (unsigned long)rq->buffer);
+
+ if (hwif->rw_disk)
+ hwif->rw_disk(drive, rq);
+
+ return __ide_do_rw_disk(drive, rq, block);
+}
+
+/*
+ * Queries for true maximum capacity of the drive.
+ * Returns maximum LBA address (> 0) of the drive, 0 if failed.
+ */
+static u64 idedisk_read_native_max_address(ide_drive_t *drive, int lba48)
+{
+ ide_task_t args;
+ struct ide_taskfile *tf = &args.tf;
+ u64 addr = 0;
+
+ /* Create IDE/ATA command request structure */
+ memset(&args, 0, sizeof(ide_task_t));
+ if (lba48)
+ tf->command = ATA_CMD_READ_NATIVE_MAX_EXT;
+ else
+ tf->command = ATA_CMD_READ_NATIVE_MAX;
+ tf->device = ATA_LBA;
+ args.tf_flags = IDE_TFLAG_TF | IDE_TFLAG_DEVICE;
+ if (lba48)
+ args.tf_flags |= (IDE_TFLAG_LBA48 | IDE_TFLAG_HOB);
+ /* submit command request */
+ ide_no_data_taskfile(drive, &args);
+
+ /* if OK, compute maximum address value */
+ if ((tf->status & 0x01) == 0)
+ addr = ide_get_lba_addr(tf, lba48) + 1;
+
+ return addr;
+}
+
+/*
+ * Sets maximum virtual LBA address of the drive.
+ * Returns new maximum virtual LBA address (> 0) or 0 on failure.
+ */
+static u64 idedisk_set_max_address(ide_drive_t *drive, u64 addr_req, int lba48)
+{
+ ide_task_t args;
+ struct ide_taskfile *tf = &args.tf;
+ u64 addr_set = 0;
+
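+	/* the taskfile takes the address of the last sector, not a sector count */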
+ addr_req--;
+ /* Create IDE/ATA command request structure */
+ memset(&args, 0, sizeof(ide_task_t));
+ tf->lbal = (addr_req >> 0) & 0xff;
+ tf->lbam = (addr_req >>= 8) & 0xff;
+ tf->lbah = (addr_req >>= 8) & 0xff;
+ if (lba48) {
+ tf->hob_lbal = (addr_req >>= 8) & 0xff;
+ tf->hob_lbam = (addr_req >>= 8) & 0xff;
+ tf->hob_lbah = (addr_req >>= 8) & 0xff;
+ tf->command = ATA_CMD_SET_MAX_EXT;
+ } else {
+ tf->device = (addr_req >>= 8) & 0x0f;
+ tf->command = ATA_CMD_SET_MAX;
+ }
+ tf->device |= ATA_LBA;
+ args.tf_flags = IDE_TFLAG_TF | IDE_TFLAG_DEVICE;
+ if (lba48)
+ args.tf_flags |= (IDE_TFLAG_LBA48 | IDE_TFLAG_HOB);
+ /* submit command request */
+ ide_no_data_taskfile(drive, &args);
+ /* if OK, compute maximum address value */
+ if ((tf->status & 0x01) == 0)
+ addr_set = ide_get_lba_addr(tf, lba48) + 1;
+
+ return addr_set;
+}
+
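+/* "MB" here means decimal megabytes (10^6 bytes), matching the capacity figures printed by this driver. */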
+static unsigned long long sectors_to_MB(unsigned long long n)
+{
+ n <<= 9; /* make it bytes */
+ do_div(n, 1000000); /* make it MB */
+ return n;
+}
+
+/*
+ * Some disks report total number of sectors instead of
+ * maximum sector address. We list them here.
+ */
+static const struct drive_list_entry hpa_list[] = {
+ { "ST340823A", NULL },
+ { "ST320413A", NULL },
+ { "ST310211A", NULL },
+ { NULL, NULL }
+};
+
+static void idedisk_check_hpa(ide_drive_t *drive)
+{
+ unsigned long long capacity, set_max;
+ int lba48 = ata_id_lba48_enabled(drive->id);
+
+ capacity = drive->capacity64;
+
+ set_max = idedisk_read_native_max_address(drive, lba48);
+
+ if (ide_in_drive_list(drive->id, hpa_list)) {
+ /*
+		 * Since we are inclusive wrt firmware revisions, do this
+ * extra check and apply the workaround only when needed.
+ */
+ if (set_max == capacity + 1)
+ set_max--;
+ }
+
+ if (set_max <= capacity)
+ return;
+
+ printk(KERN_INFO "%s: Host Protected Area detected.\n"
+ "\tcurrent capacity is %llu sectors (%llu MB)\n"
+ "\tnative capacity is %llu sectors (%llu MB)\n",
+ drive->name,
+ capacity, sectors_to_MB(capacity),
+ set_max, sectors_to_MB(set_max));
+
+ set_max = idedisk_set_max_address(drive, set_max, lba48);
+
+ if (set_max) {
+ drive->capacity64 = set_max;
+ printk(KERN_INFO "%s: Host Protected Area disabled.\n",
+ drive->name);
+ }
+}
+
+static int ide_disk_get_capacity(ide_drive_t *drive)
+{
+ u16 *id = drive->id;
+ int lba;
+
+ if (ata_id_lba48_enabled(id)) {
+ /* drive speaks 48-bit LBA */
+ lba = 1;
+ drive->capacity64 = ata_id_u64(id, ATA_ID_LBA_CAPACITY_2);
+ } else if (ata_id_has_lba(id) && ata_id_is_lba_capacity_ok(id)) {
+ /* drive speaks 28-bit LBA */
+ lba = 1;
+ drive->capacity64 = ata_id_u32(id, ATA_ID_LBA_CAPACITY);
+ } else {
+ /* drive speaks boring old 28-bit CHS */
+ lba = 0;
+ drive->capacity64 = drive->cyl * drive->head * drive->sect;
+ }
+
+ if (lba) {
+ drive->dev_flags |= IDE_DFLAG_LBA;
+
+ /*
+ * If this device supports the Host Protected Area feature set,
+ * then we may need to change our opinion about its capacity.
+ */
+ if (ata_id_hpa_enabled(id))
+ idedisk_check_hpa(drive);
+ }
+
+ /* limit drive capacity to 137GB if LBA48 cannot be used */
+ if ((drive->dev_flags & IDE_DFLAG_LBA48) == 0 &&
+ drive->capacity64 > 1ULL << 28) {
+ printk(KERN_WARNING "%s: cannot use LBA48 - full capacity "
+ "%llu sectors (%llu MB)\n",
+ drive->name, (unsigned long long)drive->capacity64,
+ sectors_to_MB(drive->capacity64));
+ drive->capacity64 = 1ULL << 28;
+ }
+
+ if ((drive->hwif->host_flags & IDE_HFLAG_NO_LBA48_DMA) &&
+ (drive->dev_flags & IDE_DFLAG_LBA48)) {
+ if (drive->capacity64 > 1ULL << 28) {
+ printk(KERN_INFO "%s: cannot use LBA48 DMA - PIO mode"
+ " will be used for accessing sectors "
+ "> %u\n", drive->name, 1 << 28);
+ } else
+ drive->dev_flags &= ~IDE_DFLAG_LBA48;
+ }
+
+ return 0;
+}
+
+static void idedisk_prepare_flush(struct request_queue *q, struct request *rq)
+{
+ ide_drive_t *drive = q->queuedata;
+ ide_task_t *task = kmalloc(sizeof(*task), GFP_ATOMIC);
+
+ /* FIXME: map struct ide_taskfile on rq->cmd[] */
+ BUG_ON(task == NULL);
+
+ memset(task, 0, sizeof(*task));
+ if (ata_id_flush_ext_enabled(drive->id) &&
+ (drive->capacity64 >= (1UL << 28)))
+ task->tf.command = ATA_CMD_FLUSH_EXT;
+ else
+ task->tf.command = ATA_CMD_FLUSH;
+ task->tf_flags = IDE_TFLAG_OUT_TF | IDE_TFLAG_OUT_DEVICE |
+ IDE_TFLAG_DYN;
+ task->data_phase = TASKFILE_NO_DATA;
+
+ rq->cmd_type = REQ_TYPE_ATA_TASKFILE;
+ rq->cmd_flags |= REQ_SOFTBARRIER;
+ rq->special = task;
+}
+
+ide_devset_get(multcount, mult_count);
+
+/*
+ * This is tightly woven into the driver->do_special handling and must not
+ * be touched.  DON'T do it again until a total personality rewrite is
+ * committed.
+ */
+static int set_multcount(ide_drive_t *drive, int arg)
+{
+ struct request *rq;
+ int error;
+
+ if (arg < 0 || arg > (drive->id[ATA_ID_MAX_MULTSECT] & 0xff))
+ return -EINVAL;
+
+ if (drive->special.b.set_multmode)
+ return -EBUSY;
+
+ rq = blk_get_request(drive->queue, READ, __GFP_WAIT);
+ rq->cmd_type = REQ_TYPE_ATA_TASKFILE;
+
+ drive->mult_req = arg;
+ drive->special.b.set_multmode = 1;
+ error = blk_execute_rq(drive->queue, NULL, rq, 0);
+ blk_put_request(rq);
+
+ return (drive->mult_count == arg) ? 0 : -EIO;
+}
+
+ide_devset_get_flag(nowerr, IDE_DFLAG_NOWERR);
+
+static int set_nowerr(ide_drive_t *drive, int arg)
+{
+ if (arg < 0 || arg > 1)
+ return -EINVAL;
+
+ if (arg)
+ drive->dev_flags |= IDE_DFLAG_NOWERR;
+ else
+ drive->dev_flags &= ~IDE_DFLAG_NOWERR;
+
+ drive->bad_wstat = arg ? BAD_R_STAT : BAD_W_STAT;
+
+ return 0;
+}
+
+static int ide_do_setfeature(ide_drive_t *drive, u8 feature, u8 nsect)
+{
+ ide_task_t task;
+
+ memset(&task, 0, sizeof(task));
+ task.tf.feature = feature;
+ task.tf.nsect = nsect;
+ task.tf.command = ATA_CMD_SET_FEATURES;
+ task.tf_flags = IDE_TFLAG_TF | IDE_TFLAG_DEVICE;
+
+ return ide_no_data_taskfile(drive, &task);
+}
+
+static void update_ordered(ide_drive_t *drive)
+{
+ u16 *id = drive->id;
+ unsigned ordered = QUEUE_ORDERED_NONE;
+ prepare_flush_fn *prep_fn = NULL;
+
+ if (drive->dev_flags & IDE_DFLAG_WCACHE) {
+ unsigned long long capacity;
+ int barrier;
+ /*
+ * We must avoid issuing commands a drive does not
+ * understand or we may crash it. We check flush cache
+ * is supported. We also check we have the LBA48 flush
+ * cache if the drive capacity is too large. By this
+ * time we have trimmed the drive capacity if LBA48 is
+ * not available so we don't need to recheck that.
+ */
+ capacity = ide_gd_capacity(drive);
+ barrier = ata_id_flush_enabled(id) &&
+ (drive->dev_flags & IDE_DFLAG_NOFLUSH) == 0 &&
+ ((drive->dev_flags & IDE_DFLAG_LBA48) == 0 ||
+ capacity <= (1ULL << 28) ||
+ ata_id_flush_ext_enabled(id));
+
+ printk(KERN_INFO "%s: cache flushes %ssupported\n",
+ drive->name, barrier ? "" : "not ");
+
+ if (barrier) {
+ ordered = QUEUE_ORDERED_DRAIN_FLUSH;
+ prep_fn = idedisk_prepare_flush;
+ }
+ } else
+ ordered = QUEUE_ORDERED_DRAIN;
+
+ blk_queue_ordered(drive->queue, ordered, prep_fn);
+}
+
+ide_devset_get_flag(wcache, IDE_DFLAG_WCACHE);
+
+static int set_wcache(ide_drive_t *drive, int arg)
+{
+ int err = 1;
+
+ if (arg < 0 || arg > 1)
+ return -EINVAL;
+
+ if (ata_id_flush_enabled(drive->id)) {
+ err = ide_do_setfeature(drive,
+ arg ? SETFEATURES_WC_ON : SETFEATURES_WC_OFF, 0);
+ if (err == 0) {
+ if (arg)
+ drive->dev_flags |= IDE_DFLAG_WCACHE;
+ else
+ drive->dev_flags &= ~IDE_DFLAG_WCACHE;
+ }
+ }
+
+ update_ordered(drive);
+
+ return err;
+}
+
+static int do_idedisk_flushcache(ide_drive_t *drive)
+{
+ ide_task_t args;
+
+ memset(&args, 0, sizeof(ide_task_t));
+ if (ata_id_flush_ext_enabled(drive->id))
+ args.tf.command = ATA_CMD_FLUSH_EXT;
+ else
+ args.tf.command = ATA_CMD_FLUSH;
+ args.tf_flags = IDE_TFLAG_TF | IDE_TFLAG_DEVICE;
+ return ide_no_data_taskfile(drive, &args);
+}
+
+ide_devset_get(acoustic, acoustic);
+
+static int set_acoustic(ide_drive_t *drive, int arg)
+{
+ if (arg < 0 || arg > 254)
+ return -EINVAL;
+
+ ide_do_setfeature(drive,
+ arg ? SETFEATURES_AAM_ON : SETFEATURES_AAM_OFF, arg);
+
+ drive->acoustic = arg;
+
+ return 0;
+}
+
+ide_devset_get_flag(addressing, IDE_DFLAG_LBA48);
+
+/*
+ * drive->addressing:
+ * 0: 28-bit
+ * 1: 48-bit
+ * 2: 48-bit capable doing 28-bit
+ */
+static int set_addressing(ide_drive_t *drive, int arg)
+{
+ if (arg < 0 || arg > 2)
+ return -EINVAL;
+
+ if (arg && ((drive->hwif->host_flags & IDE_HFLAG_NO_LBA48) ||
+ ata_id_lba48_enabled(drive->id) == 0))
+ return -EIO;
+
+ if (arg == 2)
+ arg = 0;
+
+ if (arg)
+ drive->dev_flags |= IDE_DFLAG_LBA48;
+ else
+ drive->dev_flags &= ~IDE_DFLAG_LBA48;
+
+ return 0;
+}
+
+ide_ext_devset_rw(acoustic, acoustic);
+ide_ext_devset_rw(address, addressing);
+ide_ext_devset_rw(multcount, multcount);
+ide_ext_devset_rw(wcache, wcache);
+
+ide_ext_devset_rw_sync(nowerr, nowerr);
+
+static int ide_disk_check(ide_drive_t *drive, const char *s)
+{
+ return 1;
+}
+
+static void ide_disk_setup(ide_drive_t *drive)
+{
+ struct ide_disk_obj *idkp = drive->driver_data;
+ struct request_queue *q = drive->queue;
+ ide_hwif_t *hwif = drive->hwif;
+ u16 *id = drive->id;
+ char *m = (char *)&id[ATA_ID_PROD];
+ unsigned long long capacity;
+
+ ide_proc_register_driver(drive, idkp->driver);
+
+ if ((drive->dev_flags & IDE_DFLAG_ID_READ) == 0)
+ return;
+
+ if (drive->dev_flags & IDE_DFLAG_REMOVABLE) {
+ /*
+		 * Removable disks (e.g. SYQUEST); ignore 'WD' drives
+ */
+ if (m[0] != 'W' || m[1] != 'D')
+ drive->dev_flags |= IDE_DFLAG_DOORLOCKING;
+ }
+
+ (void)set_addressing(drive, 1);
+
+ if (drive->dev_flags & IDE_DFLAG_LBA48) {
+ int max_s = 2048;
+
+ if (max_s > hwif->rqsize)
+ max_s = hwif->rqsize;
+
+ blk_queue_max_sectors(q, max_s);
+ }
+
+ printk(KERN_INFO "%s: max request size: %dKiB\n", drive->name,
+ q->max_sectors / 2);
+
+ if (ata_id_is_ssd(id))
+ queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
+
+ /* calculate drive capacity, and select LBA if possible */
+ ide_disk_get_capacity(drive);
+
+ /*
+ * if possible, give fdisk access to more of the drive,
+ * by correcting bios_cyls:
+ */
+ capacity = ide_gd_capacity(drive);
+
+ if ((drive->dev_flags & IDE_DFLAG_FORCED_GEOM) == 0) {
+ if (ata_id_lba48_enabled(drive->id)) {
+ /* compatibility */
+ drive->bios_sect = 63;
+ drive->bios_head = 255;
+ }
+
+ if (drive->bios_sect && drive->bios_head) {
+ unsigned int cap0 = capacity; /* truncate to 32 bits */
+ unsigned int cylsz, cyl;
+
+ if (cap0 != capacity)
+ drive->bios_cyl = 65535;
+ else {
+ cylsz = drive->bios_sect * drive->bios_head;
+ cyl = cap0 / cylsz;
+ if (cyl > 65535)
+ cyl = 65535;
+ if (cyl > drive->bios_cyl)
+ drive->bios_cyl = cyl;
+ }
+ }
+ }
+ printk(KERN_INFO "%s: %llu sectors (%llu MB)",
+ drive->name, capacity, sectors_to_MB(capacity));
+
+ /* Only print cache size when it was specified */
+ if (id[ATA_ID_BUF_SIZE])
+ printk(KERN_CONT " w/%dKiB Cache", id[ATA_ID_BUF_SIZE] / 2);
+
+ printk(KERN_CONT ", CHS=%d/%d/%d\n",
+ drive->bios_cyl, drive->bios_head, drive->bios_sect);
+
+ /* write cache enabled? */
+ if ((id[ATA_ID_CSFO] & 1) || ata_id_wcache_enabled(id))
+ drive->dev_flags |= IDE_DFLAG_WCACHE;
+
+ set_wcache(drive, 1);
+
+ if ((drive->dev_flags & IDE_DFLAG_LBA) == 0 &&
+ (drive->head == 0 || drive->head > 16)) {
+ printk(KERN_ERR "%s: invalid geometry: %d physical heads?\n",
+ drive->name, drive->head);
+ drive->dev_flags &= ~IDE_DFLAG_ATTACH;
+ } else
+ drive->dev_flags |= IDE_DFLAG_ATTACH;
+}
+
+static void ide_disk_flush(ide_drive_t *drive)
+{
+ if (ata_id_flush_enabled(drive->id) == 0 ||
+ (drive->dev_flags & IDE_DFLAG_WCACHE) == 0)
+ return;
+
+ if (do_idedisk_flushcache(drive))
+ printk(KERN_INFO "%s: wcache flush failed!\n", drive->name);
+}
+
+static int ide_disk_init_media(ide_drive_t *drive, struct gendisk *disk)
+{
+ return 0;
+}
+
+static int ide_disk_set_doorlock(ide_drive_t *drive, struct gendisk *disk,
+ int on)
+{
+ ide_task_t task;
+ int ret;
+
+ if ((drive->dev_flags & IDE_DFLAG_DOORLOCKING) == 0)
+ return 0;
+
+ memset(&task, 0, sizeof(task));
+ task.tf.command = on ? ATA_CMD_MEDIA_LOCK : ATA_CMD_MEDIA_UNLOCK;
+ task.tf_flags = IDE_TFLAG_TF | IDE_TFLAG_DEVICE;
+
+ ret = ide_no_data_taskfile(drive, &task);
+
+ if (ret)
+ drive->dev_flags &= ~IDE_DFLAG_DOORLOCKING;
+
+ return ret;
+}
+
+#ifdef DDE_LINUX
+int ide_disk_ioctl(ide_drive_t *drive, struct block_device *bdev, fmode_t mode,
+ unsigned int cmd, unsigned long arg)
+{
+ return 0;
+}
+#endif
+
+const struct ide_disk_ops ide_ata_disk_ops = {
+ .check = ide_disk_check,
+ .get_capacity = ide_disk_get_capacity,
+ .setup = ide_disk_setup,
+ .flush = ide_disk_flush,
+ .init_media = ide_disk_init_media,
+ .set_doorlock = ide_disk_set_doorlock,
+ .do_request = ide_do_rw_disk,
+ .end_request = ide_end_request,
+ .ioctl = ide_disk_ioctl,
+};
diff --git a/windhoek/ide/ide-disk.h b/windhoek/ide/ide-disk.h
new file mode 100644
index 00000000..326a12ff
--- /dev/null
+++ b/windhoek/ide/ide-disk.h
@@ -0,0 +1,30 @@
+#ifndef __IDE_DISK_H
+#define __IDE_DISK_H
+
+#include "ide-gd.h"
+#include "local.h"
+
+#ifdef CONFIG_IDE_GD_ATA
+/* ide-disk.c */
+extern const struct ide_disk_ops ide_ata_disk_ops;
+ide_decl_devset(address);
+ide_decl_devset(multcount);
+ide_decl_devset(nowerr);
+ide_decl_devset(wcache);
+ide_decl_devset(acoustic);
+
+/* ide-disk_ioctl.c */
+int ide_disk_ioctl(ide_drive_t *, struct block_device *, fmode_t, unsigned int,
+ unsigned long);
+
+#ifdef CONFIG_IDE_PROC_FS
+/* ide-disk_proc.c */
+extern ide_proc_entry_t ide_disk_proc[];
+extern const struct ide_proc_devset ide_disk_settings[];
+#endif
+#else
+#define ide_disk_proc NULL
+#define ide_disk_settings NULL
+#endif
+
+#endif /* __IDE_DISK_H */
diff --git a/windhoek/ide/ide-dma.c b/windhoek/ide/ide-dma.c
new file mode 100644
index 00000000..059c90bb
--- /dev/null
+++ b/windhoek/ide/ide-dma.c
@@ -0,0 +1,507 @@
+/*
+ * IDE DMA support (including IDE PCI BM-DMA).
+ *
+ * Copyright (C) 1995-1998 Mark Lord
+ * Copyright (C) 1999-2000 Andre Hedrick <andre@linux-ide.org>
+ * Copyright (C) 2004, 2007 Bartlomiej Zolnierkiewicz
+ *
+ * May be copied or modified under the terms of the GNU General Public License
+ *
+ * DMA is supported for all IDE devices (disk drives, cdroms, tapes, floppies).
+ */
+
+/*
+ * Special Thanks to Mark for his Six years of work.
+ */
+
+/*
+ * Thanks to "Christopher J. Reimer" <reimer@doe.carleton.ca> for
+ * fixing the problem with the BIOS on some Acer motherboards.
+ *
+ * Thanks to "Benoit Poulot-Cazajous" <poulot@chorus.fr> for testing
+ * "TX" chipset compatibility and for providing patches for the "TX" chipset.
+ *
+ * Thanks to Christian Brunner <chb@muc.de> for taking a good first crack
+ * at generic DMA -- his patches were referred to when preparing this code.
+ *
+ * Most importantly, thanks to Robert Bringman <rob@mars.trion.com>
+ * for supplying a Promise UDMA board & WD UDMA drive for this work!
+ */
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/ide.h>
+#include <linux/scatterlist.h>
+#include <linux/dma-mapping.h>
+
+static const struct drive_list_entry drive_whitelist[] = {
+ { "Micropolis 2112A" , NULL },
+ { "CONNER CTMA 4000" , NULL },
+ { "CONNER CTT8000-A" , NULL },
+ { "ST34342A" , NULL },
+ { NULL , NULL }
+};
+
+static const struct drive_list_entry drive_blacklist[] = {
+ { "WDC AC11000H" , NULL },
+ { "WDC AC22100H" , NULL },
+ { "WDC AC32500H" , NULL },
+ { "WDC AC33100H" , NULL },
+ { "WDC AC31600H" , NULL },
+ { "WDC AC32100H" , "24.09P07" },
+ { "WDC AC23200L" , "21.10N21" },
+ { "Compaq CRD-8241B" , NULL },
+ { "CRD-8400B" , NULL },
+ { "CRD-8480B", NULL },
+ { "CRD-8482B", NULL },
+ { "CRD-84" , NULL },
+ { "SanDisk SDP3B" , NULL },
+ { "SanDisk SDP3B-64" , NULL },
+ { "SANYO CD-ROM CRD" , NULL },
+ { "HITACHI CDR-8" , NULL },
+ { "HITACHI CDR-8335" , NULL },
+ { "HITACHI CDR-8435" , NULL },
+ { "Toshiba CD-ROM XM-6202B" , NULL },
+ { "TOSHIBA CD-ROM XM-1702BC", NULL },
+ { "CD-532E-A" , NULL },
+ { "E-IDE CD-ROM CR-840", NULL },
+ { "CD-ROM Drive/F5A", NULL },
+ { "WPI CDD-820", NULL },
+ { "SAMSUNG CD-ROM SC-148C", NULL },
+ { "SAMSUNG CD-ROM SC", NULL },
+ { "ATAPI CD-ROM DRIVE 40X MAXIMUM", NULL },
+ { "_NEC DV5800A", NULL },
+ { "SAMSUNG CD-ROM SN-124", "N001" },
+ { "Seagate STT20000A", NULL },
+ { "CD-ROM CDR_U200", "1.09" },
+ { NULL , NULL }
+
+};
+
+/**
+ * ide_dma_intr - IDE DMA interrupt handler
+ * @drive: the drive the interrupt is for
+ *
+ * Handle an interrupt completing a read/write DMA transfer on an
+ * IDE device
+ */
+
+ide_startstop_t ide_dma_intr(ide_drive_t *drive)
+{
+ ide_hwif_t *hwif = drive->hwif;
+ u8 stat = 0, dma_stat = 0;
+
+ dma_stat = hwif->dma_ops->dma_end(drive);
+ stat = hwif->tp_ops->read_status(hwif);
+
+ if (OK_STAT(stat, DRIVE_READY, drive->bad_wstat | ATA_DRQ)) {
+ if (!dma_stat) {
+ struct request *rq = hwif->rq;
+
+ task_end_request(drive, rq, stat);
+ return ide_stopped;
+ }
+ printk(KERN_ERR "%s: %s: bad DMA status (0x%02x)\n",
+ drive->name, __func__, dma_stat);
+ }
+ return ide_error(drive, "dma_intr", stat);
+}
+EXPORT_SYMBOL_GPL(ide_dma_intr);
+
+int ide_dma_good_drive(ide_drive_t *drive)
+{
+ return ide_in_drive_list(drive->id, drive_whitelist);
+}
+
+/**
+ * ide_build_sglist - map IDE scatter gather for DMA I/O
+ * @drive: the drive to build the DMA table for
+ * @rq: the request holding the sg list
+ *
+ * Perform the DMA mapping magic necessary to access the source or
+ * target buffers of a request via DMA. The lower layers of the
+ * kernel provide the necessary cache management so that we can
+ * operate in a portable fashion.
+ */
+
+int ide_build_sglist(ide_drive_t *drive, struct request *rq)
+{
+ ide_hwif_t *hwif = drive->hwif;
+ struct scatterlist *sg = hwif->sg_table;
+ int i;
+
+ ide_map_sg(drive, rq);
+
+ if (rq_data_dir(rq) == READ)
+ hwif->sg_dma_direction = DMA_FROM_DEVICE;
+ else
+ hwif->sg_dma_direction = DMA_TO_DEVICE;
+
+ i = dma_map_sg(hwif->dev, sg, hwif->sg_nents, hwif->sg_dma_direction);
+ if (i) {
+ hwif->orig_sg_nents = hwif->sg_nents;
+ hwif->sg_nents = i;
+ }
+
+ return i;
+}
+EXPORT_SYMBOL_GPL(ide_build_sglist);
+
+/**
+ * ide_destroy_dmatable - clean up DMA mapping
+ * @drive: The drive to unmap
+ *
+ * Teardown mappings after DMA has completed. This must be called
+ * after the completion of each use of ide_build_dmatable and before
+ * the next use of ide_build_dmatable. Failure to do so will cause
+ * an oops as only one mapping can be live for each target at a given
+ * time.
+ */
+
+void ide_destroy_dmatable(ide_drive_t *drive)
+{
+ ide_hwif_t *hwif = drive->hwif;
+
+ dma_unmap_sg(hwif->dev, hwif->sg_table, hwif->orig_sg_nents,
+ hwif->sg_dma_direction);
+}
+EXPORT_SYMBOL_GPL(ide_destroy_dmatable);
+
+/**
+ * ide_dma_off_quietly - Generic DMA kill
+ * @drive: drive to control
+ *
+ * Turn off the current DMA on this IDE controller.
+ */
+
+void ide_dma_off_quietly(ide_drive_t *drive)
+{
+ drive->dev_flags &= ~IDE_DFLAG_USING_DMA;
+ ide_toggle_bounce(drive, 0);
+
+ drive->hwif->dma_ops->dma_host_set(drive, 0);
+}
+EXPORT_SYMBOL(ide_dma_off_quietly);
+
+/**
+ * ide_dma_off - disable DMA on a device
+ * @drive: drive to disable DMA on
+ *
+ * Disable IDE DMA for a device on this IDE controller.
+ * Inform the user that DMA has been disabled.
+ */
+
+void ide_dma_off(ide_drive_t *drive)
+{
+ printk(KERN_INFO "%s: DMA disabled\n", drive->name);
+ ide_dma_off_quietly(drive);
+}
+EXPORT_SYMBOL(ide_dma_off);
+
+/**
+ * ide_dma_on - Enable DMA on a device
+ * @drive: drive to enable DMA on
+ *
+ * Enable IDE DMA for a device on this IDE controller.
+ */
+
+void ide_dma_on(ide_drive_t *drive)
+{
+ drive->dev_flags |= IDE_DFLAG_USING_DMA;
+ ide_toggle_bounce(drive, 1);
+
+ drive->hwif->dma_ops->dma_host_set(drive, 1);
+}
+
+int __ide_dma_bad_drive(ide_drive_t *drive)
+{
+ u16 *id = drive->id;
+
+ int blacklist = ide_in_drive_list(id, drive_blacklist);
+ if (blacklist) {
+ printk(KERN_WARNING "%s: Disabling (U)DMA for %s (blacklisted)\n",
+ drive->name, (char *)&id[ATA_ID_PROD]);
+ return blacklist;
+ }
+ return 0;
+}
+EXPORT_SYMBOL(__ide_dma_bad_drive);
+
+static const u8 xfer_mode_bases[] = {
+ XFER_UDMA_0,
+ XFER_MW_DMA_0,
+ XFER_SW_DMA_0,
+};
+
+static unsigned int ide_get_mode_mask(ide_drive_t *drive, u8 base, u8 req_mode)
+{
+ u16 *id = drive->id;
+ ide_hwif_t *hwif = drive->hwif;
+ const struct ide_port_ops *port_ops = hwif->port_ops;
+ unsigned int mask = 0;
+
+ switch (base) {
+ case XFER_UDMA_0:
+ if ((id[ATA_ID_FIELD_VALID] & 4) == 0)
+ break;
+
+ if (port_ops && port_ops->udma_filter)
+ mask = port_ops->udma_filter(drive);
+ else
+ mask = hwif->ultra_mask;
+ mask &= id[ATA_ID_UDMA_MODES];
+
+ /*
+ * avoid false cable warning from eighty_ninty_three()
+ */
+ if (req_mode > XFER_UDMA_2) {
+ if ((mask & 0x78) && (eighty_ninty_three(drive) == 0))
+ mask &= 0x07;
+ }
+ break;
+ case XFER_MW_DMA_0:
+ if ((id[ATA_ID_FIELD_VALID] & 2) == 0)
+ break;
+ if (port_ops && port_ops->mdma_filter)
+ mask = port_ops->mdma_filter(drive);
+ else
+ mask = hwif->mwdma_mask;
+ mask &= id[ATA_ID_MWDMA_MODES];
+ break;
+ case XFER_SW_DMA_0:
+ if (id[ATA_ID_FIELD_VALID] & 2) {
+ mask = id[ATA_ID_SWDMA_MODES] & hwif->swdma_mask;
+ } else if (id[ATA_ID_OLD_DMA_MODES] >> 8) {
+ u8 mode = id[ATA_ID_OLD_DMA_MODES] >> 8;
+
+ /*
+ * if the mode is valid convert it to the mask
+ * (the maximum allowed mode is XFER_SW_DMA_2)
+ */
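+			/* e.g. mode 2 gives (2 << 2) - 1 = 0x07, i.e. SW DMA modes 0-2 */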
+ if (mode <= 2)
+ mask = ((2 << mode) - 1) & hwif->swdma_mask;
+ }
+ break;
+ default:
+ BUG();
+ break;
+ }
+
+ return mask;
+}
+
+/**
+ * ide_find_dma_mode - compute DMA speed
+ * @drive: IDE device
+ * @req_mode: requested mode
+ *
+ * Checks the drive/host capabilities and finds the speed to use for
+ * the DMA transfer. The speed is then limited by the requested mode.
+ *
+ * Returns 0 if the drive/host combination is incapable of DMA transfers
+ * or if the requested mode is not a DMA mode.
+ */
+
+u8 ide_find_dma_mode(ide_drive_t *drive, u8 req_mode)
+{
+ ide_hwif_t *hwif = drive->hwif;
+ unsigned int mask;
+ int x, i;
+ u8 mode = 0;
+
+ if (drive->media != ide_disk) {
+ if (hwif->host_flags & IDE_HFLAG_NO_ATAPI_DMA)
+ return 0;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(xfer_mode_bases); i++) {
+ if (req_mode < xfer_mode_bases[i])
+ continue;
+ mask = ide_get_mode_mask(drive, xfer_mode_bases[i], req_mode);
+ x = fls(mask) - 1;
+ if (x >= 0) {
+ mode = xfer_mode_bases[i] + x;
+ break;
+ }
+ }
+
+ if (hwif->chipset == ide_acorn && mode == 0) {
+ /*
+ * is this correct?
+ */
+ if (ide_dma_good_drive(drive) &&
+ drive->id[ATA_ID_EIDE_DMA_TIME] < 150)
+ mode = XFER_MW_DMA_1;
+ }
+
+ mode = min(mode, req_mode);
+
+ printk(KERN_INFO "%s: %s mode selected\n", drive->name,
+ mode ? ide_xfer_verbose(mode) : "no DMA");
+
+ return mode;
+}
+EXPORT_SYMBOL_GPL(ide_find_dma_mode);
+
+static int ide_tune_dma(ide_drive_t *drive)
+{
+ ide_hwif_t *hwif = drive->hwif;
+ u8 speed;
+
+ if (ata_id_has_dma(drive->id) == 0 ||
+ (drive->dev_flags & IDE_DFLAG_NODMA))
+ return 0;
+
+ /* consult the list of known "bad" drives */
+ if (__ide_dma_bad_drive(drive))
+ return 0;
+
+ if (ide_id_dma_bug(drive))
+ return 0;
+
+ if (hwif->host_flags & IDE_HFLAG_TRUST_BIOS_FOR_DMA)
+ return config_drive_for_dma(drive);
+
+ speed = ide_max_dma_mode(drive);
+
+ if (!speed)
+ return 0;
+
+ if (ide_set_dma_mode(drive, speed))
+ return 0;
+
+ return 1;
+}
+
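+/*
+ * Returns 0 if DMA was successfully configured, -1 if the drive should be
+ * left in (or put back into) PIO mode.
+ */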
+static int ide_dma_check(ide_drive_t *drive)
+{
+ ide_hwif_t *hwif = drive->hwif;
+
+ if (ide_tune_dma(drive))
+ return 0;
+
+ /* TODO: always do PIO fallback */
+ if (hwif->host_flags & IDE_HFLAG_TRUST_BIOS_FOR_DMA)
+ return -1;
+
+ ide_set_max_pio(drive);
+
+ return -1;
+}
+
+int ide_id_dma_bug(ide_drive_t *drive)
+{
+ u16 *id = drive->id;
+
+ if (id[ATA_ID_FIELD_VALID] & 4) {
+ if ((id[ATA_ID_UDMA_MODES] >> 8) &&
+ (id[ATA_ID_MWDMA_MODES] >> 8))
+ goto err_out;
+ } else if (id[ATA_ID_FIELD_VALID] & 2) {
+ if ((id[ATA_ID_MWDMA_MODES] >> 8) &&
+ (id[ATA_ID_SWDMA_MODES] >> 8))
+ goto err_out;
+ }
+ return 0;
+err_out:
+ printk(KERN_ERR "%s: bad DMA info in identify block\n", drive->name);
+ return 1;
+}
+
+int ide_set_dma(ide_drive_t *drive)
+{
+ int rc;
+
+ /*
+ * Force DMAing for the beginning of the check.
+ * Some chipsets appear to do interesting
+ * things, if not checked and cleared.
+ * PARANOIA!!!
+ */
+ ide_dma_off_quietly(drive);
+
+ rc = ide_dma_check(drive);
+ if (rc)
+ return rc;
+
+ ide_dma_on(drive);
+
+ return 0;
+}
+
+void ide_check_dma_crc(ide_drive_t *drive)
+{
+ u8 mode;
+
+ ide_dma_off_quietly(drive);
+ drive->crc_count = 0;
+ mode = drive->current_speed;
+ /*
+ * Don't try non Ultra-DMA modes without iCRC's. Force the
+ * device to PIO and make the user enable SWDMA/MWDMA modes.
+ */
+ if (mode > XFER_UDMA_0 && mode <= XFER_UDMA_7)
+ mode--;
+ else
+ mode = XFER_PIO_4;
+ ide_set_xfer_rate(drive, mode);
+ if (drive->current_speed >= XFER_SW_DMA_0)
+ ide_dma_on(drive);
+}
+
+void ide_dma_lost_irq(ide_drive_t *drive)
+{
+ printk(KERN_ERR "%s: DMA interrupt recovery\n", drive->name);
+}
+EXPORT_SYMBOL_GPL(ide_dma_lost_irq);
+
+void ide_dma_timeout(ide_drive_t *drive)
+{
+ ide_hwif_t *hwif = drive->hwif;
+
+ printk(KERN_ERR "%s: timeout waiting for DMA\n", drive->name);
+
+ if (hwif->dma_ops->dma_test_irq(drive))
+ return;
+
+ ide_dump_status(drive, "DMA timeout", hwif->tp_ops->read_status(hwif));
+
+ hwif->dma_ops->dma_end(drive);
+}
+EXPORT_SYMBOL_GPL(ide_dma_timeout);
+
+void ide_release_dma_engine(ide_hwif_t *hwif)
+{
+ if (hwif->dmatable_cpu) {
+ int prd_size = hwif->prd_max_nents * hwif->prd_ent_size;
+
+ dma_free_coherent(hwif->dev, prd_size,
+ hwif->dmatable_cpu, hwif->dmatable_dma);
+ hwif->dmatable_cpu = NULL;
+ }
+}
+EXPORT_SYMBOL_GPL(ide_release_dma_engine);
+
+int ide_allocate_dma_engine(ide_hwif_t *hwif)
+{
+ int prd_size;
+
+ if (hwif->prd_max_nents == 0)
+ hwif->prd_max_nents = PRD_ENTRIES;
+ if (hwif->prd_ent_size == 0)
+ hwif->prd_ent_size = PRD_BYTES;
+
+ prd_size = hwif->prd_max_nents * hwif->prd_ent_size;
+
+ hwif->dmatable_cpu = dma_alloc_coherent(hwif->dev, prd_size,
+ &hwif->dmatable_dma,
+ GFP_ATOMIC);
+ if (hwif->dmatable_cpu == NULL) {
+ printk(KERN_ERR "%s: unable to allocate PRD table\n",
+ hwif->name);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ide_allocate_dma_engine);
diff --git a/windhoek/ide/ide-floppy.h b/windhoek/ide/ide-floppy.h
new file mode 100644
index 00000000..6dd2beb4
--- /dev/null
+++ b/windhoek/ide/ide-floppy.h
@@ -0,0 +1,39 @@
+#ifndef __IDE_FLOPPY_H
+#define __IDE_FLOPPY_H
+
+#include "ide-gd.h"
+
+#ifdef CONFIG_IDE_GD_ATAPI
+/*
+ * Pages of the SELECT SENSE / MODE SENSE packet commands.
+ * See SFF-8070i spec.
+ */
+#define IDEFLOPPY_CAPABILITIES_PAGE 0x1b
+#define IDEFLOPPY_FLEXIBLE_DISK_PAGE 0x05
+
+/* IOCTLs used in low-level formatting. */
+#define IDEFLOPPY_IOCTL_FORMAT_SUPPORTED 0x4600
+#define IDEFLOPPY_IOCTL_FORMAT_GET_CAPACITY 0x4601
+#define IDEFLOPPY_IOCTL_FORMAT_START 0x4602
+#define IDEFLOPPY_IOCTL_FORMAT_GET_PROGRESS 0x4603
+
+/* ide-floppy.c */
+extern const struct ide_disk_ops ide_atapi_disk_ops;
+void ide_floppy_create_mode_sense_cmd(struct ide_atapi_pc *, u8);
+void ide_floppy_create_read_capacity_cmd(struct ide_atapi_pc *);
+
+/* ide-floppy_ioctl.c */
+int ide_floppy_ioctl(ide_drive_t *, struct block_device *, fmode_t,
+ unsigned int, unsigned long);
+
+#ifdef CONFIG_IDE_PROC_FS
+/* ide-floppy_proc.c */
+extern ide_proc_entry_t ide_floppy_proc[];
+extern const struct ide_proc_devset ide_floppy_settings[];
+#endif
+#else
+#define ide_floppy_proc NULL
+#define ide_floppy_settings NULL
+#endif
+
+#endif /*__IDE_FLOPPY_H */
diff --git a/windhoek/ide/ide-gd.c b/windhoek/ide/ide-gd.c
new file mode 100644
index 00000000..275ed3bf
--- /dev/null
+++ b/windhoek/ide/ide-gd.c
@@ -0,0 +1,409 @@
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/genhd.h>
+#include <linux/mutex.h>
+#include <linux/ide.h>
+#include <linux/hdreg.h>
+
+#if !defined(CONFIG_DEBUG_BLOCK_EXT_DEVT)
+#define IDE_DISK_MINORS (1 << PARTN_BITS)
+#else
+#define IDE_DISK_MINORS 0
+#endif
+
+#include "ide-disk.h"
+#include "ide-floppy.h"
+#include "local.h"
+
+#define IDE_GD_VERSION "1.18"
+
+/* module parameters */
+static unsigned long debug_mask;
+module_param(debug_mask, ulong, 0644);
+
+static DEFINE_MUTEX(ide_disk_ref_mutex);
+
+static void ide_disk_release(struct device *);
+
+static struct ide_disk_obj *ide_disk_get(struct gendisk *disk)
+{
+ struct ide_disk_obj *idkp = NULL;
+
+ mutex_lock(&ide_disk_ref_mutex);
+ idkp = ide_drv_g(disk, ide_disk_obj);
+ if (idkp) {
+ if (ide_device_get(idkp->drive))
+ idkp = NULL;
+ else
+ get_device(&idkp->dev);
+ }
+ mutex_unlock(&ide_disk_ref_mutex);
+ return idkp;
+}
+
+static void ide_disk_put(struct ide_disk_obj *idkp)
+{
+ ide_drive_t *drive = idkp->drive;
+
+ mutex_lock(&ide_disk_ref_mutex);
+ put_device(&idkp->dev);
+ ide_device_put(drive);
+ mutex_unlock(&ide_disk_ref_mutex);
+}
+
+sector_t ide_gd_capacity(ide_drive_t *drive)
+{
+ return drive->capacity64;
+}
+
+static int ide_gd_probe(ide_drive_t *);
+
+static void ide_gd_remove(ide_drive_t *drive)
+{
+ struct ide_disk_obj *idkp = drive->driver_data;
+ struct gendisk *g = idkp->disk;
+
+ ide_proc_unregister_driver(drive, idkp->driver);
+ device_del(&idkp->dev);
+ del_gendisk(g);
+ drive->disk_ops->flush(drive);
+
+ mutex_lock(&ide_disk_ref_mutex);
+ put_device(&idkp->dev);
+ mutex_unlock(&ide_disk_ref_mutex);
+}
+
+static void ide_disk_release(struct device *dev)
+{
+ struct ide_disk_obj *idkp = to_ide_drv(dev, ide_disk_obj);
+ ide_drive_t *drive = idkp->drive;
+ struct gendisk *g = idkp->disk;
+
+ drive->disk_ops = NULL;
+ drive->driver_data = NULL;
+ g->private_data = NULL;
+ put_disk(g);
+ kfree(idkp);
+}
+
+/*
+ * On HPA drives the capacity needs to be reinitialized on resume,
+ * otherwise the disk cannot be used and a hard reset is required.
+ */
+static void ide_gd_resume(ide_drive_t *drive)
+{
+ if (ata_id_hpa_enabled(drive->id))
+ (void)drive->disk_ops->get_capacity(drive);
+}
+
+static void ide_gd_shutdown(ide_drive_t *drive)
+{
+#ifdef CONFIG_ALPHA
+ /* On Alpha, halt(8) doesn't actually turn the machine off,
+	   it puts you into a sort of firmware monitor. Typically,
+ it's used to boot another kernel image, so it's not much
+ different from reboot(8). Therefore, we don't need to
+ spin down the disk in this case, especially since Alpha
+ firmware doesn't handle disks in standby mode properly.
+ On the other hand, it's reasonably safe to turn the power
+ off when the shutdown process reaches the firmware prompt,
+	   as the firmware initialization takes a rather long time -
+ at least 10 seconds, which should be sufficient for
+ the disk to expire its write cache. */
+ if (system_state != SYSTEM_POWER_OFF) {
+#else
+ if (system_state == SYSTEM_RESTART) {
+#endif
+ drive->disk_ops->flush(drive);
+ return;
+ }
+
+ printk(KERN_INFO "Shutdown: %s\n", drive->name);
+
+ drive->gendev.bus->suspend(&drive->gendev, PMSG_SUSPEND);
+}
+
+#ifdef CONFIG_IDE_PROC_FS
+static ide_proc_entry_t *ide_disk_proc_entries(ide_drive_t *drive)
+{
+ return (drive->media == ide_disk) ? ide_disk_proc : ide_floppy_proc;
+}
+
+static const struct ide_proc_devset *ide_disk_proc_devsets(ide_drive_t *drive)
+{
+ return (drive->media == ide_disk) ? ide_disk_settings
+ : ide_floppy_settings;
+}
+#endif
+
+static ide_startstop_t ide_gd_do_request(ide_drive_t *drive,
+ struct request *rq, sector_t sector)
+{
+ return drive->disk_ops->do_request(drive, rq, sector);
+}
+
+static int ide_gd_end_request(ide_drive_t *drive, int uptodate, int nrsecs)
+{
+ return drive->disk_ops->end_request(drive, uptodate, nrsecs);
+}
+
+static struct ide_driver ide_gd_driver = {
+ .gen_driver = {
+ .owner = THIS_MODULE,
+ .name = "ide-gd",
+ .bus = &ide_bus_type,
+ },
+ .probe = ide_gd_probe,
+ .remove = ide_gd_remove,
+ .resume = ide_gd_resume,
+ .shutdown = ide_gd_shutdown,
+ .version = IDE_GD_VERSION,
+ .do_request = ide_gd_do_request,
+ .end_request = ide_gd_end_request,
+#ifdef CONFIG_IDE_PROC_FS
+ .proc_entries = ide_disk_proc_entries,
+ .proc_devsets = ide_disk_proc_devsets,
+#endif
+};
+
+static int ide_gd_open(struct block_device *bdev, fmode_t mode)
+{
+ struct gendisk *disk = bdev->bd_disk;
+ struct ide_disk_obj *idkp;
+ ide_drive_t *drive;
+ int ret = 0;
+
+ idkp = ide_disk_get(disk);
+ if (idkp == NULL)
+ return -ENXIO;
+
+ drive = idkp->drive;
+
+ ide_debug_log(IDE_DBG_FUNC, "Call %s\n", __func__);
+
+ idkp->openers++;
+
+ if ((drive->dev_flags & IDE_DFLAG_REMOVABLE) && idkp->openers == 1) {
+ drive->dev_flags &= ~IDE_DFLAG_FORMAT_IN_PROGRESS;
+ /* Just in case */
+
+ ret = drive->disk_ops->init_media(drive, disk);
+
+ /*
+ * Allow O_NDELAY to open a drive without a disk, or with an
+ * unreadable disk, so that we can get the format capacity
+ * of the drive or begin the format - Sam
+ */
+ if (ret && (mode & FMODE_NDELAY) == 0) {
+ ret = -EIO;
+ goto out_put_idkp;
+ }
+
+ if ((drive->dev_flags & IDE_DFLAG_WP) && (mode & FMODE_WRITE)) {
+ ret = -EROFS;
+ goto out_put_idkp;
+ }
+
+ /*
+ * Ignore the return code from door_lock,
+ * since the open() has already succeeded,
+ * and the door_lock is irrelevant at this point.
+ */
+ drive->disk_ops->set_doorlock(drive, disk, 1);
+ drive->dev_flags |= IDE_DFLAG_MEDIA_CHANGED;
+ check_disk_change(bdev);
+ } else if (drive->dev_flags & IDE_DFLAG_FORMAT_IN_PROGRESS) {
+ ret = -EBUSY;
+ goto out_put_idkp;
+ }
+ return 0;
+
+out_put_idkp:
+ idkp->openers--;
+ ide_disk_put(idkp);
+ return ret;
+}
+
+static int ide_gd_release(struct gendisk *disk, fmode_t mode)
+{
+ struct ide_disk_obj *idkp = ide_drv_g(disk, ide_disk_obj);
+ ide_drive_t *drive = idkp->drive;
+
+ ide_debug_log(IDE_DBG_FUNC, "Call %s\n", __func__);
+
+ if (idkp->openers == 1)
+ drive->disk_ops->flush(drive);
+
+ if ((drive->dev_flags & IDE_DFLAG_REMOVABLE) && idkp->openers == 1) {
+ drive->disk_ops->set_doorlock(drive, disk, 0);
+ drive->dev_flags &= ~IDE_DFLAG_FORMAT_IN_PROGRESS;
+ }
+
+ idkp->openers--;
+
+ ide_disk_put(idkp);
+
+ return 0;
+}
+
+static int ide_gd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
+{
+ struct ide_disk_obj *idkp = ide_drv_g(bdev->bd_disk, ide_disk_obj);
+ ide_drive_t *drive = idkp->drive;
+
+ geo->heads = drive->bios_head;
+ geo->sectors = drive->bios_sect;
+ geo->cylinders = (u16)drive->bios_cyl; /* truncate */
+ return 0;
+}
+
+static int ide_gd_media_changed(struct gendisk *disk)
+{
+ struct ide_disk_obj *idkp = ide_drv_g(disk, ide_disk_obj);
+ ide_drive_t *drive = idkp->drive;
+ int ret;
+
+ /* do not scan partitions twice if this is a removable device */
+ if (drive->dev_flags & IDE_DFLAG_ATTACH) {
+ drive->dev_flags &= ~IDE_DFLAG_ATTACH;
+ return 0;
+ }
+
+ ret = !!(drive->dev_flags & IDE_DFLAG_MEDIA_CHANGED);
+ drive->dev_flags &= ~IDE_DFLAG_MEDIA_CHANGED;
+
+ return ret;
+}
+
+static int ide_gd_revalidate_disk(struct gendisk *disk)
+{
+ struct ide_disk_obj *idkp = ide_drv_g(disk, ide_disk_obj);
+ ide_drive_t *drive = idkp->drive;
+
+ if (ide_gd_media_changed(disk))
+ drive->disk_ops->get_capacity(drive);
+
+ set_capacity(disk, ide_gd_capacity(drive));
+ return 0;
+}
+
+static int ide_gd_ioctl(struct block_device *bdev, fmode_t mode,
+ unsigned int cmd, unsigned long arg)
+{
+ struct ide_disk_obj *idkp = ide_drv_g(bdev->bd_disk, ide_disk_obj);
+ ide_drive_t *drive = idkp->drive;
+
+ return drive->disk_ops->ioctl(drive, bdev, mode, cmd, arg);
+}
+
+static struct block_device_operations ide_gd_ops = {
+ .owner = THIS_MODULE,
+ .open = ide_gd_open,
+ .release = ide_gd_release,
+ .locked_ioctl = ide_gd_ioctl,
+ .getgeo = ide_gd_getgeo,
+ .media_changed = ide_gd_media_changed,
+ .revalidate_disk = ide_gd_revalidate_disk
+};
+
+static int ide_gd_probe(ide_drive_t *drive)
+{
+ const struct ide_disk_ops *disk_ops = NULL;
+ struct ide_disk_obj *idkp;
+ struct gendisk *g;
+
+ /* strstr("foo", "") is non-NULL */
+ if (!strstr("ide-gd", drive->driver_req))
+ goto failed;
+
+#ifdef CONFIG_IDE_GD_ATA
+ if (drive->media == ide_disk)
+ disk_ops = &ide_ata_disk_ops;
+#endif
+#ifdef CONFIG_IDE_GD_ATAPI
+ if (drive->media == ide_floppy)
+ disk_ops = &ide_atapi_disk_ops;
+#endif
+ if (disk_ops == NULL)
+ goto failed;
+
+ if (disk_ops->check(drive, DRV_NAME) == 0) {
+ printk(KERN_ERR PFX "%s: not supported by this driver\n",
+ drive->name);
+ goto failed;
+ }
+
+ idkp = kzalloc(sizeof(*idkp), GFP_KERNEL);
+ if (!idkp) {
+ printk(KERN_ERR PFX "%s: can't allocate a disk structure\n",
+ drive->name);
+ goto failed;
+ }
+
+ g = alloc_disk_node(IDE_DISK_MINORS, hwif_to_node(drive->hwif));
+ if (!g)
+ goto out_free_idkp;
+
+ ide_init_disk(g, drive);
+
+ idkp->dev.parent = &drive->gendev;
+ idkp->dev.release = ide_disk_release;
+ dev_set_name(&idkp->dev, dev_name(&drive->gendev));
+
+ if (device_register(&idkp->dev))
+ goto out_free_disk;
+
+ idkp->drive = drive;
+ idkp->driver = &ide_gd_driver;
+ idkp->disk = g;
+
+ g->private_data = &idkp->driver;
+
+ drive->driver_data = idkp;
+ drive->debug_mask = debug_mask;
+ drive->disk_ops = disk_ops;
+
+ disk_ops->setup(drive);
+
+ set_capacity(g, ide_gd_capacity(drive));
+
+ g->minors = IDE_DISK_MINORS;
+ g->driverfs_dev = &drive->gendev;
+ g->flags |= GENHD_FL_EXT_DEVT;
+ if (drive->dev_flags & IDE_DFLAG_REMOVABLE)
+		g->flags |= GENHD_FL_REMOVABLE;
+ g->fops = &ide_gd_ops;
+ add_disk(g);
+ return 0;
+
+out_free_disk:
+ put_disk(g);
+out_free_idkp:
+ kfree(idkp);
+failed:
+ return -ENODEV;
+}
+
+static int __init ide_gd_init(void)
+{
+ printk(KERN_INFO DRV_NAME " driver " IDE_GD_VERSION "\n");
+ return driver_register(&ide_gd_driver.gen_driver);
+}
+
+static void __exit ide_gd_exit(void)
+{
+ driver_unregister(&ide_gd_driver.gen_driver);
+}
+
+MODULE_ALIAS("ide:*m-disk*");
+MODULE_ALIAS("ide-disk");
+MODULE_ALIAS("ide:*m-floppy*");
+MODULE_ALIAS("ide-floppy");
+module_init(ide_gd_init);
+module_exit(ide_gd_exit);
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("generic ATA/ATAPI disk driver");
diff --git a/windhoek/ide/ide-gd.h b/windhoek/ide/ide-gd.h
new file mode 100644
index 00000000..b604bdd3
--- /dev/null
+++ b/windhoek/ide/ide-gd.h
@@ -0,0 +1,44 @@
+#ifndef __IDE_GD_H
+#define __IDE_GD_H
+
+#define DRV_NAME "ide-gd"
+#define PFX DRV_NAME ": "
+
+/* define to see debug info */
+#define IDE_GD_DEBUG_LOG 0
+
+#if IDE_GD_DEBUG_LOG
+#define ide_debug_log(lvl, fmt, args...) __ide_debug_log(lvl, fmt, args)
+#else
+#define ide_debug_log(lvl, fmt, args...) do {} while (0)
+#endif
+
+struct ide_disk_obj {
+ ide_drive_t *drive;
+ struct ide_driver *driver;
+ struct gendisk *disk;
+ struct device dev;
+ unsigned int openers; /* protected by BKL for now */
+
+ /* Last failed packet command */
+ struct ide_atapi_pc *failed_pc;
+ /* used for blk_{fs,pc}_request() requests */
+ struct ide_atapi_pc queued_pc;
+
+ /* Last error information */
+ u8 sense_key, asc, ascq;
+
+ int progress_indication;
+
+ /* Device information */
+ /* Current format */
+ int blocks, block_size, bs_factor;
+ /* Last format capacity descriptor */
+ u8 cap_desc[8];
+ /* Copy of the flexible disk page */
+ u8 flexible_disk_page[32];
+};
+
+sector_t ide_gd_capacity(ide_drive_t *);
+
+#endif /* __IDE_GD_H */
diff --git a/windhoek/ide/ide-generic.c b/windhoek/ide/ide-generic.c
new file mode 100644
index 00000000..d9636b67
--- /dev/null
+++ b/windhoek/ide/ide-generic.c
@@ -0,0 +1,206 @@
+/*
+ * generic/default IDE host driver
+ *
+ * Copyright (C) 2004, 2008 Bartlomiej Zolnierkiewicz
+ * This code was split off from ide.c. See it for original copyrights.
+ *
+ * May be copied or modified under the terms of the GNU General Public License.
+ */
+
+/*
+ * For special cases new interfaces may be added using sysfs, i.e.
+ *
+ * echo -n "0x168:0x36e:10" > /sys/class/ide_generic/add
+ *
+ * will add an interface using I/O ports 0x168-0x16f/0x36e and IRQ 10.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/ide.h>
+#include <linux/pci_ids.h>
+
+/* FIXME: convert m32r to use ide_platform host driver */
+#ifdef CONFIG_M32R
+#include <asm/m32r.h>
+#endif
+
+#include "local.h"
+
+#define DRV_NAME "ide_generic"
+
+static int probe_mask;
+module_param(probe_mask, int, 0);
+MODULE_PARM_DESC(probe_mask, "probe mask for legacy ISA IDE ports");
+
+static ssize_t store_add(struct class *cls, const char *buf, size_t n)
+{
+ unsigned int base, ctl;
+ int irq, rc;
+ hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL };
+
+ if (sscanf(buf, "%x:%x:%d", &base, &ctl, &irq) != 3)
+ return -EINVAL;
+
+ memset(&hw, 0, sizeof(hw));
+ ide_std_init_ports(&hw, base, ctl);
+ hw.irq = irq;
+ hw.chipset = ide_generic;
+
+ rc = ide_host_add(NULL, hws, NULL);
+ if (rc)
+ return rc;
+
+ return n;
+}
+
+static struct class_attribute ide_generic_class_attrs[] = {
+ __ATTR(add, S_IWUSR, NULL, store_add),
+ __ATTR_NULL
+};
+
+static void ide_generic_class_release(struct class *cls)
+{
+ kfree(cls);
+}
+
+static int __init ide_generic_sysfs_init(void)
+{
+ struct class *cls;
+ int rc;
+
+ cls = kzalloc(sizeof(*cls), GFP_KERNEL);
+ if (!cls)
+ return -ENOMEM;
+
+ cls->name = DRV_NAME;
+ cls->owner = THIS_MODULE;
+ cls->class_release = ide_generic_class_release;
+ cls->class_attrs = ide_generic_class_attrs;
+
+ rc = class_register(cls);
+ if (rc) {
+ kfree(cls);
+ return rc;
+ }
+
+ return 0;
+}
+
+#if defined(CONFIG_PLAT_M32700UT) || defined(CONFIG_PLAT_MAPPI2) \
+ || defined(CONFIG_PLAT_OPSPUT)
+static const u16 legacy_bases[] = { 0x1f0 };
+static const int legacy_irqs[] = { PLD_IRQ_CFIREQ };
+#elif defined(CONFIG_PLAT_MAPPI3)
+static const u16 legacy_bases[] = { 0x1f0, 0x170 };
+static const int legacy_irqs[] = { PLD_IRQ_CFIREQ, PLD_IRQ_IDEIREQ };
+#elif defined(CONFIG_ALPHA)
+static const u16 legacy_bases[] = { 0x1f0, 0x170, 0x1e8, 0x168 };
+static const int legacy_irqs[] = { 14, 15, 11, 10 };
+#else
+static const u16 legacy_bases[] = { 0x1f0, 0x170, 0x1e8, 0x168, 0x1e0, 0x160 };
+static const int legacy_irqs[] = { 14, 15, 11, 10, 8, 12 };
+#endif
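+
+/*
+ * Each legacy port decodes the taskfile registers at base..base+7 plus a
+ * control register at base + 0x206; see the request_region() calls in
+ * ide_generic_init().
+ */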
+
+static void ide_generic_check_pci_legacy_iobases(int *primary, int *secondary)
+{
+ struct pci_dev *p = NULL;
+ u16 val;
+
+ for_each_pci_dev(p) {
+
+ if (pci_resource_start(p, 0) == 0x1f0)
+ *primary = 1;
+ if (pci_resource_start(p, 2) == 0x170)
+ *secondary = 1;
+
+ /* Cyrix CS55{1,2}0 pre SFF MWDMA ATA on the bridge */
+ if (p->vendor == PCI_VENDOR_ID_CYRIX &&
+ (p->device == PCI_DEVICE_ID_CYRIX_5510 ||
+ p->device == PCI_DEVICE_ID_CYRIX_5520))
+ *primary = *secondary = 1;
+
+ /* Intel MPIIX - PIO ATA on non PCI side of bridge */
+ if (p->vendor == PCI_VENDOR_ID_INTEL &&
+ p->device == PCI_DEVICE_ID_INTEL_82371MX) {
+
+ pci_read_config_word(p, 0x6C, &val);
+ if (val & 0x8000) {
+ /* ATA port enabled */
+ if (val & 0x4000)
+ *secondary = 1;
+ else
+ *primary = 1;
+ }
+ }
+ }
+}
+
+static int __init ide_generic_init(void)
+{
+ hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL };
+ unsigned long io_addr;
+ int i, rc = 0, primary = 0, secondary = 0;
+
+ ide_generic_check_pci_legacy_iobases(&primary, &secondary);
+
+ if (!probe_mask) {
+ printk(KERN_INFO DRV_NAME ": please use \"probe_mask=0x3f\" "
+ "module parameter for probing all legacy ISA IDE ports\n");
+
+ if (primary == 0)
+ probe_mask |= 0x1;
+
+ if (secondary == 0)
+ probe_mask |= 0x2;
+ } else
+ printk(KERN_INFO DRV_NAME ": enforcing probing of I/O ports "
+ "upon user request\n");
+
+ for (i = 0; i < ARRAY_SIZE(legacy_bases); i++) {
+ io_addr = legacy_bases[i];
+
+ if ((probe_mask & (1 << i)) && io_addr) {
+ if (!request_region(io_addr, 8, DRV_NAME)) {
+ printk(KERN_ERR "%s: I/O resource 0x%lX-0x%lX "
+ "not free.\n",
+ DRV_NAME, io_addr, io_addr + 7);
+ continue;
+ }
+
+ if (!request_region(io_addr + 0x206, 1, DRV_NAME)) {
+ printk(KERN_ERR "%s: I/O resource 0x%lX "
+ "not free.\n",
+ DRV_NAME, io_addr + 0x206);
+ release_region(io_addr, 8);
+ continue;
+ }
+
+ memset(&hw, 0, sizeof(hw));
+ ide_std_init_ports(&hw, io_addr, io_addr + 0x206);
+#ifdef CONFIG_IA64
+ hw.irq = isa_irq_to_vector(legacy_irqs[i]);
+#else
+ hw.irq = legacy_irqs[i];
+#endif
+ hw.chipset = ide_generic;
+
+ rc = ide_host_add(NULL, hws, NULL);
+ if (rc) {
+ release_region(io_addr + 0x206, 1);
+ release_region(io_addr, 8);
+ }
+ }
+ }
+
+ if (ide_generic_sysfs_init())
+ printk(KERN_ERR DRV_NAME ": failed to create ide_generic "
+ "class\n");
+
+ return rc;
+}
+
+module_init(ide_generic_init);
+
+MODULE_LICENSE("GPL");
diff --git a/windhoek/ide/ide-io.c b/windhoek/ide/ide-io.c
new file mode 100644
index 00000000..a9a6c208
--- /dev/null
+++ b/windhoek/ide/ide-io.c
@@ -0,0 +1,1228 @@
+/*
+ * IDE I/O functions
+ *
+ * Basic PIO and command management functionality.
+ *
+ * This code was split off from ide.c. See ide.c for history and original
+ * copyrights.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * For the avoidance of doubt the "preferred form" of this code is one which
+ * is in an open non patent encumbered format. Where cryptographic key signing
+ * forms part of the process of creating an executable the information
+ * including keys needed to generate an equivalently functional executable
+ * are deemed to be part of the source code.
+ */
+
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/kernel.h>
+#include <linux/timer.h>
+#include <linux/mm.h>
+#include <linux/interrupt.h>
+#include <linux/major.h>
+#include <linux/errno.h>
+#include <linux/genhd.h>
+#include <linux/blkpg.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/ide.h>
+#include <linux/hdreg.h>
+#include <linux/completion.h>
+#include <linux/reboot.h>
+#include <linux/cdrom.h>
+#include <linux/seq_file.h>
+#include <linux/device.h>
+#include <linux/kmod.h>
+#include <linux/scatterlist.h>
+#include <linux/bitops.h>
+
+#include <asm/byteorder.h>
+#include <asm/irq.h>
+#include <asm/uaccess.h>
+#include <asm/io.h>
+
+static int __ide_end_request(ide_drive_t *drive, struct request *rq,
+ int uptodate, unsigned int nr_bytes, int dequeue)
+{
+ int ret = 1;
+ int error = 0;
+
+ if (uptodate <= 0)
+ error = uptodate ? uptodate : -EIO;
+
+ /*
+ * if failfast is set on a request, override number of sectors and
+ * complete the whole request right now
+ */
+ if (blk_noretry_request(rq) && error)
+ nr_bytes = rq->hard_nr_sectors << 9;
+
+ if (!blk_fs_request(rq) && error && !rq->errors)
+ rq->errors = -EIO;
+
+ /*
+ * decide whether to reenable DMA -- 3 is a random magic for now,
+ * if we DMA timeout more than 3 times, just stay in PIO
+ */
+ if ((drive->dev_flags & IDE_DFLAG_DMA_PIO_RETRY) &&
+ drive->retry_pio <= 3) {
+ drive->dev_flags &= ~IDE_DFLAG_DMA_PIO_RETRY;
+ ide_dma_on(drive);
+ }
+
+ if (!blk_end_request(rq, error, nr_bytes))
+ ret = 0;
+
+ if (ret == 0 && dequeue)
+ drive->hwif->rq = NULL;
+
+ return ret;
+}
+
+/**
+ * ide_end_request - complete an IDE I/O
+ * @drive: IDE device for the I/O
+ * @uptodate: positive for success, 0 or a negative errno on failure
+ * @nr_sectors: number of sectors completed
+ *
+ * This is our end_request wrapper function. We complete the I/O,
+ * update the random-number input and dequeue the request, which, if
+ * it was tagged, may be out of order.
+ */
+
+int ide_end_request (ide_drive_t *drive, int uptodate, int nr_sectors)
+{
+ unsigned int nr_bytes = nr_sectors << 9;
+ struct request *rq = drive->hwif->rq;
+
+ if (!nr_bytes) {
+ if (blk_pc_request(rq))
+ nr_bytes = rq->data_len;
+ else
+ nr_bytes = rq->hard_cur_sectors << 9;
+ }
+
+ return __ide_end_request(drive, rq, uptodate, nr_bytes, 1);
+}
+EXPORT_SYMBOL(ide_end_request);
+
+/**
+ * ide_end_dequeued_request - complete an IDE I/O
+ * @drive: IDE device for the I/O
+ * @rq: request to complete (already taken off the queue)
+ * @uptodate: positive for success, 0 or a negative errno on failure
+ * @nr_sectors: number of sectors completed
+ *
+ * Complete an I/O that is no longer on the request queue. This
+ * typically occurs when we pull the request and issue a REQUEST_SENSE.
+ * We must still finish the old request but we must not tamper with the
+ * queue in the meantime.
+ *
+ * NOTE: This path does not handle barrier, but barrier is not supported
+ * on ide-cd anyway.
+ */
+
+int ide_end_dequeued_request(ide_drive_t *drive, struct request *rq,
+ int uptodate, int nr_sectors)
+{
+ BUG_ON(!blk_rq_started(rq));
+
+ return __ide_end_request(drive, rq, uptodate, nr_sectors << 9, 0);
+}
+EXPORT_SYMBOL_GPL(ide_end_dequeued_request);
+
+/**
+ * ide_end_drive_cmd - end an explicit drive command
+ * @drive: drive the command was issued to
+ * @stat: status bits
+ * @err: error bits
+ *
+ * Clean up after success/failure of an explicit drive command.
+ * These get thrown onto the queue so they are synchronized with
+ * real I/O operations on the drive.
+ *
+ * In LBA48 mode we have to read the register set twice to get
+ * all the extra information out.
+ */
+
+void ide_end_drive_cmd (ide_drive_t *drive, u8 stat, u8 err)
+{
+ ide_hwif_t *hwif = drive->hwif;
+ struct request *rq = hwif->rq;
+
+ if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE) {
+ ide_task_t *task = (ide_task_t *)rq->special;
+
+ if (task) {
+ struct ide_taskfile *tf = &task->tf;
+
+ tf->error = err;
+ tf->status = stat;
+
+ drive->hwif->tp_ops->tf_read(drive, task);
+
+ if (task->tf_flags & IDE_TFLAG_DYN)
+ kfree(task);
+ }
+ } else if (blk_pm_request(rq)) {
+ struct request_pm_state *pm = rq->data;
+
+ ide_complete_power_step(drive, rq);
+ if (pm->pm_step == IDE_PM_COMPLETED)
+ ide_complete_pm_request(drive, rq);
+ return;
+ }
+
+ hwif->rq = NULL;
+
+ rq->errors = err;
+
+ if (unlikely(blk_end_request(rq, (rq->errors ? -EIO : 0),
+ blk_rq_bytes(rq))))
+ BUG();
+}
+EXPORT_SYMBOL(ide_end_drive_cmd);
+
+static void ide_kill_rq(ide_drive_t *drive, struct request *rq)
+{
+ if (rq->rq_disk) {
+ struct ide_driver *drv;
+
+ drv = *(struct ide_driver **)rq->rq_disk->private_data;
+ drv->end_request(drive, 0, 0);
+ } else
+ ide_end_request(drive, 0, 0);
+}
+
+static ide_startstop_t ide_ata_error(ide_drive_t *drive, struct request *rq, u8 stat, u8 err)
+{
+ ide_hwif_t *hwif = drive->hwif;
+
+ if ((stat & ATA_BUSY) ||
+ ((stat & ATA_DF) && (drive->dev_flags & IDE_DFLAG_NOWERR) == 0)) {
+ /* other bits are useless when BUSY */
+ rq->errors |= ERROR_RESET;
+ } else if (stat & ATA_ERR) {
+ /* err has different meaning on cdrom and tape */
+ if (err == ATA_ABORTED) {
+ if ((drive->dev_flags & IDE_DFLAG_LBA) &&
+ /* some newer drives don't support ATA_CMD_INIT_DEV_PARAMS */
+ hwif->tp_ops->read_status(hwif) == ATA_CMD_INIT_DEV_PARAMS)
+ return ide_stopped;
+ } else if ((err & BAD_CRC) == BAD_CRC) {
+ /* UDMA crc error, just retry the operation */
+ drive->crc_count++;
+ } else if (err & (ATA_BBK | ATA_UNC)) {
+ /* retries won't help these */
+ rq->errors = ERROR_MAX;
+ } else if (err & ATA_TRK0NF) {
+ /* help it find track zero */
+ rq->errors |= ERROR_RECAL;
+ }
+ }
+
+ if ((stat & ATA_DRQ) && rq_data_dir(rq) == READ &&
+ (hwif->host_flags & IDE_HFLAG_ERROR_STOPS_FIFO) == 0) {
+ int nsect = drive->mult_count ? drive->mult_count : 1;
+
+ ide_pad_transfer(drive, READ, nsect * SECTOR_SIZE);
+ }
+
+ if (rq->errors >= ERROR_MAX || blk_noretry_request(rq)) {
+ ide_kill_rq(drive, rq);
+ return ide_stopped;
+ }
+
+ if (hwif->tp_ops->read_status(hwif) & (ATA_BUSY | ATA_DRQ))
+ rq->errors |= ERROR_RESET;
+
+ if ((rq->errors & ERROR_RESET) == ERROR_RESET) {
+ ++rq->errors;
+ return ide_do_reset(drive);
+ }
+
+ if ((rq->errors & ERROR_RECAL) == ERROR_RECAL)
+ drive->special.b.recalibrate = 1;
+
+ ++rq->errors;
+
+ return ide_stopped;
+}
+
+static ide_startstop_t ide_atapi_error(ide_drive_t *drive, struct request *rq, u8 stat, u8 err)
+{
+ ide_hwif_t *hwif = drive->hwif;
+
+ if ((stat & ATA_BUSY) ||
+ ((stat & ATA_DF) && (drive->dev_flags & IDE_DFLAG_NOWERR) == 0)) {
+ /* other bits are useless when BUSY */
+ rq->errors |= ERROR_RESET;
+ } else {
+ /* add decoding error stuff */
+ }
+
+ if (hwif->tp_ops->read_status(hwif) & (ATA_BUSY | ATA_DRQ))
+ /* force an abort */
+ hwif->tp_ops->exec_command(hwif, ATA_CMD_IDLEIMMEDIATE);
+
+ if (rq->errors >= ERROR_MAX) {
+ ide_kill_rq(drive, rq);
+ } else {
+ if ((rq->errors & ERROR_RESET) == ERROR_RESET) {
+ ++rq->errors;
+ return ide_do_reset(drive);
+ }
+ ++rq->errors;
+ }
+
+ return ide_stopped;
+}
+
+static ide_startstop_t
+__ide_error(ide_drive_t *drive, struct request *rq, u8 stat, u8 err)
+{
+ if (drive->media == ide_disk)
+ return ide_ata_error(drive, rq, stat, err);
+ return ide_atapi_error(drive, rq, stat, err);
+}
+
+/**
+ * ide_error - handle an error on the IDE
+ * @drive: drive the error occurred on
+ * @msg: message to report
+ * @stat: status bits
+ *
+ * ide_error() takes action based on the error returned by the drive.
+ * For normal I/O that may well include retries. We deal with
+ * both new-style (taskfile) and old-style command handling here.
+ * In the case of taskfile command handling there is still work left
+ * to do.
+ */
+
+ide_startstop_t ide_error (ide_drive_t *drive, const char *msg, u8 stat)
+{
+ struct request *rq;
+ u8 err;
+
+ err = ide_dump_status(drive, msg, stat);
+
+ rq = drive->hwif->rq;
+ if (rq == NULL)
+ return ide_stopped;
+
+ /* retry only "normal" I/O: */
+ if (!blk_fs_request(rq)) {
+ rq->errors = 1;
+ ide_end_drive_cmd(drive, stat, err);
+ return ide_stopped;
+ }
+
+ return __ide_error(drive, rq, stat, err);
+}
+EXPORT_SYMBOL_GPL(ide_error);
+
+static void ide_tf_set_specify_cmd(ide_drive_t *drive, struct ide_taskfile *tf)
+{
+ tf->nsect = drive->sect;
+ tf->lbal = drive->sect;
+ tf->lbam = drive->cyl;
+ tf->lbah = drive->cyl >> 8;
+ tf->device = (drive->head - 1) | drive->select;
+ tf->command = ATA_CMD_INIT_DEV_PARAMS;
+}
+
+static void ide_tf_set_restore_cmd(ide_drive_t *drive, struct ide_taskfile *tf)
+{
+ tf->nsect = drive->sect;
+ tf->command = ATA_CMD_RESTORE;
+}
+
+static void ide_tf_set_setmult_cmd(ide_drive_t *drive, struct ide_taskfile *tf)
+{
+ tf->nsect = drive->mult_req;
+ tf->command = ATA_CMD_SET_MULTI;
+}
+
+static ide_startstop_t ide_disk_special(ide_drive_t *drive)
+{
+ special_t *s = &drive->special;
+ ide_task_t args;
+
+ memset(&args, 0, sizeof(ide_task_t));
+ args.data_phase = TASKFILE_NO_DATA;
+
+ if (s->b.set_geometry) {
+ s->b.set_geometry = 0;
+ ide_tf_set_specify_cmd(drive, &args.tf);
+ } else if (s->b.recalibrate) {
+ s->b.recalibrate = 0;
+ ide_tf_set_restore_cmd(drive, &args.tf);
+ } else if (s->b.set_multmode) {
+ s->b.set_multmode = 0;
+ ide_tf_set_setmult_cmd(drive, &args.tf);
+ } else if (s->all) {
+ int special = s->all;
+ s->all = 0;
+ printk(KERN_ERR "%s: bad special flag: 0x%02x\n", drive->name, special);
+ return ide_stopped;
+ }
+
+ args.tf_flags = IDE_TFLAG_TF | IDE_TFLAG_DEVICE |
+ IDE_TFLAG_CUSTOM_HANDLER;
+
+ do_rw_taskfile(drive, &args);
+
+ return ide_started;
+}
+
+/**
+ * do_special - issue some special commands
+ * @drive: drive the command is for
+ *
+ * do_special() is used to issue ATA_CMD_INIT_DEV_PARAMS,
+ * ATA_CMD_RESTORE and ATA_CMD_SET_MULTI commands to a drive.
+ *
+ * It used to do much more, but has been scaled back.
+ */
+
+static ide_startstop_t do_special (ide_drive_t *drive)
+{
+ special_t *s = &drive->special;
+
+#ifdef DEBUG
+ printk("%s: do_special: 0x%02x\n", drive->name, s->all);
+#endif
+ if (drive->media == ide_disk)
+ return ide_disk_special(drive);
+
+ s->all = 0;
+ drive->mult_req = 0;
+ return ide_stopped;
+}
+
+void ide_map_sg(ide_drive_t *drive, struct request *rq)
+{
+ ide_hwif_t *hwif = drive->hwif;
+ struct scatterlist *sg = hwif->sg_table;
+
+ if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE) {
+ sg_init_one(sg, rq->buffer, rq->nr_sectors * SECTOR_SIZE);
+ hwif->sg_nents = 1;
+ } else if (!rq->bio) {
+ sg_init_one(sg, rq->data, rq->data_len);
+ hwif->sg_nents = 1;
+ } else {
+ hwif->sg_nents = blk_rq_map_sg(drive->queue, rq, sg);
+ }
+}
+
+EXPORT_SYMBOL_GPL(ide_map_sg);
+
+void ide_init_sg_cmd(ide_drive_t *drive, struct request *rq)
+{
+ ide_hwif_t *hwif = drive->hwif;
+
+ hwif->nsect = hwif->nleft = rq->nr_sectors;
+ hwif->cursg_ofs = 0;
+ hwif->cursg = NULL;
+}
+
+EXPORT_SYMBOL_GPL(ide_init_sg_cmd);
+
+/**
+ * execute_drive_cmd - issue special drive command
+ * @drive: the drive to issue the command on
+ * @rq: the request structure holding the command
+ *
+ * execute_drive_cmd() issues a special drive command, usually
+ * initiated by ioctl() from the external hdparm program. The
+ * command can be a drive command, drive task or taskfile
+ * operation. Weirdly, you can call it with NULL to wait for
+ * all outstanding commands to finish. Don't rely on this, as it is
+ * due to change.
+ */
+
+static ide_startstop_t execute_drive_cmd (ide_drive_t *drive,
+ struct request *rq)
+{
+ ide_hwif_t *hwif = drive->hwif;
+ ide_task_t *task = rq->special;
+
+ if (task) {
+ hwif->data_phase = task->data_phase;
+
+ switch (hwif->data_phase) {
+ case TASKFILE_MULTI_OUT:
+ case TASKFILE_OUT:
+ case TASKFILE_MULTI_IN:
+ case TASKFILE_IN:
+ ide_init_sg_cmd(drive, rq);
+ ide_map_sg(drive, rq);
+ default:
+ break;
+ }
+
+ return do_rw_taskfile(drive, task);
+ }
+
+ /*
+ * NULL is actually a valid way of waiting for
+ * all current requests to be flushed from the queue.
+ */
+#ifdef DEBUG
+ printk("%s: DRIVE_CMD (null)\n", drive->name);
+#endif
+ ide_end_drive_cmd(drive, hwif->tp_ops->read_status(hwif),
+ ide_read_error(drive));
+
+ return ide_stopped;
+}
+
+int ide_devset_execute(ide_drive_t *drive, const struct ide_devset *setting,
+ int arg)
+{
+ struct request_queue *q = drive->queue;
+ struct request *rq;
+ int ret = 0;
+
+ if (!(setting->flags & DS_SYNC))
+ return setting->set(drive, arg);
+
+ rq = blk_get_request(q, READ, __GFP_WAIT);
+ rq->cmd_type = REQ_TYPE_SPECIAL;
+ rq->cmd_len = 5;
+ rq->cmd[0] = REQ_DEVSET_EXEC;
+ *(int *)&rq->cmd[1] = arg;
+ rq->special = setting->set;
+
+ if (blk_execute_rq(q, NULL, rq, 0))
+ ret = rq->errors;
+ blk_put_request(rq);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(ide_devset_execute);
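+
+/*
+ * Illustrative call (hypothetical caller, shown only as a sketch): a sysfs
+ * or ioctl path changing a DS_SYNC device setting would go through
+ *
+ *	err = ide_devset_execute(drive, setting, arg);
+ *
+ * Settings without DS_SYNC are applied immediately via setting->set();
+ * DS_SYNC ones are wrapped in a REQ_DEVSET_EXEC special request and are
+ * executed from ide_special_rq() below, in queue order.
+ */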
+
+static ide_startstop_t ide_special_rq(ide_drive_t *drive, struct request *rq)
+{
+ u8 cmd = rq->cmd[0];
+
+ if (cmd == REQ_PARK_HEADS || cmd == REQ_UNPARK_HEADS) {
+ ide_task_t task;
+ struct ide_taskfile *tf = &task.tf;
+
+ memset(&task, 0, sizeof(task));
+ if (cmd == REQ_PARK_HEADS) {
+ drive->sleep = *(unsigned long *)rq->special;
+ drive->dev_flags |= IDE_DFLAG_SLEEPING;
+ tf->command = ATA_CMD_IDLEIMMEDIATE;
+ tf->feature = 0x44;
+ tf->lbal = 0x4c;
+ tf->lbam = 0x4e;
+ tf->lbah = 0x55;
+ task.tf_flags |= IDE_TFLAG_CUSTOM_HANDLER;
+ } else /* cmd == REQ_UNPARK_HEADS */
+ tf->command = ATA_CMD_CHK_POWER;
+
+ task.tf_flags |= IDE_TFLAG_TF | IDE_TFLAG_DEVICE;
+ task.rq = rq;
+ drive->hwif->data_phase = task.data_phase = TASKFILE_NO_DATA;
+ return do_rw_taskfile(drive, &task);
+ }
+
+ switch (cmd) {
+ case REQ_DEVSET_EXEC:
+ {
+ int err, (*setfunc)(ide_drive_t *, int) = rq->special;
+
+ err = setfunc(drive, *(int *)&rq->cmd[1]);
+ if (err)
+ rq->errors = err;
+ else
+ err = 1;
+ ide_end_request(drive, err, 0);
+ return ide_stopped;
+ }
+ case REQ_DRIVE_RESET:
+ return ide_do_reset(drive);
+ default:
+ blk_dump_rq_flags(rq, "ide_special_rq - bad request");
+ ide_end_request(drive, 0, 0);
+ return ide_stopped;
+ }
+}
+
+/**
+ * start_request - start of I/O and command issuing for IDE
+ * @drive: drive the request is for
+ * @rq: request to start
+ *
+ * start_request() initiates handling of a new I/O request. It
+ * accepts commands and I/O (read/write) requests.
+ *
+ * FIXME: this function needs a rename
+ */
+
+static ide_startstop_t start_request (ide_drive_t *drive, struct request *rq)
+{
+ ide_startstop_t startstop;
+
+ BUG_ON(!blk_rq_started(rq));
+
+#ifdef DEBUG
+ printk("%s: start_request: current=0x%08lx\n",
+ drive->hwif->name, (unsigned long) rq);
+#endif
+
+ /* bail early if we've exceeded max_failures */
+ if (drive->max_failures && (drive->failures > drive->max_failures)) {
+ rq->cmd_flags |= REQ_FAILED;
+ goto kill_rq;
+ }
+
+ if (blk_pm_request(rq))
+ ide_check_pm_state(drive, rq);
+
+ SELECT_DRIVE(drive);
+ if (ide_wait_stat(&startstop, drive, drive->ready_stat,
+ ATA_BUSY | ATA_DRQ, WAIT_READY)) {
+ printk(KERN_ERR "%s: drive not ready for command\n", drive->name);
+ return startstop;
+ }
+ if (!drive->special.all) {
+ struct ide_driver *drv;
+
+ /*
+ * We reset the drive so we need to issue a SETFEATURES.
+ * Do it _after_ do_special() restored device parameters.
+ */
+ if (drive->current_speed == 0xff)
+ ide_config_drive_speed(drive, drive->desired_speed);
+
+ if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE)
+ return execute_drive_cmd(drive, rq);
+ else if (blk_pm_request(rq)) {
+ struct request_pm_state *pm = rq->data;
+#ifdef DEBUG_PM
+ printk("%s: start_power_step(step: %d)\n",
+ drive->name, pm->pm_step);
+#endif
+ startstop = ide_start_power_step(drive, rq);
+ if (startstop == ide_stopped &&
+ pm->pm_step == IDE_PM_COMPLETED)
+ ide_complete_pm_request(drive, rq);
+ return startstop;
+ } else if (!rq->rq_disk && blk_special_request(rq))
+ /*
+ * TODO: Once all ULDs have been modified to
+ * check for specific op codes rather than
+ * blindly accepting any special request, the
+ * check for ->rq_disk above may be replaced
+ * by a more suitable mechanism or even
+ * dropped entirely.
+ */
+ return ide_special_rq(drive, rq);
+
+ drv = *(struct ide_driver **)rq->rq_disk->private_data;
+
+ return drv->do_request(drive, rq, rq->sector);
+ }
+ return do_special(drive);
+kill_rq:
+ ide_kill_rq(drive, rq);
+ return ide_stopped;
+}
+
+/**
+ * ide_stall_queue - pause an IDE device
+ * @drive: drive to stall
+ * @timeout: time to stall for (jiffies)
+ *
+ * ide_stall_queue() can be used by a drive to give excess bandwidth back
+ * to the port by sleeping for timeout jiffies.
+ */
+
+void ide_stall_queue (ide_drive_t *drive, unsigned long timeout)
+{
+ if (timeout > WAIT_WORSTCASE)
+ timeout = WAIT_WORSTCASE;
+ drive->sleep = timeout + jiffies;
+ drive->dev_flags |= IDE_DFLAG_SLEEPING;
+}
+EXPORT_SYMBOL(ide_stall_queue);
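+
+/*
+ * Illustrative use (sketch only): a media driver that wants to yield the
+ * port for half a second could do
+ *
+ *	ide_stall_queue(drive, HZ / 2);
+ *
+ * do_ide_request() then treats the drive as sleeping and holds further
+ * requests back until drive->sleep has expired.
+ */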
+
+static inline int ide_lock_port(ide_hwif_t *hwif)
+{
+ if (hwif->busy)
+ return 1;
+
+ hwif->busy = 1;
+
+ return 0;
+}
+
+static inline void ide_unlock_port(ide_hwif_t *hwif)
+{
+ hwif->busy = 0;
+}
+
+static inline int ide_lock_host(struct ide_host *host, ide_hwif_t *hwif)
+{
+ int rc = 0;
+
+ if (host->host_flags & IDE_HFLAG_SERIALIZE) {
+ rc = test_and_set_bit_lock(IDE_HOST_BUSY, &host->host_busy);
+ if (rc == 0) {
+ /* for atari only */
+ ide_get_lock(ide_intr, hwif);
+ }
+ }
+ return rc;
+}
+
+static inline void ide_unlock_host(struct ide_host *host)
+{
+ if (host->host_flags & IDE_HFLAG_SERIALIZE) {
+ /* for atari only */
+ ide_release_lock();
+ clear_bit_unlock(IDE_HOST_BUSY, &host->host_busy);
+ }
+}
+
+/*
+ * Issue a new request to a device.
+ */
+void do_ide_request(struct request_queue *q)
+{
+ ide_drive_t *drive = q->queuedata;
+ ide_hwif_t *hwif = drive->hwif;
+ struct ide_host *host = hwif->host;
+ struct request *rq = NULL;
+ ide_startstop_t startstop;
+
+ /*
+ * The drive is doing a pre-flush, ordered write, post-flush sequence. Even
+ * though that is three requests, it must be seen as a single transaction;
+ * we must not preempt this drive until that is complete.
+ */
+ if (blk_queue_flushing(q))
+ /*
+ * small race where queue could get replugged during
+ * the 3-request flush cycle, just yank the plug since
+ * we want it to finish asap
+ */
+ blk_remove_plug(q);
+
+ spin_unlock_irq(q->queue_lock);
+
+ if (ide_lock_host(host, hwif))
+ goto plug_device_2;
+
+ spin_lock_irq(&hwif->lock);
+
+ if (!ide_lock_port(hwif)) {
+ ide_hwif_t *prev_port;
+repeat:
+ prev_port = hwif->host->cur_port;
+ hwif->rq = NULL;
+
+ if (drive->dev_flags & IDE_DFLAG_SLEEPING) {
+			/* still sleeping: keep holding off requests */
+			if (time_after(drive->sleep, jiffies)) {
+ ide_unlock_port(hwif);
+ goto plug_device;
+ }
+ }
+
+ if ((hwif->host->host_flags & IDE_HFLAG_SERIALIZE) &&
+ hwif != prev_port) {
+ /*
+ * set nIEN for previous port, drives in the
+ * quirk_list may not like intr setups/cleanups
+ */
+ if (prev_port && prev_port->cur_dev->quirk_list == 0)
+ prev_port->tp_ops->set_irq(prev_port, 0);
+
+ hwif->host->cur_port = hwif;
+ }
+ hwif->cur_dev = drive;
+ drive->dev_flags &= ~(IDE_DFLAG_SLEEPING | IDE_DFLAG_PARKED);
+
+ spin_unlock_irq(&hwif->lock);
+ spin_lock_irq(q->queue_lock);
+ /*
+ * we know that the queue isn't empty, but this can happen
+ * if the q->prep_rq_fn() decides to kill a request
+ */
+ rq = elv_next_request(drive->queue);
+ spin_unlock_irq(q->queue_lock);
+ spin_lock_irq(&hwif->lock);
+
+ if (!rq) {
+ ide_unlock_port(hwif);
+ goto out;
+ }
+
+ /*
+ * Sanity: don't accept a request that isn't a PM request
+ * if we are currently power managed. This is very important as
+ * blk_stop_queue() doesn't prevent the elv_next_request()
+ * above from returning whatever is in the queue. Since we call
+ * ide_do_request() ourselves, we end up taking requests while
+ * the queue is blocked...
+ *
+ * We do let through requests forced to the head of the queue with
+ * ide-preempt, though. Hopefully that doesn't happen much, and not
+ * unless the subdriver triggers such a thing in its own PM
+ * state machine.
+ */
+ if ((drive->dev_flags & IDE_DFLAG_BLOCKED) &&
+ blk_pm_request(rq) == 0 &&
+ (rq->cmd_flags & REQ_PREEMPT) == 0) {
+ /* there should be no pending command at this point */
+ ide_unlock_port(hwif);
+ goto plug_device;
+ }
+
+ hwif->rq = rq;
+
+ spin_unlock_irq(&hwif->lock);
+ startstop = start_request(drive, rq);
+ spin_lock_irq(&hwif->lock);
+
+ if (startstop == ide_stopped)
+ goto repeat;
+ } else
+ goto plug_device;
+out:
+ spin_unlock_irq(&hwif->lock);
+ if (rq == NULL)
+ ide_unlock_host(host);
+ spin_lock_irq(q->queue_lock);
+ return;
+
+plug_device:
+ spin_unlock_irq(&hwif->lock);
+ ide_unlock_host(host);
+plug_device_2:
+ spin_lock_irq(q->queue_lock);
+
+ if (!elv_queue_empty(q))
+ blk_plug_device(q);
+}
+
+/*
+ * Un-busy the port etc., and clear any pending DMA status. We want to
+ * retry the current request in PIO mode instead of risking tossing it
+ * all away.
+ */
+static ide_startstop_t ide_dma_timeout_retry(ide_drive_t *drive, int error)
+{
+ ide_hwif_t *hwif = drive->hwif;
+ struct request *rq;
+ ide_startstop_t ret = ide_stopped;
+
+ /*
+ * end current dma transaction
+ */
+
+ if (error < 0) {
+ printk(KERN_WARNING "%s: DMA timeout error\n", drive->name);
+ (void)hwif->dma_ops->dma_end(drive);
+ ret = ide_error(drive, "dma timeout error",
+ hwif->tp_ops->read_status(hwif));
+ } else {
+ printk(KERN_WARNING "%s: DMA timeout retry\n", drive->name);
+ hwif->dma_ops->dma_timeout(drive);
+ }
+
+ /*
+ * disable dma for now, but remember that we did so because of
+ * a timeout -- we'll reenable after we finish this next request
+ * (or rather the first chunk of it) in pio.
+ */
+ drive->dev_flags |= IDE_DFLAG_DMA_PIO_RETRY;
+ drive->retry_pio++;
+ ide_dma_off_quietly(drive);
+
+ /*
+ * un-busy drive etc and make sure request is sane
+ */
+
+ rq = hwif->rq;
+ if (!rq)
+ goto out;
+
+ hwif->rq = NULL;
+
+ rq->errors = 0;
+
+ if (!rq->bio)
+ goto out;
+
+ rq->sector = rq->bio->bi_sector;
+ rq->current_nr_sectors = bio_iovec(rq->bio)->bv_len >> 9;
+ rq->hard_cur_sectors = rq->current_nr_sectors;
+ rq->buffer = bio_data(rq->bio);
+out:
+ return ret;
+}
+
+static void ide_plug_device(ide_drive_t *drive)
+{
+ struct request_queue *q = drive->queue;
+ unsigned long flags;
+
+ spin_lock_irqsave(q->queue_lock, flags);
+ if (!elv_queue_empty(q))
+ blk_plug_device(q);
+ spin_unlock_irqrestore(q->queue_lock, flags);
+}
+
+/**
+ * ide_timer_expiry - handle lack of an IDE interrupt
+ * @data: timer callback magic (hwif)
+ *
+ * An IDE command has timed out before the expected drive return
+ * occurred. At this point we attempt to clean up the current
+ * mess. If the current handler includes an expiry handler then
+ * we invoke the expiry handler, and provided it is happy the
+ * work is done. If that fails, we apply generic recovery rules,
+ * invoking the handler and checking the drive DMA status. We
+ * have an excessively incestuous relationship with the DMA
+ * logic that wants cleaning up.
+ */
+
+void ide_timer_expiry (unsigned long data)
+{
+ ide_hwif_t *hwif = (ide_hwif_t *)data;
+ ide_drive_t *uninitialized_var(drive);
+ ide_handler_t *handler;
+ unsigned long flags;
+ int wait = -1;
+ int plug_device = 0;
+
+ spin_lock_irqsave(&hwif->lock, flags);
+
+ handler = hwif->handler;
+
+ if (handler == NULL || hwif->req_gen != hwif->req_gen_timer) {
+ /*
+ * Either a marginal timeout occurred
+ * (got the interrupt just as timer expired),
+ * or we were "sleeping" to give other devices a chance.
+ * Either way, we don't really want to complain about anything.
+ */
+ } else {
+ ide_expiry_t *expiry = hwif->expiry;
+ ide_startstop_t startstop = ide_stopped;
+
+ drive = hwif->cur_dev;
+
+ if (expiry) {
+ wait = expiry(drive);
+ if (wait > 0) { /* continue */
+ /* reset timer */
+ hwif->timer.expires = jiffies + wait;
+ hwif->req_gen_timer = hwif->req_gen;
+ add_timer(&hwif->timer);
+ spin_unlock_irqrestore(&hwif->lock, flags);
+ return;
+ }
+ }
+ hwif->handler = NULL;
+ /*
+ * We need to simulate a real interrupt when invoking
+ * the handler() function, which means we need to
+ * globally mask the specific IRQ:
+ */
+ spin_unlock(&hwif->lock);
+ /* disable_irq_nosync ?? */
+ disable_irq(hwif->irq);
+ /* local CPU only, as if we were handling an interrupt */
+ local_irq_disable();
+ if (hwif->polling) {
+ startstop = handler(drive);
+ } else if (drive_is_ready(drive)) {
+ if (drive->waiting_for_dma)
+ hwif->dma_ops->dma_lost_irq(drive);
+ (void)ide_ack_intr(hwif);
+ printk(KERN_WARNING "%s: lost interrupt\n",
+ drive->name);
+ startstop = handler(drive);
+ } else {
+ if (drive->waiting_for_dma)
+ startstop = ide_dma_timeout_retry(drive, wait);
+ else
+ startstop = ide_error(drive, "irq timeout",
+ hwif->tp_ops->read_status(hwif));
+ }
+ spin_lock_irq(&hwif->lock);
+ enable_irq(hwif->irq);
+ if (startstop == ide_stopped) {
+ ide_unlock_port(hwif);
+ plug_device = 1;
+ }
+ }
+ spin_unlock_irqrestore(&hwif->lock, flags);
+
+ if (plug_device) {
+ ide_unlock_host(hwif->host);
+ ide_plug_device(drive);
+ }
+}
+
+/**
+ * unexpected_intr - handle an unexpected IDE interrupt
+ * @irq: interrupt line
+ * @hwif: port being processed
+ *
+ * There's nothing really useful we can do with an unexpected interrupt,
+ * other than reading the status register (to clear it), and logging it.
+ * There should be no way that an irq can happen before we're ready for it,
+ * so we needn't worry much about losing an "important" interrupt here.
+ *
+ * On laptops (and "green" PCs), an unexpected interrupt occurs whenever
+ * the drive enters "idle", "standby", or "sleep" mode, so if the status
+ * looks "good", we just ignore the interrupt completely.
+ *
+ * This routine assumes __cli() is in effect when called.
+ *
+ * If an unexpected interrupt happens on irq15 while we are handling irq14
+ * and if the two interfaces are "serialized" (CMD640), then it looks like
+ * we could screw up by interfering with a new request being set up for
+ * irq15.
+ *
+ * In reality, this is a non-issue. The new command is not sent unless
+ * the drive is ready to accept one, in which case we know the drive is
+ * not trying to interrupt us. And ide_set_handler() is always invoked
+ * before completing the issuance of any new drive command, so we will not
+ * be accidentally invoked as a result of any valid command completion
+ * interrupt.
+ */
+
+static void unexpected_intr(int irq, ide_hwif_t *hwif)
+{
+ u8 stat = hwif->tp_ops->read_status(hwif);
+
+ if (!OK_STAT(stat, ATA_DRDY, BAD_STAT)) {
+ /* Try to not flood the console with msgs */
+ static unsigned long last_msgtime, count;
+ ++count;
+
+ if (time_after(jiffies, last_msgtime + HZ)) {
+ last_msgtime = jiffies;
+ printk(KERN_ERR "%s: unexpected interrupt, "
+ "status=0x%02x, count=%ld\n",
+ hwif->name, stat, count);
+ }
+ }
+}
+
+/**
+ * ide_intr - default IDE interrupt handler
+ * @irq: interrupt number
+ * @dev_id: hwif the interrupt was registered for
+ *
+ * This is the default IRQ handler for the IDE layer. You should
+ * not need to override it. If you do, be aware that it is subtle in
+ * places.
+ *
+ * hwif is the interface in the group currently performing
+ * a command. hwif->cur_dev is the drive and hwif->handler is
+ * the IRQ handler to call. As we issue a command the handlers
+ * step through multiple states, reassigning the handler to the
+ * next step in the process. Unlike a smart SCSI controller IDE
+ * expects the main processor to sequence the various transfer
+ * stages. We also manage a poll timer to catch up with most
+ * timeout situations. There are still a few where the handlers
+ * don't ever decide to give up.
+ *
+ * The handler eventually returns ide_stopped to indicate the
+ * request completed. At this point we issue the next request
+ * on the port and the process begins again.
+ */
+
+irqreturn_t ide_intr (int irq, void *dev_id)
+{
+ ide_hwif_t *hwif = (ide_hwif_t *)dev_id;
+ ide_drive_t *uninitialized_var(drive);
+ ide_handler_t *handler;
+ unsigned long flags;
+ ide_startstop_t startstop;
+ irqreturn_t irq_ret = IRQ_NONE;
+ int plug_device = 0;
+
+ if (hwif->host->host_flags & IDE_HFLAG_SERIALIZE) {
+ if (hwif != hwif->host->cur_port)
+ goto out_early;
+ }
+
+ spin_lock_irqsave(&hwif->lock, flags);
+
+ if (!ide_ack_intr(hwif))
+ goto out;
+
+ handler = hwif->handler;
+
+ if (handler == NULL || hwif->polling) {
+ /*
+ * Not expecting an interrupt from this drive.
+ * That means this could be:
+ * (1) an interrupt from another PCI device
+ * sharing the same PCI INT# as us.
+ * or (2) a drive just entered sleep or standby mode,
+ * and is interrupting to let us know.
+ * or (3) a spurious interrupt of unknown origin.
+ *
+ * For PCI, we cannot tell the difference,
+ * so in that case we just ignore it and hope it goes away.
+ *
+ * FIXME: unexpected_intr should be hwif-> then we can
+ * remove all the ifdef PCI crap
+ */
+#ifdef CONFIG_BLK_DEV_IDEPCI
+ if (hwif->chipset != ide_pci)
+#endif /* CONFIG_BLK_DEV_IDEPCI */
+ {
+ /*
+ * Probably not a shared PCI interrupt,
+ * so we can safely try to do something about it:
+ */
+ unexpected_intr(irq, hwif);
+#ifdef CONFIG_BLK_DEV_IDEPCI
+ } else {
+ /*
+ * Whack the status register, just in case
+ * we have a leftover pending IRQ.
+ */
+ (void)hwif->tp_ops->read_status(hwif);
+#endif /* CONFIG_BLK_DEV_IDEPCI */
+ }
+ goto out;
+ }
+
+ drive = hwif->cur_dev;
+
+ if (!drive_is_ready(drive))
+ /*
+ * This happens regularly when we share a PCI IRQ with
+ * another device. Unfortunately, it can also happen
+ * with some buggy drives that trigger the IRQ before
+ * their status register is up to date. Hopefully we have
+ * enough advance overhead that the latter isn't a problem.
+ */
+ goto out;
+
+ hwif->handler = NULL;
+ hwif->req_gen++;
+ del_timer(&hwif->timer);
+ spin_unlock(&hwif->lock);
+
+ if (hwif->port_ops && hwif->port_ops->clear_irq)
+ hwif->port_ops->clear_irq(drive);
+
+ if (drive->dev_flags & IDE_DFLAG_UNMASK)
+ local_irq_enable_in_hardirq();
+
+ /* service this interrupt, may set handler for next interrupt */
+ startstop = handler(drive);
+
+ spin_lock_irq(&hwif->lock);
+ /*
+ * Note that handler() may have set things up for another
+ * interrupt to occur soon, but it cannot happen until
+ * we exit from this routine, because it will be the
+ * same irq as is currently being serviced here, and Linux
+ * won't allow another of the same (on any CPU) until we return.
+ */
+ if (startstop == ide_stopped) {
+ BUG_ON(hwif->handler);
+ ide_unlock_port(hwif);
+ plug_device = 1;
+ }
+ irq_ret = IRQ_HANDLED;
+out:
+ spin_unlock_irqrestore(&hwif->lock, flags);
+out_early:
+ if (plug_device) {
+ ide_unlock_host(hwif->host);
+ ide_plug_device(drive);
+ }
+
+ return irq_ret;
+}
+EXPORT_SYMBOL_GPL(ide_intr);
+
+/**
+ * ide_do_drive_cmd - issue IDE special command
+ * @drive: device to issue command
+ * @rq: request to issue
+ *
+ * This function issues a special IDE device request
+ * onto the request queue.
+ *
+ * The rq is queued at the head of the request queue, displacing
+ * the currently-being-processed request and this function
+ * returns immediately without waiting for the new rq to be
+ * completed. This is VERY DANGEROUS, and is intended for
+ * careful use by the ATAPI tape/cdrom driver code.
+ */
+
+void ide_do_drive_cmd(ide_drive_t *drive, struct request *rq)
+{
+ struct request_queue *q = drive->queue;
+ unsigned long flags;
+
+ drive->hwif->rq = NULL;
+
+ spin_lock_irqsave(q->queue_lock, flags);
+ __elv_add_request(q, rq, ELEVATOR_INSERT_FRONT, 0);
+ spin_unlock_irqrestore(q->queue_lock, flags);
+}
+EXPORT_SYMBOL(ide_do_drive_cmd);
+
+void ide_pktcmd_tf_load(ide_drive_t *drive, u32 tf_flags, u16 bcount, u8 dma)
+{
+ ide_hwif_t *hwif = drive->hwif;
+ ide_task_t task;
+
+ memset(&task, 0, sizeof(task));
+ task.tf_flags = IDE_TFLAG_OUT_LBAH | IDE_TFLAG_OUT_LBAM |
+ IDE_TFLAG_OUT_FEATURE | tf_flags;
+ task.tf.feature = dma; /* Use PIO/DMA */
+ task.tf.lbam = bcount & 0xff;
+ task.tf.lbah = (bcount >> 8) & 0xff;
+
+ ide_tf_dump(drive->name, &task.tf);
+ hwif->tp_ops->set_irq(hwif, 1);
+ SELECT_MASK(drive, 0);
+ hwif->tp_ops->tf_load(drive, &task);
+}
+
+EXPORT_SYMBOL_GPL(ide_pktcmd_tf_load);
+
+void ide_pad_transfer(ide_drive_t *drive, int write, int len)
+{
+ ide_hwif_t *hwif = drive->hwif;
+ u8 buf[4] = { 0 };
+
+ while (len > 0) {
+ if (write)
+ hwif->tp_ops->output_data(drive, NULL, buf, min(4, len));
+ else
+ hwif->tp_ops->input_data(drive, NULL, buf, min(4, len));
+ len -= 4;
+ }
+}
+EXPORT_SYMBOL_GPL(ide_pad_transfer);
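+
+/*
+ * Illustrative use: ide_ata_error() above drains a failed PIO read with
+ *
+ *	ide_pad_transfer(drive, READ, nsect * SECTOR_SIZE);
+ *
+ * while a non-zero "write" argument pads the outgoing direction with
+ * zero bytes instead.
+ */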
diff --git a/windhoek/ide/ide-iops.c b/windhoek/ide/ide-iops.c
new file mode 100644
index 00000000..56181a53
--- /dev/null
+++ b/windhoek/ide/ide-iops.c
@@ -0,0 +1,1215 @@
+/*
+ * Copyright (C) 2000-2002 Andre Hedrick <andre@linux-ide.org>
+ * Copyright (C) 2003 Red Hat
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/kernel.h>
+#include <linux/timer.h>
+#include <linux/mm.h>
+#include <linux/interrupt.h>
+#include <linux/major.h>
+#include <linux/errno.h>
+#include <linux/genhd.h>
+#include <linux/blkpg.h>
+#include <linux/slab.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/ide.h>
+#include <linux/bitops.h>
+#include <linux/nmi.h>
+
+#include <asm/byteorder.h>
+#include <asm/irq.h>
+#include <asm/uaccess.h>
+#include <asm/io.h>
+
+#include "local.h"
+
+/*
+ * Conventional PIO operations for ATA devices
+ */
+
+static u8 ide_inb (unsigned long port)
+{
+ return (u8) inb(port);
+}
+
+static void ide_outb (u8 val, unsigned long port)
+{
+ outb(val, port);
+}
+
+/*
+ * MMIO operations, typically used for SATA controllers
+ */
+
+static u8 ide_mm_inb (unsigned long port)
+{
+ return (u8) readb((void __iomem *) port);
+}
+
+static void ide_mm_outb (u8 value, unsigned long port)
+{
+ writeb(value, (void __iomem *) port);
+}
+
+void SELECT_DRIVE (ide_drive_t *drive)
+{
+ ide_hwif_t *hwif = drive->hwif;
+ const struct ide_port_ops *port_ops = hwif->port_ops;
+ ide_task_t task;
+
+ if (port_ops && port_ops->selectproc)
+ port_ops->selectproc(drive);
+
+ memset(&task, 0, sizeof(task));
+ task.tf_flags = IDE_TFLAG_OUT_DEVICE;
+
+ drive->hwif->tp_ops->tf_load(drive, &task);
+}
+
+void SELECT_MASK(ide_drive_t *drive, int mask)
+{
+ const struct ide_port_ops *port_ops = drive->hwif->port_ops;
+
+ if (port_ops && port_ops->maskproc)
+ port_ops->maskproc(drive, mask);
+}
+
+void ide_exec_command(ide_hwif_t *hwif, u8 cmd)
+{
+ if (hwif->host_flags & IDE_HFLAG_MMIO)
+ writeb(cmd, (void __iomem *)hwif->io_ports.command_addr);
+ else
+ outb(cmd, hwif->io_ports.command_addr);
+}
+EXPORT_SYMBOL_GPL(ide_exec_command);
+
+u8 ide_read_status(ide_hwif_t *hwif)
+{
+ if (hwif->host_flags & IDE_HFLAG_MMIO)
+ return readb((void __iomem *)hwif->io_ports.status_addr);
+ else
+ return inb(hwif->io_ports.status_addr);
+}
+EXPORT_SYMBOL_GPL(ide_read_status);
+
+u8 ide_read_altstatus(ide_hwif_t *hwif)
+{
+ if (hwif->host_flags & IDE_HFLAG_MMIO)
+ return readb((void __iomem *)hwif->io_ports.ctl_addr);
+ else
+ return inb(hwif->io_ports.ctl_addr);
+}
+EXPORT_SYMBOL_GPL(ide_read_altstatus);
+
+void ide_set_irq(ide_hwif_t *hwif, int on)
+{
+ u8 ctl = ATA_DEVCTL_OBS;
+
+ if (on == 4) { /* hack for SRST */
+ ctl |= 4;
+ on &= ~4;
+ }
+
+ ctl |= on ? 0 : 2;
+
+ if (hwif->host_flags & IDE_HFLAG_MMIO)
+ writeb(ctl, (void __iomem *)hwif->io_ports.ctl_addr);
+ else
+ outb(ctl, hwif->io_ports.ctl_addr);
+}
+EXPORT_SYMBOL_GPL(ide_set_irq);
+
+void ide_tf_load(ide_drive_t *drive, ide_task_t *task)
+{
+ ide_hwif_t *hwif = drive->hwif;
+ struct ide_io_ports *io_ports = &hwif->io_ports;
+ struct ide_taskfile *tf = &task->tf;
+ void (*tf_outb)(u8 addr, unsigned long port);
+ u8 mmio = (hwif->host_flags & IDE_HFLAG_MMIO) ? 1 : 0;
+ u8 HIHI = (task->tf_flags & IDE_TFLAG_LBA48) ? 0xE0 : 0xEF;
+
+ if (mmio)
+ tf_outb = ide_mm_outb;
+ else
+ tf_outb = ide_outb;
+
+ if (task->tf_flags & IDE_TFLAG_FLAGGED)
+ HIHI = 0xFF;
+
+ if (task->tf_flags & IDE_TFLAG_OUT_DATA) {
+ u16 data = (tf->hob_data << 8) | tf->data;
+
+ if (mmio)
+ writew(data, (void __iomem *)io_ports->data_addr);
+ else
+ outw(data, io_ports->data_addr);
+ }
+
+ if (task->tf_flags & IDE_TFLAG_OUT_HOB_FEATURE)
+ tf_outb(tf->hob_feature, io_ports->feature_addr);
+ if (task->tf_flags & IDE_TFLAG_OUT_HOB_NSECT)
+ tf_outb(tf->hob_nsect, io_ports->nsect_addr);
+ if (task->tf_flags & IDE_TFLAG_OUT_HOB_LBAL)
+ tf_outb(tf->hob_lbal, io_ports->lbal_addr);
+ if (task->tf_flags & IDE_TFLAG_OUT_HOB_LBAM)
+ tf_outb(tf->hob_lbam, io_ports->lbam_addr);
+ if (task->tf_flags & IDE_TFLAG_OUT_HOB_LBAH)
+ tf_outb(tf->hob_lbah, io_ports->lbah_addr);
+
+ if (task->tf_flags & IDE_TFLAG_OUT_FEATURE)
+ tf_outb(tf->feature, io_ports->feature_addr);
+ if (task->tf_flags & IDE_TFLAG_OUT_NSECT)
+ tf_outb(tf->nsect, io_ports->nsect_addr);
+ if (task->tf_flags & IDE_TFLAG_OUT_LBAL)
+ tf_outb(tf->lbal, io_ports->lbal_addr);
+ if (task->tf_flags & IDE_TFLAG_OUT_LBAM)
+ tf_outb(tf->lbam, io_ports->lbam_addr);
+ if (task->tf_flags & IDE_TFLAG_OUT_LBAH)
+ tf_outb(tf->lbah, io_ports->lbah_addr);
+
+ if (task->tf_flags & IDE_TFLAG_OUT_DEVICE)
+ tf_outb((tf->device & HIHI) | drive->select,
+ io_ports->device_addr);
+}
+EXPORT_SYMBOL_GPL(ide_tf_load);
+
+void ide_tf_read(ide_drive_t *drive, ide_task_t *task)
+{
+ ide_hwif_t *hwif = drive->hwif;
+ struct ide_io_ports *io_ports = &hwif->io_ports;
+ struct ide_taskfile *tf = &task->tf;
+ void (*tf_outb)(u8 addr, unsigned long port);
+ u8 (*tf_inb)(unsigned long port);
+ u8 mmio = (hwif->host_flags & IDE_HFLAG_MMIO) ? 1 : 0;
+
+ if (mmio) {
+ tf_outb = ide_mm_outb;
+ tf_inb = ide_mm_inb;
+ } else {
+ tf_outb = ide_outb;
+ tf_inb = ide_inb;
+ }
+
+ if (task->tf_flags & IDE_TFLAG_IN_DATA) {
+ u16 data;
+
+ if (mmio)
+ data = readw((void __iomem *)io_ports->data_addr);
+ else
+ data = inw(io_ports->data_addr);
+
+ tf->data = data & 0xff;
+ tf->hob_data = (data >> 8) & 0xff;
+ }
+
+ /* be sure we're looking at the low order bits */
+ tf_outb(ATA_DEVCTL_OBS & ~0x80, io_ports->ctl_addr);
+
+ if (task->tf_flags & IDE_TFLAG_IN_FEATURE)
+ tf->feature = tf_inb(io_ports->feature_addr);
+ if (task->tf_flags & IDE_TFLAG_IN_NSECT)
+ tf->nsect = tf_inb(io_ports->nsect_addr);
+ if (task->tf_flags & IDE_TFLAG_IN_LBAL)
+ tf->lbal = tf_inb(io_ports->lbal_addr);
+ if (task->tf_flags & IDE_TFLAG_IN_LBAM)
+ tf->lbam = tf_inb(io_ports->lbam_addr);
+ if (task->tf_flags & IDE_TFLAG_IN_LBAH)
+ tf->lbah = tf_inb(io_ports->lbah_addr);
+ if (task->tf_flags & IDE_TFLAG_IN_DEVICE)
+ tf->device = tf_inb(io_ports->device_addr);
+
+ if (task->tf_flags & IDE_TFLAG_LBA48) {
+ tf_outb(ATA_DEVCTL_OBS | 0x80, io_ports->ctl_addr);
+
+ if (task->tf_flags & IDE_TFLAG_IN_HOB_FEATURE)
+ tf->hob_feature = tf_inb(io_ports->feature_addr);
+ if (task->tf_flags & IDE_TFLAG_IN_HOB_NSECT)
+ tf->hob_nsect = tf_inb(io_ports->nsect_addr);
+ if (task->tf_flags & IDE_TFLAG_IN_HOB_LBAL)
+ tf->hob_lbal = tf_inb(io_ports->lbal_addr);
+ if (task->tf_flags & IDE_TFLAG_IN_HOB_LBAM)
+ tf->hob_lbam = tf_inb(io_ports->lbam_addr);
+ if (task->tf_flags & IDE_TFLAG_IN_HOB_LBAH)
+ tf->hob_lbah = tf_inb(io_ports->lbah_addr);
+ }
+}
+EXPORT_SYMBOL_GPL(ide_tf_read);
+
+/*
+ * Some localbus EIDE interfaces require a special access sequence
+ * when using 32-bit I/O instructions to transfer data. We call this
+ * the "vlb_sync" sequence, which consists of three successive reads
+ * of the sector count register location, with interrupts disabled
+ * to ensure that the reads all happen together.
+ */
+static void ata_vlb_sync(unsigned long port)
+{
+ (void)inb(port);
+ (void)inb(port);
+ (void)inb(port);
+}
+
+/*
+ * This is used for most PIO data transfers *from* the IDE interface
+ *
+ * These routines will round up any request for an odd number of bytes,
+ * so if an odd len is specified, be sure that there's at least one
+ * extra byte allocated for the buffer.
+ */
+void ide_input_data(ide_drive_t *drive, struct request *rq, void *buf,
+ unsigned int len)
+{
+	ide_hwif_t *hwif = drive->hwif;
+	struct ide_io_ports *io_ports = &hwif->io_ports;
+	unsigned long data_addr = io_ports->data_addr;
+	u8 io_32bit = drive->io_32bit;
+	u8 mmio = (hwif->host_flags & IDE_HFLAG_MMIO) ? 1 : 0;
+
+	DEBUG_MSG("drive %p, rq %p, buf %p, len %d", drive, rq, buf, len);
+
+ len++;
+
+ DEBUG_MSG("io32bit: %d", io_32bit);
+
+ if (io_32bit) {
+ unsigned long uninitialized_var(flags);
+
+ if ((io_32bit & 2) && !mmio) {
+ local_irq_save(flags);
+ ata_vlb_sync(io_ports->nsect_addr);
+ }
+
+ if (mmio)
+ __ide_mm_insl((void __iomem *)data_addr, buf, len / 4);
+ else
+ insl(data_addr, buf, len / 4);
+
+ if ((io_32bit & 2) && !mmio)
+ local_irq_restore(flags);
+
+ if ((len & 3) >= 2) {
+ if (mmio)
+ __ide_mm_insw((void __iomem *)data_addr,
+ (u8 *)buf + (len & ~3), 1);
+ else
+ insw(data_addr, (u8 *)buf + (len & ~3), 1);
+ }
+ } else {
+ if (mmio)
+ __ide_mm_insw((void __iomem *)data_addr, buf, len / 2);
+ else
+ insw(data_addr, buf, len / 2);
+ }
+}
+EXPORT_SYMBOL_GPL(ide_input_data);
+
+/*
+ * This is used for most PIO data transfers *to* the IDE interface
+ */
+void ide_output_data(ide_drive_t *drive, struct request *rq, void *buf,
+ unsigned int len)
+{
+ ide_hwif_t *hwif = drive->hwif;
+ struct ide_io_ports *io_ports = &hwif->io_ports;
+ unsigned long data_addr = io_ports->data_addr;
+ u8 io_32bit = drive->io_32bit;
+ u8 mmio = (hwif->host_flags & IDE_HFLAG_MMIO) ? 1 : 0;
+
+ len++;
+
+ if (io_32bit) {
+ unsigned long uninitialized_var(flags);
+
+ if ((io_32bit & 2) && !mmio) {
+ local_irq_save(flags);
+ ata_vlb_sync(io_ports->nsect_addr);
+ }
+
+ if (mmio)
+ __ide_mm_outsl((void __iomem *)data_addr, buf, len / 4);
+ else
+ outsl(data_addr, buf, len / 4);
+
+ if ((io_32bit & 2) && !mmio)
+ local_irq_restore(flags);
+
+ if ((len & 3) >= 2) {
+ if (mmio)
+ __ide_mm_outsw((void __iomem *)data_addr,
+ (u8 *)buf + (len & ~3), 1);
+ else
+ outsw(data_addr, (u8 *)buf + (len & ~3), 1);
+ }
+ } else {
+ if (mmio)
+ __ide_mm_outsw((void __iomem *)data_addr, buf, len / 2);
+ else
+ outsw(data_addr, buf, len / 2);
+ }
+}
+EXPORT_SYMBOL_GPL(ide_output_data);
+
+u8 ide_read_error(ide_drive_t *drive)
+{
+ ide_task_t task;
+
+ memset(&task, 0, sizeof(task));
+ task.tf_flags = IDE_TFLAG_IN_FEATURE;
+
+ drive->hwif->tp_ops->tf_read(drive, &task);
+
+ return task.tf.error;
+}
+EXPORT_SYMBOL_GPL(ide_read_error);
+
+void ide_read_bcount_and_ireason(ide_drive_t *drive, u16 *bcount, u8 *ireason)
+{
+ ide_task_t task;
+
+ memset(&task, 0, sizeof(task));
+ task.tf_flags = IDE_TFLAG_IN_LBAH | IDE_TFLAG_IN_LBAM |
+ IDE_TFLAG_IN_NSECT;
+
+ drive->hwif->tp_ops->tf_read(drive, &task);
+
+ *bcount = (task.tf.lbah << 8) | task.tf.lbam;
+ *ireason = task.tf.nsect & 3;
+}
+EXPORT_SYMBOL_GPL(ide_read_bcount_and_ireason);
+
+const struct ide_tp_ops default_tp_ops = {
+ .exec_command = ide_exec_command,
+ .read_status = ide_read_status,
+ .read_altstatus = ide_read_altstatus,
+
+ .set_irq = ide_set_irq,
+
+ .tf_load = ide_tf_load,
+ .tf_read = ide_tf_read,
+
+ .input_data = ide_input_data,
+ .output_data = ide_output_data,
+};
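+
+/*
+ * Sketch (hypothetical host driver, shown for illustration): a controller
+ * that only needs a custom status accessor can reuse the defaults above
+ * and override a single method, e.g.
+ *
+ *	static const struct ide_tp_ops my_tp_ops = {
+ *		.exec_command	= ide_exec_command,
+ *		.read_status	= my_read_status,
+ *		.read_altstatus	= ide_read_altstatus,
+ *		.set_irq	= ide_set_irq,
+ *		.tf_load	= ide_tf_load,
+ *		.tf_read	= ide_tf_read,
+ *		.input_data	= ide_input_data,
+ *		.output_data	= ide_output_data,
+ *	};
+ *
+ * where my_read_status() stands in for the driver's own implementation.
+ */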
+
+void ide_fix_driveid(u16 *id)
+{
+#ifndef __LITTLE_ENDIAN
+# ifdef __BIG_ENDIAN
+ int i;
+
+ for (i = 0; i < 256; i++)
+ id[i] = __le16_to_cpu(id[i]);
+# else
+# error "Please fix <asm/byteorder.h>"
+# endif
+#endif
+}
+
+/*
+ * ide_fixstring() cleans up and (optionally) byte-swaps a text string,
+ * removing leading/trailing blanks and compressing internal blanks.
+ * It is primarily used to tidy up the model name/number fields as
+ * returned by the ATA_CMD_ID_ATA[PI] commands.
+ */
+
+void ide_fixstring (u8 *s, const int bytecount, const int byteswap)
+{
+ u8 *p, *end = &s[bytecount & ~1]; /* bytecount must be even */
+
+ if (byteswap) {
+ /* convert from big-endian to host byte order */
+ for (p = s ; p != end ; p += 2)
+ be16_to_cpus((u16 *) p);
+ }
+
+ /* strip leading blanks */
+ p = s;
+ while (s != end && *s == ' ')
+ ++s;
+ /* compress internal blanks and strip trailing blanks */
+ while (s != end && *s) {
+ if (*s++ != ' ' || (s != end && *s && *s != ' '))
+ *p++ = *(s-1);
+ }
+ /* wipe out trailing garbage */
+ while (p != end)
+ *p++ = '\0';
+}
+
+EXPORT_SYMBOL(ide_fixstring);
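+
+/*
+ * Example (illustrative): the IDENTIFY model field is stored as
+ * big-endian 16-bit words and padded with blanks, so a call such as
+ *
+ *	ide_fixstring((u8 *)&id[ATA_ID_PROD], ATA_ID_PROD_LEN, 1);
+ *
+ * (ATA_ID_PROD_LEN as defined in <linux/ata.h>) byte-swaps each word,
+ * collapses runs of blanks and strips the padding, e.g. turning
+ * "  WDC  WD800JB  " into "WDC WD800JB".
+ */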
+
+/*
+ * Needed for PCI irq sharing
+ */
+int drive_is_ready (ide_drive_t *drive)
+{
+ ide_hwif_t *hwif = drive->hwif;
+ u8 stat = 0;
+
+ if (drive->waiting_for_dma)
+ return hwif->dma_ops->dma_test_irq(drive);
+
+ /*
+ * We do a passive status test under shared PCI interrupts on
+ * cards that truly share the ATA side interrupt, but may also share
+ * an interrupt with another pci card/device. We make no assumptions
+ * about possible isa-pnp and pci-pnp issues yet.
+ */
+ if (hwif->io_ports.ctl_addr &&
+ (hwif->host_flags & IDE_HFLAG_BROKEN_ALTSTATUS) == 0)
+ stat = hwif->tp_ops->read_altstatus(hwif);
+ else
+ /* Note: this may clear a pending IRQ!! */
+ stat = hwif->tp_ops->read_status(hwif);
+
+ if (stat & ATA_BUSY)
+ /* drive busy: definitely not interrupting */
+ return 0;
+
+ /* drive ready: *might* be interrupting */
+ return 1;
+}
+
+EXPORT_SYMBOL(drive_is_ready);
+
+/*
+ * This routine busy-waits for the drive status to be not "busy".
+ * It then checks the status for all of the "good" bits and none
+ * of the "bad" bits, and if all is okay it returns 0. All other
+ * cases return error -- caller may then invoke ide_error().
+ *
+ * This routine should get fixed to not hog the CPU during extra long waits.
+ * That could be done by busy-waiting for the first jiffy or two, and then
+ * setting a timer to wake up at half-second intervals thereafter,
+ * until the timeout is reached.
+ */
+static int __ide_wait_stat(ide_drive_t *drive, u8 good, u8 bad, unsigned long timeout, u8 *rstat)
+{
+ ide_hwif_t *hwif = drive->hwif;
+ const struct ide_tp_ops *tp_ops = hwif->tp_ops;
+ unsigned long flags;
+ int i;
+ u8 stat;
+
+ udelay(1); /* spec allows drive 400ns to assert "BUSY" */
+ stat = tp_ops->read_status(hwif);
+
+ if (stat & ATA_BUSY) {
+ local_save_flags(flags);
+ local_irq_enable_in_hardirq();
+ timeout += jiffies;
+ while ((stat = tp_ops->read_status(hwif)) & ATA_BUSY) {
+ if (time_after(jiffies, timeout)) {
+ /*
+ * One last read after the timeout in case
+ * heavy interrupt load made us not make any
+ * progress during the timeout..
+ */
+ stat = tp_ops->read_status(hwif);
+ if ((stat & ATA_BUSY) == 0)
+ break;
+
+ local_irq_restore(flags);
+ *rstat = stat;
+ return -EBUSY;
+ }
+ }
+ local_irq_restore(flags);
+ }
+ /*
+ * Allow status to settle, then read it again.
+ * A few rare drives vastly violate the 400ns spec here,
+ * so we'll wait up to 10usec for a "good" status
+ * rather than expensively fail things immediately.
+ * This fix courtesy of Matthew Faupel & Niccolo Rigacci.
+ */
+ for (i = 0; i < 10; i++) {
+ udelay(1);
+ stat = tp_ops->read_status(hwif);
+
+ if (OK_STAT(stat, good, bad)) {
+ *rstat = stat;
+ return 0;
+ }
+ }
+ *rstat = stat;
+ return -EFAULT;
+}
+
+/*
+ * In case of error returns error value after doing "*startstop = ide_error()".
+ * The caller should return the updated value of "startstop" in this case;
+ * "startstop" is unchanged when the function returns 0.
+ */
+int ide_wait_stat(ide_startstop_t *startstop, ide_drive_t *drive, u8 good, u8 bad, unsigned long timeout)
+{
+ int err;
+ u8 stat;
+
+ /* bail early if we've exceeded max_failures */
+ if (drive->max_failures && (drive->failures > drive->max_failures)) {
+ *startstop = ide_stopped;
+ return 1;
+ }
+
+ err = __ide_wait_stat(drive, good, bad, timeout, &stat);
+
+ if (err) {
+ char *s = (err == -EBUSY) ? "status timeout" : "status error";
+ *startstop = ide_error(drive, s, stat);
+ }
+
+ return err;
+}
+
+EXPORT_SYMBOL(ide_wait_stat);
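+
+/*
+ * Illustrative call, following the pattern used by start_request() in
+ * ide-io.c:
+ *
+ *	if (ide_wait_stat(&startstop, drive, drive->ready_stat,
+ *			  ATA_BUSY | ATA_DRQ, WAIT_READY))
+ *		return startstop;
+ *
+ * i.e. wait for the drive to report ready_stat with neither BSY nor DRQ
+ * set, and bail out with the updated startstop on failure.
+ */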
+
+/**
+ * ide_in_drive_list - look for drive in black/white list
+ * @id: drive identifier
+ * @table: list to inspect
+ *
+ * Look for a drive in the blacklist and the whitelist tables.
+ * Returns 1 if the drive is found in the table.
+ */
+
+int ide_in_drive_list(u16 *id, const struct drive_list_entry *table)
+{
+ for ( ; table->id_model; table++)
+ if ((!strcmp(table->id_model, (char *)&id[ATA_ID_PROD])) &&
+ (!table->id_firmware ||
+ strstr((char *)&id[ATA_ID_FW_REV], table->id_firmware)))
+ return 1;
+ return 0;
+}
+
+EXPORT_SYMBOL_GPL(ide_in_drive_list);
+
+/*
+ * Early UDMA66 devices don't set bit14 to 1, only bit13 is valid.
+ * We list them here and depend on the device side cable detection for them.
+ *
+ * Some optical devices with the buggy firmwares have the same problem.
+ */
+static const struct drive_list_entry ivb_list[] = {
+ { "QUANTUM FIREBALLlct10 05" , "A03.0900" },
+ { "TSSTcorp CDDVDW SH-S202J" , "SB00" },
+ { "TSSTcorp CDDVDW SH-S202J" , "SB01" },
+ { "TSSTcorp CDDVDW SH-S202N" , "SB00" },
+ { "TSSTcorp CDDVDW SH-S202N" , "SB01" },
+ { "TSSTcorp CDDVDW SH-S202H" , "SB00" },
+ { "TSSTcorp CDDVDW SH-S202H" , "SB01" },
+ { "SAMSUNG SP0822N" , "WA100-10" },
+ { NULL , NULL }
+};
+
+/*
+ * All hosts that use the 80c ribbon must use this check.
+ * The name is derived from the upper byte of word 93 and the 80c ribbon.
+ */
+u8 eighty_ninty_three (ide_drive_t *drive)
+{
+ ide_hwif_t *hwif = drive->hwif;
+ u16 *id = drive->id;
+ int ivb = ide_in_drive_list(id, ivb_list);
+
+ if (hwif->cbl == ATA_CBL_PATA40_SHORT)
+ return 1;
+
+ if (ivb)
+ printk(KERN_DEBUG "%s: skipping word 93 validity check\n",
+ drive->name);
+
+ if (ata_id_is_sata(id) && !ivb)
+ return 1;
+
+ if (hwif->cbl != ATA_CBL_PATA80 && !ivb)
+ goto no_80w;
+
+ /*
+ * FIXME:
+ * - change master/slave IDENTIFY order
+ * - force bit13 (80c cable present) check also for !ivb devices
+ * (unless the slave device is pre-ATA3)
+ */
+ if ((id[ATA_ID_HW_CONFIG] & 0x4000) ||
+ (ivb && (id[ATA_ID_HW_CONFIG] & 0x2000)))
+ return 1;
+
+no_80w:
+ if (drive->dev_flags & IDE_DFLAG_UDMA33_WARNED)
+ return 0;
+
+ printk(KERN_WARNING "%s: %s side 80-wire cable detection failed, "
+ "limiting max speed to UDMA33\n",
+ drive->name,
+ hwif->cbl == ATA_CBL_PATA80 ? "drive" : "host");
+
+ drive->dev_flags |= IDE_DFLAG_UDMA33_WARNED;
+
+ return 0;
+}
+
+int ide_driveid_update(ide_drive_t *drive)
+{
+ ide_hwif_t *hwif = drive->hwif;
+ const struct ide_tp_ops *tp_ops = hwif->tp_ops;
+ u16 *id;
+ unsigned long flags;
+ u8 stat;
+
+ /*
+ * Re-read drive->id for possible DMA mode
+ * change (copied from ide-probe.c)
+ */
+
+ SELECT_MASK(drive, 1);
+ tp_ops->set_irq(hwif, 0);
+ msleep(50);
+ tp_ops->exec_command(hwif, ATA_CMD_ID_ATA);
+
+ if (ide_busy_sleep(hwif, WAIT_WORSTCASE, 1)) {
+ SELECT_MASK(drive, 0);
+ return 0;
+ }
+
+ msleep(50); /* wait for IRQ and ATA_DRQ */
+ stat = tp_ops->read_status(hwif);
+
+ if (!OK_STAT(stat, ATA_DRQ, BAD_R_STAT)) {
+ SELECT_MASK(drive, 0);
+ printk("%s: CHECK for good STATUS\n", drive->name);
+ return 0;
+ }
+ local_irq_save(flags);
+ SELECT_MASK(drive, 0);
+ id = kmalloc(SECTOR_SIZE, GFP_ATOMIC);
+ if (!id) {
+ local_irq_restore(flags);
+ return 0;
+ }
+ tp_ops->input_data(drive, NULL, id, SECTOR_SIZE);
+ (void)tp_ops->read_status(hwif); /* clear drive IRQ */
+ local_irq_enable();
+ local_irq_restore(flags);
+ ide_fix_driveid(id);
+
+ drive->id[ATA_ID_UDMA_MODES] = id[ATA_ID_UDMA_MODES];
+ drive->id[ATA_ID_MWDMA_MODES] = id[ATA_ID_MWDMA_MODES];
+ drive->id[ATA_ID_SWDMA_MODES] = id[ATA_ID_SWDMA_MODES];
+ /* anything more ? */
+
+ kfree(id);
+
+ if ((drive->dev_flags & IDE_DFLAG_USING_DMA) && ide_id_dma_bug(drive))
+ ide_dma_off(drive);
+
+ return 1;
+}
+
+int ide_config_drive_speed(ide_drive_t *drive, u8 speed)
+{
+ ide_hwif_t *hwif = drive->hwif;
+ const struct ide_tp_ops *tp_ops = hwif->tp_ops;
+ u16 *id = drive->id, i;
+ int error = 0;
+ u8 stat;
+ ide_task_t task;
+
+#ifdef CONFIG_BLK_DEV_IDEDMA
+ if (hwif->dma_ops) /* check if host supports DMA */
+ hwif->dma_ops->dma_host_set(drive, 0);
+#endif
+
+ /* Skip setting PIO flow-control modes on pre-EIDE drives */
+ if ((speed & 0xf8) == XFER_PIO_0 && ata_id_has_iordy(drive->id) == 0)
+ goto skip;
+
+ /*
+ * Don't use ide_wait_cmd here - it will
+ * attempt to set_geometry and recalibrate,
+ * but for some reason these don't work at
+ * this point (lost interrupt).
+ */
+ /*
+ * Select the drive, and issue the SETFEATURES command
+ */
+ disable_irq_nosync(hwif->irq);
+
+ /*
+ * FIXME: we race against the running IRQ here if
+ * this is called from non IRQ context. If we use
+ * disable_irq() we hang on the error path. Work
+ * is needed.
+ */
+
+ udelay(1);
+ SELECT_DRIVE(drive);
+ SELECT_MASK(drive, 1);
+ udelay(1);
+ tp_ops->set_irq(hwif, 0);
+
+ memset(&task, 0, sizeof(task));
+ task.tf_flags = IDE_TFLAG_OUT_FEATURE | IDE_TFLAG_OUT_NSECT;
+ task.tf.feature = SETFEATURES_XFER;
+ task.tf.nsect = speed;
+
+ tp_ops->tf_load(drive, &task);
+
+ tp_ops->exec_command(hwif, ATA_CMD_SET_FEATURES);
+
+ if (drive->quirk_list == 2)
+ tp_ops->set_irq(hwif, 1);
+
+ error = __ide_wait_stat(drive, drive->ready_stat,
+ ATA_BUSY | ATA_DRQ | ATA_ERR,
+ WAIT_CMD, &stat);
+
+ SELECT_MASK(drive, 0);
+
+ enable_irq(hwif->irq);
+
+ if (error) {
+ (void) ide_dump_status(drive, "set_drive_speed_status", stat);
+ return error;
+ }
+
+ id[ATA_ID_UDMA_MODES] &= ~0xFF00;
+ id[ATA_ID_MWDMA_MODES] &= ~0x0F00;
+ id[ATA_ID_SWDMA_MODES] &= ~0x0F00;
+
+ skip:
+#ifdef CONFIG_BLK_DEV_IDEDMA
+ if (speed >= XFER_SW_DMA_0 && (drive->dev_flags & IDE_DFLAG_USING_DMA))
+ hwif->dma_ops->dma_host_set(drive, 1);
+ else if (hwif->dma_ops) /* check if host supports DMA */
+ ide_dma_off_quietly(drive);
+#endif
+
+ if (speed >= XFER_UDMA_0) {
+ i = 1 << (speed - XFER_UDMA_0);
+ id[ATA_ID_UDMA_MODES] |= (i << 8 | i);
+ } else if (speed >= XFER_MW_DMA_0) {
+ i = 1 << (speed - XFER_MW_DMA_0);
+ id[ATA_ID_MWDMA_MODES] |= (i << 8 | i);
+ } else if (speed >= XFER_SW_DMA_0) {
+ i = 1 << (speed - XFER_SW_DMA_0);
+ id[ATA_ID_SWDMA_MODES] |= (i << 8 | i);
+ }
+
+ if (!drive->init_speed)
+ drive->init_speed = speed;
+ drive->current_speed = speed;
+ return error;
+}
+
+/*
+ * This should get invoked any time we exit the driver to
+ * wait for an interrupt response from a drive. handler() points
+ * at the appropriate code to handle the next interrupt, and a
+ * timer is started to prevent us from waiting forever in case
+ * something goes wrong (see the ide_timer_expiry() handler later on).
+ *
+ * See also ide_execute_command
+ */
+static void __ide_set_handler (ide_drive_t *drive, ide_handler_t *handler,
+ unsigned int timeout, ide_expiry_t *expiry)
+{
+ ide_hwif_t *hwif = drive->hwif;
+
+ BUG_ON(hwif->handler);
+ hwif->handler = handler;
+ hwif->expiry = expiry;
+ hwif->timer.expires = jiffies + timeout;
+ hwif->req_gen_timer = hwif->req_gen;
+ add_timer(&hwif->timer);
+}
+
+void ide_set_handler (ide_drive_t *drive, ide_handler_t *handler,
+ unsigned int timeout, ide_expiry_t *expiry)
+{
+ ide_hwif_t *hwif = drive->hwif;
+ unsigned long flags;
+
+ spin_lock_irqsave(&hwif->lock, flags);
+ __ide_set_handler(drive, handler, timeout, expiry);
+ spin_unlock_irqrestore(&hwif->lock, flags);
+}
+
+EXPORT_SYMBOL(ide_set_handler);
+
+/**
+ * ide_execute_command - execute an IDE command
+ * @drive: IDE drive to issue the command against
+ * @command: command byte to write
+ * @handler: handler for next phase
+ * @timeout: timeout for command
+ * @expiry: handler to run on timeout
+ *
+ * Helper function to issue an IDE command. This handles the
+ * atomicity requirements, command timing and ensures that the
+ * handler and IRQ setup do not race. All IDE command kick off
+ * should go via this function or do equivalent locking.
+ */
+
+void ide_execute_command(ide_drive_t *drive, u8 cmd, ide_handler_t *handler,
+ unsigned timeout, ide_expiry_t *expiry)
+{
+ ide_hwif_t *hwif = drive->hwif;
+ unsigned long flags;
+
+ spin_lock_irqsave(&hwif->lock, flags);
+ __ide_set_handler(drive, handler, timeout, expiry);
+ hwif->tp_ops->exec_command(hwif, cmd);
+ /*
+ * The drive takes 400 ns to respond; we must avoid the IRQ being
+ * serviced before that.
+ *
+ * FIXME: we could skip this delay with care on non shared devices
+ */
+ ndelay(400);
+ spin_unlock_irqrestore(&hwif->lock, flags);
+}
+EXPORT_SYMBOL(ide_execute_command);
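+
+/*
+ * Illustrative call pattern (my_intr_handler is a stand-in for whatever
+ * ide_handler_t the caller registers for the next interrupt):
+ *
+ *	ide_execute_command(drive, ATA_CMD_ID_ATA, &my_intr_handler,
+ *			    WAIT_CMD, NULL);
+ *
+ * The handler is then invoked from ide_intr(); if the drive never raises
+ * its IRQ, ide_timer_expiry() takes over after WAIT_CMD jiffies.
+ */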
+
+void ide_execute_pkt_cmd(ide_drive_t *drive)
+{
+ ide_hwif_t *hwif = drive->hwif;
+ unsigned long flags;
+
+ spin_lock_irqsave(&hwif->lock, flags);
+ hwif->tp_ops->exec_command(hwif, ATA_CMD_PACKET);
+ ndelay(400);
+ spin_unlock_irqrestore(&hwif->lock, flags);
+}
+EXPORT_SYMBOL_GPL(ide_execute_pkt_cmd);
+
+static inline void ide_complete_drive_reset(ide_drive_t *drive, int err)
+{
+ struct request *rq = drive->hwif->rq;
+
+ if (rq && blk_special_request(rq) && rq->cmd[0] == REQ_DRIVE_RESET)
+ ide_end_request(drive, err ? err : 1, 0);
+}
+
+/* needed below */
+static ide_startstop_t do_reset1 (ide_drive_t *, int);
+
+/*
+ * atapi_reset_pollfunc() gets invoked to poll the interface for completion every 50ms
+ * during an atapi drive reset operation. If the drive has not yet responded,
+ * and we have not yet hit our maximum waiting time, then the timer is restarted
+ * for another 50ms.
+ */
+static ide_startstop_t atapi_reset_pollfunc (ide_drive_t *drive)
+{
+ ide_hwif_t *hwif = drive->hwif;
+ u8 stat;
+
+ SELECT_DRIVE(drive);
+ udelay (10);
+ stat = hwif->tp_ops->read_status(hwif);
+
+ if (OK_STAT(stat, 0, ATA_BUSY))
+ printk("%s: ATAPI reset complete\n", drive->name);
+ else {
+ if (time_before(jiffies, hwif->poll_timeout)) {
+ ide_set_handler(drive, &atapi_reset_pollfunc, HZ/20, NULL);
+ /* continue polling */
+ return ide_started;
+ }
+ /* end of polling */
+ hwif->polling = 0;
+ printk("%s: ATAPI reset timed-out, status=0x%02x\n",
+ drive->name, stat);
+ /* do it the old fashioned way */
+ return do_reset1(drive, 1);
+ }
+ /* done polling */
+ hwif->polling = 0;
+ ide_complete_drive_reset(drive, 0);
+ return ide_stopped;
+}
+
+static void ide_reset_report_error(ide_hwif_t *hwif, u8 err)
+{
+ static const char *err_master_vals[] =
+ { NULL, "passed", "formatter device error",
+ "sector buffer error", "ECC circuitry error",
+ "controlling MPU error" };
+
+ u8 err_master = err & 0x7f;
+
+ printk(KERN_ERR "%s: reset: master: ", hwif->name);
+ if (err_master && err_master < 6)
+ printk(KERN_CONT "%s", err_master_vals[err_master]);
+ else
+ printk(KERN_CONT "error (0x%02x?)", err);
+ if (err & 0x80)
+ printk(KERN_CONT "; slave: failed");
+ printk(KERN_CONT "\n");
+}
+
+/*
+ * reset_pollfunc() gets invoked to poll the interface for completion every 50ms
+ * during an ide reset operation. If the drives have not yet responded,
+ * and we have not yet hit our maximum waiting time, then the timer is restarted
+ * for another 50ms.
+ */
+static ide_startstop_t reset_pollfunc (ide_drive_t *drive)
+{
+ ide_hwif_t *hwif = drive->hwif;
+ const struct ide_port_ops *port_ops = hwif->port_ops;
+ u8 tmp;
+ int err = 0;
+
+ if (port_ops && port_ops->reset_poll) {
+ err = port_ops->reset_poll(drive);
+ if (err) {
+ printk(KERN_ERR "%s: host reset_poll failure for %s.\n",
+ hwif->name, drive->name);
+ goto out;
+ }
+ }
+
+ tmp = hwif->tp_ops->read_status(hwif);
+
+ if (!OK_STAT(tmp, 0, ATA_BUSY)) {
+ if (time_before(jiffies, hwif->poll_timeout)) {
+ ide_set_handler(drive, &reset_pollfunc, HZ/20, NULL);
+ /* continue polling */
+ return ide_started;
+ }
+ printk("%s: reset timed-out, status=0x%02x\n", hwif->name, tmp);
+ drive->failures++;
+ err = -EIO;
+ } else {
+ tmp = ide_read_error(drive);
+
+ if (tmp == 1) {
+ printk(KERN_INFO "%s: reset: success\n", hwif->name);
+ drive->failures = 0;
+ } else {
+ ide_reset_report_error(hwif, tmp);
+ drive->failures++;
+ err = -EIO;
+ }
+ }
+out:
+ hwif->polling = 0; /* done polling */
+ ide_complete_drive_reset(drive, err);
+ return ide_stopped;
+}
+
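+/*
+ * Reset the per-drive state kept for an ATA disk before a port reset:
+ * schedule set_geometry/recalibrate for legacy drives, clear the
+ * multi-sector count and the PARKED flag, and re-request multmode if
+ * the configured value no longer matches the current one.
+ */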
+static void ide_disk_pre_reset(ide_drive_t *drive)
+{
+ int legacy = (drive->id[ATA_ID_CFS_ENABLE_2] & 0x0400) ? 0 : 1;
+
+ drive->special.all = 0;
+ drive->special.b.set_geometry = legacy;
+ drive->special.b.recalibrate = legacy;
+
+ drive->mult_count = 0;
+ drive->dev_flags &= ~IDE_DFLAG_PARKED;
+
+ if ((drive->dev_flags & IDE_DFLAG_KEEP_SETTINGS) == 0 &&
+ (drive->dev_flags & IDE_DFLAG_USING_DMA) == 0)
+ drive->mult_req = 0;
+
+ if (drive->mult_req != drive->mult_count)
+ drive->special.b.set_multmode = 1;
+}
+
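+/*
+ * pre_reset() puts per-device software state back into a known condition
+ * before the hardware is reset and, when settings are being kept, also
+ * notifies the host driver through ->pre_reset.
+ */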
+static void pre_reset(ide_drive_t *drive)
+{
+ const struct ide_port_ops *port_ops = drive->hwif->port_ops;
+
+ if (drive->media == ide_disk)
+ ide_disk_pre_reset(drive);
+ else
+ drive->dev_flags |= IDE_DFLAG_POST_RESET;
+
+ if (drive->dev_flags & IDE_DFLAG_USING_DMA) {
+ if (drive->crc_count)
+ ide_check_dma_crc(drive);
+ else
+ ide_dma_off(drive);
+ }
+
+ if ((drive->dev_flags & IDE_DFLAG_KEEP_SETTINGS) == 0) {
+ if ((drive->dev_flags & IDE_DFLAG_USING_DMA) == 0) {
+ drive->dev_flags &= ~IDE_DFLAG_UNMASK;
+ drive->io_32bit = 0;
+ }
+ return;
+ }
+
+ if (port_ops && port_ops->pre_reset)
+ port_ops->pre_reset(drive);
+
+ if (drive->current_speed != 0xff)
+ drive->desired_speed = drive->current_speed;
+ drive->current_speed = 0xff;
+}
+
+/*
+ * do_reset1() attempts to recover a confused drive by resetting it.
+ * Unfortunately, resetting a disk drive actually resets all devices on
+ * the same interface, so it can really be thought of as resetting the
+ * interface rather than resetting the drive.
+ *
+ * ATAPI devices have their own reset mechanism which allows them to be
+ * individually reset without clobbering other devices on the same interface.
+ *
+ * Unfortunately, the IDE interface does not generate an interrupt to let
+ * us know when the reset operation has finished, so we must poll for this.
+ * Equally poor, though, is the fact that this may take a very long time to
+ * complete (up to 30 seconds worst case).  So, instead of busy-waiting here,
+ * we set a timer to poll at 50ms intervals.
+ */
+static ide_startstop_t do_reset1 (ide_drive_t *drive, int do_not_try_atapi)
+{
+ ide_hwif_t *hwif = drive->hwif;
+ struct ide_io_ports *io_ports = &hwif->io_ports;
+ const struct ide_tp_ops *tp_ops = hwif->tp_ops;
+ const struct ide_port_ops *port_ops;
+ ide_drive_t *tdrive;
+ unsigned long flags, timeout;
+ int i;
+ DEFINE_WAIT(wait);
+
+ spin_lock_irqsave(&hwif->lock, flags);
+
+ /* We must not reset with running handlers */
+ BUG_ON(hwif->handler != NULL);
+
+ /* For an ATAPI device, first try an ATAPI SRST. */
+ if (drive->media != ide_disk && !do_not_try_atapi) {
+ pre_reset(drive);
+ SELECT_DRIVE(drive);
+ udelay (20);
+ tp_ops->exec_command(hwif, ATA_CMD_DEV_RESET);
+ ndelay(400);
+ hwif->poll_timeout = jiffies + WAIT_WORSTCASE;
+ hwif->polling = 1;
+ __ide_set_handler(drive, &atapi_reset_pollfunc, HZ/20, NULL);
+ spin_unlock_irqrestore(&hwif->lock, flags);
+ return ide_started;
+ }
+
+ /* We must not disturb devices in the IDE_DFLAG_PARKED state. */
+ do {
+ unsigned long now;
+
+ prepare_to_wait(&ide_park_wq, &wait, TASK_UNINTERRUPTIBLE);
+ timeout = jiffies;
+ ide_port_for_each_dev(i, tdrive, hwif) {
+ if (tdrive->dev_flags & IDE_DFLAG_PRESENT &&
+ tdrive->dev_flags & IDE_DFLAG_PARKED &&
+ time_after(tdrive->sleep, timeout))
+ timeout = tdrive->sleep;
+ }
+
+ now = jiffies;
+ if (time_before_eq(timeout, now))
+ break;
+
+ spin_unlock_irqrestore(&hwif->lock, flags);
+ timeout = schedule_timeout_uninterruptible(timeout - now);
+ spin_lock_irqsave(&hwif->lock, flags);
+ } while (timeout);
+ finish_wait(&ide_park_wq, &wait);
+
+ /*
+ * First, reset any device state data we were maintaining
+ * for any of the drives on this interface.
+ */
+ ide_port_for_each_dev(i, tdrive, hwif)
+ pre_reset(tdrive);
+
+ if (io_ports->ctl_addr == 0) {
+ spin_unlock_irqrestore(&hwif->lock, flags);
+ ide_complete_drive_reset(drive, -ENXIO);
+ return ide_stopped;
+ }
+
+ /*
+ * Note that we also set nIEN while resetting the device,
+ * to mask unwanted interrupts from the interface during the reset.
+ * However, due to the design of PC hardware, this will cause an
+ * immediate interrupt due to the edge transition it produces.
+ * This single interrupt gives us a "fast poll" for drives that
+ * recover from reset very quickly, saving us the first 50ms wait time.
+ *
+ * TODO: add ->softreset method and stop abusing ->set_irq
+ */
+ /* set SRST and nIEN */
+ tp_ops->set_irq(hwif, 4);
+ /* more than enough time */
+ udelay(10);
+ /* clear SRST, leave nIEN (unless device is on the quirk list) */
+ tp_ops->set_irq(hwif, drive->quirk_list == 2);
+ /* more than enough time */
+ udelay(10);
+ hwif->poll_timeout = jiffies + WAIT_WORSTCASE;
+ hwif->polling = 1;
+ __ide_set_handler(drive, &reset_pollfunc, HZ/20, NULL);
+
+ /*
+	 * Some weird controllers like resetting themselves to a strange
+	 * state when the disks are reset this way. At least, the Winbond
+	 * 553 documentation says so.
+ */
+ port_ops = hwif->port_ops;
+ if (port_ops && port_ops->resetproc)
+ port_ops->resetproc(drive);
+
+ spin_unlock_irqrestore(&hwif->lock, flags);
+ return ide_started;
+}
+
+/*
+ * ide_do_reset() is the entry point to the drive/interface reset code.
+ */
+
+ide_startstop_t ide_do_reset (ide_drive_t *drive)
+{
+ return do_reset1(drive, 0);
+}
+
+EXPORT_SYMBOL(ide_do_reset);
+
+/*
+ * ide_wait_not_busy() waits for the currently selected device on the hwif
+ * to report a non-busy status; see comments in ide_probe_port().
+ */
+int ide_wait_not_busy(ide_hwif_t *hwif, unsigned long timeout)
+{
+ u8 stat = 0;
+
+ while(timeout--) {
+ /*
+ * Turn this into a schedule() sleep once I'm sure
+ * about locking issues (2.5 work ?).
+ */
+ mdelay(1);
+ stat = hwif->tp_ops->read_status(hwif);
+ if ((stat & ATA_BUSY) == 0)
+ return 0;
+ /*
+ * Assume a value of 0xff means nothing is connected to
+ * the interface and it doesn't implement the pull-down
+ * resistor on D7.
+ */
+ if (stat == 0xff)
+ return -ENODEV;
+ touch_softlockup_watchdog();
+ touch_nmi_watchdog();
+ }
+ return -EBUSY;
+}
diff --git a/windhoek/ide/ide-lib.c b/windhoek/ide/ide-lib.c
new file mode 100644
index 00000000..09526a0d
--- /dev/null
+++ b/windhoek/ide/ide-lib.c
@@ -0,0 +1,423 @@
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/ide.h>
+#include <linux/bitops.h>
+
+static const char *udma_str[] =
+ { "UDMA/16", "UDMA/25", "UDMA/33", "UDMA/44",
+ "UDMA/66", "UDMA/100", "UDMA/133", "UDMA7" };
+static const char *mwdma_str[] =
+ { "MWDMA0", "MWDMA1", "MWDMA2" };
+static const char *swdma_str[] =
+ { "SWDMA0", "SWDMA1", "SWDMA2" };
+static const char *pio_str[] =
+ { "PIO0", "PIO1", "PIO2", "PIO3", "PIO4", "PIO5" };
+
+/**
+ * ide_xfer_verbose - return IDE mode names
+ * @mode: transfer mode
+ *
+ * Returns a constant string giving the name of the mode
+ * requested.
+ */
+
+const char *ide_xfer_verbose(u8 mode)
+{
+ const char *s;
+ u8 i = mode & 0xf;
+
+ if (mode >= XFER_UDMA_0 && mode <= XFER_UDMA_7)
+ s = udma_str[i];
+ else if (mode >= XFER_MW_DMA_0 && mode <= XFER_MW_DMA_2)
+ s = mwdma_str[i];
+ else if (mode >= XFER_SW_DMA_0 && mode <= XFER_SW_DMA_2)
+ s = swdma_str[i];
+ else if (mode >= XFER_PIO_0 && mode <= XFER_PIO_5)
+ s = pio_str[i & 0x7];
+ else if (mode == XFER_PIO_SLOW)
+ s = "PIO SLOW";
+ else
+ s = "XFER ERROR";
+
+ return s;
+}
+EXPORT_SYMBOL(ide_xfer_verbose);
+
+/**
+ * ide_rate_filter - filter transfer mode
+ * @drive: IDE device
+ * @speed: desired speed
+ *
+ * Given the available transfer modes this function returns
+ * the best available speed at or below the speed requested.
+ *
+ * TODO: check device PIO capabilities
+ */
+
+static u8 ide_rate_filter(ide_drive_t *drive, u8 speed)
+{
+ ide_hwif_t *hwif = drive->hwif;
+ u8 mode = ide_find_dma_mode(drive, speed);
+
+ if (mode == 0) {
+ if (hwif->pio_mask)
+ mode = fls(hwif->pio_mask) - 1 + XFER_PIO_0;
+ else
+ mode = XFER_PIO_4;
+ }
+
+/* printk("%s: mode 0x%02x, speed 0x%02x\n", __func__, mode, speed); */
+
+ return min(speed, mode);
+}
+
+/**
+ * ide_get_best_pio_mode - get PIO mode from drive
+ * @drive: drive to consider
+ * @mode_wanted: preferred mode
+ * @max_mode: highest allowed mode
+ *
+ * This routine returns the recommended PIO settings for a given drive,
+ * based on the drive->id information and the ide_pio_blacklist[].
+ *
+ * Drive PIO mode is auto-selected if 255 is passed as mode_wanted.
+ * This is used by most chipset support modules when "auto-tuning".
+ */
+
+u8 ide_get_best_pio_mode(ide_drive_t *drive, u8 mode_wanted, u8 max_mode)
+{
+ u16 *id = drive->id;
+ int pio_mode = -1, overridden = 0;
+
+ if (mode_wanted != 255)
+ return min_t(u8, mode_wanted, max_mode);
+
+ if ((drive->hwif->host_flags & IDE_HFLAG_PIO_NO_BLACKLIST) == 0)
+ pio_mode = ide_scan_pio_blacklist((char *)&id[ATA_ID_PROD]);
+
+ if (pio_mode != -1) {
+ printk(KERN_INFO "%s: is on PIO blacklist\n", drive->name);
+ } else {
+ pio_mode = id[ATA_ID_OLD_PIO_MODES] >> 8;
+ if (pio_mode > 2) { /* 2 is maximum allowed tPIO value */
+ pio_mode = 2;
+ overridden = 1;
+ }
+
+ if (id[ATA_ID_FIELD_VALID] & 2) { /* ATA2? */
+ if (ata_id_has_iordy(id)) {
+ if (id[ATA_ID_PIO_MODES] & 7) {
+ overridden = 0;
+ if (id[ATA_ID_PIO_MODES] & 4)
+ pio_mode = 5;
+ else if (id[ATA_ID_PIO_MODES] & 2)
+ pio_mode = 4;
+ else
+ pio_mode = 3;
+ }
+ }
+ }
+
+ if (overridden)
+ printk(KERN_INFO "%s: tPIO > 2, assuming tPIO = 2\n",
+ drive->name);
+ }
+
+ if (pio_mode > max_mode)
+ pio_mode = max_mode;
+
+ return pio_mode;
+}
+EXPORT_SYMBOL_GPL(ide_get_best_pio_mode);
+
+/* req_pio == "255" for auto-tune */
+void ide_set_pio(ide_drive_t *drive, u8 req_pio)
+{
+ ide_hwif_t *hwif = drive->hwif;
+ const struct ide_port_ops *port_ops = hwif->port_ops;
+ u8 host_pio, pio;
+
+ if (port_ops == NULL || port_ops->set_pio_mode == NULL ||
+ (hwif->host_flags & IDE_HFLAG_NO_SET_MODE))
+ return;
+
+ BUG_ON(hwif->pio_mask == 0x00);
+
+ host_pio = fls(hwif->pio_mask) - 1;
+
+ pio = ide_get_best_pio_mode(drive, req_pio, host_pio);
+
+ /*
+ * TODO:
+ * - report device max PIO mode
+ * - check req_pio != 255 against device max PIO mode
+ */
+ printk(KERN_DEBUG "%s: host max PIO%d wanted PIO%d%s selected PIO%d\n",
+ drive->name, host_pio, req_pio,
+ req_pio == 255 ? "(auto-tune)" : "", pio);
+
+ (void)ide_set_pio_mode(drive, XFER_PIO_0 + pio);
+}
+EXPORT_SYMBOL_GPL(ide_set_pio);
+
+/**
+ * ide_toggle_bounce - handle bounce buffering
+ * @drive: drive to update
+ * @on: on/off boolean
+ *
+ * Enable or disable bounce buffering for the device. Drives move
+ * between PIO and DMA and that changes the rules we need.
+ */
+
+void ide_toggle_bounce(ide_drive_t *drive, int on)
+{
+ u64 addr = BLK_BOUNCE_HIGH; /* dma64_addr_t */
+
+ if (!PCI_DMA_BUS_IS_PHYS) {
+ addr = BLK_BOUNCE_ANY;
+ } else if (on && drive->media == ide_disk) {
+ struct device *dev = drive->hwif->dev;
+
+ if (dev && dev->dma_mask)
+ addr = *dev->dma_mask;
+ }
+
+ if (drive->queue)
+ blk_queue_bounce_limit(drive->queue, addr);
+}
+
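+/*
+ * Program a PIO mode on the host via ->set_pio_mode and, except for the
+ * legacy drivers noted below, also set the transfer mode on the device
+ * with ide_config_drive_speed(); IDE_HFLAG_POST_SET_MODE controls the
+ * ordering of the two steps.
+ */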
+int ide_set_pio_mode(ide_drive_t *drive, const u8 mode)
+{
+ ide_hwif_t *hwif = drive->hwif;
+ const struct ide_port_ops *port_ops = hwif->port_ops;
+
+ if (hwif->host_flags & IDE_HFLAG_NO_SET_MODE)
+ return 0;
+
+ if (port_ops == NULL || port_ops->set_pio_mode == NULL)
+ return -1;
+
+ /*
+ * TODO: temporary hack for some legacy host drivers that didn't
+ * set transfer mode on the device in ->set_pio_mode method...
+ */
+ if (port_ops->set_dma_mode == NULL) {
+ port_ops->set_pio_mode(drive, mode - XFER_PIO_0);
+ return 0;
+ }
+
+ if (hwif->host_flags & IDE_HFLAG_POST_SET_MODE) {
+ if (ide_config_drive_speed(drive, mode))
+ return -1;
+ port_ops->set_pio_mode(drive, mode - XFER_PIO_0);
+ return 0;
+ } else {
+ port_ops->set_pio_mode(drive, mode - XFER_PIO_0);
+ return ide_config_drive_speed(drive, mode);
+ }
+}
+
+int ide_set_dma_mode(ide_drive_t *drive, const u8 mode)
+{
+ ide_hwif_t *hwif = drive->hwif;
+ const struct ide_port_ops *port_ops = hwif->port_ops;
+
+ if (hwif->host_flags & IDE_HFLAG_NO_SET_MODE)
+ return 0;
+
+ if (port_ops == NULL || port_ops->set_dma_mode == NULL)
+ return -1;
+
+ if (hwif->host_flags & IDE_HFLAG_POST_SET_MODE) {
+ if (ide_config_drive_speed(drive, mode))
+ return -1;
+ port_ops->set_dma_mode(drive, mode);
+ return 0;
+ } else {
+ port_ops->set_dma_mode(drive, mode);
+ return ide_config_drive_speed(drive, mode);
+ }
+}
+EXPORT_SYMBOL_GPL(ide_set_dma_mode);
+
+/**
+ * ide_set_xfer_rate - set transfer rate
+ * @drive: drive to set
+ * @rate: speed to attempt to set
+ *
+ * General helper for setting the speed of an IDE device. This
+ * function knows about user enforced limits from the configuration
+ * which ->set_pio_mode/->set_dma_mode does not.
+ */
+
+int ide_set_xfer_rate(ide_drive_t *drive, u8 rate)
+{
+ ide_hwif_t *hwif = drive->hwif;
+ const struct ide_port_ops *port_ops = hwif->port_ops;
+
+ if (port_ops == NULL || port_ops->set_dma_mode == NULL ||
+ (hwif->host_flags & IDE_HFLAG_NO_SET_MODE))
+ return -1;
+
+ rate = ide_rate_filter(drive, rate);
+
+ BUG_ON(rate < XFER_PIO_0);
+
+ if (rate >= XFER_PIO_0 && rate <= XFER_PIO_5)
+ return ide_set_pio_mode(drive, rate);
+
+ return ide_set_dma_mode(drive, rate);
+}
+
+static void ide_dump_opcode(ide_drive_t *drive)
+{
+ struct request *rq = drive->hwif->rq;
+ ide_task_t *task = NULL;
+
+ if (!rq)
+ return;
+
+ if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE)
+ task = rq->special;
+
+ printk(KERN_ERR "ide: failed opcode was: ");
+ if (task == NULL)
+ printk(KERN_CONT "unknown\n");
+ else
+ printk(KERN_CONT "0x%02x\n", task->tf.command);
+}
+
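+/*
+ * ide_get_lba_addr - combine the (HOB) LBA taskfile registers into a
+ * 48-bit address for LBA48 commands, or a 28-bit address (upper nibble
+ * taken from the device register) otherwise.
+ */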
+u64 ide_get_lba_addr(struct ide_taskfile *tf, int lba48)
+{
+ u32 high, low;
+
+ if (lba48)
+ high = (tf->hob_lbah << 16) | (tf->hob_lbam << 8) |
+ tf->hob_lbal;
+ else
+ high = tf->device & 0xf;
+ low = (tf->lbah << 16) | (tf->lbam << 8) | tf->lbal;
+
+ return ((u64)high << 24) | low;
+}
+EXPORT_SYMBOL_GPL(ide_get_lba_addr);
+
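+/*
+ * Read the failing sector address back from the taskfile and print it,
+ * in LBA or CHS form depending on how the drive was addressed.
+ */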
+static void ide_dump_sector(ide_drive_t *drive)
+{
+ ide_task_t task;
+ struct ide_taskfile *tf = &task.tf;
+ u8 lba48 = !!(drive->dev_flags & IDE_DFLAG_LBA48);
+
+ memset(&task, 0, sizeof(task));
+ if (lba48)
+ task.tf_flags = IDE_TFLAG_IN_LBA | IDE_TFLAG_IN_HOB_LBA |
+ IDE_TFLAG_LBA48;
+ else
+ task.tf_flags = IDE_TFLAG_IN_LBA | IDE_TFLAG_IN_DEVICE;
+
+ drive->hwif->tp_ops->tf_read(drive, &task);
+
+ if (lba48 || (tf->device & ATA_LBA))
+ printk(KERN_CONT ", LBAsect=%llu",
+ (unsigned long long)ide_get_lba_addr(tf, lba48));
+ else
+ printk(KERN_CONT ", CHS=%d/%d/%d", (tf->lbah << 8) + tf->lbam,
+ tf->device & 0xf, tf->lbal);
+}
+
+static void ide_dump_ata_error(ide_drive_t *drive, u8 err)
+{
+ printk(KERN_ERR "{ ");
+ if (err & ATA_ABORTED)
+ printk(KERN_CONT "DriveStatusError ");
+ if (err & ATA_ICRC)
+ printk(KERN_CONT "%s",
+ (err & ATA_ABORTED) ? "BadCRC " : "BadSector ");
+ if (err & ATA_UNC)
+ printk(KERN_CONT "UncorrectableError ");
+ if (err & ATA_IDNF)
+ printk(KERN_CONT "SectorIdNotFound ");
+ if (err & ATA_TRK0NF)
+ printk(KERN_CONT "TrackZeroNotFound ");
+ if (err & ATA_AMNF)
+ printk(KERN_CONT "AddrMarkNotFound ");
+ printk(KERN_CONT "}");
+ if ((err & (ATA_BBK | ATA_ABORTED)) == ATA_BBK ||
+ (err & (ATA_UNC | ATA_IDNF | ATA_AMNF))) {
+ struct request *rq = drive->hwif->rq;
+
+ ide_dump_sector(drive);
+
+ if (rq)
+ printk(KERN_CONT ", sector=%llu",
+ (unsigned long long)rq->sector);
+ }
+ printk(KERN_CONT "\n");
+}
+
+static void ide_dump_atapi_error(ide_drive_t *drive, u8 err)
+{
+ printk(KERN_ERR "{ ");
+ if (err & ATAPI_ILI)
+ printk(KERN_CONT "IllegalLengthIndication ");
+ if (err & ATAPI_EOM)
+ printk(KERN_CONT "EndOfMedia ");
+ if (err & ATA_ABORTED)
+ printk(KERN_CONT "AbortedCommand ");
+ if (err & ATA_MCR)
+ printk(KERN_CONT "MediaChangeRequested ");
+ if (err & ATAPI_LFS)
+ printk(KERN_CONT "LastFailedSense=0x%02x ",
+ (err & ATAPI_LFS) >> 4);
+ printk(KERN_CONT "}\n");
+}
+
+/**
+ * ide_dump_status - translate ATA/ATAPI error
+ * @drive: drive that status applies to
+ * @msg: text message to print
+ * @stat: status byte to decode
+ *
+ * Error reporting, in human readable form (luxurious, but a memory hog).
+ * Combines the drive name, message and status byte to provide a
+ * user understandable explanation of the device error.
+ */
+
+u8 ide_dump_status(ide_drive_t *drive, const char *msg, u8 stat)
+{
+ u8 err = 0;
+
+ printk(KERN_ERR "%s: %s: status=0x%02x { ", drive->name, msg, stat);
+ if (stat & ATA_BUSY)
+ printk(KERN_CONT "Busy ");
+ else {
+ if (stat & ATA_DRDY)
+ printk(KERN_CONT "DriveReady ");
+ if (stat & ATA_DF)
+ printk(KERN_CONT "DeviceFault ");
+ if (stat & ATA_DSC)
+ printk(KERN_CONT "SeekComplete ");
+ if (stat & ATA_DRQ)
+ printk(KERN_CONT "DataRequest ");
+ if (stat & ATA_CORR)
+ printk(KERN_CONT "CorrectedError ");
+ if (stat & ATA_IDX)
+ printk(KERN_CONT "Index ");
+ if (stat & ATA_ERR)
+ printk(KERN_CONT "Error ");
+ }
+ printk(KERN_CONT "}\n");
+ if ((stat & (ATA_BUSY | ATA_ERR)) == ATA_ERR) {
+ err = ide_read_error(drive);
+ printk(KERN_ERR "%s: %s: error=0x%02x ", drive->name, msg, err);
+ if (drive->media == ide_disk)
+ ide_dump_ata_error(drive, err);
+ else
+ ide_dump_atapi_error(drive, err);
+ }
+ ide_dump_opcode(drive);
+ return err;
+}
+EXPORT_SYMBOL(ide_dump_status);
diff --git a/windhoek/ide/ide-park.c b/windhoek/ide/ide-park.c
new file mode 100644
index 00000000..c875a957
--- /dev/null
+++ b/windhoek/ide/ide-park.c
@@ -0,0 +1,124 @@
+#include <linux/kernel.h>
+#include <linux/ide.h>
+#include <linux/jiffies.h>
+#include <linux/blkdev.h>
+
+DECLARE_WAIT_QUEUE_HEAD(ide_park_wq);
+
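+/*
+ * Park the heads for @timeout jiffies. If the drive is already parked,
+ * just move the wakeup deadline; otherwise queue a REQ_PARK_HEADS special
+ * request followed by a REQ_UNPARK_HEADS request so that power management
+ * is restored once the timeout expires.
+ */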
+static void issue_park_cmd(ide_drive_t *drive, unsigned long timeout)
+{
+ ide_hwif_t *hwif = drive->hwif;
+ struct request_queue *q = drive->queue;
+ struct request *rq;
+ int rc;
+
+ timeout += jiffies;
+ spin_lock_irq(&hwif->lock);
+ if (drive->dev_flags & IDE_DFLAG_PARKED) {
+ int reset_timer = time_before(timeout, drive->sleep);
+ int start_queue = 0;
+
+ drive->sleep = timeout;
+ wake_up_all(&ide_park_wq);
+ if (reset_timer && del_timer(&hwif->timer))
+ start_queue = 1;
+ spin_unlock_irq(&hwif->lock);
+
+ if (start_queue) {
+ spin_lock_irq(q->queue_lock);
+ blk_start_queueing(q);
+ spin_unlock_irq(q->queue_lock);
+ }
+ return;
+ }
+ spin_unlock_irq(&hwif->lock);
+
+ rq = blk_get_request(q, READ, __GFP_WAIT);
+ rq->cmd[0] = REQ_PARK_HEADS;
+ rq->cmd_len = 1;
+ rq->cmd_type = REQ_TYPE_SPECIAL;
+ rq->special = &timeout;
+ rc = blk_execute_rq(q, NULL, rq, 1);
+ blk_put_request(rq);
+ if (rc)
+ goto out;
+
+ /*
+ * Make sure that *some* command is sent to the drive after the
+ * timeout has expired, so power management will be reenabled.
+ */
+ rq = blk_get_request(q, READ, GFP_NOWAIT);
+ if (unlikely(!rq))
+ goto out;
+
+ rq->cmd[0] = REQ_UNPARK_HEADS;
+ rq->cmd_len = 1;
+ rq->cmd_type = REQ_TYPE_SPECIAL;
+ elv_add_request(q, rq, ELEVATOR_INSERT_FRONT, 1);
+
+out:
+ return;
+}
+
+ssize_t ide_park_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ ide_drive_t *drive = to_ide_device(dev);
+ ide_hwif_t *hwif = drive->hwif;
+ unsigned long now;
+ unsigned int msecs;
+
+ if (drive->dev_flags & IDE_DFLAG_NO_UNLOAD)
+ return -EOPNOTSUPP;
+
+ spin_lock_irq(&hwif->lock);
+ now = jiffies;
+ if (drive->dev_flags & IDE_DFLAG_PARKED &&
+ time_after(drive->sleep, now))
+ msecs = jiffies_to_msecs(drive->sleep - now);
+ else
+ msecs = 0;
+ spin_unlock_irq(&hwif->lock);
+
+ return snprintf(buf, 20, "%u\n", msecs);
+}
+
+ssize_t ide_park_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+#define MAX_PARK_TIMEOUT 30000
+ ide_drive_t *drive = to_ide_device(dev);
+ long int input;
+ int rc;
+
+ rc = strict_strtol(buf, 10, &input);
+ if (rc || input < -2)
+ return -EINVAL;
+ if (input > MAX_PARK_TIMEOUT) {
+ input = MAX_PARK_TIMEOUT;
+ rc = -EOVERFLOW;
+ }
+
+ mutex_lock(&ide_setting_mtx);
+ if (input >= 0) {
+ if (drive->dev_flags & IDE_DFLAG_NO_UNLOAD)
+ rc = -EOPNOTSUPP;
+ else if (input || drive->dev_flags & IDE_DFLAG_PARKED)
+ issue_park_cmd(drive, msecs_to_jiffies(input));
+ } else {
+ if (drive->media == ide_disk)
+ switch (input) {
+ case -1:
+ drive->dev_flags &= ~IDE_DFLAG_NO_UNLOAD;
+ break;
+ case -2:
+ drive->dev_flags |= IDE_DFLAG_NO_UNLOAD;
+ break;
+ }
+ else
+ rc = -EOPNOTSUPP;
+ }
+ mutex_unlock(&ide_setting_mtx);
+
+ return rc ? rc : len;
+}
diff --git a/windhoek/ide/ide-pci-generic.c b/windhoek/ide/ide-pci-generic.c
new file mode 100644
index 00000000..f47b9326
--- /dev/null
+++ b/windhoek/ide/ide-pci-generic.c
@@ -0,0 +1,195 @@
+/*
+ * Copyright (C) 2001-2002 Andre Hedrick <andre@linux-ide.org>
+ * Portions (C) Copyright 2002 Red Hat Inc
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * For the avoidance of doubt the "preferred form" of this code is one which
+ * is in an open non patent encumbered format. Where cryptographic key signing
+ * forms part of the process of creating an executable the information
+ * including keys needed to generate an equivalently functional executable
+ * are deemed to be part of the source code.
+ */
+
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/ide.h>
+#include <linux/init.h>
+
+#define DRV_NAME "ide_pci_generic"
+
+#include "local.h"
+
+static int ide_generic_all; /* Set to claim all devices */
+
+module_param_named(all_generic_ide, ide_generic_all, bool, 0444);
+MODULE_PARM_DESC(all_generic_ide, "IDE generic will claim all unknown PCI IDE storage controllers.");
+
+#define IDE_HFLAGS_UMC (IDE_HFLAG_NO_DMA | IDE_HFLAG_FORCE_LEGACY_IRQS)
+
+#define DECLARE_GENERIC_PCI_DEV(extra_flags) \
+ { \
+ .name = DRV_NAME, \
+ .host_flags = IDE_HFLAG_TRUST_BIOS_FOR_DMA | \
+ extra_flags, \
+ .swdma_mask = ATA_SWDMA2, \
+ .mwdma_mask = ATA_MWDMA2, \
+ .udma_mask = ATA_UDMA6, \
+ }
+
+static const struct ide_port_info generic_chipsets[] __devinitdata = {
+ /* 0: Unknown */
+ DECLARE_GENERIC_PCI_DEV(0),
+
+ { /* 1: NS87410 */
+ .name = DRV_NAME,
+ .enablebits = { {0x43, 0x08, 0x08}, {0x47, 0x08, 0x08} },
+ .host_flags = IDE_HFLAG_TRUST_BIOS_FOR_DMA,
+ .swdma_mask = ATA_SWDMA2,
+ .mwdma_mask = ATA_MWDMA2,
+ .udma_mask = ATA_UDMA6,
+ },
+
+ /* 2: SAMURAI / HT6565 / HINT_IDE */
+ DECLARE_GENERIC_PCI_DEV(0),
+ /* 3: UM8673F / UM8886A / UM8886BF */
+ DECLARE_GENERIC_PCI_DEV(IDE_HFLAGS_UMC),
+ /* 4: VIA_IDE / OPTI621V / Piccolo010{2,3,5} */
+ DECLARE_GENERIC_PCI_DEV(IDE_HFLAG_NO_AUTODMA),
+
+ { /* 5: VIA8237SATA */
+ .name = DRV_NAME,
+ .host_flags = IDE_HFLAG_TRUST_BIOS_FOR_DMA |
+ IDE_HFLAG_OFF_BOARD,
+ .swdma_mask = ATA_SWDMA2,
+ .mwdma_mask = ATA_MWDMA2,
+ .udma_mask = ATA_UDMA6,
+ },
+
+ { /* 6: Revolution */
+ .name = DRV_NAME,
+ .host_flags = IDE_HFLAG_CLEAR_SIMPLEX |
+ IDE_HFLAG_TRUST_BIOS_FOR_DMA |
+ IDE_HFLAG_OFF_BOARD,
+ .swdma_mask = ATA_SWDMA2,
+ .mwdma_mask = ATA_MWDMA2,
+ .udma_mask = ATA_UDMA6,
+ }
+};
+
+/**
+ * generic_init_one - called when a generic PCI IDE controller is found
+ * @dev: the generic device
+ * @id: the matching pci id
+ *
+ * Called when the PCI registration layer (or the IDE initialization)
+ * finds a device matching our IDE device tables.
+ */
+
+static int __devinit generic_init_one(struct pci_dev *dev, const struct pci_device_id *id)
+{
+ const struct ide_port_info *d = &generic_chipsets[id->driver_data];
+ int ret = -ENODEV;
+
+ /* Don't use the generic entry unless instructed to do so */
+ if (id->driver_data == 0 && ide_generic_all == 0)
+ goto out;
+
+ switch (dev->vendor) {
+ case PCI_VENDOR_ID_UMC:
+ if (dev->device == PCI_DEVICE_ID_UMC_UM8886A &&
+ !(PCI_FUNC(dev->devfn) & 1))
+ goto out; /* UM8886A/BF pair */
+ break;
+ case PCI_VENDOR_ID_OPTI:
+ if (dev->device == PCI_DEVICE_ID_OPTI_82C558 &&
+ !(PCI_FUNC(dev->devfn) & 1))
+ goto out;
+ break;
+ case PCI_VENDOR_ID_JMICRON:
+ if (dev->device != PCI_DEVICE_ID_JMICRON_JMB368 &&
+ PCI_FUNC(dev->devfn) != 1)
+ goto out;
+ break;
+ case PCI_VENDOR_ID_NS:
+ if (dev->device == PCI_DEVICE_ID_NS_87410 &&
+ (dev->class >> 8) != PCI_CLASS_STORAGE_IDE)
+ goto out;
+ break;
+ }
+
+ if (dev->vendor != PCI_VENDOR_ID_JMICRON) {
+ u16 command;
+ pci_read_config_word(dev, PCI_COMMAND, &command);
+ if (!(command & PCI_COMMAND_IO)) {
+ printk(KERN_INFO "%s %s: skipping disabled "
+ "controller\n", d->name, pci_name(dev));
+ goto out;
+ }
+ }
+ ret = ide_pci_init_one(dev, d, NULL);
+out:
+ return ret;
+}
+
+static const struct pci_device_id generic_pci_tbl[] = {
+ { PCI_VDEVICE(NS, PCI_DEVICE_ID_NS_87410), 1 },
+ { PCI_VDEVICE(PCTECH, PCI_DEVICE_ID_PCTECH_SAMURAI_IDE), 2 },
+ { PCI_VDEVICE(HOLTEK, PCI_DEVICE_ID_HOLTEK_6565), 2 },
+ { PCI_VDEVICE(UMC, PCI_DEVICE_ID_UMC_UM8673F), 3 },
+ { PCI_VDEVICE(UMC, PCI_DEVICE_ID_UMC_UM8886A), 3 },
+ { PCI_VDEVICE(UMC, PCI_DEVICE_ID_UMC_UM8886BF), 3 },
+ { PCI_VDEVICE(HINT, PCI_DEVICE_ID_HINT_VXPROII_IDE), 2 },
+ { PCI_VDEVICE(VIA, PCI_DEVICE_ID_VIA_82C561), 4 },
+ { PCI_VDEVICE(OPTI, PCI_DEVICE_ID_OPTI_82C558), 4 },
+#ifdef CONFIG_BLK_DEV_IDE_SATA
+ { PCI_VDEVICE(VIA, PCI_DEVICE_ID_VIA_8237_SATA), 5 },
+#endif
+ { PCI_VDEVICE(TOSHIBA, PCI_DEVICE_ID_TOSHIBA_PICCOLO), 4 },
+ { PCI_VDEVICE(TOSHIBA, PCI_DEVICE_ID_TOSHIBA_PICCOLO_1), 4 },
+ { PCI_VDEVICE(TOSHIBA, PCI_DEVICE_ID_TOSHIBA_PICCOLO_2), 4 },
+ { PCI_VDEVICE(NETCELL, PCI_DEVICE_ID_REVOLUTION), 6 },
+ /*
+ * Must come last. If you add entries adjust
+ * this table and generic_chipsets[] appropriately.
+ */
+ { PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_STORAGE_IDE << 8, 0xFFFFFF00UL, 0 },
+ { 0, },
+};
+MODULE_DEVICE_TABLE(pci, generic_pci_tbl);
+
+static struct pci_driver generic_pci_driver = {
+ .name = "PCI_IDE",
+ .id_table = generic_pci_tbl,
+ .probe = generic_init_one,
+ .remove = ide_pci_remove,
+ .suspend = ide_pci_suspend,
+ .resume = ide_pci_resume,
+};
+
+static int __init generic_ide_init(void)
+{
+ return ide_pci_register_driver(&generic_pci_driver);
+}
+
+static void __exit generic_ide_exit(void)
+{
+ pci_unregister_driver(&generic_pci_driver);
+}
+
+module_init(generic_ide_init);
+module_exit(generic_ide_exit);
+
+MODULE_AUTHOR("Andre Hedrick");
+MODULE_DESCRIPTION("PCI driver module for generic PCI IDE");
+MODULE_LICENSE("GPL");
diff --git a/windhoek/ide/ide-pio-blacklist.c b/windhoek/ide/ide-pio-blacklist.c
new file mode 100644
index 00000000..a8c2c8f8
--- /dev/null
+++ b/windhoek/ide/ide-pio-blacklist.c
@@ -0,0 +1,94 @@
+/*
+ * PIO blacklist. Some drives incorrectly report their maximum PIO mode,
+ * at least with respect to CMD640. Here we keep info on some known drives.
+ *
+ * Changes to the ide_pio_blacklist[] should be made with EXTREME CAUTION
+ * to avoid breaking the fragile cmd640.c support.
+ */
+
+#include <linux/string.h>
+
+static struct ide_pio_info {
+ const char *name;
+ int pio;
+} ide_pio_blacklist [] = {
+ { "Conner Peripherals 540MB - CFS540A", 3 },
+
+ { "WDC AC2700", 3 },
+ { "WDC AC2540", 3 },
+ { "WDC AC2420", 3 },
+ { "WDC AC2340", 3 },
+ { "WDC AC2250", 0 },
+ { "WDC AC2200", 0 },
+ { "WDC AC21200", 4 },
+ { "WDC AC2120", 0 },
+ { "WDC AC2850", 3 },
+ { "WDC AC1270", 3 },
+ { "WDC AC1170", 1 },
+ { "WDC AC1210", 1 },
+ { "WDC AC280", 0 },
+ { "WDC AC31000", 3 },
+ { "WDC AC31200", 3 },
+
+ { "Maxtor 7131 AT", 1 },
+ { "Maxtor 7171 AT", 1 },
+ { "Maxtor 7213 AT", 1 },
+ { "Maxtor 7245 AT", 1 },
+ { "Maxtor 7345 AT", 1 },
+ { "Maxtor 7546 AT", 3 },
+ { "Maxtor 7540 AV", 3 },
+
+ { "SAMSUNG SHD-3121A", 1 },
+ { "SAMSUNG SHD-3122A", 1 },
+ { "SAMSUNG SHD-3172A", 1 },
+
+ { "ST5660A", 3 },
+ { "ST3660A", 3 },
+ { "ST3630A", 3 },
+ { "ST3655A", 3 },
+ { "ST3391A", 3 },
+ { "ST3390A", 1 },
+ { "ST3600A", 1 },
+ { "ST3290A", 0 },
+ { "ST3144A", 0 },
+ { "ST3491A", 1 }, /* reports 3, should be 1 or 2 (depending on drive)
+ according to Seagate's FIND-ATA program */
+
+ { "QUANTUM ELS127A", 0 },
+ { "QUANTUM ELS170A", 0 },
+ { "QUANTUM LPS240A", 0 },
+ { "QUANTUM LPS210A", 3 },
+ { "QUANTUM LPS270A", 3 },
+ { "QUANTUM LPS365A", 3 },
+ { "QUANTUM LPS540A", 3 },
+ { "QUANTUM LIGHTNING 540A", 3 },
+ { "QUANTUM LIGHTNING 730A", 3 },
+
+ { "QUANTUM FIREBALL_540", 3 }, /* Older Quantum Fireballs don't work */
+ { "QUANTUM FIREBALL_640", 3 },
+ { "QUANTUM FIREBALL_1080", 3 },
+ { "QUANTUM FIREBALL_1280", 3 },
+ { NULL, 0 }
+};
+
+/**
+ * ide_scan_pio_blacklist - check for a blacklisted drive
+ * @model: Drive model string
+ *
+ * This routine searches the ide_pio_blacklist for an entry
+ * matching the start/whole of the supplied model name.
+ *
+ * Returns -1 if no match found.
+ * Otherwise returns the recommended PIO mode from ide_pio_blacklist[].
+ */
+
+int ide_scan_pio_blacklist(char *model)
+{
+ struct ide_pio_info *p;
+
+ for (p = ide_pio_blacklist; p->name != NULL; p++) {
+ if (strncmp(p->name, model, strlen(p->name)) == 0)
+ return p->pio;
+ }
+ return -1;
+}
diff --git a/windhoek/ide/ide-pm.c b/windhoek/ide/ide-pm.c
new file mode 100644
index 00000000..60538d9c
--- /dev/null
+++ b/windhoek/ide/ide-pm.c
@@ -0,0 +1,239 @@
+#include <linux/kernel.h>
+#include <linux/ide.h>
+#include <linux/hdreg.h>
+
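+/*
+ * Suspend entry point: save ACPI timing once per channel, then push a
+ * REQ_TYPE_PM_SUSPEND request through the queue so the PM steps below
+ * (cache flush, standby) run against the drive, and finally put the
+ * channel into its ACPI off state once both devices are suspended.
+ */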
+int generic_ide_suspend(struct device *dev, pm_message_t mesg)
+{
+ ide_drive_t *drive = dev->driver_data, *pair = ide_get_pair_dev(drive);
+ ide_hwif_t *hwif = drive->hwif;
+ struct request *rq;
+ struct request_pm_state rqpm;
+ ide_task_t args;
+ int ret;
+
+ /* call ACPI _GTM only once */
+ if ((drive->dn & 1) == 0 || pair == NULL)
+ ide_acpi_get_timing(hwif);
+
+ memset(&rqpm, 0, sizeof(rqpm));
+ memset(&args, 0, sizeof(args));
+ rq = blk_get_request(drive->queue, READ, __GFP_WAIT);
+ rq->cmd_type = REQ_TYPE_PM_SUSPEND;
+ rq->special = &args;
+ rq->data = &rqpm;
+ rqpm.pm_step = IDE_PM_START_SUSPEND;
+ if (mesg.event == PM_EVENT_PRETHAW)
+ mesg.event = PM_EVENT_FREEZE;
+ rqpm.pm_state = mesg.event;
+
+ ret = blk_execute_rq(drive->queue, NULL, rq, 0);
+ blk_put_request(rq);
+
+ /* call ACPI _PS3 only after both devices are suspended */
+ if (ret == 0 && ((drive->dn & 1) || pair == NULL))
+ ide_acpi_set_state(hwif, 0);
+
+ return ret;
+}
+
+int generic_ide_resume(struct device *dev)
+{
+ ide_drive_t *drive = dev->driver_data, *pair = ide_get_pair_dev(drive);
+ ide_hwif_t *hwif = drive->hwif;
+ struct request *rq;
+ struct request_pm_state rqpm;
+ ide_task_t args;
+ int err;
+
+ /* call ACPI _PS0 / _STM only once */
+ if ((drive->dn & 1) == 0 || pair == NULL) {
+ ide_acpi_set_state(hwif, 1);
+ ide_acpi_push_timing(hwif);
+ }
+
+ ide_acpi_exec_tfs(drive);
+
+ memset(&rqpm, 0, sizeof(rqpm));
+ memset(&args, 0, sizeof(args));
+ rq = blk_get_request(drive->queue, READ, __GFP_WAIT);
+ rq->cmd_type = REQ_TYPE_PM_RESUME;
+ rq->cmd_flags |= REQ_PREEMPT;
+ rq->special = &args;
+ rq->data = &rqpm;
+ rqpm.pm_step = IDE_PM_START_RESUME;
+ rqpm.pm_state = PM_EVENT_ON;
+
+ err = blk_execute_rq(drive->queue, NULL, rq, 1);
+ blk_put_request(rq);
+
+ if (err == 0 && dev->driver) {
+ struct ide_driver *drv = to_ide_driver(dev->driver);
+
+ if (drv->resume)
+ drv->resume(drive);
+ }
+
+ return err;
+}
+
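+/*
+ * Advance the PM state machine to the next step; only ATA disks walk
+ * through the intermediate suspend/resume steps.
+ */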
+void ide_complete_power_step(ide_drive_t *drive, struct request *rq)
+{
+ struct request_pm_state *pm = rq->data;
+
+#ifdef DEBUG_PM
+ printk(KERN_INFO "%s: complete_power_step(step: %d)\n",
+ drive->name, pm->pm_step);
+#endif
+ if (drive->media != ide_disk)
+ return;
+
+ switch (pm->pm_step) {
+ case IDE_PM_FLUSH_CACHE: /* Suspend step 1 (flush cache) */
+ if (pm->pm_state == PM_EVENT_FREEZE)
+ pm->pm_step = IDE_PM_COMPLETED;
+ else
+ pm->pm_step = IDE_PM_STANDBY;
+ break;
+ case IDE_PM_STANDBY: /* Suspend step 2 (standby) */
+ pm->pm_step = IDE_PM_COMPLETED;
+ break;
+ case IDE_PM_RESTORE_PIO: /* Resume step 1 (restore PIO) */
+ pm->pm_step = IDE_PM_IDLE;
+ break;
+ case IDE_PM_IDLE: /* Resume step 2 (idle)*/
+ pm->pm_step = IDE_PM_RESTORE_DMA;
+ break;
+ }
+}
+
+ide_startstop_t ide_start_power_step(ide_drive_t *drive, struct request *rq)
+{
+ struct request_pm_state *pm = rq->data;
+ ide_task_t *args = rq->special;
+
+ memset(args, 0, sizeof(*args));
+
+ switch (pm->pm_step) {
+ case IDE_PM_FLUSH_CACHE: /* Suspend step 1 (flush cache) */
+ if (drive->media != ide_disk)
+ break;
+ /* Not supported? Switch to next step now. */
+ if (ata_id_flush_enabled(drive->id) == 0 ||
+ (drive->dev_flags & IDE_DFLAG_WCACHE) == 0) {
+ ide_complete_power_step(drive, rq);
+ return ide_stopped;
+ }
+ if (ata_id_flush_ext_enabled(drive->id))
+ args->tf.command = ATA_CMD_FLUSH_EXT;
+ else
+ args->tf.command = ATA_CMD_FLUSH;
+ goto out_do_tf;
+ case IDE_PM_STANDBY: /* Suspend step 2 (standby) */
+ args->tf.command = ATA_CMD_STANDBYNOW1;
+ goto out_do_tf;
+ case IDE_PM_RESTORE_PIO: /* Resume step 1 (restore PIO) */
+ ide_set_max_pio(drive);
+ /*
+ * skip IDE_PM_IDLE for ATAPI devices
+ */
+ if (drive->media != ide_disk)
+ pm->pm_step = IDE_PM_RESTORE_DMA;
+ else
+ ide_complete_power_step(drive, rq);
+ return ide_stopped;
+ case IDE_PM_IDLE: /* Resume step 2 (idle) */
+ args->tf.command = ATA_CMD_IDLEIMMEDIATE;
+ goto out_do_tf;
+ case IDE_PM_RESTORE_DMA: /* Resume step 3 (restore DMA) */
+ /*
+ * Right now, all we do is call ide_set_dma(drive),
+ * we could be smarter and check for current xfer_speed
+ * in struct drive etc...
+ */
+ if (drive->hwif->dma_ops == NULL)
+ break;
+ /*
+ * TODO: respect IDE_DFLAG_USING_DMA
+ */
+ ide_set_dma(drive);
+ break;
+ }
+
+ pm->pm_step = IDE_PM_COMPLETED;
+ return ide_stopped;
+
+out_do_tf:
+ args->tf_flags = IDE_TFLAG_TF | IDE_TFLAG_DEVICE;
+ args->data_phase = TASKFILE_NO_DATA;
+ return do_rw_taskfile(drive, args);
+}
+
+/**
+ * ide_complete_pm_request - end the current Power Management request
+ * @drive: target drive
+ * @rq: request
+ *
+ * This function cleans up the current PM request and stops the queue
+ * if necessary.
+ */
+void ide_complete_pm_request(ide_drive_t *drive, struct request *rq)
+{
+ struct request_queue *q = drive->queue;
+ unsigned long flags;
+
+#ifdef DEBUG_PM
+ printk("%s: completing PM request, %s\n", drive->name,
+ blk_pm_suspend_request(rq) ? "suspend" : "resume");
+#endif
+ spin_lock_irqsave(q->queue_lock, flags);
+ if (blk_pm_suspend_request(rq))
+ blk_stop_queue(q);
+ else
+ drive->dev_flags &= ~IDE_DFLAG_BLOCKED;
+ spin_unlock_irqrestore(q->queue_lock, flags);
+
+ drive->hwif->rq = NULL;
+
+ if (blk_end_request(rq, 0, 0))
+ BUG();
+}
+
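+/*
+ * Called when a PM request is seen by the request handler: mark the drive
+ * blocked at the start of a suspend, and at the start of a resume wait for
+ * the port to become ready (and re-enable its IRQ) before the queue is
+ * restarted.
+ */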
+void ide_check_pm_state(ide_drive_t *drive, struct request *rq)
+{
+ struct request_pm_state *pm = rq->data;
+
+ if (blk_pm_suspend_request(rq) &&
+ pm->pm_step == IDE_PM_START_SUSPEND)
+ /* Mark drive blocked when starting the suspend sequence. */
+ drive->dev_flags |= IDE_DFLAG_BLOCKED;
+ else if (blk_pm_resume_request(rq) &&
+ pm->pm_step == IDE_PM_START_RESUME) {
+ /*
+ * The first thing we do on wakeup is to wait for BSY bit to
+ * go away (with a looong timeout) as a drive on this hwif may
+ * just be POSTing itself.
+ * We do that before even selecting as the "other" device on
+ * the bus may be broken enough to walk on our toes at this
+ * point.
+ */
+ ide_hwif_t *hwif = drive->hwif;
+ struct request_queue *q = drive->queue;
+ unsigned long flags;
+ int rc;
+#ifdef DEBUG_PM
+ printk("%s: Wakeup request inited, waiting for !BSY...\n", drive->name);
+#endif
+ rc = ide_wait_not_busy(hwif, 35000);
+ if (rc)
+ printk(KERN_WARNING "%s: bus not ready on wakeup\n", drive->name);
+ SELECT_DRIVE(drive);
+ hwif->tp_ops->set_irq(hwif, 1);
+ rc = ide_wait_not_busy(hwif, 100000);
+ if (rc)
+ printk(KERN_WARNING "%s: drive not ready on wakeup\n", drive->name);
+
+ spin_lock_irqsave(q->queue_lock, flags);
+ blk_start_queue(q);
+ spin_unlock_irqrestore(q->queue_lock, flags);
+ }
+}
diff --git a/windhoek/ide/ide-pnp.c b/windhoek/ide/ide-pnp.c
new file mode 100644
index 00000000..bac9b392
--- /dev/null
+++ b/windhoek/ide/ide-pnp.c
@@ -0,0 +1,107 @@
+/*
+ * This file provides autodetection for ISA PnP IDE interfaces.
+ * It was tested with the "ESS ES1868 Plug and Play AudioDrive" IDE interface.
+ *
+ * Copyright (C) 2000 Andrey Panin <pazke@donpac.ru>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * You should have received a copy of the GNU General Public License
+ * (for example /usr/src/linux/COPYING); if not, write to the Free
+ * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/init.h>
+#include <linux/pnp.h>
+#include <linux/ide.h>
+
+#define DRV_NAME "ide-pnp"
+
+/* Add your devices here :)) */
+static struct pnp_device_id idepnp_devices[] = {
+ /* Generic ESDI/IDE/ATA compatible hard disk controller */
+ {.id = "PNP0600", .driver_data = 0},
+ {.id = ""}
+};
+
+static int idepnp_probe(struct pnp_dev *dev, const struct pnp_device_id *dev_id)
+{
+ struct ide_host *host;
+ unsigned long base, ctl;
+ int rc;
+ hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL };
+
+ printk(KERN_INFO DRV_NAME ": generic PnP IDE interface\n");
+
+ if (!(pnp_port_valid(dev, 0) && pnp_port_valid(dev, 1) && pnp_irq_valid(dev, 0)))
+ return -1;
+
+ base = pnp_port_start(dev, 0);
+ ctl = pnp_port_start(dev, 1);
+
+ if (!request_region(base, 8, DRV_NAME)) {
+ printk(KERN_ERR "%s: I/O resource 0x%lX-0x%lX not free.\n",
+ DRV_NAME, base, base + 7);
+ return -EBUSY;
+ }
+
+ if (!request_region(ctl, 1, DRV_NAME)) {
+ printk(KERN_ERR "%s: I/O resource 0x%lX not free.\n",
+ DRV_NAME, ctl);
+ release_region(base, 8);
+ return -EBUSY;
+ }
+
+ memset(&hw, 0, sizeof(hw));
+ ide_std_init_ports(&hw, base, ctl);
+ hw.irq = pnp_irq(dev, 0);
+ hw.chipset = ide_generic;
+
+ rc = ide_host_add(NULL, hws, &host);
+ if (rc)
+ goto out;
+
+ pnp_set_drvdata(dev, host);
+
+ return 0;
+out:
+ release_region(ctl, 1);
+ release_region(base, 8);
+
+ return rc;
+}
+
+static void idepnp_remove(struct pnp_dev *dev)
+{
+ struct ide_host *host = pnp_get_drvdata(dev);
+
+ ide_host_remove(host);
+
+ release_region(pnp_port_start(dev, 1), 1);
+ release_region(pnp_port_start(dev, 0), 8);
+}
+
+static struct pnp_driver idepnp_driver = {
+ .name = "ide",
+ .id_table = idepnp_devices,
+ .probe = idepnp_probe,
+ .remove = idepnp_remove,
+};
+
+static int __init pnpide_init(void)
+{
+ return pnp_register_driver(&idepnp_driver);
+}
+
+static void __exit pnpide_exit(void)
+{
+ pnp_unregister_driver(&idepnp_driver);
+}
+
+module_init(pnpide_init);
+module_exit(pnpide_exit);
+
+MODULE_LICENSE("GPL");
diff --git a/windhoek/ide/ide-probe.c b/windhoek/ide/ide-probe.c
new file mode 100644
index 00000000..8ac2aee8
--- /dev/null
+++ b/windhoek/ide/ide-probe.c
@@ -0,0 +1,1739 @@
+/*
+ * Copyright (C) 1994-1998 Linus Torvalds & authors (see below)
+ * Copyright (C) 2005, 2007 Bartlomiej Zolnierkiewicz
+ */
+
+/*
+ * Mostly written by Mark Lord <mlord@pobox.com>
+ * and Gadi Oxman <gadio@netvision.net.il>
+ * and Andre Hedrick <andre@linux-ide.org>
+ *
+ * See linux/MAINTAINERS for address of current maintainer.
+ *
+ * This is the IDE probe module, as evolved from hd.c and ide.c.
+ *
+ * -- increase WAIT_PIDENTIFY to avoid CD-ROM locking at boot
+ * by Andrea Arcangeli
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/kernel.h>
+#include <linux/timer.h>
+#include <linux/mm.h>
+#include <linux/interrupt.h>
+#include <linux/major.h>
+#include <linux/errno.h>
+#include <linux/genhd.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/ide.h>
+#include <linux/spinlock.h>
+#include <linux/kmod.h>
+#include <linux/pci.h>
+#include <linux/scatterlist.h>
+
+#include <asm/byteorder.h>
+#include <asm/irq.h>
+#include <asm/uaccess.h>
+#include <asm/io.h>
+
+#include "local.h"
+
+/**
+ * generic_id - add a generic drive id
+ * @drive: drive to make an ID block for
+ *
+ * Add a fake id field to the drive we are passed. This allows us
+ * to skip a ton of NULL checks (which people always miss)
+ * and makes drive properties unconditional outside of this file.
+ */
+
+static void generic_id(ide_drive_t *drive)
+{
+ u16 *id = drive->id;
+
+ id[ATA_ID_CUR_CYLS] = id[ATA_ID_CYLS] = drive->cyl;
+ id[ATA_ID_CUR_HEADS] = id[ATA_ID_HEADS] = drive->head;
+ id[ATA_ID_CUR_SECTORS] = id[ATA_ID_SECTORS] = drive->sect;
+}
+
+static void ide_disk_init_chs(ide_drive_t *drive)
+{
+ u16 *id = drive->id;
+
+ /* Extract geometry if we did not already have one for the drive */
+ if (!drive->cyl || !drive->head || !drive->sect) {
+ drive->cyl = drive->bios_cyl = id[ATA_ID_CYLS];
+ drive->head = drive->bios_head = id[ATA_ID_HEADS];
+ drive->sect = drive->bios_sect = id[ATA_ID_SECTORS];
+ }
+
+ /* Handle logical geometry translation by the drive */
+ if (ata_id_current_chs_valid(id)) {
+ drive->cyl = id[ATA_ID_CUR_CYLS];
+ drive->head = id[ATA_ID_CUR_HEADS];
+ drive->sect = id[ATA_ID_CUR_SECTORS];
+ }
+
+ /* Use physical geometry if what we have still makes no sense */
+ if (drive->head > 16 && id[ATA_ID_HEADS] && id[ATA_ID_HEADS] <= 16) {
+ drive->cyl = id[ATA_ID_CYLS];
+ drive->head = id[ATA_ID_HEADS];
+ drive->sect = id[ATA_ID_SECTORS];
+ }
+}
+
+static void ide_disk_init_mult_count(ide_drive_t *drive)
+{
+ u16 *id = drive->id;
+ u8 max_multsect = id[ATA_ID_MAX_MULTSECT] & 0xff;
+
+ if (max_multsect) {
+ if ((max_multsect / 2) > 1)
+ id[ATA_ID_MULTSECT] = max_multsect | 0x100;
+ else
+ id[ATA_ID_MULTSECT] &= ~0x1ff;
+
+ drive->mult_req = id[ATA_ID_MULTSECT] & 0xff;
+
+ if (drive->mult_req)
+ drive->special.b.set_multmode = 1;
+ }
+}
+
+static void ide_classify_ata_dev(ide_drive_t *drive)
+{
+ u16 *id = drive->id;
+ char *m = (char *)&id[ATA_ID_PROD];
+ int is_cfa = ata_id_is_cfa(id);
+
+ /* CF devices are *not* removable in Linux definition of the term */
+ if (is_cfa == 0 && (id[ATA_ID_CONFIG] & (1 << 7)))
+ drive->dev_flags |= IDE_DFLAG_REMOVABLE;
+
+ drive->media = ide_disk;
+
+ if (!ata_id_has_unload(drive->id))
+ drive->dev_flags |= IDE_DFLAG_NO_UNLOAD;
+
+ printk(KERN_INFO "%s: %s, %s DISK drive\n", drive->name, m,
+ is_cfa ? "CFA" : "ATA");
+}
+
+static void ide_classify_atapi_dev(ide_drive_t *drive)
+{
+ u16 *id = drive->id;
+ char *m = (char *)&id[ATA_ID_PROD];
+ u8 type = (id[ATA_ID_CONFIG] >> 8) & 0x1f;
+
+ printk(KERN_INFO "%s: %s, ATAPI ", drive->name, m);
+ switch (type) {
+ case ide_floppy:
+ if (!strstr(m, "CD-ROM")) {
+ if (!strstr(m, "oppy") &&
+ !strstr(m, "poyp") &&
+ !strstr(m, "ZIP"))
+ printk(KERN_CONT "cdrom or floppy?, assuming ");
+ if (drive->media != ide_cdrom) {
+ printk(KERN_CONT "FLOPPY");
+ drive->dev_flags |= IDE_DFLAG_REMOVABLE;
+ break;
+ }
+ }
+ /* Early cdrom models used zero */
+ type = ide_cdrom;
+ case ide_cdrom:
+ drive->dev_flags |= IDE_DFLAG_REMOVABLE;
+#ifdef CONFIG_PPC
+ /* kludge for Apple PowerBook internal zip */
+ if (!strstr(m, "CD-ROM") && strstr(m, "ZIP")) {
+ printk(KERN_CONT "FLOPPY");
+ type = ide_floppy;
+ break;
+ }
+#endif
+ printk(KERN_CONT "CD/DVD-ROM");
+ break;
+ case ide_tape:
+ printk(KERN_CONT "TAPE");
+ break;
+ case ide_optical:
+ printk(KERN_CONT "OPTICAL");
+ drive->dev_flags |= IDE_DFLAG_REMOVABLE;
+ break;
+ default:
+ printk(KERN_CONT "UNKNOWN (type %d)", type);
+ break;
+ }
+
+ printk(KERN_CONT " drive\n");
+ drive->media = type;
+ /* an ATAPI device ignores DRDY */
+ drive->ready_stat = 0;
+ if (ata_id_cdb_intr(id))
+ drive->atapi_flags |= IDE_AFLAG_DRQ_INTERRUPT;
+ drive->dev_flags |= IDE_DFLAG_DOORLOCKING;
+ /* we don't do head unloading on ATAPI devices */
+ drive->dev_flags |= IDE_DFLAG_NO_UNLOAD;
+}
+
+/**
+ * do_identify - identify a drive
+ * @drive: drive to identify
+ * @cmd: command used
+ *
+ * Called when we have issued a drive identify command to
+ * read and parse the results. This function is run with
+ * interrupts disabled.
+ */
+
+static void do_identify(ide_drive_t *drive, u8 cmd)
+{
+ ide_hwif_t *hwif = drive->hwif;
+ u16 *id = drive->id;
+ char *m = (char *)&id[ATA_ID_PROD];
+ unsigned long flags;
+ int bswap = 1;
+
+ /* local CPU only; some systems need this */
+ local_irq_save(flags);
+ /* read 512 bytes of id info */
+ hwif->tp_ops->input_data(drive, NULL, id, SECTOR_SIZE);
+ local_irq_restore(flags);
+
+ drive->dev_flags |= IDE_DFLAG_ID_READ;
+#ifdef DEBUG
+ printk(KERN_INFO "%s: dumping identify data\n", drive->name);
+ ide_dump_identify((u8 *)id);
+#endif
+ ide_fix_driveid(id);
+
+ /*
+ * ATA_CMD_ID_ATA returns little-endian info,
+ * ATA_CMD_ID_ATAPI *usually* returns little-endian info.
+ */
+ if (cmd == ATA_CMD_ID_ATAPI) {
+ if ((m[0] == 'N' && m[1] == 'E') || /* NEC */
+ (m[0] == 'F' && m[1] == 'X') || /* Mitsumi */
+ (m[0] == 'P' && m[1] == 'i')) /* Pioneer */
+ /* Vertos drives may still be weird */
+ bswap ^= 1;
+ }
+
+ ide_fixstring(m, ATA_ID_PROD_LEN, bswap);
+ ide_fixstring((char *)&id[ATA_ID_FW_REV], ATA_ID_FW_REV_LEN, bswap);
+ ide_fixstring((char *)&id[ATA_ID_SERNO], ATA_ID_SERNO_LEN, bswap);
+
+ /* we depend on this a lot! */
+ m[ATA_ID_PROD_LEN - 1] = '\0';
+
+ if (strstr(m, "E X A B Y T E N E S T"))
+ goto err_misc;
+
+ drive->dev_flags |= IDE_DFLAG_PRESENT;
+ drive->dev_flags &= ~IDE_DFLAG_DEAD;
+
+ /*
+ * Check for an ATAPI device
+ */
+ if (cmd == ATA_CMD_ID_ATAPI)
+ ide_classify_atapi_dev(drive);
+ else
+ /*
+ * Not an ATAPI device: looks like a "regular" hard disk
+ */
+ ide_classify_ata_dev(drive);
+ return;
+err_misc:
+ kfree(id);
+ drive->dev_flags &= ~IDE_DFLAG_PRESENT;
+}
+
+/**
+ * actual_try_to_identify - send ata/atapi identify
+ * @drive: drive to identify
+ * @cmd: command to use
+ *
+ * try_to_identify() sends an ATA(PI) IDENTIFY request to a drive
+ * and waits for a response. It also monitors irqs while this is
+ * happening, in hope of automatically determining which one is
+ * being used by the interface.
+ *
+ * Returns: 0 device was identified
+ * 1 device timed-out (no response to identify request)
+ * 2 device aborted the command (refused to identify itself)
+ */
+
+static int actual_try_to_identify (ide_drive_t *drive, u8 cmd)
+{
+ ide_hwif_t *hwif = drive->hwif;
+ struct ide_io_ports *io_ports = &hwif->io_ports;
+ const struct ide_tp_ops *tp_ops = hwif->tp_ops;
+ int use_altstatus = 0, rc;
+ unsigned long timeout;
+ u8 s = 0, a = 0;
+
+ /* take a deep breath */
+ msleep(50);
+
+ if (io_ports->ctl_addr &&
+ (hwif->host_flags & IDE_HFLAG_BROKEN_ALTSTATUS) == 0) {
+ a = tp_ops->read_altstatus(hwif);
+ s = tp_ops->read_status(hwif);
+ if ((a ^ s) & ~ATA_IDX)
+ /* ancient Seagate drives, broken interfaces */
+ printk(KERN_INFO "%s: probing with STATUS(0x%02x) "
+ "instead of ALTSTATUS(0x%02x)\n",
+ drive->name, s, a);
+ else
+ /* use non-intrusive polling */
+ use_altstatus = 1;
+ }
+
+ /* set features register for atapi
+ * identify command to be sure of reply
+ */
+ if (cmd == ATA_CMD_ID_ATAPI) {
+ ide_task_t task;
+
+ memset(&task, 0, sizeof(task));
+ /* disable DMA & overlap */
+ task.tf_flags = IDE_TFLAG_OUT_FEATURE;
+
+ tp_ops->tf_load(drive, &task);
+ }
+
+ /* ask drive for ID */
+ tp_ops->exec_command(hwif, cmd);
+
+ timeout = ((cmd == ATA_CMD_ID_ATA) ? WAIT_WORSTCASE : WAIT_PIDENTIFY) / 2;
+
+ if (ide_busy_sleep(hwif, timeout, use_altstatus))
+ return 1;
+
+ /* wait for IRQ and ATA_DRQ */
+ msleep(50);
+ s = tp_ops->read_status(hwif);
+
+ if (OK_STAT(s, ATA_DRQ, BAD_R_STAT)) {
+ /* drive returned ID */
+ do_identify(drive, cmd);
+ /* drive responded with ID */
+ rc = 0;
+ /* clear drive IRQ */
+ (void)tp_ops->read_status(hwif);
+ } else {
+ /* drive refused ID */
+ rc = 2;
+ }
+ return rc;
+}
+
+/**
+ * try_to_identify - try to identify a drive
+ * @drive: drive to probe
+ * @cmd: command to use
+ *
+ * Issue the identify command and then do IRQ probing to
+ * complete the identification when needed by finding the
+ * IRQ the drive is attached to
+ */
+
+static int try_to_identify (ide_drive_t *drive, u8 cmd)
+{
+ ide_hwif_t *hwif = drive->hwif;
+ const struct ide_tp_ops *tp_ops = hwif->tp_ops;
+ int retval;
+ int autoprobe = 0;
+ unsigned long cookie = 0;
+
+ /*
+ * Disable device irq unless we need to
+ * probe for it. Otherwise we'll get spurious
+ * interrupts during the identify-phase that
+ * the irq handler isn't expecting.
+ */
+ if (hwif->io_ports.ctl_addr) {
+ if (!hwif->irq) {
+ autoprobe = 1;
+ cookie = probe_irq_on();
+ }
+ tp_ops->set_irq(hwif, autoprobe);
+ }
+
+ retval = actual_try_to_identify(drive, cmd);
+
+ if (autoprobe) {
+ int irq;
+
+ tp_ops->set_irq(hwif, 0);
+ /* clear drive IRQ */
+ (void)tp_ops->read_status(hwif);
+ udelay(5);
+ irq = probe_irq_off(cookie);
+ if (!hwif->irq) {
+ if (irq > 0) {
+ hwif->irq = irq;
+ } else {
+ /* Mmmm.. multiple IRQs..
+ * don't know which was ours
+ */
+ printk(KERN_ERR "%s: IRQ probe failed (0x%lx)\n",
+ drive->name, cookie);
+ }
+ }
+ }
+ return retval;
+}
+
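+/*
+ * Poll the (alt)status register in 50ms steps until BSY clears or the
+ * timeout expires. Returns 0 when the drive became ready, 1 on timeout.
+ */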
+int ide_busy_sleep(ide_hwif_t *hwif, unsigned long timeout, int altstatus)
+{
+ u8 stat;
+
+ timeout += jiffies;
+
+ do {
+ msleep(50); /* give drive a breather */
+ stat = altstatus ? hwif->tp_ops->read_altstatus(hwif)
+ : hwif->tp_ops->read_status(hwif);
+ if ((stat & ATA_BUSY) == 0)
+ return 0;
+ } while (time_before(jiffies, timeout));
+
+ return 1; /* drive timed-out */
+}
+
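+/* Read back the device/head register via ->tf_read. */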
+static u8 ide_read_device(ide_drive_t *drive)
+{
+ ide_task_t task;
+
+ memset(&task, 0, sizeof(task));
+ task.tf_flags = IDE_TFLAG_IN_DEVICE;
+
+ drive->hwif->tp_ops->tf_read(drive, &task);
+
+ return task.tf.device;
+}
+
+/**
+ * do_probe - probe an IDE device
+ * @drive: drive to probe
+ * @cmd: command to use
+ *
+ * do_probe() has the difficult job of finding a drive if it exists,
+ * without getting hung up if it doesn't exist, without trampling on
+ * ethernet cards, and without leaving any IRQs dangling to haunt us later.
+ *
+ * If a drive is "known" to exist (from CMOS or kernel parameters),
+ * but does not respond right away, the probe will "hang in there"
+ * for the maximum wait time (about 30 seconds), otherwise it will
+ * exit much more quickly.
+ *
+ * Returns: 0 device was identified
+ * 1 device timed-out (no response to identify request)
+ * 2 device aborted the command (refused to identify itself)
+ * 3 bad status from device (possible for ATAPI drives)
+ * 4 probe was not attempted because failure was obvious
+ */
+
+static int do_probe (ide_drive_t *drive, u8 cmd)
+{
+ ide_hwif_t *hwif = drive->hwif;
+ const struct ide_tp_ops *tp_ops = hwif->tp_ops;
+ int rc;
+ u8 present = !!(drive->dev_flags & IDE_DFLAG_PRESENT), stat;
+
+ /* avoid waiting for inappropriate probes */
+ if (present && drive->media != ide_disk && cmd == ATA_CMD_ID_ATA)
+ return 4;
+
+#ifdef DEBUG
+ printk(KERN_INFO "probing for %s: present=%d, media=%d, probetype=%s\n",
+ drive->name, present, drive->media,
+ (cmd == ATA_CMD_ID_ATA) ? "ATA" : "ATAPI");
+#endif
+
+ /* needed for some systems
+ * (e.g. crw9624 as drive0 with disk as slave)
+ */
+ msleep(50);
+ SELECT_DRIVE(drive);
+ msleep(50);
+
+ if (ide_read_device(drive) != drive->select && present == 0) {
+ if (drive->dn & 1) {
+ /* exit with drive0 selected */
+ SELECT_DRIVE(hwif->devices[0]);
+ /* allow ATA_BUSY to assert & clear */
+ msleep(50);
+ }
+ /* no i/f present: mmm.. this should be a 4 -ml */
+ return 3;
+ }
+
+ stat = tp_ops->read_status(hwif);
+
+ if (OK_STAT(stat, ATA_DRDY, ATA_BUSY) ||
+ present || cmd == ATA_CMD_ID_ATAPI) {
+ /* send cmd and wait */
+ if ((rc = try_to_identify(drive, cmd))) {
+ /* failed: try again */
+ rc = try_to_identify(drive,cmd);
+ }
+
+ stat = tp_ops->read_status(hwif);
+
+ if (stat == (ATA_BUSY | ATA_DRDY))
+ return 4;
+
+ if (rc == 1 && cmd == ATA_CMD_ID_ATAPI) {
+ printk(KERN_ERR "%s: no response (status = 0x%02x), "
+ "resetting drive\n", drive->name, stat);
+ msleep(50);
+ SELECT_DRIVE(drive);
+ msleep(50);
+ tp_ops->exec_command(hwif, ATA_CMD_DEV_RESET);
+ (void)ide_busy_sleep(hwif, WAIT_WORSTCASE, 0);
+ rc = try_to_identify(drive, cmd);
+ }
+
+ /* ensure drive IRQ is clear */
+ stat = tp_ops->read_status(hwif);
+
+ if (rc == 1)
+ printk(KERN_ERR "%s: no response (status = 0x%02x)\n",
+ drive->name, stat);
+ } else {
+ /* not present or maybe ATAPI */
+ rc = 3;
+ }
+ if (drive->dn & 1) {
+ /* exit with drive0 selected */
+ SELECT_DRIVE(hwif->devices[0]);
+ msleep(50);
+ /* ensure drive irq is clear */
+ (void)tp_ops->read_status(hwif);
+ }
+ return rc;
+}
+
+/*
+ * enable_nest() sends the Exabyte ENABLE NEST command so that the device
+ * hidden behind an Exabyte Nest adapter shows up on the bus; the probe
+ * code then retries identification (see probe_for_drive()).
+ */
+static void enable_nest (ide_drive_t *drive)
+{
+ ide_hwif_t *hwif = drive->hwif;
+ const struct ide_tp_ops *tp_ops = hwif->tp_ops;
+ u8 stat;
+
+ printk(KERN_INFO "%s: enabling %s -- ",
+ hwif->name, (char *)&drive->id[ATA_ID_PROD]);
+
+ SELECT_DRIVE(drive);
+ msleep(50);
+ tp_ops->exec_command(hwif, ATA_EXABYTE_ENABLE_NEST);
+
+ if (ide_busy_sleep(hwif, WAIT_WORSTCASE, 0)) {
+ printk(KERN_CONT "failed (timeout)\n");
+ return;
+ }
+
+ msleep(50);
+
+ stat = tp_ops->read_status(hwif);
+
+ if (!OK_STAT(stat, 0, BAD_STAT))
+ printk(KERN_CONT "failed (status = 0x%02x)\n", stat);
+ else
+ printk(KERN_CONT "success\n");
+}
+
+/**
+ * probe_for_drive - upper level drive probe
+ * @drive: drive to probe for
+ *
+ * probe_for_drive() tests for existence of a given drive using do_probe()
+ * and presents things to the user as needed.
+ *
+ * Returns: 0 no device was found
+ * 1 device was found
+ * (note: IDE_DFLAG_PRESENT might still be not set)
+ */
+
+static u8 probe_for_drive(ide_drive_t *drive)
+{
+ char *m;
+
+ /*
+ * In order to keep things simple we have an id
+ * block for all drives at all times. If the device
+	 * is pre-ATA or refuses ATA/ATAPI identify we
+ * will add faked data to this.
+ *
+ * Also note that 0 everywhere means "can't do X"
+ */
+
+ drive->dev_flags &= ~IDE_DFLAG_ID_READ;
+
+ drive->id = kzalloc(SECTOR_SIZE, GFP_KERNEL);
+ if (drive->id == NULL) {
+ printk(KERN_ERR "ide: out of memory for id data.\n");
+ return 0;
+ }
+
+ m = (char *)&drive->id[ATA_ID_PROD];
+ strcpy(m, "UNKNOWN");
+
+ /* skip probing? */
+ if ((drive->dev_flags & IDE_DFLAG_NOPROBE) == 0) {
+retry:
+ /* if !(success||timed-out) */
+ if (do_probe(drive, ATA_CMD_ID_ATA) >= 2)
+ /* look for ATAPI device */
+ (void)do_probe(drive, ATA_CMD_ID_ATAPI);
+
+ if ((drive->dev_flags & IDE_DFLAG_PRESENT) == 0)
+ /* drive not found */
+ return 0;
+
+ if (strstr(m, "E X A B Y T E N E S T")) {
+ enable_nest(drive);
+ goto retry;
+ }
+
+ /* identification failed? */
+ if ((drive->dev_flags & IDE_DFLAG_ID_READ) == 0) {
+ if (drive->media == ide_disk) {
+ printk(KERN_INFO "%s: non-IDE drive, CHS=%d/%d/%d\n",
+ drive->name, drive->cyl,
+ drive->head, drive->sect);
+ } else if (drive->media == ide_cdrom) {
+ printk(KERN_INFO "%s: ATAPI cdrom (?)\n", drive->name);
+ } else {
+ /* nuke it */
+ printk(KERN_WARNING "%s: Unknown device on bus refused identification. Ignoring.\n", drive->name);
+ drive->dev_flags &= ~IDE_DFLAG_PRESENT;
+ }
+ }
+ /* drive was found */
+ }
+
+ if ((drive->dev_flags & IDE_DFLAG_PRESENT) == 0)
+ return 0;
+
+ /* The drive wasn't being helpful. Add generic info only */
+ if ((drive->dev_flags & IDE_DFLAG_ID_READ) == 0) {
+ generic_id(drive);
+ return 1;
+ }
+
+ if (drive->media == ide_disk) {
+ ide_disk_init_chs(drive);
+ ide_disk_init_mult_count(drive);
+ }
+
+ return !!(drive->dev_flags & IDE_DFLAG_PRESENT);
+}
+
+static void hwif_release_dev(struct device *dev)
+{
+ ide_hwif_t *hwif = container_of(dev, ide_hwif_t, gendev);
+
+ complete(&hwif->gendev_rel_comp);
+}
+
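+/*
+ * Register the port with the driver core and create its ide_port_class
+ * device so that the port shows up in sysfs.
+ */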
+static int ide_register_port(ide_hwif_t *hwif)
+{
+ int ret;
+
+ /* register with global device tree */
+ dev_set_name(&hwif->gendev, hwif->name);
+ hwif->gendev.driver_data = hwif;
+ if (hwif->gendev.parent == NULL)
+ hwif->gendev.parent = hwif->dev;
+ hwif->gendev.release = hwif_release_dev;
+
+ ret = device_register(&hwif->gendev);
+ if (ret < 0) {
+ printk(KERN_WARNING "IDE: %s: device_register error: %d\n",
+ __func__, ret);
+ goto out;
+ }
+
+ hwif->portdev = device_create(ide_port_class, &hwif->gendev,
+ MKDEV(0, 0), hwif, hwif->name);
+ if (IS_ERR(hwif->portdev)) {
+ ret = PTR_ERR(hwif->portdev);
+ device_unregister(&hwif->gendev);
+ }
+out:
+ return ret;
+}
+
+/**
+ * ide_port_wait_ready - wait for port to become ready
+ * @hwif: IDE port
+ *
+ * This is needed on some PPCs and a bunch of BIOS-less embedded
+ * platforms. Typical cases are:
+ *
+ * - The firmware hard-reset the disk just before booting the kernel;
+ * the drive is still doing its power-on reset sequence, and that
+ * can take up to 30 seconds.
+ *
+ * - The firmware does nothing (or no firmware), the device is
+ * still in POST state (same as above actually).
+ *
+ * - Some CD/DVD/Writer combo drives tend to drive the bus during
+ * their reset sequence even when they are non-selected slave
+ * devices, thus preventing discovery of the main HD.
+ *
+ * Doing this wait-for-non-busy should not harm any existing
+ * configuration and fix some issues like the above.
+ *
+ * BenH.
+ *
+ * Returns 0 on success, error code (< 0) otherwise.
+ */
+
+static int ide_port_wait_ready(ide_hwif_t *hwif)
+{
+ ide_drive_t *drive;
+ int i, rc;
+
+ printk(KERN_DEBUG "Probing IDE interface %s...\n", hwif->name);
+
+ /* Let HW settle down a bit from whatever init state we
+ * come from */
+ mdelay(2);
+
+ /* Wait for the BSY bit to go away. The spec timeout is 30 seconds,
+ * but I know of at least one disk that takes 31 seconds, so use 35
+ * here to be safe.
+ */
+ rc = ide_wait_not_busy(hwif, 35000);
+ if (rc)
+ return rc;
+
+ /* Now make sure both master & slave are ready */
+ ide_port_for_each_dev(i, drive, hwif) {
+ /* Ignore disks that we will not probe for later. */
+ if ((drive->dev_flags & IDE_DFLAG_NOPROBE) == 0 ||
+ (drive->dev_flags & IDE_DFLAG_PRESENT)) {
+ SELECT_DRIVE(drive);
+ hwif->tp_ops->set_irq(hwif, 1);
+ mdelay(2);
+ rc = ide_wait_not_busy(hwif, 35000);
+ if (rc)
+ goto out;
+ } else
+ printk(KERN_DEBUG "%s: ide_wait_not_busy() skipped\n",
+ drive->name);
+ }
+out:
+ /* Exit function with master reselected (let's be sane) */
+ if (i)
+ SELECT_DRIVE(hwif->devices[0]);
+
+ return rc;
+}
+
+/**
+ * ide_undecoded_slave - look for bad CF adapters
+ * @dev1: slave device
+ *
+ * Analyse the drives on the interface and attempt to decide if we
+ * have the same drive viewed twice. This occurs with crap CF adapters
+ * and PCMCIA sometimes.
+ */
+
+void ide_undecoded_slave(ide_drive_t *dev1)
+{
+ ide_drive_t *dev0 = dev1->hwif->devices[0];
+
+ if ((dev1->dn & 1) == 0 || (dev0->dev_flags & IDE_DFLAG_PRESENT) == 0)
+ return;
+
+ /* If the models don't match they are not the same product */
+ if (strcmp((char *)&dev0->id[ATA_ID_PROD],
+ (char *)&dev1->id[ATA_ID_PROD]))
+ return;
+
+ /* Serial numbers do not match */
+ if (strncmp((char *)&dev0->id[ATA_ID_SERNO],
+ (char *)&dev1->id[ATA_ID_SERNO], ATA_ID_SERNO_LEN))
+ return;
+
+ /* No serial number, thankfully very rare for CF */
+ if (*(char *)&dev0->id[ATA_ID_SERNO] == 0)
+ return;
+
+ /* Appears to be an IDE flash adapter with decode bugs */
+ printk(KERN_WARNING "ide-probe: ignoring undecoded slave\n");
+
+ dev1->dev_flags &= ~IDE_DFLAG_PRESENT;
+}
+
+EXPORT_SYMBOL_GPL(ide_undecoded_slave);
+
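+/*
+ * Probe both devices on the port with the port IRQ masked off.  Returns 0
+ * if at least one device was found, -EACCES if probing is disabled for
+ * both devices, and -ENODEV otherwise.
+ */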
+static int ide_probe_port(ide_hwif_t *hwif)
+{
+ ide_drive_t *drive;
+ unsigned long flags;
+ unsigned int irqd;
+ int i, rc = -ENODEV;
+
+ BUG_ON(hwif->present);
+
+ if ((hwif->devices[0]->dev_flags & IDE_DFLAG_NOPROBE) &&
+ (hwif->devices[1]->dev_flags & IDE_DFLAG_NOPROBE))
+ return -EACCES;
+
+ /*
+ * We must always disable IRQ, as probe_for_drive will assert IRQ, but
+ * we'll install our IRQ driver much later...
+ */
+ irqd = hwif->irq;
+ if (irqd)
+ disable_irq(hwif->irq);
+
+ local_save_flags(flags);
+ local_irq_enable_in_hardirq();
+
+ if (ide_port_wait_ready(hwif) == -EBUSY)
+ printk(KERN_DEBUG "%s: Wait for ready failed before probe !\n", hwif->name);
+
+ /*
+ * Second drive should only exist if first drive was found,
+ * but a lot of cdrom drives are configured as single slaves.
+ */
+ ide_port_for_each_dev(i, drive, hwif) {
+ (void) probe_for_drive(drive);
+ if (drive->dev_flags & IDE_DFLAG_PRESENT)
+ rc = 0;
+ }
+
+ local_irq_restore(flags);
+
+ /*
+ * Use cached IRQ number. It might be (and is...) changed by probe
+ * code above
+ */
+ if (irqd)
+ enable_irq(irqd);
+
+ return rc;
+}
+
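+/*
+ * Apply chipset quirks, then set up PIO/DMA transfer modes and the 32-bit
+ * I/O policy for the devices on the port.
+ */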
+static void ide_port_tune_devices(ide_hwif_t *hwif)
+{
+ const struct ide_port_ops *port_ops = hwif->port_ops;
+ ide_drive_t *drive;
+ int i;
+
+ ide_port_for_each_dev(i, drive, hwif) {
+ if (drive->dev_flags & IDE_DFLAG_PRESENT) {
+ if (port_ops && port_ops->quirkproc)
+ port_ops->quirkproc(drive);
+ }
+ }
+
+ ide_port_for_each_dev(i, drive, hwif) {
+ if (drive->dev_flags & IDE_DFLAG_PRESENT) {
+ ide_set_max_pio(drive);
+
+ drive->dev_flags |= IDE_DFLAG_NICE1;
+
+ if (hwif->dma_ops)
+ ide_set_dma(drive);
+ }
+ }
+
+ ide_port_for_each_dev(i, drive, hwif) {
+ if (hwif->host_flags & IDE_HFLAG_NO_IO_32BIT)
+ drive->dev_flags |= IDE_DFLAG_NO_IO_32BIT;
+ else
+ drive->dev_flags &= ~IDE_DFLAG_NO_IO_32BIT;
+ }
+}
+
+/*
+ * init request queue
+ */
+static int ide_init_queue(ide_drive_t *drive)
+{
+ struct request_queue *q;
+ ide_hwif_t *hwif = drive->hwif;
+ int max_sectors = 256;
+ int max_sg_entries = PRD_ENTRIES;
+
+ /*
+ * Our default setup assumes the normal IDE case, that is 64K
+ * segmenting, standard PRD setup and LBA28.  Some drivers then
+ * impose their own limits; for LBA48 we could raise the limit,
+ * but as yet we do not.
+ */
+
+ q = blk_init_queue_node(do_ide_request, NULL, hwif_to_node(hwif));
+ if (!q)
+ return 1;
+
+ q->queuedata = drive;
+ blk_queue_segment_boundary(q, 0xffff);
+
+ if (hwif->rqsize < max_sectors)
+ max_sectors = hwif->rqsize;
+ blk_queue_max_sectors(q, max_sectors);
+
+#ifdef CONFIG_PCI
+ /* When we have an IOMMU, we may have a problem where pci_map_sg()
+ * creates segments that don't completely match our boundary
+ * requirements and thus need to be broken up again. Because it
+ * doesn't align properly either, we may actually have to break up
+ * into more segments than we got in the first place; the worst
+ * case is twice as many.
+ * This will be fixed once we teach pci_map_sg() about our boundary
+ * requirements, hopefully soon. *FIXME*
+ */
+ if (!PCI_DMA_BUS_IS_PHYS)
+ max_sg_entries >>= 1;
+#endif /* CONFIG_PCI */
+
+ blk_queue_max_hw_segments(q, max_sg_entries);
+ blk_queue_max_phys_segments(q, max_sg_entries);
+
+ /* assign drive queue */
+ drive->queue = q;
+
+ /* needs drive->queue to be set */
+ ide_toggle_bounce(drive, 1);
+
+ return 0;
+}
+
+static DEFINE_MUTEX(ide_cfg_mtx);
+
+/*
+ * For any present drive:
+ * - allocate the block device queue
+ */
+static int ide_port_setup_devices(ide_hwif_t *hwif)
+{
+ ide_drive_t *drive;
+ int i, j = 0;
+
+ mutex_lock(&ide_cfg_mtx);
+ ide_port_for_each_dev(i, drive, hwif) {
+ if ((drive->dev_flags & IDE_DFLAG_PRESENT) == 0)
+ continue;
+
+ if (ide_init_queue(drive)) {
+ printk(KERN_ERR "ide: failed to init %s\n",
+ drive->name);
+ kfree(drive->id);
+ drive->id = NULL;
+ drive->dev_flags &= ~IDE_DFLAG_PRESENT;
+ continue;
+ }
+
+ j++;
+ }
+ mutex_unlock(&ide_cfg_mtx);
+
+ return j;
+}
+
+/*
+ * This routine sets up the IRQ for an IDE interface.
+ */
+static int init_irq (ide_hwif_t *hwif)
+{
+ struct ide_io_ports *io_ports = &hwif->io_ports;
+ irq_handler_t irq_handler;
+ int sa = 0;
+
+ mutex_lock(&ide_cfg_mtx);
+ spin_lock_init(&hwif->lock);
+
+ init_timer(&hwif->timer);
+ hwif->timer.function = &ide_timer_expiry;
+ hwif->timer.data = (unsigned long)hwif;
+
+ irq_handler = hwif->host->irq_handler;
+ if (irq_handler == NULL)
+ irq_handler = ide_intr;
+
+#if defined(__mc68000__)
+ sa = IRQF_SHARED;
+#endif /* __mc68000__ */
+
+ if (hwif->chipset == ide_pci)
+ sa = IRQF_SHARED;
+
+ if (io_ports->ctl_addr)
+ hwif->tp_ops->set_irq(hwif, 1);
+
+ if (request_irq(hwif->irq, irq_handler, sa, hwif->name, hwif))
+ goto out_up;
+
+ if (!hwif->rqsize) {
+ if ((hwif->host_flags & IDE_HFLAG_NO_LBA48) ||
+ (hwif->host_flags & IDE_HFLAG_NO_LBA48_DMA))
+ hwif->rqsize = 256;
+ else
+ hwif->rqsize = 65536;
+ }
+
+#if !defined(__mc68000__)
+ printk(KERN_INFO "%s at 0x%03lx-0x%03lx,0x%03lx on irq %d", hwif->name,
+ io_ports->data_addr, io_ports->status_addr,
+ io_ports->ctl_addr, hwif->irq);
+#else
+ printk(KERN_INFO "%s at 0x%08lx on irq %d", hwif->name,
+ io_ports->data_addr, hwif->irq);
+#endif /* __mc68000__ */
+ if (hwif->host->host_flags & IDE_HFLAG_SERIALIZE)
+ printk(KERN_CONT " (serialized)");
+ printk(KERN_CONT "\n");
+
+ mutex_unlock(&ide_cfg_mtx);
+ return 0;
+out_up:
+ mutex_unlock(&ide_cfg_mtx);
+ return 1;
+}
+
+static int ata_lock(dev_t dev, void *data)
+{
+ /* FIXME: we want to pin hwif down */
+ return 0;
+}
+
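+/*
+ * Block device region probe callback: request the media-specific driver
+ * module for the drive that owns the opened minor.  Always returns NULL.
+ */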
+static struct kobject *ata_probe(dev_t dev, int *part, void *data)
+{
+ ide_hwif_t *hwif = data;
+ int unit = *part >> PARTN_BITS;
+ ide_drive_t *drive = hwif->devices[unit];
+
+ if ((drive->dev_flags & IDE_DFLAG_PRESENT) == 0)
+ return NULL;
+
+ if (drive->media == ide_disk)
+ request_module("ide-disk");
+ if (drive->media == ide_cdrom || drive->media == ide_optical)
+ request_module("ide-cd");
+ if (drive->media == ide_tape)
+ request_module("ide-tape");
+ if (drive->media == ide_floppy)
+ request_module("ide-floppy");
+
+ return NULL;
+}
+
+static struct kobject *exact_match(dev_t dev, int *part, void *data)
+{
+ struct gendisk *p = data;
+ *part &= (1 << PARTN_BITS) - 1;
+ return &disk_to_dev(p)->kobj;
+}
+
+static int exact_lock(dev_t dev, void *data)
+{
+ struct gendisk *p = data;
+
+ if (!get_disk(p))
+ return -1;
+ return 0;
+}
+
+void ide_register_region(struct gendisk *disk)
+{
+ blk_register_region(MKDEV(disk->major, disk->first_minor),
+ disk->minors, NULL, exact_match, exact_lock, disk);
+}
+
+EXPORT_SYMBOL_GPL(ide_register_region);
+
+void ide_unregister_region(struct gendisk *disk)
+{
+ blk_unregister_region(MKDEV(disk->major, disk->first_minor),
+ disk->minors);
+}
+
+EXPORT_SYMBOL_GPL(ide_unregister_region);
+
+void ide_init_disk(struct gendisk *disk, ide_drive_t *drive)
+{
+ ide_hwif_t *hwif = drive->hwif;
+ unsigned int unit = drive->dn & 1;
+
+ disk->major = hwif->major;
+ disk->first_minor = unit << PARTN_BITS;
+ sprintf(disk->disk_name, "hd%c", 'a' + hwif->index * MAX_DRIVES + unit);
+ DEBUG_MSG("disk has a name: %s", disk->disk_name);
+ disk->queue = drive->queue;
+}
+
+EXPORT_SYMBOL_GPL(ide_init_disk);
+
+static void drive_release_dev (struct device *dev)
+{
+ ide_drive_t *drive = container_of(dev, ide_drive_t, gendev);
+ ide_hwif_t *hwif = drive->hwif;
+
+ ide_proc_unregister_device(drive);
+
+ spin_lock_irq(&hwif->lock);
+ kfree(drive->id);
+ drive->id = NULL;
+ drive->dev_flags &= ~IDE_DFLAG_PRESENT;
+ /* Messed up locking ... */
+ spin_unlock_irq(&hwif->lock);
+ blk_cleanup_queue(drive->queue);
+ spin_lock_irq(&hwif->lock);
+ drive->queue = NULL;
+ spin_unlock_irq(&hwif->lock);
+
+ complete(&drive->gendev_rel_comp);
+}
+
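+/*
+ * Set up the IRQ, the scatter/gather table and the block device region for
+ * one port.  Returns 1 on success and 0 on failure.
+ */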
+static int hwif_init(ide_hwif_t *hwif)
+{
+ int old_irq;
+
+ if (!hwif->irq) {
+ hwif->irq = __ide_default_irq(hwif->io_ports.data_addr);
+ if (!hwif->irq) {
+ printk(KERN_ERR "%s: disabled, no IRQ\n", hwif->name);
+ return 0;
+ }
+ }
+
+ if (register_blkdev(hwif->major, hwif->name))
+ return 0;
+
+ if (!hwif->sg_max_nents)
+ hwif->sg_max_nents = PRD_ENTRIES;
+
+ hwif->sg_table = kmalloc(sizeof(struct scatterlist)*hwif->sg_max_nents,
+ GFP_KERNEL);
+ if (!hwif->sg_table) {
+ printk(KERN_ERR "%s: unable to allocate SG table.\n", hwif->name);
+ goto out;
+ }
+
+ sg_init_table(hwif->sg_table, hwif->sg_max_nents);
+
+ if (init_irq(hwif) == 0)
+ goto done;
+
+ old_irq = hwif->irq;
+ /*
+ * It failed to initialise. Find the default IRQ for
+ * this port and try that.
+ */
+ hwif->irq = __ide_default_irq(hwif->io_ports.data_addr);
+ if (!hwif->irq) {
+ printk(KERN_ERR "%s: disabled, unable to get IRQ %d\n",
+ hwif->name, old_irq);
+ goto out;
+ }
+ if (init_irq(hwif)) {
+ printk(KERN_ERR "%s: probed IRQ %d and default IRQ %d failed\n",
+ hwif->name, old_irq, hwif->irq);
+ goto out;
+ }
+ printk(KERN_WARNING "%s: probed IRQ %d failed, using default\n",
+ hwif->name, hwif->irq);
+
+done:
+ blk_register_region(MKDEV(hwif->major, 0), MAX_DRIVES << PARTN_BITS,
+ THIS_MODULE, ata_probe, ata_lock, hwif);
+ return 1;
+
+out:
+ unregister_blkdev(hwif->major, hwif->name);
+ return 0;
+}
+
+static void hwif_register_devices(ide_hwif_t *hwif)
+{
+ ide_drive_t *drive;
+ unsigned int i;
+
+ ide_port_for_each_dev(i, drive, hwif) {
+ struct device *dev = &drive->gendev;
+ int ret;
+
+ if ((drive->dev_flags & IDE_DFLAG_PRESENT) == 0)
+ continue;
+
+ dev_set_name(dev, "%u.%u", hwif->index, i);
+ dev->parent = &hwif->gendev;
+ dev->bus = &ide_bus_type;
+ dev->driver_data = drive;
+ dev->release = drive_release_dev;
+
+ ret = device_register(dev);
+ if (ret < 0)
+ printk(KERN_WARNING "IDE: %s: device_register error: "
+ "%d\n", __func__, ret);
+ }
+}
+
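+/*
+ * Set the device number and per-host I/O defaults (32-bit I/O, IRQ
+ * unmasking) for each device, then let the port driver initialize it.
+ */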
+static void ide_port_init_devices(ide_hwif_t *hwif)
+{
+ const struct ide_port_ops *port_ops = hwif->port_ops;
+ ide_drive_t *drive;
+ int i;
+
+ ide_port_for_each_dev(i, drive, hwif) {
+ drive->dn = i + hwif->channel * 2;
+
+ if (hwif->host_flags & IDE_HFLAG_IO_32BIT)
+ drive->io_32bit = 1;
+ if (hwif->host_flags & IDE_HFLAG_UNMASK_IRQS)
+ drive->dev_flags |= IDE_DFLAG_UNMASK;
+ if (hwif->host_flags & IDE_HFLAG_NO_UNMASK_IRQS)
+ drive->dev_flags |= IDE_DFLAG_NO_UNMASK;
+
+ if (port_ops && port_ops->init_dev)
+ port_ops->init_dev(drive);
+ }
+}
+
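+/*
+ * Copy the settings from the ide_port_info template into the hwif and set
+ * up DMA for the port if the host supports it.
+ */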
+static void ide_init_port(ide_hwif_t *hwif, unsigned int port,
+ const struct ide_port_info *d)
+{
+ hwif->channel = port;
+
+ if (d->chipset)
+ hwif->chipset = d->chipset;
+
+ if (d->init_iops)
+ d->init_iops(hwif);
+
+ if ((!hwif->irq && (d->host_flags & IDE_HFLAG_LEGACY_IRQS)) ||
+ (d->host_flags & IDE_HFLAG_FORCE_LEGACY_IRQS))
+ hwif->irq = port ? 15 : 14;
+
+ /* ->host_flags may be set by ->init_iops (or even earlier...) */
+ hwif->host_flags |= d->host_flags;
+ hwif->pio_mask = d->pio_mask;
+
+ if (d->tp_ops)
+ hwif->tp_ops = d->tp_ops;
+
+ /* ->set_pio_mode for DTC2278 is currently limited to port 0 */
+ if (hwif->chipset != ide_dtc2278 || hwif->channel == 0)
+ hwif->port_ops = d->port_ops;
+
+ hwif->swdma_mask = d->swdma_mask;
+ hwif->mwdma_mask = d->mwdma_mask;
+ hwif->ultra_mask = d->udma_mask;
+
+ if ((d->host_flags & IDE_HFLAG_NO_DMA) == 0) {
+ int rc;
+
+ hwif->dma_ops = d->dma_ops;
+
+ if (d->init_dma)
+ rc = d->init_dma(hwif, d);
+ else
+ rc = ide_hwif_setup_dma(hwif, d);
+
+ if (rc < 0) {
+ printk(KERN_INFO "%s: DMA disabled\n", hwif->name);
+
+ hwif->dma_ops = NULL;
+ hwif->dma_base = 0;
+ hwif->swdma_mask = 0;
+ hwif->mwdma_mask = 0;
+ hwif->ultra_mask = 0;
+ }
+ }
+
+ if ((d->host_flags & IDE_HFLAG_SERIALIZE) ||
+ ((d->host_flags & IDE_HFLAG_SERIALIZE_DMA) && hwif->dma_base))
+ hwif->host->host_flags |= IDE_HFLAG_SERIALIZE;
+
+ if (d->max_sectors)
+ hwif->rqsize = d->max_sectors;
+
+ /* call chipset specific routine for each enabled port */
+ if (d->init_hwif)
+ d->init_hwif(hwif);
+}
+
+static void ide_port_cable_detect(ide_hwif_t *hwif)
+{
+ const struct ide_port_ops *port_ops = hwif->port_ops;
+
+ if (port_ops && port_ops->cable_detect && (hwif->ultra_mask & 0x78)) {
+ if (hwif->cbl != ATA_CBL_PATA40_SHORT)
+ hwif->cbl = port_ops->cable_detect(hwif);
+ }
+}
+
+static const u8 ide_hwif_to_major[] =
+ { IDE0_MAJOR, IDE1_MAJOR, IDE2_MAJOR, IDE3_MAJOR, IDE4_MAJOR,
+ IDE5_MAJOR, IDE6_MAJOR, IDE7_MAJOR, IDE8_MAJOR, IDE9_MAJOR };
+
+static void ide_port_init_devices_data(ide_hwif_t *hwif)
+{
+ ide_drive_t *drive;
+ int i;
+
+ ide_port_for_each_dev(i, drive, hwif) {
+ u8 j = (hwif->index * MAX_DRIVES) + i;
+
+ memset(drive, 0, sizeof(*drive));
+
+ drive->media = ide_disk;
+ drive->select = (i << 4) | ATA_DEVICE_OBS;
+ drive->hwif = hwif;
+ drive->ready_stat = ATA_DRDY;
+ drive->bad_wstat = BAD_W_STAT;
+ drive->special.b.recalibrate = 1;
+ drive->special.b.set_geometry = 1;
+ drive->name[0] = 'h';
+ drive->name[1] = 'd';
+ drive->name[2] = 'a' + j;
+ drive->max_failures = IDE_DEFAULT_MAX_FAILURES;
+
+ INIT_LIST_HEAD(&drive->list);
+ init_completion(&drive->gendev_rel_comp);
+ }
+}
+
+static void ide_init_port_data(ide_hwif_t *hwif, unsigned int index)
+{
+ /* fill in any non-zero initial values */
+ hwif->index = index;
+ hwif->major = ide_hwif_to_major[index];
+
+ hwif->name[0] = 'i';
+ hwif->name[1] = 'd';
+ hwif->name[2] = 'e';
+ hwif->name[3] = '0' + index;
+
+ init_completion(&hwif->gendev_rel_comp);
+
+ hwif->tp_ops = &default_tp_ops;
+
+ ide_port_init_devices_data(hwif);
+}
+
+static void ide_init_port_hw(ide_hwif_t *hwif, hw_regs_t *hw)
+{
+ memcpy(&hwif->io_ports, &hw->io_ports, sizeof(hwif->io_ports));
+ hwif->irq = hw->irq;
+ hwif->chipset = hw->chipset;
+ hwif->dev = hw->dev;
+ hwif->gendev.parent = hw->parent ? hw->parent : hw->dev;
+ hwif->ack_intr = hw->ack_intr;
+ hwif->config_data = hw->config;
+}
+
+static unsigned int ide_indexes;
+
+/**
+ * ide_find_port_slot - find free port slot
+ * @d: IDE port info
+ *
+ * Return the new port slot index or -ENOENT if we are out of free slots.
+ */
+
+static int ide_find_port_slot(const struct ide_port_info *d)
+{
+ int idx = -ENOENT;
+ u8 bootable = (d && (d->host_flags & IDE_HFLAG_NON_BOOTABLE)) ? 0 : 1;
+ u8 i = (d && (d->host_flags & IDE_HFLAG_QD_2ND_PORT)) ? 1 : 0;;
+
+ /*
+ * Claim an unassigned slot.
+ *
+ * Give preference to claiming other slots before claiming ide0/ide1,
+ * just in case there's another interface yet-to-be-scanned
+ * which uses ports 0x1f0/0x170 (the ide0/ide1 defaults).
+ *
+ * Unless there is a bootable card that does not use the standard
+ * ports 0x1f0/0x170 (the ide0/ide1 defaults).
+ */
+ mutex_lock(&ide_cfg_mtx);
+ if (bootable) {
+ if ((ide_indexes | i) != (1 << MAX_HWIFS) - 1)
+ idx = ffz(ide_indexes | i);
+ } else {
+ if ((ide_indexes | 3) != (1 << MAX_HWIFS) - 1)
+ idx = ffz(ide_indexes | 3);
+ else if ((ide_indexes & 3) != 3)
+ idx = ffz(ide_indexes);
+ }
+ if (idx >= 0)
+ ide_indexes |= (1 << idx);
+ mutex_unlock(&ide_cfg_mtx);
+
+ return idx;
+}
+
+static void ide_free_port_slot(int idx)
+{
+ mutex_lock(&ide_cfg_mtx);
+ ide_indexes &= ~(1 << idx);
+ mutex_unlock(&ide_cfg_mtx);
+}
+
+static void ide_port_free_devices(ide_hwif_t *hwif)
+{
+ ide_drive_t *drive;
+ int i;
+
+ ide_port_for_each_dev(i, drive, hwif)
+ kfree(drive);
+}
+
+static int ide_port_alloc_devices(ide_hwif_t *hwif, int node)
+{
+ int i;
+
+ for (i = 0; i < MAX_DRIVES; i++) {
+ ide_drive_t *drive;
+
+ drive = kzalloc_node(sizeof(*drive), GFP_KERNEL, node);
+ if (drive == NULL)
+ goto out_nomem;
+
+ hwif->devices[i] = drive;
+ }
+ return 0;
+
+out_nomem:
+ ide_port_free_devices(hwif);
+ return -ENOMEM;
+}
+
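+/*
+ * Allocate an ide_host plus one hwif (and its devices) for every non-NULL
+ * entry in @hws, claiming a free port slot for each.  Returns NULL if no
+ * usable port could be set up.
+ */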
+struct ide_host *ide_host_alloc(const struct ide_port_info *d, hw_regs_t **hws)
+{
+ struct ide_host *host;
+ struct device *dev = hws[0] ? hws[0]->dev : NULL;
+ int node = dev ? dev_to_node(dev) : -1;
+ int i;
+
+ host = kzalloc_node(sizeof(*host), GFP_KERNEL, node);
+ if (host == NULL)
+ return NULL;
+
+ for (i = 0; i < MAX_HOST_PORTS; i++) {
+ ide_hwif_t *hwif;
+ int idx;
+
+ if (hws[i] == NULL)
+ continue;
+
+ hwif = kzalloc_node(sizeof(*hwif), GFP_KERNEL, node);
+ if (hwif == NULL)
+ continue;
+
+ if (ide_port_alloc_devices(hwif, node) < 0) {
+ kfree(hwif);
+ continue;
+ }
+
+ idx = ide_find_port_slot(d);
+ if (idx < 0) {
+ printk(KERN_ERR "%s: no free slot for interface\n",
+ d ? d->name : "ide");
+ kfree(hwif);
+ continue;
+ }
+
+ ide_init_port_data(hwif, idx);
+
+ hwif->host = host;
+
+ host->ports[i] = hwif;
+ host->n_ports++;
+ }
+
+ if (host->n_ports == 0) {
+ kfree(host);
+ return NULL;
+ }
+
+ host->dev[0] = dev;
+
+ if (d) {
+ host->init_chipset = d->init_chipset;
+ host->host_flags = d->host_flags;
+ }
+
+ return host;
+}
+EXPORT_SYMBOL_GPL(ide_host_alloc);
+
+static void ide_port_free(ide_hwif_t *hwif)
+{
+ ide_port_free_devices(hwif);
+ ide_free_port_slot(hwif->index);
+ kfree(hwif);
+}
+
+static void ide_disable_port(ide_hwif_t *hwif)
+{
+ struct ide_host *host = hwif->host;
+ int i;
+
+ printk(KERN_INFO "%s: disabling port\n", hwif->name);
+
+ for (i = 0; i < MAX_HOST_PORTS; i++) {
+ if (host->ports[i] == hwif) {
+ host->ports[i] = NULL;
+ host->n_ports--;
+ }
+ }
+
+ ide_port_free(hwif);
+}
+
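+/*
+ * Probe, register and tune all ports of @host.  Returns 0 if at least one
+ * interface initialized successfully, -1 otherwise.
+ */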
+int ide_host_register(struct ide_host *host, const struct ide_port_info *d,
+ hw_regs_t **hws)
+{
+ ide_hwif_t *hwif, *mate = NULL;
+ int i, j = 0;
+
+ ide_host_for_each_port(i, hwif, host) {
+ if (hwif == NULL) {
+ mate = NULL;
+ continue;
+ }
+
+ ide_init_port_hw(hwif, hws[i]);
+ ide_port_apply_params(hwif);
+
+ if (d == NULL) {
+ mate = NULL;
+ } else {
+ if ((i & 1) && mate) {
+ hwif->mate = mate;
+ mate->mate = hwif;
+ }
+
+ mate = (i & 1) ? NULL : hwif;
+
+ ide_init_port(hwif, i & 1, d);
+ ide_port_cable_detect(hwif);
+ }
+
+ ide_port_init_devices(hwif);
+ }
+
+ ide_host_for_each_port(i, hwif, host) {
+ if (hwif == NULL)
+ continue;
+
+ if (ide_probe_port(hwif) == 0)
+ hwif->present = 1;
+
+ if (hwif->chipset != ide_4drives || !hwif->mate ||
+ !hwif->mate->present) {
+ if (ide_register_port(hwif)) {
+ ide_disable_port(hwif);
+ continue;
+ }
+ }
+
+ if (hwif->present)
+ ide_port_tune_devices(hwif);
+ }
+
+ ide_host_for_each_port(i, hwif, host) {
+ if (hwif == NULL)
+ continue;
+
+ if (hwif_init(hwif) == 0) {
+ printk(KERN_INFO "%s: failed to initialize IDE "
+ "interface\n", hwif->name);
+ device_unregister(&hwif->gendev);
+ ide_disable_port(hwif);
+ continue;
+ }
+
+ if (hwif->present)
+ if (ide_port_setup_devices(hwif) == 0) {
+ hwif->present = 0;
+ continue;
+ }
+
+ j++;
+
+ ide_acpi_init(hwif);
+
+ if (hwif->present)
+ ide_acpi_port_init_devices(hwif);
+ }
+
+ ide_host_for_each_port(i, hwif, host) {
+ if (hwif == NULL)
+ continue;
+
+ if (hwif->present)
+ hwif_register_devices(hwif);
+ }
+
+ ide_host_for_each_port(i, hwif, host) {
+ if (hwif == NULL)
+ continue;
+
+ ide_sysfs_register_port(hwif);
+ ide_proc_register_port(hwif);
+
+ if (hwif->present)
+ ide_proc_port_register_devices(hwif);
+ }
+
+ return j ? 0 : -1;
+}
+EXPORT_SYMBOL_GPL(ide_host_register);
+
+int ide_host_add(const struct ide_port_info *d, hw_regs_t **hws,
+ struct ide_host **hostp)
+{
+ struct ide_host *host;
+ int rc;
+
+ host = ide_host_alloc(d, hws);
+ if (host == NULL)
+ return -ENOMEM;
+
+ rc = ide_host_register(host, d, hws);
+ if (rc) {
+ ide_host_free(host);
+ return rc;
+ }
+
+ if (hostp)
+ *hostp = host;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ide_host_add);
+
+static void __ide_port_unregister_devices(ide_hwif_t *hwif)
+{
+ ide_drive_t *drive;
+ int i;
+
+ ide_port_for_each_dev(i, drive, hwif) {
+ if (drive->dev_flags & IDE_DFLAG_PRESENT) {
+ device_unregister(&drive->gendev);
+ wait_for_completion(&drive->gendev_rel_comp);
+ }
+ }
+}
+
+void ide_port_unregister_devices(ide_hwif_t *hwif)
+{
+ mutex_lock(&ide_cfg_mtx);
+ __ide_port_unregister_devices(hwif);
+ hwif->present = 0;
+ ide_port_init_devices_data(hwif);
+ mutex_unlock(&ide_cfg_mtx);
+}
+EXPORT_SYMBOL_GPL(ide_port_unregister_devices);
+
+/**
+ * ide_unregister - free an IDE interface
+ * @hwif: IDE interface
+ *
+ * Perform the final unregister of an IDE interface.
+ *
+ * Locking:
+ * The caller must not hold the IDE locks.
+ *
+ * It is up to the caller to be sure there is no pending I/O here,
+ * and that the interface will not be reopened (present/vanishing
+ * locking isn't yet done BTW).
+ */
+
+static void ide_unregister(ide_hwif_t *hwif)
+{
+ BUG_ON(in_interrupt());
+ BUG_ON(irqs_disabled());
+
+ mutex_lock(&ide_cfg_mtx);
+
+ if (hwif->present) {
+ __ide_port_unregister_devices(hwif);
+ hwif->present = 0;
+ }
+
+ ide_proc_unregister_port(hwif);
+
+ free_irq(hwif->irq, hwif);
+
+ device_unregister(hwif->portdev);
+ device_unregister(&hwif->gendev);
+ wait_for_completion(&hwif->gendev_rel_comp);
+
+ /*
+ * Remove us from the kernel's knowledge
+ */
+ blk_unregister_region(MKDEV(hwif->major, 0), MAX_DRIVES<<PARTN_BITS);
+ kfree(hwif->sg_table);
+ unregister_blkdev(hwif->major, hwif->name);
+
+ ide_release_dma_engine(hwif);
+
+ mutex_unlock(&ide_cfg_mtx);
+}
+
+void ide_host_free(struct ide_host *host)
+{
+ ide_hwif_t *hwif;
+ int i;
+
+ ide_host_for_each_port(i, hwif, host) {
+ if (hwif)
+ ide_port_free(hwif);
+ }
+
+ kfree(host);
+}
+EXPORT_SYMBOL_GPL(ide_host_free);
+
+void ide_host_remove(struct ide_host *host)
+{
+ ide_hwif_t *hwif;
+ int i;
+
+ ide_host_for_each_port(i, hwif, host) {
+ if (hwif)
+ ide_unregister(hwif);
+ }
+
+ ide_host_free(host);
+}
+EXPORT_SYMBOL_GPL(ide_host_remove);
+
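+/*
+ * Re-probe a port at run time (used by the sysfs "scan" attribute) and
+ * register any devices that are found.
+ */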
+void ide_port_scan(ide_hwif_t *hwif)
+{
+ ide_port_apply_params(hwif);
+ ide_port_cable_detect(hwif);
+ ide_port_init_devices(hwif);
+
+ if (ide_probe_port(hwif) < 0)
+ return;
+
+ hwif->present = 1;
+
+ ide_port_tune_devices(hwif);
+ ide_port_setup_devices(hwif);
+ ide_acpi_port_init_devices(hwif);
+ hwif_register_devices(hwif);
+ ide_proc_port_register_devices(hwif);
+}
+EXPORT_SYMBOL_GPL(ide_port_scan);
diff --git a/windhoek/ide/ide-proc.c b/windhoek/ide/ide-proc.c
new file mode 100644
index 00000000..a7b9287e
--- /dev/null
+++ b/windhoek/ide/ide-proc.c
@@ -0,0 +1,701 @@
+/*
+ * Copyright (C) 1997-1998 Mark Lord
+ * Copyright (C) 2003 Red Hat
+ *
+ * Some code was moved here from ide.c, see it for original copyrights.
+ */
+
+/*
+ * This is the /proc/ide/ filesystem implementation.
+ *
+ * Drive/Driver settings can be retrieved by reading the drive's
+ * "settings" files. e.g. "cat /proc/ide0/hda/settings"
+ * To write a new value "val" into a specific setting "name", use:
+ * echo "name:val" >/proc/ide/ide0/hda/settings
+ */
+
+#include <linux/module.h>
+
+#include <asm/uaccess.h>
+#include <linux/errno.h>
+#include <linux/proc_fs.h>
+#include <linux/stat.h>
+#include <linux/mm.h>
+#include <linux/pci.h>
+#include <linux/ctype.h>
+#include <linux/ide.h>
+#include <linux/seq_file.h>
+
+#include <asm/io.h>
+
+static struct proc_dir_entry *proc_ide_root;
+
+static int proc_ide_read_imodel
+ (char *page, char **start, off_t off, int count, int *eof, void *data)
+{
+ ide_hwif_t *hwif = (ide_hwif_t *) data;
+ int len;
+ const char *name;
+
+ switch (hwif->chipset) {
+ case ide_generic: name = "generic"; break;
+ case ide_pci: name = "pci"; break;
+ case ide_cmd640: name = "cmd640"; break;
+ case ide_dtc2278: name = "dtc2278"; break;
+ case ide_ali14xx: name = "ali14xx"; break;
+ case ide_qd65xx: name = "qd65xx"; break;
+ case ide_umc8672: name = "umc8672"; break;
+ case ide_ht6560b: name = "ht6560b"; break;
+ case ide_4drives: name = "4drives"; break;
+ case ide_pmac: name = "mac-io"; break;
+ case ide_au1xxx: name = "au1xxx"; break;
+ case ide_palm3710: name = "palm3710"; break;
+ case ide_acorn: name = "acorn"; break;
+ default: name = "(unknown)"; break;
+ }
+ len = sprintf(page, "%s\n", name);
+ PROC_IDE_READ_RETURN(page, start, off, count, eof, len);
+}
+
+static int proc_ide_read_mate
+ (char *page, char **start, off_t off, int count, int *eof, void *data)
+{
+ ide_hwif_t *hwif = (ide_hwif_t *) data;
+ int len;
+
+ if (hwif && hwif->mate)
+ len = sprintf(page, "%s\n", hwif->mate->name);
+ else
+ len = sprintf(page, "(none)\n");
+ PROC_IDE_READ_RETURN(page, start, off, count, eof, len);
+}
+
+static int proc_ide_read_channel
+ (char *page, char **start, off_t off, int count, int *eof, void *data)
+{
+ ide_hwif_t *hwif = (ide_hwif_t *) data;
+ int len;
+
+ page[0] = hwif->channel ? '1' : '0';
+ page[1] = '\n';
+ len = 2;
+ PROC_IDE_READ_RETURN(page, start, off, count, eof, len);
+}
+
+static int proc_ide_read_identify
+ (char *page, char **start, off_t off, int count, int *eof, void *data)
+{
+ ide_drive_t *drive = (ide_drive_t *)data;
+ int len = 0, i = 0;
+ int err = 0;
+
+ len = sprintf(page, "\n");
+
+ if (drive) {
+ __le16 *val = (__le16 *)page;
+
+ err = taskfile_lib_get_identify(drive, page);
+ if (!err) {
+ char *out = (char *)page + SECTOR_SIZE;
+
+ page = out;
+ do {
+ out += sprintf(out, "%04x%c",
+ le16_to_cpup(val), (++i & 7) ? ' ' : '\n');
+ val += 1;
+ } while (i < SECTOR_SIZE / 2);
+ len = out - page;
+ }
+ }
+ PROC_IDE_READ_RETURN(page, start, off, count, eof, len);
+}
+
+/**
+ * ide_find_setting - find a specific setting
+ * @st: setting table pointer
+ * @name: setting name
+ *
+ * Scans the setting table for a matching entry and returns it,
+ * or NULL if no entry is found. The caller must hold
+ * ide_setting_mtx.
+ */
+
+static
+const struct ide_proc_devset *ide_find_setting(const struct ide_proc_devset *st,
+ char *name)
+{
+ while (st->name) {
+ if (strcmp(st->name, name) == 0)
+ break;
+ st++;
+ }
+ return st->name ? st : NULL;
+}
+
+/**
+ * ide_read_setting - read an IDE setting
+ * @drive: drive to read from
+ * @setting: drive setting
+ *
+ * Read a drive setting and return the value. The caller
+ * must hold the ide_setting_mtx when making this call.
+ *
+ * BUGS: the data and the error share the same return value, so a
+ * setting whose value really is -EINVAL cannot be told apart from
+ * an error.
+ */
+
+static int ide_read_setting(ide_drive_t *drive,
+ const struct ide_proc_devset *setting)
+{
+ const struct ide_devset *ds = setting->setting;
+ int val = -EINVAL;
+
+ if (ds->get)
+ val = ds->get(drive);
+
+ return val;
+}
+
+/**
+ * ide_write_setting - write an IDE setting
+ * @drive: drive to write to
+ * @setting: drive setting
+ * @val: value
+ *
+ * Write a drive setting if it is possible. The caller
+ * must hold the ide_setting_mtx when making this call.
+ *
+ * BUGS: the data and the error share the same return value, so a
+ * setting whose value really is -EINVAL cannot be told apart from
+ * an error.
+ *
+ * FIXME: This should be changed to enqueue a special request
+ * to the driver to change settings, and then wait on a semaphore for completion.
+ * The current scheme of polling is kludgy, though safe enough.
+ */
+
+static int ide_write_setting(ide_drive_t *drive,
+ const struct ide_proc_devset *setting, int val)
+{
+ const struct ide_devset *ds = setting->setting;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+ if (!ds->set)
+ return -EPERM;
+ if ((ds->flags & DS_SYNC)
+ && (val < setting->min || val > setting->max))
+ return -EINVAL;
+ return ide_devset_execute(drive, ds, val);
+}
+
+ide_devset_get(xfer_rate, current_speed);
+
+static int set_xfer_rate (ide_drive_t *drive, int arg)
+{
+ ide_task_t task;
+ int err;
+
+ if (arg < XFER_PIO_0 || arg > XFER_UDMA_6)
+ return -EINVAL;
+
+ memset(&task, 0, sizeof(task));
+ task.tf.command = ATA_CMD_SET_FEATURES;
+ task.tf.feature = SETFEATURES_XFER;
+ task.tf.nsect = (u8)arg;
+ task.tf_flags = IDE_TFLAG_OUT_FEATURE | IDE_TFLAG_OUT_NSECT |
+ IDE_TFLAG_IN_NSECT;
+
+ err = ide_no_data_taskfile(drive, &task);
+
+ if (!err) {
+ ide_set_xfer_rate(drive, (u8) arg);
+ ide_driveid_update(drive);
+ }
+ return err;
+}
+
+ide_devset_rw(current_speed, xfer_rate);
+ide_devset_rw_field(init_speed, init_speed);
+ide_devset_rw_flag(nice1, IDE_DFLAG_NICE1);
+ide_devset_rw_field(number, dn);
+
+static const struct ide_proc_devset ide_generic_settings[] = {
+ IDE_PROC_DEVSET(current_speed, 0, 70),
+ IDE_PROC_DEVSET(init_speed, 0, 70),
+ IDE_PROC_DEVSET(io_32bit, 0, 1 + (SUPPORT_VLB_SYNC << 1)),
+ IDE_PROC_DEVSET(keepsettings, 0, 1),
+ IDE_PROC_DEVSET(nice1, 0, 1),
+ IDE_PROC_DEVSET(number, 0, 3),
+ IDE_PROC_DEVSET(pio_mode, 0, 255),
+ IDE_PROC_DEVSET(unmaskirq, 0, 1),
+ IDE_PROC_DEVSET(using_dma, 0, 1),
+ { NULL },
+};
+
+static void proc_ide_settings_warn(void)
+{
+ static int warned;
+
+ if (warned)
+ return;
+
+ printk(KERN_WARNING "Warning: /proc/ide/hd?/settings interface is "
+ "obsolete, and will be removed soon!\n");
+ warned = 1;
+}
+
+static int proc_ide_read_settings
+ (char *page, char **start, off_t off, int count, int *eof, void *data)
+{
+ const struct ide_proc_devset *setting, *g, *d;
+ const struct ide_devset *ds;
+ ide_drive_t *drive = (ide_drive_t *) data;
+ char *out = page;
+ int len, rc, mul_factor, div_factor;
+
+ proc_ide_settings_warn();
+
+ mutex_lock(&ide_setting_mtx);
+ g = ide_generic_settings;
+ d = drive->settings;
+ out += sprintf(out, "name\t\t\tvalue\t\tmin\t\tmax\t\tmode\n");
+ out += sprintf(out, "----\t\t\t-----\t\t---\t\t---\t\t----\n");
+ while (g->name || (d && d->name)) {
+ /* read settings in the alphabetical order */
+ if (g->name && d && d->name) {
+ if (strcmp(d->name, g->name) < 0)
+ setting = d++;
+ else
+ setting = g++;
+ } else if (d && d->name) {
+ setting = d++;
+ } else
+ setting = g++;
+ mul_factor = setting->mulf ? setting->mulf(drive) : 1;
+ div_factor = setting->divf ? setting->divf(drive) : 1;
+ out += sprintf(out, "%-24s", setting->name);
+ rc = ide_read_setting(drive, setting);
+ if (rc >= 0)
+ out += sprintf(out, "%-16d", rc * mul_factor / div_factor);
+ else
+ out += sprintf(out, "%-16s", "write-only");
+ out += sprintf(out, "%-16d%-16d", (setting->min * mul_factor + div_factor - 1) / div_factor, setting->max * mul_factor / div_factor);
+ ds = setting->setting;
+ if (ds->get)
+ out += sprintf(out, "r");
+ if (ds->set)
+ out += sprintf(out, "w");
+ out += sprintf(out, "\n");
+ }
+ len = out - page;
+ mutex_unlock(&ide_setting_mtx);
+ PROC_IDE_READ_RETURN(page, start, off, count, eof, len);
+}
+
+#define MAX_LEN 30
+
+static int proc_ide_write_settings(struct file *file, const char __user *buffer,
+ unsigned long count, void *data)
+{
+ ide_drive_t *drive = (ide_drive_t *) data;
+ char name[MAX_LEN + 1];
+ int for_real = 0, mul_factor, div_factor;
+ unsigned long n;
+
+ const struct ide_proc_devset *setting;
+ char *buf, *s;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+
+ proc_ide_settings_warn();
+
+ if (count >= PAGE_SIZE)
+ return -EINVAL;
+
+ s = buf = (char *)__get_free_page(GFP_USER);
+ if (!buf)
+ return -ENOMEM;
+
+ if (copy_from_user(buf, buffer, count)) {
+ free_page((unsigned long)buf);
+ return -EFAULT;
+ }
+
+ buf[count] = '\0';
+
+ /*
+ * Skip over leading whitespace
+ */
+ while (count && isspace(*s)) {
+ --count;
+ ++s;
+ }
+ /*
+ * Do one full pass to verify all parameters,
+ * then do another to actually write the new settings.
+ */
+ do {
+ char *p = s;
+ n = count;
+ while (n > 0) {
+ unsigned val;
+ char *q = p;
+
+ while (n > 0 && *p != ':') {
+ --n;
+ p++;
+ }
+ if (*p != ':')
+ goto parse_error;
+ if (p - q > MAX_LEN)
+ goto parse_error;
+ memcpy(name, q, p - q);
+ name[p - q] = 0;
+
+ if (n > 0) {
+ --n;
+ p++;
+ } else
+ goto parse_error;
+
+ val = simple_strtoul(p, &q, 10);
+ n -= q - p;
+ p = q;
+ if (n > 0 && !isspace(*p))
+ goto parse_error;
+ while (n > 0 && isspace(*p)) {
+ --n;
+ ++p;
+ }
+
+ mutex_lock(&ide_setting_mtx);
+ /* generic settings first, then driver specific ones */
+ setting = ide_find_setting(ide_generic_settings, name);
+ if (!setting) {
+ if (drive->settings)
+ setting = ide_find_setting(drive->settings, name);
+ if (!setting) {
+ mutex_unlock(&ide_setting_mtx);
+ goto parse_error;
+ }
+ }
+ if (for_real) {
+ mul_factor = setting->mulf ? setting->mulf(drive) : 1;
+ div_factor = setting->divf ? setting->divf(drive) : 1;
+ ide_write_setting(drive, setting, val * div_factor / mul_factor);
+ }
+ mutex_unlock(&ide_setting_mtx);
+ }
+ } while (!for_real++);
+ free_page((unsigned long)buf);
+ return count;
+parse_error:
+ free_page((unsigned long)buf);
+ printk("proc_ide_write_settings(): parse error\n");
+ return -EINVAL;
+}
+
+int proc_ide_read_capacity
+ (char *page, char **start, off_t off, int count, int *eof, void *data)
+{
+ int len = sprintf(page, "%llu\n", (long long)0x7fffffff);
+ PROC_IDE_READ_RETURN(page, start, off, count, eof, len);
+}
+
+EXPORT_SYMBOL_GPL(proc_ide_read_capacity);
+
+int proc_ide_read_geometry
+ (char *page, char **start, off_t off, int count, int *eof, void *data)
+{
+ ide_drive_t *drive = (ide_drive_t *) data;
+ char *out = page;
+ int len;
+
+ out += sprintf(out, "physical %d/%d/%d\n",
+ drive->cyl, drive->head, drive->sect);
+ out += sprintf(out, "logical %d/%d/%d\n",
+ drive->bios_cyl, drive->bios_head, drive->bios_sect);
+
+ len = out - page;
+ PROC_IDE_READ_RETURN(page, start, off, count, eof, len);
+}
+
+EXPORT_SYMBOL(proc_ide_read_geometry);
+
+static int proc_ide_read_dmodel
+ (char *page, char **start, off_t off, int count, int *eof, void *data)
+{
+ ide_drive_t *drive = (ide_drive_t *) data;
+ char *m = (char *)&drive->id[ATA_ID_PROD];
+ int len;
+
+ len = sprintf(page, "%.40s\n", m[0] ? m : "(none)");
+ PROC_IDE_READ_RETURN(page, start, off, count, eof, len);
+}
+
+static int proc_ide_read_driver
+ (char *page, char **start, off_t off, int count, int *eof, void *data)
+{
+ ide_drive_t *drive = (ide_drive_t *)data;
+ struct device *dev = &drive->gendev;
+ struct ide_driver *ide_drv;
+ int len;
+
+ if (dev->driver) {
+ ide_drv = to_ide_driver(dev->driver);
+ len = sprintf(page, "%s version %s\n",
+ dev->driver->name, ide_drv->version);
+ } else
+ len = sprintf(page, "ide-default version 0.9.newide\n");
+ PROC_IDE_READ_RETURN(page, start, off, count, eof, len);
+}
+
+static int ide_replace_subdriver(ide_drive_t *drive, const char *driver)
+{
+ struct device *dev = &drive->gendev;
+ int ret = 1;
+ int err;
+
+ device_release_driver(dev);
+ /* FIXME: device can still be in use by previous driver */
+ strlcpy(drive->driver_req, driver, sizeof(drive->driver_req));
+ err = device_attach(dev);
+ if (err < 0)
+ printk(KERN_WARNING "IDE: %s: device_attach error: %d\n",
+ __func__, err);
+ drive->driver_req[0] = 0;
+ if (dev->driver == NULL) {
+ err = device_attach(dev);
+ if (err < 0)
+ printk(KERN_WARNING
+ "IDE: %s: device_attach(2) error: %d\n",
+ __func__, err);
+ }
+ if (dev->driver && !strcmp(dev->driver->name, driver))
+ ret = 0;
+
+ return ret;
+}
+
+static int proc_ide_write_driver
+ (struct file *file, const char __user *buffer, unsigned long count, void *data)
+{
+ ide_drive_t *drive = (ide_drive_t *) data;
+ char name[32];
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+ if (count > 31)
+ count = 31;
+ if (copy_from_user(name, buffer, count))
+ return -EFAULT;
+ name[count] = '\0';
+ if (ide_replace_subdriver(drive, name))
+ return -EINVAL;
+ return count;
+}
+
+static int proc_ide_read_media
+ (char *page, char **start, off_t off, int count, int *eof, void *data)
+{
+ ide_drive_t *drive = (ide_drive_t *) data;
+ const char *media;
+ int len;
+
+ switch (drive->media) {
+ case ide_disk: media = "disk\n"; break;
+ case ide_cdrom: media = "cdrom\n"; break;
+ case ide_tape: media = "tape\n"; break;
+ case ide_floppy: media = "floppy\n"; break;
+ case ide_optical: media = "optical\n"; break;
+ default: media = "UNKNOWN\n"; break;
+ }
+ strcpy(page, media);
+ len = strlen(media);
+ PROC_IDE_READ_RETURN(page, start, off, count, eof, len);
+}
+
+static ide_proc_entry_t generic_drive_entries[] = {
+ { "driver", S_IFREG|S_IRUGO, proc_ide_read_driver,
+ proc_ide_write_driver },
+ { "identify", S_IFREG|S_IRUSR, proc_ide_read_identify, NULL },
+ { "media", S_IFREG|S_IRUGO, proc_ide_read_media, NULL },
+ { "model", S_IFREG|S_IRUGO, proc_ide_read_dmodel, NULL },
+ { "settings", S_IFREG|S_IRUSR|S_IWUSR, proc_ide_read_settings,
+ proc_ide_write_settings },
+ { NULL, 0, NULL, NULL }
+};
+
+static void ide_add_proc_entries(struct proc_dir_entry *dir, ide_proc_entry_t *p, void *data)
+{
+ struct proc_dir_entry *ent;
+
+ if (!dir || !p)
+ return;
+ while (p->name != NULL) {
+ ent = create_proc_entry(p->name, p->mode, dir);
+ if (!ent) return;
+ ent->data = data;
+ ent->read_proc = p->read_proc;
+ ent->write_proc = p->write_proc;
+ p++;
+ }
+}
+
+static void ide_remove_proc_entries(struct proc_dir_entry *dir, ide_proc_entry_t *p)
+{
+ if (!dir || !p)
+ return;
+ while (p->name != NULL) {
+ remove_proc_entry(p->name, dir);
+ p++;
+ }
+}
+
+void ide_proc_register_driver(ide_drive_t *drive, struct ide_driver *driver)
+{
+ mutex_lock(&ide_setting_mtx);
+ drive->settings = driver->proc_devsets(drive);
+ mutex_unlock(&ide_setting_mtx);
+
+ ide_add_proc_entries(drive->proc, driver->proc_entries(drive), drive);
+}
+
+EXPORT_SYMBOL(ide_proc_register_driver);
+
+/**
+ * ide_proc_unregister_driver - remove driver specific data
+ * @drive: drive
+ * @driver: driver
+ *
+ * Clean up the driver specific /proc files and IDE settings
+ * for a given drive.
+ *
+ * Takes ide_setting_mtx.
+ */
+
+void ide_proc_unregister_driver(ide_drive_t *drive, struct ide_driver *driver)
+{
+ ide_remove_proc_entries(drive->proc, driver->proc_entries(drive));
+
+ mutex_lock(&ide_setting_mtx);
+ /*
+ * ide_setting_mtx protects both the settings list and the use
+ * of settings (we cannot take a setting out that is being used).
+ */
+ drive->settings = NULL;
+ mutex_unlock(&ide_setting_mtx);
+}
+EXPORT_SYMBOL(ide_proc_unregister_driver);
+
+void ide_proc_port_register_devices(ide_hwif_t *hwif)
+{
+ struct proc_dir_entry *ent;
+ struct proc_dir_entry *parent = hwif->proc;
+ ide_drive_t *drive;
+ char name[64];
+ int i;
+
+ ide_port_for_each_dev(i, drive, hwif) {
+ if ((drive->dev_flags & IDE_DFLAG_PRESENT) == 0 || drive->proc)
+ continue;
+
+ drive->proc = proc_mkdir(drive->name, parent);
+ if (drive->proc)
+ ide_add_proc_entries(drive->proc, generic_drive_entries, drive);
+ sprintf(name, "ide%d/%s", (drive->name[2]-'a')/2, drive->name);
+ ent = proc_symlink(drive->name, proc_ide_root, name);
+ if (!ent) return;
+ }
+}
+
+void ide_proc_unregister_device(ide_drive_t *drive)
+{
+ if (drive->proc) {
+ ide_remove_proc_entries(drive->proc, generic_drive_entries);
+ remove_proc_entry(drive->name, proc_ide_root);
+ remove_proc_entry(drive->name, drive->hwif->proc);
+ drive->proc = NULL;
+ }
+}
+
+static ide_proc_entry_t hwif_entries[] = {
+ { "channel", S_IFREG|S_IRUGO, proc_ide_read_channel, NULL },
+ { "mate", S_IFREG|S_IRUGO, proc_ide_read_mate, NULL },
+ { "model", S_IFREG|S_IRUGO, proc_ide_read_imodel, NULL },
+ { NULL, 0, NULL, NULL }
+};
+
+void ide_proc_register_port(ide_hwif_t *hwif)
+{
+ if (!hwif->proc) {
+ hwif->proc = proc_mkdir(hwif->name, proc_ide_root);
+
+ if (!hwif->proc)
+ return;
+
+ ide_add_proc_entries(hwif->proc, hwif_entries, hwif);
+ }
+}
+
+void ide_proc_unregister_port(ide_hwif_t *hwif)
+{
+ if (hwif->proc) {
+ ide_remove_proc_entries(hwif->proc, hwif_entries);
+ remove_proc_entry(hwif->name, proc_ide_root);
+ hwif->proc = NULL;
+ }
+}
+
+static int proc_print_driver(struct device_driver *drv, void *data)
+{
+ struct ide_driver *ide_drv = to_ide_driver(drv);
+ struct seq_file *s = data;
+
+ seq_printf(s, "%s version %s\n", drv->name, ide_drv->version);
+
+ return 0;
+}
+
+static int ide_drivers_show(struct seq_file *s, void *p)
+{
+ int err;
+
+ err = bus_for_each_drv(&ide_bus_type, NULL, s, proc_print_driver);
+ if (err < 0)
+ printk(KERN_WARNING "IDE: %s: bus_for_each_drv error: %d\n",
+ __func__, err);
+ return 0;
+}
+
+static int ide_drivers_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, &ide_drivers_show, NULL);
+}
+
+static const struct file_operations ide_drivers_operations = {
+ .owner = THIS_MODULE,
+ .open = ide_drivers_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+void proc_ide_create(void)
+{
+ proc_ide_root = proc_mkdir("ide", NULL);
+
+ if (!proc_ide_root)
+ return;
+
+ proc_create("drivers", 0, proc_ide_root, &ide_drivers_operations);
+}
+
+void proc_ide_destroy(void)
+{
+ remove_proc_entry("drivers", proc_ide_root);
+ remove_proc_entry("ide", NULL);
+}
diff --git a/windhoek/ide/ide-sysfs.c b/windhoek/ide/ide-sysfs.c
new file mode 100644
index 00000000..883ffaca
--- /dev/null
+++ b/windhoek/ide/ide-sysfs.c
@@ -0,0 +1,125 @@
+#include <linux/kernel.h>
+#include <linux/ide.h>
+
+char *ide_media_string(ide_drive_t *drive)
+{
+ switch (drive->media) {
+ case ide_disk:
+ return "disk";
+ case ide_cdrom:
+ return "cdrom";
+ case ide_tape:
+ return "tape";
+ case ide_floppy:
+ return "floppy";
+ case ide_optical:
+ return "optical";
+ default:
+ return "UNKNOWN";
+ }
+}
+
+static ssize_t media_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ ide_drive_t *drive = to_ide_device(dev);
+ return sprintf(buf, "%s\n", ide_media_string(drive));
+}
+
+static ssize_t drivename_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ ide_drive_t *drive = to_ide_device(dev);
+ return sprintf(buf, "%s\n", drive->name);
+}
+
+static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ ide_drive_t *drive = to_ide_device(dev);
+ return sprintf(buf, "ide:m-%s\n", ide_media_string(drive));
+}
+
+static ssize_t model_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ ide_drive_t *drive = to_ide_device(dev);
+ return sprintf(buf, "%s\n", (char *)&drive->id[ATA_ID_PROD]);
+}
+
+static ssize_t firmware_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ ide_drive_t *drive = to_ide_device(dev);
+ return sprintf(buf, "%s\n", (char *)&drive->id[ATA_ID_FW_REV]);
+}
+
+static ssize_t serial_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ ide_drive_t *drive = to_ide_device(dev);
+ return sprintf(buf, "%s\n", (char *)&drive->id[ATA_ID_SERNO]);
+}
+
+struct device_attribute ide_dev_attrs[] = {
+ __ATTR_RO(media),
+ __ATTR_RO(drivename),
+ __ATTR_RO(modalias),
+ __ATTR_RO(model),
+ __ATTR_RO(firmware),
+ __ATTR(serial, 0400, serial_show, NULL),
+ __ATTR(unload_heads, 0644, ide_park_show, ide_park_store),
+ __ATTR_NULL
+};
+
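+/* Writing "1" to the port's "delete_devices" attribute unregisters its devices. */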
+static ssize_t store_delete_devices(struct device *portdev,
+ struct device_attribute *attr,
+ const char *buf, size_t n)
+{
+ ide_hwif_t *hwif = dev_get_drvdata(portdev);
+
+ if (strncmp(buf, "1", n))
+ return -EINVAL;
+
+ ide_port_unregister_devices(hwif);
+
+ return n;
+};
+
+static DEVICE_ATTR(delete_devices, S_IWUSR, NULL, store_delete_devices);
+
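+/* Writing "1" to the port's "scan" attribute re-probes the port. */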
+static ssize_t store_scan(struct device *portdev,
+ struct device_attribute *attr,
+ const char *buf, size_t n)
+{
+ ide_hwif_t *hwif = dev_get_drvdata(portdev);
+
+ if (strncmp(buf, "1", n))
+ return -EINVAL;
+
+ ide_port_unregister_devices(hwif);
+ ide_port_scan(hwif);
+
+ return n;
+};
+
+static DEVICE_ATTR(scan, S_IWUSR, NULL, store_scan);
+
+static struct device_attribute *ide_port_attrs[] = {
+ &dev_attr_delete_devices,
+ &dev_attr_scan,
+ NULL
+};
+
+int ide_sysfs_register_port(ide_hwif_t *hwif)
+{
+ int i, uninitialized_var(rc);
+
+ for (i = 0; ide_port_attrs[i]; i++) {
+ rc = device_create_file(hwif->portdev, ide_port_attrs[i]);
+ if (rc)
+ break;
+ }
+
+ return rc;
+}
diff --git a/windhoek/ide/ide-taskfile.c b/windhoek/ide/ide-taskfile.c
new file mode 100644
index 00000000..a499923a
--- /dev/null
+++ b/windhoek/ide/ide-taskfile.c
@@ -0,0 +1,695 @@
+/*
+ * Copyright (C) 2000-2002 Michael Cornwell <cornwell@acm.org>
+ * Copyright (C) 2000-2002 Andre Hedrick <andre@linux-ide.org>
+ * Copyright (C) 2001-2002 Klaus Smolin
+ * IBM Storage Technology Division
+ * Copyright (C) 2003-2004, 2007 Bartlomiej Zolnierkiewicz
+ *
+ * The big, the bad and the ugly.
+ */
+
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/hdreg.h>
+#include <linux/ide.h>
+#include <linux/scatterlist.h>
+
+#include <asm/uaccess.h>
+#include <asm/io.h>
+
+#include "local.h"
+
+void ide_tf_dump(const char *s, struct ide_taskfile *tf)
+{
+#ifdef DEBUG
+ printk("%s: tf: feat 0x%02x nsect 0x%02x lbal 0x%02x "
+ "lbam 0x%02x lbah 0x%02x dev 0x%02x cmd 0x%02x\n",
+ s, tf->feature, tf->nsect, tf->lbal,
+ tf->lbam, tf->lbah, tf->device, tf->command);
+ printk("%s: hob: nsect 0x%02x lbal 0x%02x "
+ "lbam 0x%02x lbah 0x%02x\n",
+ s, tf->hob_nsect, tf->hob_lbal,
+ tf->hob_lbam, tf->hob_lbah);
+#endif
+}
+
+int taskfile_lib_get_identify (ide_drive_t *drive, u8 *buf)
+{
+ ide_task_t args;
+
+ memset(&args, 0, sizeof(ide_task_t));
+ args.tf.nsect = 0x01;
+ if (drive->media == ide_disk)
+ args.tf.command = ATA_CMD_ID_ATA;
+ else
+ args.tf.command = ATA_CMD_ID_ATAPI;
+ args.tf_flags = IDE_TFLAG_TF | IDE_TFLAG_DEVICE;
+ args.data_phase = TASKFILE_IN;
+ return ide_raw_taskfile(drive, &args, buf, 1);
+}
+
+static ide_startstop_t task_no_data_intr(ide_drive_t *);
+static ide_startstop_t pre_task_out_intr(ide_drive_t *, struct request *);
+static ide_startstop_t task_in_intr(ide_drive_t *);
+
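+/*
+ * Issue a taskfile to the drive: load the taskfile registers, then either
+ * start a PIO data phase, install the completion handler for a no-data
+ * command, or kick off a DMA transfer.
+ */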
+ide_startstop_t do_rw_taskfile (ide_drive_t *drive, ide_task_t *task)
+{
+ ide_hwif_t *hwif = drive->hwif;
+ struct ide_taskfile *tf = &task->tf;
+ ide_handler_t *handler = NULL;
+ const struct ide_tp_ops *tp_ops = hwif->tp_ops;
+ const struct ide_dma_ops *dma_ops = hwif->dma_ops;
+
+ if (task->data_phase == TASKFILE_MULTI_IN ||
+ task->data_phase == TASKFILE_MULTI_OUT) {
+ if (!drive->mult_count) {
+ printk(KERN_ERR "%s: multimode not set!\n",
+ drive->name);
+ return ide_stopped;
+ }
+ }
+
+ if (task->tf_flags & IDE_TFLAG_FLAGGED)
+ task->tf_flags |= IDE_TFLAG_FLAGGED_SET_IN_FLAGS;
+
+ memcpy(&hwif->task, task, sizeof(*task));
+
+ if ((task->tf_flags & IDE_TFLAG_DMA_PIO_FALLBACK) == 0) {
+ ide_tf_dump(drive->name, tf);
+ tp_ops->set_irq(hwif, 1);
+ SELECT_MASK(drive, 0);
+ tp_ops->tf_load(drive, task);
+ }
+
+ switch (task->data_phase) {
+ case TASKFILE_MULTI_OUT:
+ case TASKFILE_OUT:
+ tp_ops->exec_command(hwif, tf->command);
+ ndelay(400); /* FIXME */
+ return pre_task_out_intr(drive, task->rq);
+ case TASKFILE_MULTI_IN:
+ case TASKFILE_IN:
+ handler = task_in_intr;
+ /* fall-through */
+ case TASKFILE_NO_DATA:
+ if (handler == NULL)
+ handler = task_no_data_intr;
+ ide_execute_command(drive, tf->command, handler,
+ WAIT_WORSTCASE, NULL);
+ return ide_started;
+ default:
+ if ((drive->dev_flags & IDE_DFLAG_USING_DMA) == 0 ||
+ dma_ops->dma_setup(drive))
+ return ide_stopped;
+ dma_ops->dma_exec_cmd(drive, tf->command);
+ dma_ops->dma_start(drive);
+ return ide_started;
+ }
+}
+EXPORT_SYMBOL_GPL(do_rw_taskfile);
+
+/*
+ * Handler for commands without a data phase
+ */
+static ide_startstop_t task_no_data_intr(ide_drive_t *drive)
+{
+ ide_hwif_t *hwif = drive->hwif;
+ ide_task_t *task = &hwif->task;
+ struct ide_taskfile *tf = &task->tf;
+ int custom = (task->tf_flags & IDE_TFLAG_CUSTOM_HANDLER) ? 1 : 0;
+ int retries = (custom && tf->command == ATA_CMD_INIT_DEV_PARAMS) ? 5 : 1;
+ u8 stat;
+
+ local_irq_enable_in_hardirq();
+
+ while (1) {
+ stat = hwif->tp_ops->read_status(hwif);
+ if ((stat & ATA_BUSY) == 0 || retries-- == 0)
+ break;
+ udelay(10);
+ }
+
+ if (!OK_STAT(stat, ATA_DRDY, BAD_STAT)) {
+ if (custom && tf->command == ATA_CMD_SET_MULTI) {
+ drive->mult_req = drive->mult_count = 0;
+ drive->special.b.recalibrate = 1;
+ (void)ide_dump_status(drive, __func__, stat);
+ return ide_stopped;
+ } else if (custom && tf->command == ATA_CMD_INIT_DEV_PARAMS) {
+ if ((stat & (ATA_ERR | ATA_DRQ)) == 0) {
+ ide_set_handler(drive, &task_no_data_intr,
+ WAIT_WORSTCASE, NULL);
+ return ide_started;
+ }
+ }
+ return ide_error(drive, "task_no_data_intr", stat);
+ /* calls ide_end_drive_cmd */
+ }
+
+ if (!custom)
+ ide_end_drive_cmd(drive, stat, ide_read_error(drive));
+ else if (tf->command == ATA_CMD_IDLEIMMEDIATE) {
+ hwif->tp_ops->tf_read(drive, task);
+ if (tf->lbal != 0xc4) {
+ printk(KERN_ERR "%s: head unload failed!\n",
+ drive->name);
+ ide_tf_dump(drive->name, tf);
+ } else
+ drive->dev_flags |= IDE_DFLAG_PARKED;
+ ide_end_drive_cmd(drive, stat, ide_read_error(drive));
+ } else if (tf->command == ATA_CMD_SET_MULTI)
+ drive->mult_count = drive->mult_req;
+
+ return ide_stopped;
+}
+
+static u8 wait_drive_not_busy(ide_drive_t *drive)
+{
+ ide_hwif_t *hwif = drive->hwif;
+ int retries;
+ u8 stat;
+
+ /*
+ * Last sector was transferred, wait until the device is ready. This can
+ * take up to 6 ms on some ATAPI devices, so we will wait max 10 ms.
+ */
+ for (retries = 0; retries < 1000; retries++) {
+ stat = hwif->tp_ops->read_status(hwif);
+
+ if (stat & ATA_BUSY)
+ udelay(10);
+ else
+ break;
+ }
+
+ if (stat & ATA_BUSY)
+ printk(KERN_ERR "%s: drive still BUSY!\n", drive->name);
+
+ return stat;
+}
+
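+/*
+ * Transfer one sector of PIO data between the drive and the current
+ * scatter/gather position, advancing the sg cursor as we go.
+ */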
+static void ide_pio_sector(ide_drive_t *drive, struct request *rq,
+ unsigned int write)
+{
+ DEBUG_MSG("========= ide_pio start =========");
+ ide_hwif_t *hwif = drive->hwif;
+ struct scatterlist *sg = hwif->sg_table;
+ struct scatterlist *cursg = hwif->cursg;
+ struct page *page;
+#ifdef CONFIG_HIGHMEM
+ unsigned long flags;
+#endif
+ unsigned int offset;
+ u8 *buf;
+
+ cursg = hwif->cursg;
+ DEBUG_MSG("sg = %p, cursg %p", sg, cursg);
+ if (!cursg) {
+ cursg = sg;
+ hwif->cursg = sg;
+ }
+
+ page = sg_page(cursg);
+ offset = cursg->offset + hwif->cursg_ofs * SECTOR_SIZE;
+ DEBUG_MSG("page %p, offset %lx", page, offset);
+
+ /* get the current page and offset */
+ page = nth_page(page, (offset >> PAGE_SHIFT));
+ offset %= PAGE_SIZE;
+ DEBUG_MSG("== transform ==> page %p, offset %lx", page, offset);
+ DEBUG_MSG(" virt %p", page_address(page));
+
+
+#ifdef CONFIG_HIGHMEM
+ local_irq_save(flags);
+#endif
+ buf = kmap_atomic(page, KM_BIO_SRC_IRQ) + offset;
+ DEBUG_MSG("kmap_atomic(%p) = %p", page, buf);
+
+ hwif->nleft--;
+ hwif->cursg_ofs++;
+
+ if ((hwif->cursg_ofs * SECTOR_SIZE) == cursg->length) {
+ hwif->cursg = sg_next(hwif->cursg);
+ hwif->cursg_ofs = 0;
+ }
+
+ /* do the actual data transfer */
+ if (write)
+ hwif->tp_ops->output_data(drive, rq, buf, SECTOR_SIZE);
+ else
+ hwif->tp_ops->input_data(drive, rq, buf, SECTOR_SIZE);
+
+ kunmap_atomic(buf, KM_BIO_SRC_IRQ);
+#ifdef CONFIG_HIGHMEM
+ local_irq_restore(flags);
+#endif
+}
+
+static void ide_pio_multi(ide_drive_t *drive, struct request *rq,
+ unsigned int write)
+{
+ unsigned int nsect;
+
+ nsect = min_t(unsigned int, drive->hwif->nleft, drive->mult_count);
+ DEBUG_MSG("io on %d sectors (%d, %d)", nsect, drive->hwif->nleft, drive->mult_count);
+ while (nsect--)
+ ide_pio_sector(drive, rq, write);
+}
+
+static void ide_pio_datablock(ide_drive_t *drive, struct request *rq,
+ unsigned int write)
+{
+ u8 saved_io_32bit = drive->io_32bit;
+
+ if (rq->bio) /* fs request */
+ rq->errors = 0;
+
+ if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE) {
+ ide_task_t *task = rq->special;
+
+ if (task->tf_flags & IDE_TFLAG_IO_16BIT)
+ drive->io_32bit = 0;
+ }
+
+ touch_softlockup_watchdog();
+
+ DEBUG_MSG("data_phase : %s", drive->hwif->data_phase == TASKFILE_MULTI_IN ? "TASKFILE_MULTI_IN" :
+ drive->hwif->data_phase == TASKFILE_MULTI_OUT ? "TASKFILE_MULTI_OUT" :
+ "unknown");
+ switch (drive->hwif->data_phase) {
+ case TASKFILE_MULTI_IN:
+ case TASKFILE_MULTI_OUT:
+ ide_pio_multi(drive, rq, write);
+ break;
+ default:
+ ide_pio_sector(drive, rq, write);
+ break;
+ }
+
+ drive->io_32bit = saved_io_32bit;
+}
+
+static ide_startstop_t task_error(ide_drive_t *drive, struct request *rq,
+ const char *s, u8 stat)
+{
+ if (rq->bio) {
+ ide_hwif_t *hwif = drive->hwif;
+ int sectors = hwif->nsect - hwif->nleft;
+
+ switch (hwif->data_phase) {
+ case TASKFILE_IN:
+ if (hwif->nleft)
+ break;
+ /* fall through */
+ case TASKFILE_OUT:
+ sectors--;
+ break;
+ case TASKFILE_MULTI_IN:
+ if (hwif->nleft)
+ break;
+ /* fall through */
+ case TASKFILE_MULTI_OUT:
+ sectors -= drive->mult_count;
+ default:
+ break;
+ }
+
+ if (sectors > 0) {
+ struct ide_driver *drv;
+
+ drv = *(struct ide_driver **)rq->rq_disk->private_data;
+ drv->end_request(drive, 1, sectors);
+ }
+ }
+ return ide_error(drive, s, stat);
+}
+
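+/*
+ * Complete the request: taskfile requests finish through
+ * ide_end_drive_cmd(), filesystem requests through the owning driver's
+ * end_request() method.
+ */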
+void task_end_request(ide_drive_t *drive, struct request *rq, u8 stat)
+{
+ if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE) {
+ u8 err = ide_read_error(drive);
+
+ ide_end_drive_cmd(drive, stat, err);
+ return;
+ }
+
+ if (rq->rq_disk) {
+ struct ide_driver *drv;
+
+ drv = *(struct ide_driver **)rq->rq_disk->private_data;
+ drv->end_request(drive, 1, rq->nr_sectors);
+ } else
+ ide_end_request(drive, 1, rq->nr_sectors);
+}
+
+/*
+ * We got an interrupt on a task_in case, but no errors and no DRQ.
+ *
+ * It might be a spurious irq (shared irq), but it might be a
+ * command that had no output.
+ */
+static ide_startstop_t task_in_unexpected(ide_drive_t *drive, struct request *rq, u8 stat)
+{
+ /* Command all done? */
+ if (OK_STAT(stat, ATA_DRDY, ATA_BUSY)) {
+ task_end_request(drive, rq, stat);
+ return ide_stopped;
+ }
+
+ /* Assume it was a spurious irq */
+ ide_set_handler(drive, &task_in_intr, WAIT_WORSTCASE, NULL);
+ return ide_started;
+}
+
+/*
+ * Handler for command with PIO data-in phase (Read/Read Multiple).
+ */
+static ide_startstop_t task_in_intr(ide_drive_t *drive)
+{
+ ide_hwif_t *hwif = drive->hwif;
+ struct request *rq = hwif->rq;
+ u8 stat = hwif->tp_ops->read_status(hwif);
+
+ /* Error? */
+ if (stat & ATA_ERR)
+ return task_error(drive, rq, __func__, stat);
+
+ /* Didn't want any data? Odd. */
+ if ((stat & ATA_DRQ) == 0)
+ return task_in_unexpected(drive, rq, stat);
+
+ ide_pio_datablock(drive, rq, 0);
+
+ /* Are we done? Check status and finish transfer. */
+ if (!hwif->nleft) {
+ stat = wait_drive_not_busy(drive);
+ if (!OK_STAT(stat, 0, BAD_STAT))
+ return task_error(drive, rq, __func__, stat);
+ task_end_request(drive, rq, stat);
+ return ide_stopped;
+ }
+
+ /* Still data left to transfer. */
+ ide_set_handler(drive, &task_in_intr, WAIT_WORSTCASE, NULL);
+
+ return ide_started;
+}
+
+/*
+ * Handler for command with PIO data-out phase (Write/Write Multiple).
+ */
+static ide_startstop_t task_out_intr (ide_drive_t *drive)
+{
+ ide_hwif_t *hwif = drive->hwif;
+ struct request *rq = hwif->rq;
+ u8 stat = hwif->tp_ops->read_status(hwif);
+
+ if (!OK_STAT(stat, DRIVE_READY, drive->bad_wstat))
+ return task_error(drive, rq, __func__, stat);
+
+ /* Deal with unexpected ATA data phase. */
+ if (((stat & ATA_DRQ) == 0) ^ !hwif->nleft)
+ return task_error(drive, rq, __func__, stat);
+
+ if (!hwif->nleft) {
+ task_end_request(drive, rq, stat);
+ return ide_stopped;
+ }
+
+ /* Still data left to transfer. */
+ ide_pio_datablock(drive, rq, 1);
+ ide_set_handler(drive, &task_out_intr, WAIT_WORSTCASE, NULL);
+
+ return ide_started;
+}
+
+static ide_startstop_t pre_task_out_intr(ide_drive_t *drive, struct request *rq)
+{
+ ide_startstop_t startstop;
+
+ if (ide_wait_stat(&startstop, drive, ATA_DRQ,
+ drive->bad_wstat, WAIT_DRQ)) {
+ printk(KERN_ERR "%s: no DRQ after issuing %sWRITE%s\n",
+ drive->name, drive->hwif->data_phase ? "MULT" : "",
+ (drive->dev_flags & IDE_DFLAG_LBA48) ? "_EXT" : "");
+ return startstop;
+ }
+
+ if ((drive->dev_flags & IDE_DFLAG_UNMASK) == 0)
+ local_irq_disable();
+
+ ide_set_handler(drive, &task_out_intr, WAIT_WORSTCASE, NULL);
+ ide_pio_datablock(drive, rq, 1);
+
+ return ide_started;
+}
+
+int ide_raw_taskfile(ide_drive_t *drive, ide_task_t *task, u8 *buf, u16 nsect)
+{
+ struct request *rq;
+ int error;
+
+ rq = blk_get_request(drive->queue, READ, __GFP_WAIT);
+ rq->cmd_type = REQ_TYPE_ATA_TASKFILE;
+ rq->buffer = buf;
+
+	/*
+	 * (ks) We currently transfer only whole sectors.  This is
+	 * sufficient for now, but it would be nice to support arbitrary
+	 * transfer sizes, e.g. for special commands like READ LONG.
+	 */
+ rq->hard_nr_sectors = rq->nr_sectors = nsect;
+ rq->hard_cur_sectors = rq->current_nr_sectors = nsect;
+
+ if (task->tf_flags & IDE_TFLAG_WRITE)
+ rq->cmd_flags |= REQ_RW;
+
+ rq->special = task;
+ task->rq = rq;
+
+ error = blk_execute_rq(drive->queue, NULL, rq, 0);
+ blk_put_request(rq);
+
+ return error;
+}
+
+EXPORT_SYMBOL(ide_raw_taskfile);
+
+int ide_no_data_taskfile(ide_drive_t *drive, ide_task_t *task)
+{
+ task->data_phase = TASKFILE_NO_DATA;
+
+ return ide_raw_taskfile(drive, task, NULL, 0);
+}
+EXPORT_SYMBOL_GPL(ide_no_data_taskfile);
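+
+/*
+ * A minimal usage sketch (the specific command is illustrative only):
+ * a caller fills an ide_task_t on the stack and issues a non-data
+ * command through ide_no_data_taskfile(), e.g.
+ *
+ *	ide_task_t task;
+ *
+ *	memset(&task, 0, sizeof(task));
+ *	task.tf.command = ATA_CMD_FLUSH;
+ *	task.tf_flags   = IDE_TFLAG_OUT_TF | IDE_TFLAG_DEVICE;
+ *	(void)ide_no_data_taskfile(drive, &task);
+ */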
+
+#ifdef CONFIG_IDE_TASK_IOCTL
+int ide_taskfile_ioctl (ide_drive_t *drive, unsigned int cmd, unsigned long arg)
+{
+ ide_task_request_t *req_task;
+ ide_task_t args;
+ u8 *outbuf = NULL;
+ u8 *inbuf = NULL;
+ u8 *data_buf = NULL;
+ int err = 0;
+ int tasksize = sizeof(struct ide_task_request_s);
+ unsigned int taskin = 0;
+ unsigned int taskout = 0;
+ u16 nsect = 0;
+ char __user *buf = (char __user *)arg;
+
+// printk("IDE Taskfile ...\n");
+
+ req_task = kzalloc(tasksize, GFP_KERNEL);
+	if (req_task == NULL)
+		return -ENOMEM;
+ if (copy_from_user(req_task, buf, tasksize)) {
+ kfree(req_task);
+ return -EFAULT;
+ }
+
+ taskout = req_task->out_size;
+ taskin = req_task->in_size;
+
+ if (taskin > 65536 || taskout > 65536) {
+ err = -EINVAL;
+ goto abort;
+ }
+
+ if (taskout) {
+ int outtotal = tasksize;
+ outbuf = kzalloc(taskout, GFP_KERNEL);
+ if (outbuf == NULL) {
+ err = -ENOMEM;
+ goto abort;
+ }
+ if (copy_from_user(outbuf, buf + outtotal, taskout)) {
+ err = -EFAULT;
+ goto abort;
+ }
+ }
+
+ if (taskin) {
+ int intotal = tasksize + taskout;
+ inbuf = kzalloc(taskin, GFP_KERNEL);
+ if (inbuf == NULL) {
+ err = -ENOMEM;
+ goto abort;
+ }
+ if (copy_from_user(inbuf, buf + intotal, taskin)) {
+ err = -EFAULT;
+ goto abort;
+ }
+ }
+
+ memset(&args, 0, sizeof(ide_task_t));
+
+ memcpy(&args.tf_array[0], req_task->hob_ports, HDIO_DRIVE_HOB_HDR_SIZE - 2);
+ memcpy(&args.tf_array[6], req_task->io_ports, HDIO_DRIVE_TASK_HDR_SIZE);
+
+ args.data_phase = req_task->data_phase;
+
+ args.tf_flags = IDE_TFLAG_IO_16BIT | IDE_TFLAG_DEVICE |
+ IDE_TFLAG_IN_TF;
+ if (drive->dev_flags & IDE_DFLAG_LBA48)
+ args.tf_flags |= (IDE_TFLAG_LBA48 | IDE_TFLAG_IN_HOB);
+
+ if (req_task->out_flags.all) {
+ args.tf_flags |= IDE_TFLAG_FLAGGED;
+
+ if (req_task->out_flags.b.data)
+ args.tf_flags |= IDE_TFLAG_OUT_DATA;
+
+ if (req_task->out_flags.b.nsector_hob)
+ args.tf_flags |= IDE_TFLAG_OUT_HOB_NSECT;
+ if (req_task->out_flags.b.sector_hob)
+ args.tf_flags |= IDE_TFLAG_OUT_HOB_LBAL;
+ if (req_task->out_flags.b.lcyl_hob)
+ args.tf_flags |= IDE_TFLAG_OUT_HOB_LBAM;
+ if (req_task->out_flags.b.hcyl_hob)
+ args.tf_flags |= IDE_TFLAG_OUT_HOB_LBAH;
+
+ if (req_task->out_flags.b.error_feature)
+ args.tf_flags |= IDE_TFLAG_OUT_FEATURE;
+ if (req_task->out_flags.b.nsector)
+ args.tf_flags |= IDE_TFLAG_OUT_NSECT;
+ if (req_task->out_flags.b.sector)
+ args.tf_flags |= IDE_TFLAG_OUT_LBAL;
+ if (req_task->out_flags.b.lcyl)
+ args.tf_flags |= IDE_TFLAG_OUT_LBAM;
+ if (req_task->out_flags.b.hcyl)
+ args.tf_flags |= IDE_TFLAG_OUT_LBAH;
+ } else {
+ args.tf_flags |= IDE_TFLAG_OUT_TF;
+ if (args.tf_flags & IDE_TFLAG_LBA48)
+ args.tf_flags |= IDE_TFLAG_OUT_HOB;
+ }
+
+ if (req_task->in_flags.b.data)
+ args.tf_flags |= IDE_TFLAG_IN_DATA;
+
+ switch(req_task->data_phase) {
+ case TASKFILE_MULTI_OUT:
+ if (!drive->mult_count) {
+ /* (hs): give up if multcount is not set */
+			printk(KERN_ERR "%s: %s Multimode Write: "
+				"multcount is not set\n",
+ drive->name, __func__);
+ err = -EPERM;
+ goto abort;
+ }
+ /* fall through */
+ case TASKFILE_OUT:
+ /* fall through */
+ case TASKFILE_OUT_DMAQ:
+ case TASKFILE_OUT_DMA:
+ nsect = taskout / SECTOR_SIZE;
+ data_buf = outbuf;
+ break;
+ case TASKFILE_MULTI_IN:
+ if (!drive->mult_count) {
+ /* (hs): give up if multcount is not set */
+			printk(KERN_ERR "%s: %s Multimode Read failure: "
+				"multcount is not set\n",
+ drive->name, __func__);
+ err = -EPERM;
+ goto abort;
+ }
+ /* fall through */
+ case TASKFILE_IN:
+ /* fall through */
+ case TASKFILE_IN_DMAQ:
+ case TASKFILE_IN_DMA:
+ nsect = taskin / SECTOR_SIZE;
+ data_buf = inbuf;
+ break;
+ case TASKFILE_NO_DATA:
+ break;
+ default:
+ err = -EFAULT;
+ goto abort;
+ }
+
+ if (req_task->req_cmd == IDE_DRIVE_TASK_NO_DATA)
+ nsect = 0;
+ else if (!nsect) {
+ nsect = (args.tf.hob_nsect << 8) | args.tf.nsect;
+
+ if (!nsect) {
+ printk(KERN_ERR "%s: in/out command without data\n",
+ drive->name);
+ err = -EFAULT;
+ goto abort;
+ }
+ }
+
+ if (req_task->req_cmd == IDE_DRIVE_TASK_RAW_WRITE)
+ args.tf_flags |= IDE_TFLAG_WRITE;
+
+ err = ide_raw_taskfile(drive, &args, data_buf, nsect);
+
+ memcpy(req_task->hob_ports, &args.tf_array[0], HDIO_DRIVE_HOB_HDR_SIZE - 2);
+ memcpy(req_task->io_ports, &args.tf_array[6], HDIO_DRIVE_TASK_HDR_SIZE);
+
+ if ((args.tf_flags & IDE_TFLAG_FLAGGED_SET_IN_FLAGS) &&
+ req_task->in_flags.all == 0) {
+ req_task->in_flags.all = IDE_TASKFILE_STD_IN_FLAGS;
+ if (drive->dev_flags & IDE_DFLAG_LBA48)
+ req_task->in_flags.all |= (IDE_HOB_STD_IN_FLAGS << 8);
+ }
+
+ if (copy_to_user(buf, req_task, tasksize)) {
+ err = -EFAULT;
+ goto abort;
+ }
+ if (taskout) {
+ int outtotal = tasksize;
+ if (copy_to_user(buf + outtotal, outbuf, taskout)) {
+ err = -EFAULT;
+ goto abort;
+ }
+ }
+ if (taskin) {
+ int intotal = tasksize + taskout;
+ if (copy_to_user(buf + intotal, inbuf, taskin)) {
+ err = -EFAULT;
+ goto abort;
+ }
+ }
+abort:
+ kfree(req_task);
+ kfree(outbuf);
+ kfree(inbuf);
+
+// printk("IDE Taskfile ioctl ended. rc = %i\n", err);
+
+ return err;
+}
+#endif
diff --git a/windhoek/ide/ide.c b/windhoek/ide/ide.c
new file mode 100644
index 00000000..15eb62ed
--- /dev/null
+++ b/windhoek/ide/ide.c
@@ -0,0 +1,554 @@
+/*
+ * Copyright (C) 1994-1998 Linus Torvalds & authors (see below)
+ * Copyright (C) 2003-2005, 2007 Bartlomiej Zolnierkiewicz
+ */
+
+/*
+ * Mostly written by Mark Lord <mlord@pobox.com>
+ * and Gadi Oxman <gadio@netvision.net.il>
+ * and Andre Hedrick <andre@linux-ide.org>
+ *
+ * See linux/MAINTAINERS for address of current maintainer.
+ *
+ * This is the multiple IDE interface driver, as evolved from hd.c.
+ * It supports up to MAX_HWIFS IDE interfaces, on one or more IRQs
+ * (usually 14 & 15).
+ * There can be up to two drives per interface, as per the ATA-2 spec.
+ *
+ * ...
+ *
+ * From hd.c:
+ * |
+ * | It traverses the request-list, using interrupts to jump between functions.
+ * | As nearly all functions can be called within interrupts, we may not sleep.
+ * | Special care is recommended. Have Fun!
+ * |
+ * | modified by Drew Eckhardt to check nr of hd's from the CMOS.
+ * |
+ * | Thanks to Branko Lankester, lankeste@fwi.uva.nl, who found a bug
+ * | in the early extended-partition checks and added DM partitions.
+ * |
+ * | Early work on error handling by Mika Liljeberg (liljeber@cs.Helsinki.FI).
+ * |
+ * | IRQ-unmask, drive-id, multiple-mode, support for ">16 heads",
+ * | and general streamlining by Mark Lord (mlord@pobox.com).
+ *
+ * October, 1994 -- Complete line-by-line overhaul for linux 1.1.x, by:
+ *
+ * Mark Lord (mlord@pobox.com) (IDE Perf.Pkg)
+ * Delman Lee (delman@ieee.org) ("Mr. atdisk2")
+ * Scott Snyder (snyder@fnald0.fnal.gov) (ATAPI IDE cd-rom)
+ *
+ * This was a rewrite of just about everything from hd.c, though some original
+ * code is still sprinkled about. Think of it as a major evolution, with
+ * inspiration from lots of linux users, esp. hamish@zot.apana.org.au
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/major.h>
+#include <linux/errno.h>
+#include <linux/genhd.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/ide.h>
+#include <linux/hdreg.h>
+#include <linux/completion.h>
+#include <linux/device.h>
+
+struct class *ide_port_class;
+
+/*
+ * Locks for IDE setting functionality
+ */
+
+DEFINE_MUTEX(ide_setting_mtx);
+
+ide_devset_get(io_32bit, io_32bit);
+
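+/*
+ * The io_32bit values follow the hdparm -c convention: 0 selects 16-bit
+ * PIO transfers, 1 selects 32-bit transfers, and 3 selects 32-bit
+ * transfers with a VLB sync sequence (only accepted when SUPPORT_VLB_SYNC
+ * is set, per the range check below).
+ */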
+static int set_io_32bit(ide_drive_t *drive, int arg)
+{
+ if (drive->dev_flags & IDE_DFLAG_NO_IO_32BIT)
+ return -EPERM;
+
+ if (arg < 0 || arg > 1 + (SUPPORT_VLB_SYNC << 1))
+ return -EINVAL;
+
+ drive->io_32bit = arg;
+
+ return 0;
+}
+
+ide_devset_get_flag(ksettings, IDE_DFLAG_KEEP_SETTINGS);
+
+static int set_ksettings(ide_drive_t *drive, int arg)
+{
+ if (arg < 0 || arg > 1)
+ return -EINVAL;
+
+ if (arg)
+ drive->dev_flags |= IDE_DFLAG_KEEP_SETTINGS;
+ else
+ drive->dev_flags &= ~IDE_DFLAG_KEEP_SETTINGS;
+
+ return 0;
+}
+
+ide_devset_get_flag(using_dma, IDE_DFLAG_USING_DMA);
+
+static int set_using_dma(ide_drive_t *drive, int arg)
+{
+#ifdef CONFIG_BLK_DEV_IDEDMA
+ int err = -EPERM;
+
+ if (arg < 0 || arg > 1)
+ return -EINVAL;
+
+ if (ata_id_has_dma(drive->id) == 0)
+ goto out;
+
+ if (drive->hwif->dma_ops == NULL)
+ goto out;
+
+ err = 0;
+
+ if (arg) {
+ if (ide_set_dma(drive))
+ err = -EIO;
+ } else
+ ide_dma_off(drive);
+
+out:
+ return err;
+#else
+ if (arg < 0 || arg > 1)
+ return -EINVAL;
+
+ return -EPERM;
+#endif
+}
+
+/*
+ * handle HDIO_SET_PIO_MODE ioctl abusers here, eventually it will go away
+ */
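+/*
+ * The out-of-range "PIO" numbers are historical HDIO_SET_PIO_MODE
+ * conventions (inferred here from the host-flag names): 100-102 and
+ * 200-202 select DMA modes on hosts that tune DMA through the PIO hook,
+ * 8/9 toggle drive prefetch, and 6/7 toggle fast device select.
+ */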
+static int set_pio_mode_abuse(ide_hwif_t *hwif, u8 req_pio)
+{
+ switch (req_pio) {
+ case 202:
+ case 201:
+ case 200:
+ case 102:
+ case 101:
+ case 100:
+ return (hwif->host_flags & IDE_HFLAG_ABUSE_DMA_MODES) ? 1 : 0;
+ case 9:
+ case 8:
+ return (hwif->host_flags & IDE_HFLAG_ABUSE_PREFETCH) ? 1 : 0;
+ case 7:
+ case 6:
+ return (hwif->host_flags & IDE_HFLAG_ABUSE_FAST_DEVSEL) ? 1 : 0;
+ default:
+ return 0;
+ }
+}
+
+static int set_pio_mode(ide_drive_t *drive, int arg)
+{
+ ide_hwif_t *hwif = drive->hwif;
+ const struct ide_port_ops *port_ops = hwif->port_ops;
+
+ if (arg < 0 || arg > 255)
+ return -EINVAL;
+
+ if (port_ops == NULL || port_ops->set_pio_mode == NULL ||
+ (hwif->host_flags & IDE_HFLAG_NO_SET_MODE))
+ return -ENOSYS;
+
+ if (set_pio_mode_abuse(drive->hwif, arg)) {
+ if (arg == 8 || arg == 9) {
+ unsigned long flags;
+
+ /* take lock for IDE_DFLAG_[NO_]UNMASK/[NO_]IO_32BIT */
+ spin_lock_irqsave(&hwif->lock, flags);
+ port_ops->set_pio_mode(drive, arg);
+ spin_unlock_irqrestore(&hwif->lock, flags);
+ } else
+ port_ops->set_pio_mode(drive, arg);
+ } else {
+ int keep_dma = !!(drive->dev_flags & IDE_DFLAG_USING_DMA);
+
+ ide_set_pio(drive, arg);
+
+ if (hwif->host_flags & IDE_HFLAG_SET_PIO_MODE_KEEP_DMA) {
+ if (keep_dma)
+ ide_dma_on(drive);
+ }
+ }
+
+ return 0;
+}
+
+ide_devset_get_flag(unmaskirq, IDE_DFLAG_UNMASK);
+
+static int set_unmaskirq(ide_drive_t *drive, int arg)
+{
+ if (drive->dev_flags & IDE_DFLAG_NO_UNMASK)
+ return -EPERM;
+
+ if (arg < 0 || arg > 1)
+ return -EINVAL;
+
+ if (arg)
+ drive->dev_flags |= IDE_DFLAG_UNMASK;
+ else
+ drive->dev_flags &= ~IDE_DFLAG_UNMASK;
+
+ return 0;
+}
+
+ide_ext_devset_rw_sync(io_32bit, io_32bit);
+ide_ext_devset_rw_sync(keepsettings, ksettings);
+ide_ext_devset_rw_sync(unmaskirq, unmaskirq);
+ide_ext_devset_rw_sync(using_dma, using_dma);
+__IDE_DEVSET(pio_mode, DS_SYNC, NULL, set_pio_mode);
+
+/**
+ * ide_device_get - get an additional reference to a ide_drive_t
+ * @drive: device to get a reference to
+ *
+ * Gets a reference to the ide_drive_t and increments the use count of the
+ * underlying LLDD module.
+ */
+int ide_device_get(ide_drive_t *drive)
+{
+ struct device *host_dev;
+ struct module *module;
+
+ if (!get_device(&drive->gendev))
+ return -ENXIO;
+
+ host_dev = drive->hwif->host->dev[0];
+ module = host_dev ? host_dev->driver->owner : NULL;
+
+ if (module && !try_module_get(module)) {
+ put_device(&drive->gendev);
+ return -ENXIO;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ide_device_get);
+
+/**
+ * ide_device_put - release a reference to a ide_drive_t
+ * @drive: device to release a reference on
+ *
+ * Release a reference to the ide_drive_t and decrements the use count of
+ * the underlying LLDD module.
+ */
+void ide_device_put(ide_drive_t *drive)
+{
+#ifdef CONFIG_MODULE_UNLOAD
+ struct device *host_dev = drive->hwif->host->dev[0];
+ struct module *module = host_dev ? host_dev->driver->owner : NULL;
+
+ if (module)
+ module_put(module);
+#endif
+ put_device(&drive->gendev);
+}
+EXPORT_SYMBOL_GPL(ide_device_put);
+
+static int ide_bus_match(struct device *dev, struct device_driver *drv)
+{
+ return 1;
+}
+
+static int ide_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+ ide_drive_t *drive = to_ide_device(dev);
+
+ add_uevent_var(env, "MEDIA=%s", ide_media_string(drive));
+ add_uevent_var(env, "DRIVENAME=%s", drive->name);
+ add_uevent_var(env, "MODALIAS=ide:m-%s", ide_media_string(drive));
+ return 0;
+}
+
+static int generic_ide_probe(struct device *dev)
+{
+ ide_drive_t *drive = to_ide_device(dev);
+ struct ide_driver *drv = to_ide_driver(dev->driver);
+
+ return drv->probe ? drv->probe(drive) : -ENODEV;
+}
+
+static int generic_ide_remove(struct device *dev)
+{
+ ide_drive_t *drive = to_ide_device(dev);
+ struct ide_driver *drv = to_ide_driver(dev->driver);
+
+ if (drv->remove)
+ drv->remove(drive);
+
+ return 0;
+}
+
+static void generic_ide_shutdown(struct device *dev)
+{
+ ide_drive_t *drive = to_ide_device(dev);
+ struct ide_driver *drv = to_ide_driver(dev->driver);
+
+ if (dev->driver && drv->shutdown)
+ drv->shutdown(drive);
+}
+
+struct bus_type ide_bus_type = {
+ .name = "ide",
+ .match = ide_bus_match,
+ .uevent = ide_uevent,
+ .probe = generic_ide_probe,
+ .remove = generic_ide_remove,
+ .shutdown = generic_ide_shutdown,
+ .dev_attrs = ide_dev_attrs,
+#ifndef DDE_LINUX
+ .suspend = generic_ide_suspend,
+ .resume = generic_ide_resume,
+#endif
+};
+
+EXPORT_SYMBOL_GPL(ide_bus_type);
+
+int ide_vlb_clk;
+EXPORT_SYMBOL_GPL(ide_vlb_clk);
+
+module_param_named(vlb_clock, ide_vlb_clk, int, 0);
+MODULE_PARM_DESC(vlb_clock, "VLB clock frequency (in MHz)");
+
+int ide_pci_clk;
+EXPORT_SYMBOL_GPL(ide_pci_clk);
+
+module_param_named(pci_clock, ide_pci_clk, int, 0);
+MODULE_PARM_DESC(pci_clock, "PCI bus clock frequency (in MHz)");
+
+static int ide_set_dev_param_mask(const char *s, struct kernel_param *kp)
+{
+ int a, b, i, j = 1;
+ unsigned int *dev_param_mask = (unsigned int *)kp->arg;
+
+ /* controller . device (0 or 1) [ : 1 (set) | 0 (clear) ] */
+ if (sscanf(s, "%d.%d:%d", &a, &b, &j) != 3 &&
+ sscanf(s, "%d.%d", &a, &b) != 2)
+ return -EINVAL;
+
+ i = a * MAX_DRIVES + b;
+
+ if (i >= MAX_HWIFS * MAX_DRIVES || j < 0 || j > 1)
+ return -EINVAL;
+
+ if (j)
+ *dev_param_mask |= (1 << i);
+ else
+ *dev_param_mask &= ~(1 << i);
+
+ return 0;
+}
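+
+/*
+ * Per the sscanf() formats above, a string like "0.1" sets the mask bit
+ * for device 1 on interface 0, while "0.1:0" clears it again.  How the
+ * string reaches this parser (module parameter or kernel command line)
+ * depends on how this core is built.
+ */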
+
+static unsigned int ide_nodma;
+
+module_param_call(nodma, ide_set_dev_param_mask, NULL, &ide_nodma, 0);
+MODULE_PARM_DESC(nodma, "disallow DMA for a device");
+
+static unsigned int ide_noflush;
+
+module_param_call(noflush, ide_set_dev_param_mask, NULL, &ide_noflush, 0);
+MODULE_PARM_DESC(noflush, "disable flush requests for a device");
+
+static unsigned int ide_noprobe;
+
+module_param_call(noprobe, ide_set_dev_param_mask, NULL, &ide_noprobe, 0);
+MODULE_PARM_DESC(noprobe, "skip probing for a device");
+
+static unsigned int ide_nowerr;
+
+module_param_call(nowerr, ide_set_dev_param_mask, NULL, &ide_nowerr, 0);
+MODULE_PARM_DESC(nowerr, "ignore the ATA_DF bit for a device");
+
+static unsigned int ide_cdroms;
+
+module_param_call(cdrom, ide_set_dev_param_mask, NULL, &ide_cdroms, 0);
+MODULE_PARM_DESC(cdrom, "force device as a CD-ROM");
+
+struct chs_geom {
+ unsigned int cyl;
+ u8 head;
+ u8 sect;
+};
+
+static unsigned int ide_disks;
+static struct chs_geom ide_disks_chs[MAX_HWIFS * MAX_DRIVES];
+
+static int ide_set_disk_chs(const char *str, struct kernel_param *kp)
+{
+ int a, b, c = 0, h = 0, s = 0, i, j = 1;
+
+ /* controller . device (0 or 1) : Cylinders , Heads , Sectors */
+ /* controller . device (0 or 1) : 1 (use CHS) | 0 (ignore CHS) */
+ if (sscanf(str, "%d.%d:%d,%d,%d", &a, &b, &c, &h, &s) != 5 &&
+ sscanf(str, "%d.%d:%d", &a, &b, &j) != 3)
+ return -EINVAL;
+
+ i = a * MAX_DRIVES + b;
+
+ if (i >= MAX_HWIFS * MAX_DRIVES || j < 0 || j > 1)
+ return -EINVAL;
+
+ if (c > INT_MAX || h > 255 || s > 255)
+ return -EINVAL;
+
+ if (j)
+ ide_disks |= (1 << i);
+ else
+ ide_disks &= ~(1 << i);
+
+ ide_disks_chs[i].cyl = c;
+ ide_disks_chs[i].head = h;
+ ide_disks_chs[i].sect = s;
+
+ return 0;
+}
+
+module_param_call(chs, ide_set_disk_chs, NULL, NULL, 0);
+MODULE_PARM_DESC(chs, "force device as a disk (using CHS)");
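+
+/*
+ * For example, "0.0:980,16,32" forces device 0 on interface 0 to be
+ * treated as a disk with a 980/16/32 CHS geometry, while "0.0:0" clears
+ * the forced-disk flag again.
+ */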
+
+static void ide_dev_apply_params(ide_drive_t *drive, u8 unit)
+{
+ int i = drive->hwif->index * MAX_DRIVES + unit;
+
+ if (ide_nodma & (1 << i)) {
+ printk(KERN_INFO "ide: disallowing DMA for %s\n", drive->name);
+ drive->dev_flags |= IDE_DFLAG_NODMA;
+ }
+ if (ide_noflush & (1 << i)) {
+ printk(KERN_INFO "ide: disabling flush requests for %s\n",
+ drive->name);
+ drive->dev_flags |= IDE_DFLAG_NOFLUSH;
+ }
+ if (ide_noprobe & (1 << i)) {
+ printk(KERN_INFO "ide: skipping probe for %s\n", drive->name);
+ drive->dev_flags |= IDE_DFLAG_NOPROBE;
+ }
+ if (ide_nowerr & (1 << i)) {
+ printk(KERN_INFO "ide: ignoring the ATA_DF bit for %s\n",
+ drive->name);
+ drive->bad_wstat = BAD_R_STAT;
+ }
+ if (ide_cdroms & (1 << i)) {
+ printk(KERN_INFO "ide: forcing %s as a CD-ROM\n", drive->name);
+ drive->dev_flags |= IDE_DFLAG_PRESENT;
+ drive->media = ide_cdrom;
+ /* an ATAPI device ignores DRDY */
+ drive->ready_stat = 0;
+ }
+ if (ide_disks & (1 << i)) {
+ drive->cyl = drive->bios_cyl = ide_disks_chs[i].cyl;
+ drive->head = drive->bios_head = ide_disks_chs[i].head;
+ drive->sect = drive->bios_sect = ide_disks_chs[i].sect;
+
+ printk(KERN_INFO "ide: forcing %s as a disk (%d/%d/%d)\n",
+ drive->name,
+ drive->cyl, drive->head, drive->sect);
+
+ drive->dev_flags |= IDE_DFLAG_FORCED_GEOM | IDE_DFLAG_PRESENT;
+ drive->media = ide_disk;
+ drive->ready_stat = ATA_DRDY;
+ }
+}
+
+static unsigned int ide_ignore_cable;
+
+static int ide_set_ignore_cable(const char *s, struct kernel_param *kp)
+{
+ int i, j = 1;
+
+ /* controller (ignore) */
+ /* controller : 1 (ignore) | 0 (use) */
+ if (sscanf(s, "%d:%d", &i, &j) != 2 && sscanf(s, "%d", &i) != 1)
+ return -EINVAL;
+
+ if (i >= MAX_HWIFS || j < 0 || j > 1)
+ return -EINVAL;
+
+ if (j)
+ ide_ignore_cable |= (1 << i);
+ else
+ ide_ignore_cable &= ~(1 << i);
+
+ return 0;
+}
+
+module_param_call(ignore_cable, ide_set_ignore_cable, NULL, NULL, 0);
+MODULE_PARM_DESC(ignore_cable, "ignore cable detection");
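+
+/*
+ * For example, "1" (or "1:1") makes port 1 ignore cable detection and
+ * fall back to a 40-wire short cable, while "1:0" re-enables detection
+ * for that port.
+ */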
+
+void ide_port_apply_params(ide_hwif_t *hwif)
+{
+ ide_drive_t *drive;
+ int i;
+
+ if (ide_ignore_cable & (1 << hwif->index)) {
+ printk(KERN_INFO "ide: ignoring cable detection for %s\n",
+ hwif->name);
+ hwif->cbl = ATA_CBL_PATA40_SHORT;
+ }
+
+ ide_port_for_each_dev(i, drive, hwif)
+ ide_dev_apply_params(drive, i);
+}
+
+/*
+ * This gets invoked once during initialization, to set *everything* up.
+ */
+static int __init ide_init(void)
+{
+ int ret;
+
+ printk(KERN_INFO "Uniform Multi-Platform E-IDE driver\n");
+
+ ret = bus_register(&ide_bus_type);
+ if (ret < 0) {
+ printk(KERN_WARNING "IDE: bus_register error: %d\n", ret);
+ return ret;
+ }
+
+ ide_port_class = class_create(THIS_MODULE, "ide_port");
+ if (IS_ERR(ide_port_class)) {
+ ret = PTR_ERR(ide_port_class);
+ goto out_port_class;
+ }
+
+ proc_ide_create();
+
+ return 0;
+
+out_port_class:
+ bus_unregister(&ide_bus_type);
+
+ return ret;
+}
+
+static void __exit ide_exit(void)
+{
+ proc_ide_destroy();
+
+ class_destroy(ide_port_class);
+
+ bus_unregister(&ide_bus_type);
+}
+
+module_init(ide_init);
+module_exit(ide_exit);
+
+MODULE_LICENSE("GPL");
diff --git a/windhoek/ide/local.h b/windhoek/ide/local.h
new file mode 100644
index 00000000..c2d0952a
--- /dev/null
+++ b/windhoek/ide/local.h
@@ -0,0 +1,11 @@
+#ifndef LOCAL_H
+#define LOCAL_H
+
+#define DEBUG_MSG(msg, ...) {}
+
+// printk("%s: \033[31m"msg"\033[0m\n", __FUNCTION__, ##__VA_ARGS__)
+
+#define CONFIG_IDE_GD 1
+#define CONFIG_IDE_GD_ATA 1
+
+#endif
diff --git a/windhoek/ide/piix.c b/windhoek/ide/piix.c
new file mode 100644
index 00000000..f1e2e4ef
--- /dev/null
+++ b/windhoek/ide/piix.c
@@ -0,0 +1,482 @@
+/*
+ * Copyright (C) 1998-1999 Andrzej Krzysztofowicz, Author and Maintainer
+ * Copyright (C) 1998-2000 Andre Hedrick <andre@linux-ide.org>
+ * Copyright (C) 2003 Red Hat
+ * Copyright (C) 2006-2007 MontaVista Software, Inc. <source@mvista.com>
+ *
+ * May be copied or modified under the terms of the GNU General Public License
+ *
+ * Documentation:
+ *
+ * Publicly available from the Intel web site.  Errata documentation
+ * is also publicly available.  As an aid to anyone hacking on this
+ * driver, the list of relevant errata is below, going back to the
+ * PIIX4.  Older device documentation is now a bit tricky to find.
+ *
+ * Errata of note:
+ *
+ * Unfixable
+ * PIIX4 errata #9 - Only on ultra obscure hw
+ * ICH3 errata #13 - Not observed to affect real hw
+ * by Intel
+ *
+ * Things we must deal with
+ * PIIX4 errata #10 - BM IDE hang with non UDMA
+ * (must stop/start dma to recover)
+ * 440MX errata #15 - As PIIX4 errata #10
+ * PIIX4 errata #15 - Must not read control registers
+ * during a PIO transfer
+ * 440MX errata #13 - As PIIX4 errata #15
+ * ICH2 errata #21 - DMA mode 0 doesn't work right
+ * ICH0/1 errata #55 - As ICH2 errata #21
+ * ICH2 spec c #9 - Extra operations needed to handle
+ * drive hotswap [NOT YET SUPPORTED]
+ * ICH2 spec c #20 - IDE PRD must not cross a 64K boundary
+ * and must be dword aligned
+ * ICH2 spec c #24 - UDMA mode 4,5 t85/86 should be 6ns not 3.3
+ *
+ * Should have been BIOS fixed:
+ * 450NX: errata #19 - DMA hangs on old 450NX
+ * 450NX: errata #20 - DMA hangs on old 450NX
+ * 450NX: errata #25 - Corruption with DMA on old 450NX
+ * ICH3 errata #15 - IDE deadlock under high load
+ * (BIOS must set dev 31 fn 0 bit 23)
+ * ICH3 errata #18 - Don't use native mode
+ */
+
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/ide.h>
+#include <linux/init.h>
+
+#include <asm/io.h>
+
+#define DRV_NAME "piix"
+
+static int no_piix_dma;
+
+/**
+ * piix_set_pio_mode - set host controller for PIO mode
+ * @drive: drive
+ * @pio: PIO mode number
+ *
+ * Set the interface PIO mode based upon the settings done by AMI BIOS.
+ */
+
+static void piix_set_pio_mode(ide_drive_t *drive, const u8 pio)
+{
+ ide_hwif_t *hwif = drive->hwif;
+ struct pci_dev *dev = to_pci_dev(hwif->dev);
+ int is_slave = drive->dn & 1;
+ int master_port = hwif->channel ? 0x42 : 0x40;
+ int slave_port = 0x44;
+ unsigned long flags;
+ u16 master_data;
+ u8 slave_data;
+ static DEFINE_SPINLOCK(tune_lock);
+ int control = 0;
+
+ /* ISP RTC */
+	static const u8 timings[][2] = {
+ { 0, 0 },
+ { 0, 0 },
+ { 1, 0 },
+ { 2, 1 },
+ { 2, 3 }, };
+
+ /*
+ * Master vs slave is synchronized above us but the slave register is
+ * shared by the two hwifs so the corner case of two slave timeouts in
+ * parallel must be locked.
+ */
+ spin_lock_irqsave(&tune_lock, flags);
+ pci_read_config_word(dev, master_port, &master_data);
+
+ if (pio > 1)
+ control |= 1; /* Programmable timing on */
+ if (drive->media == ide_disk)
+ control |= 4; /* Prefetch, post write */
+ if (pio > 2)
+ control |= 2; /* IORDY */
+ if (is_slave) {
+ master_data |= 0x4000;
+ master_data &= ~0x0070;
+ if (pio > 1) {
+ /* Set PPE, IE and TIME */
+ master_data |= control << 4;
+ }
+ pci_read_config_byte(dev, slave_port, &slave_data);
+ slave_data &= hwif->channel ? 0x0f : 0xf0;
+ slave_data |= ((timings[pio][0] << 2) | timings[pio][1]) <<
+ (hwif->channel ? 4 : 0);
+ } else {
+ master_data &= ~0x3307;
+ if (pio > 1) {
+ /* enable PPE, IE and TIME */
+ master_data |= control;
+ }
+ master_data |= (timings[pio][0] << 12) | (timings[pio][1] << 8);
+ }
+ pci_write_config_word(dev, master_port, master_data);
+ if (is_slave)
+ pci_write_config_byte(dev, slave_port, slave_data);
+ spin_unlock_irqrestore(&tune_lock, flags);
+}
+
+/**
+ * piix_set_dma_mode - set host controller for DMA mode
+ * @drive: drive
+ * @speed: DMA mode
+ *
+ * Set a PIIX host controller to the desired DMA mode. This involves
+ * programming the right timing data into the PCI configuration space.
+ */
+
+static void piix_set_dma_mode(ide_drive_t *drive, const u8 speed)
+{
+ ide_hwif_t *hwif = drive->hwif;
+ struct pci_dev *dev = to_pci_dev(hwif->dev);
+ u8 maslave = hwif->channel ? 0x42 : 0x40;
+ int a_speed = 3 << (drive->dn * 4);
+ int u_flag = 1 << drive->dn;
+ int v_flag = 0x01 << drive->dn;
+ int w_flag = 0x10 << drive->dn;
+ int u_speed = 0;
+ int sitre;
+ u16 reg4042, reg4a;
+ u8 reg48, reg54, reg55;
+
+ pci_read_config_word(dev, maslave, &reg4042);
+ sitre = (reg4042 & 0x4000) ? 1 : 0;
+ pci_read_config_byte(dev, 0x48, &reg48);
+ pci_read_config_word(dev, 0x4a, &reg4a);
+ pci_read_config_byte(dev, 0x54, &reg54);
+ pci_read_config_byte(dev, 0x55, &reg55);
+
+ if (speed >= XFER_UDMA_0) {
+ u8 udma = speed - XFER_UDMA_0;
+
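+		/*
+		 * For example, XFER_UDMA_4 on the second device (dn == 1)
+		 * gives min(2 - 0, 4) = 2, shifted into bits 7:4 of the
+		 * UDMA timing word (value 0x20).
+		 */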
+ u_speed = min_t(u8, 2 - (udma & 1), udma) << (drive->dn * 4);
+
+ if (!(reg48 & u_flag))
+ pci_write_config_byte(dev, 0x48, reg48 | u_flag);
+ if (speed == XFER_UDMA_5) {
+ pci_write_config_byte(dev, 0x55, (u8) reg55|w_flag);
+ } else {
+ pci_write_config_byte(dev, 0x55, (u8) reg55 & ~w_flag);
+ }
+ if ((reg4a & a_speed) != u_speed)
+ pci_write_config_word(dev, 0x4a, (reg4a & ~a_speed) | u_speed);
+ if (speed > XFER_UDMA_2) {
+ if (!(reg54 & v_flag))
+ pci_write_config_byte(dev, 0x54, reg54 | v_flag);
+ } else
+ pci_write_config_byte(dev, 0x54, reg54 & ~v_flag);
+ } else {
+ const u8 mwdma_to_pio[] = { 0, 3, 4 };
+ u8 pio;
+
+ if (reg48 & u_flag)
+ pci_write_config_byte(dev, 0x48, reg48 & ~u_flag);
+ if (reg4a & a_speed)
+ pci_write_config_word(dev, 0x4a, reg4a & ~a_speed);
+ if (reg54 & v_flag)
+ pci_write_config_byte(dev, 0x54, reg54 & ~v_flag);
+ if (reg55 & w_flag)
+ pci_write_config_byte(dev, 0x55, (u8) reg55 & ~w_flag);
+
+ if (speed >= XFER_MW_DMA_0)
+ pio = mwdma_to_pio[speed - XFER_MW_DMA_0];
+ else
+ pio = 2; /* only SWDMA2 is allowed */
+
+ piix_set_pio_mode(drive, pio);
+ }
+}
+
+/**
+ * init_chipset_ich - set up the ICH chipset
+ * @dev: PCI device to set up
+ *
+ * Initialize the PCI device as required. For the ICH this turns
+ * out to be nice and simple.
+ */
+
+static unsigned int init_chipset_ich(struct pci_dev *dev)
+{
+ u32 extra = 0;
+
+ pci_read_config_dword(dev, 0x54, &extra);
+ pci_write_config_dword(dev, 0x54, extra | 0x400);
+
+ return 0;
+}
+
+/**
+ * ich_clear_irq - clear BMDMA status
+ * @drive: IDE drive
+ *
+ * ICHx controllers set the DMA INTR bit for both DMA and PIO commands,
+ * so the BMDMA status may need to be cleared even for PIO interrupts
+ * to prevent spurious/lost IRQs.
+ */
+static void ich_clear_irq(ide_drive_t *drive)
+{
+ ide_hwif_t *hwif = drive->hwif;
+ u8 dma_stat;
+
+ /*
+ * ide_dma_end() needs BMDMA status for error checking.
+ * So, skip clearing BMDMA status here and leave it
+	 * to ide_dma_end() if this is a DMA interrupt.
+ */
+ if (drive->waiting_for_dma || hwif->dma_base == 0)
+ return;
+
+ /* clear the INTR & ERROR bits */
+ dma_stat = inb(hwif->dma_base + ATA_DMA_STATUS);
+ /* Should we force the bit as well ? */
+ outb(dma_stat, hwif->dma_base + ATA_DMA_STATUS);
+}
+
+struct ich_laptop {
+ u16 device;
+ u16 subvendor;
+ u16 subdevice;
+};
+
+/*
+ * List of laptops that use short cables rather than 80 wire
+ */
+
+static const struct ich_laptop ich_laptop[] = {
+ /* devid, subvendor, subdev */
+ { 0x27DF, 0x1025, 0x0102 }, /* ICH7 on Acer 5602aWLMi */
+ { 0x27DF, 0x0005, 0x0280 }, /* ICH7 on Acer 5602WLMi */
+ { 0x27DF, 0x1025, 0x0110 }, /* ICH7 on Acer 3682WLMi */
+ { 0x27DF, 0x1043, 0x1267 }, /* ICH7 on Asus W5F */
+ { 0x27DF, 0x103C, 0x30A1 }, /* ICH7 on HP Compaq nc2400 */
+ { 0x27DF, 0x1071, 0xD221 }, /* ICH7 on Hercules EC-900 */
+ { 0x24CA, 0x1025, 0x0061 }, /* ICH4 on Acer Aspire 2023WLMi */
+ { 0x24CA, 0x1025, 0x003d }, /* ICH4 on ACER TM290 */
+ { 0x266F, 0x1025, 0x0066 }, /* ICH6 on ACER Aspire 1694WLMi */
+ { 0x2653, 0x1043, 0x82D8 }, /* ICH6M on Asus Eee 701 */
+ /* end marker */
+ { 0, }
+};
+
+static u8 piix_cable_detect(ide_hwif_t *hwif)
+{
+ struct pci_dev *pdev = to_pci_dev(hwif->dev);
+ const struct ich_laptop *lap = &ich_laptop[0];
+ u8 reg54h = 0, mask = hwif->channel ? 0xc0 : 0x30;
+
+ /* check for specials */
+ while (lap->device) {
+ if (lap->device == pdev->device &&
+ lap->subvendor == pdev->subsystem_vendor &&
+ lap->subdevice == pdev->subsystem_device) {
+ return ATA_CBL_PATA40_SHORT;
+ }
+ lap++;
+ }
+
+ pci_read_config_byte(pdev, 0x54, &reg54h);
+
+ return (reg54h & mask) ? ATA_CBL_PATA80 : ATA_CBL_PATA40;
+}
+
+/**
+ * init_hwif_piix - fill in the hwif for the PIIX
+ * @hwif: IDE interface
+ *
+ * Set up the ide_hwif_t for the PIIX interface according to the
+ * capabilities of the hardware.
+ */
+
+static void __devinit init_hwif_piix(ide_hwif_t *hwif)
+{
+ if (!hwif->dma_base)
+ return;
+
+ if (no_piix_dma)
+ hwif->ultra_mask = hwif->mwdma_mask = hwif->swdma_mask = 0;
+}
+
+static const struct ide_port_ops piix_port_ops = {
+ .set_pio_mode = piix_set_pio_mode,
+ .set_dma_mode = piix_set_dma_mode,
+ .cable_detect = piix_cable_detect,
+};
+
+static const struct ide_port_ops ich_port_ops = {
+ .set_pio_mode = piix_set_pio_mode,
+ .set_dma_mode = piix_set_dma_mode,
+ .clear_irq = ich_clear_irq,
+ .cable_detect = piix_cable_detect,
+};
+
+#ifndef CONFIG_IA64
+ #define IDE_HFLAGS_PIIX IDE_HFLAG_LEGACY_IRQS
+#else
+ #define IDE_HFLAGS_PIIX 0
+#endif
+
+#define DECLARE_PIIX_DEV(udma) \
+ { \
+ .name = DRV_NAME, \
+ .init_hwif = init_hwif_piix, \
+ .enablebits = {{0x41,0x80,0x80}, {0x43,0x80,0x80}}, \
+ .port_ops = &piix_port_ops, \
+ .host_flags = IDE_HFLAGS_PIIX, \
+ .pio_mask = ATA_PIO4, \
+ .swdma_mask = ATA_SWDMA2_ONLY, \
+ .mwdma_mask = ATA_MWDMA12_ONLY, \
+ .udma_mask = udma, \
+ }
+
+#define DECLARE_ICH_DEV(udma) \
+ { \
+ .name = DRV_NAME, \
+ .init_chipset = init_chipset_ich, \
+ .init_hwif = init_hwif_piix, \
+ .enablebits = {{0x41,0x80,0x80}, {0x43,0x80,0x80}}, \
+ .port_ops = &ich_port_ops, \
+ .host_flags = IDE_HFLAGS_PIIX, \
+ .pio_mask = ATA_PIO4, \
+ .swdma_mask = ATA_SWDMA2_ONLY, \
+ .mwdma_mask = ATA_MWDMA12_ONLY, \
+ .udma_mask = udma, \
+ }
+
+static const struct ide_port_info piix_pci_info[] __devinitdata = {
+ /* 0: MPIIX */
+ { /*
+ * MPIIX actually has only a single IDE channel mapped to
+ * the primary or secondary ports depending on the value
+	   * of bit 14 of the IDETIM register at offset 0x6c
+ */
+ .name = DRV_NAME,
+ .enablebits = {{0x6d,0xc0,0x80}, {0x6d,0xc0,0xc0}},
+ .host_flags = IDE_HFLAG_ISA_PORTS | IDE_HFLAG_NO_DMA |
+ IDE_HFLAGS_PIIX,
+ .pio_mask = ATA_PIO4,
+	/* This is a painful system; best to let it self-tune for now */
+ },
+ /* 1: PIIXa/PIIXb/PIIX3 */
+ DECLARE_PIIX_DEV(0x00), /* no udma */
+ /* 2: PIIX4 */
+ DECLARE_PIIX_DEV(ATA_UDMA2),
+ /* 3: ICH0 */
+ DECLARE_ICH_DEV(ATA_UDMA2),
+ /* 4: ICH */
+ DECLARE_ICH_DEV(ATA_UDMA4),
+ /* 5: PIIX4 */
+ DECLARE_PIIX_DEV(ATA_UDMA4),
+ /* 6: ICH[2-7]/ICH[2-3]M/C-ICH/ICH5-SATA/ESB2/ICH8M */
+ DECLARE_ICH_DEV(ATA_UDMA5),
+};
+
+/**
+ * piix_init_one - called when a PIIX is found
+ * @dev: the piix device
+ * @id: the matching pci id
+ *
+ * Called when the PCI registration layer (or the IDE initialization)
+ * finds a device matching our IDE device tables.
+ */
+
+static int __devinit piix_init_one(struct pci_dev *dev, const struct pci_device_id *id)
+{
+ return ide_pci_init_one(dev, &piix_pci_info[id->driver_data], NULL);
+}
+
+/**
+ * piix_check_450nx - Check for problem 450NX setup
+ *
+ * Check for the presence of 450NX errata #19 and errata #25. If
+ * they are found, disable the use of IDE DMA.
+ */
+
+static void __devinit piix_check_450nx(void)
+{
+ struct pci_dev *pdev = NULL;
+ u16 cfg;
+
+	while ((pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
+				      PCI_DEVICE_ID_INTEL_82454NX,
+				      pdev)) != NULL) {
+		/*
+		 * Look for the 450NX PXB and check for problem
+		 * configurations.  A PCI quirk checks bit 6 already.
+		 */
+		pci_read_config_word(pdev, 0x41, &cfg);
+		/* Only on the original revision: IDE DMA can hang */
+		if (pdev->revision == 0x00)
+			no_piix_dma = 1;
+		/* On all revisions below 5 PXB bus lock must be disabled for IDE */
+		else if (cfg & (1 << 14) && pdev->revision < 5)
+			no_piix_dma = 2;
+	}
+	if (no_piix_dma)
+		printk(KERN_WARNING DRV_NAME ": 450NX errata present, disabling IDE DMA.\n");
+	if (no_piix_dma == 2)
+		printk(KERN_WARNING DRV_NAME ": A BIOS update may resolve this.\n");
+}
+
+static const struct pci_device_id piix_pci_tbl[] = {
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82371FB_0), 1 },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82371FB_1), 1 },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82371MX), 0 },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82371SB_1), 1 },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82371AB), 2 },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82801AB_1), 3 },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82443MX_1), 2 },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82801AA_1), 4 },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82372FB_1), 5 },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82451NX), 2 },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82801BA_9), 6 },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82801BA_8), 6 },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82801CA_10), 6 },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82801CA_11), 6 },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82801DB_11), 6 },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82801EB_11), 6 },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82801E_11), 6 },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82801DB_10), 6 },
+#ifdef CONFIG_BLK_DEV_IDE_SATA
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82801EB_1), 6 },
+#endif
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ESB_2), 6 },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ICH6_19), 6 },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ICH7_21), 6 },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82801DB_1), 6 },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ESB2_18), 6 },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ICH8_6), 6 },
+ { 0, },
+};
+MODULE_DEVICE_TABLE(pci, piix_pci_tbl);
+
+static struct pci_driver piix_pci_driver = {
+ .name = "PIIX_IDE",
+ .id_table = piix_pci_tbl,
+ .probe = piix_init_one,
+ .remove = ide_pci_remove,
+ .suspend = ide_pci_suspend,
+ .resume = ide_pci_resume,
+};
+
+static int __init piix_ide_init(void)
+{
+ piix_check_450nx();
+ return ide_pci_register_driver(&piix_pci_driver);
+}
+
+static void __exit piix_ide_exit(void)
+{
+ pci_unregister_driver(&piix_pci_driver);
+}
+
+module_init(piix_ide_init);
+module_exit(piix_ide_exit);
+
+MODULE_AUTHOR("Andre Hedrick, Andrzej Krzysztofowicz");
+MODULE_DESCRIPTION("PCI driver module for Intel PIIX IDE");
+MODULE_LICENSE("GPL");
diff --git a/windhoek/ide/setup-pci.c b/windhoek/ide/setup-pci.c
new file mode 100644
index 00000000..e85d1ed2
--- /dev/null
+++ b/windhoek/ide/setup-pci.c
@@ -0,0 +1,694 @@
+/*
+ * Copyright (C) 1998-2000 Andre Hedrick <andre@linux-ide.org>
+ * Copyright (C) 1995-1998 Mark Lord
+ * Copyright (C) 2007 Bartlomiej Zolnierkiewicz
+ *
+ * May be copied or modified under the terms of the GNU General Public License
+ */
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/ide.h>
+#include <linux/dma-mapping.h>
+
+#include <asm/io.h>
+
+/**
+ * ide_setup_pci_baseregs - place a PCI IDE controller into native mode
+ * @dev: PCI device of the interface to switch to native mode
+ * @name: Name of interface
+ *
+ * We attempt to place the PCI interface into PCI native mode. If
+ * we succeed the BARs are ok and the controller is in PCI mode.
+ * Returns 0 on success or an errno code.
+ *
+ * FIXME: if we program the interface and then fail to set the BARs we
+ * don't switch it back to legacy mode. Do we actually care?
+ */
+
+static int ide_setup_pci_baseregs(struct pci_dev *dev, const char *name)
+{
+ u8 progif = 0;
+
+ /*
+ * Place both IDE interfaces into PCI "native" mode:
+ */
+ if (pci_read_config_byte(dev, PCI_CLASS_PROG, &progif) ||
+ (progif & 5) != 5) {
+ if ((progif & 0xa) != 0xa) {
+ printk(KERN_INFO "%s %s: device not capable of full "
+ "native PCI mode\n", name, pci_name(dev));
+ return -EOPNOTSUPP;
+ }
+ printk(KERN_INFO "%s %s: placing both ports into native PCI "
+ "mode\n", name, pci_name(dev));
+ (void) pci_write_config_byte(dev, PCI_CLASS_PROG, progif|5);
+ if (pci_read_config_byte(dev, PCI_CLASS_PROG, &progif) ||
+ (progif & 5) != 5) {
+ printk(KERN_ERR "%s %s: rewrite of PROGIF failed, "
+ "wanted 0x%04x, got 0x%04x\n",
+ name, pci_name(dev), progif | 5, progif);
+ return -EOPNOTSUPP;
+ }
+ }
+ return 0;
+}
+
+#ifdef CONFIG_BLK_DEV_IDEDMA_PCI
+static int ide_pci_clear_simplex(unsigned long dma_base, const char *name)
+{
+ u8 dma_stat = inb(dma_base + 2);
+
+ outb(dma_stat & 0x60, dma_base + 2);
+ dma_stat = inb(dma_base + 2);
+
+ return (dma_stat & 0x80) ? 1 : 0;
+}
+
+/**
+ * ide_pci_dma_base - setup BMIBA
+ * @hwif: IDE interface
+ * @d: IDE port info
+ *
+ * Fetch the DMA Bus-Master-I/O-Base-Address (BMIBA) from PCI space.
+ */
+
+unsigned long ide_pci_dma_base(ide_hwif_t *hwif, const struct ide_port_info *d)
+{
+ struct pci_dev *dev = to_pci_dev(hwif->dev);
+ unsigned long dma_base = 0;
+
+ if (hwif->host_flags & IDE_HFLAG_MMIO)
+ return hwif->dma_base;
+
+ if (hwif->mate && hwif->mate->dma_base) {
+ dma_base = hwif->mate->dma_base - (hwif->channel ? 0 : 8);
+ } else {
+ u8 baridx = (d->host_flags & IDE_HFLAG_CS5520) ? 2 : 4;
+
+ dma_base = pci_resource_start(dev, baridx);
+
+ if (dma_base == 0) {
+ printk(KERN_ERR "%s %s: DMA base is invalid\n",
+ d->name, pci_name(dev));
+ return 0;
+ }
+ }
+
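+	/*
+	 * The SFF-8038i bus-master register block is 16 bytes long: the
+	 * first 8 bytes serve the primary channel and the second 8 the
+	 * secondary one, hence the +8 for channel 1.
+	 */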
+ if (hwif->channel)
+ dma_base += 8;
+
+ return dma_base;
+}
+EXPORT_SYMBOL_GPL(ide_pci_dma_base);
+
+int ide_pci_check_simplex(ide_hwif_t *hwif, const struct ide_port_info *d)
+{
+ struct pci_dev *dev = to_pci_dev(hwif->dev);
+ u8 dma_stat;
+
+ if (d->host_flags & (IDE_HFLAG_MMIO | IDE_HFLAG_CS5520))
+ goto out;
+
+ if (d->host_flags & IDE_HFLAG_CLEAR_SIMPLEX) {
+ if (ide_pci_clear_simplex(hwif->dma_base, d->name))
+ printk(KERN_INFO "%s %s: simplex device: DMA forced\n",
+ d->name, pci_name(dev));
+ goto out;
+ }
+
+ /*
+ * If the device claims "simplex" DMA, this means that only one of
+ * the two interfaces can be trusted with DMA at any point in time
+ * (so we should enable DMA only on one of the two interfaces).
+ *
+ * FIXME: At this point we haven't probed the drives so we can't make
+ * the appropriate decision. Really we should defer this problem until
+ * we tune the drive then try to grab DMA ownership if we want to be
+	 * the DMA end. This has to become dynamic to handle hot-plug.
+ */
+ dma_stat = hwif->dma_ops->dma_sff_read_status(hwif);
+ if ((dma_stat & 0x80) && hwif->mate && hwif->mate->dma_base) {
+ printk(KERN_INFO "%s %s: simplex device: DMA disabled\n",
+ d->name, pci_name(dev));
+ return -1;
+ }
+out:
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ide_pci_check_simplex);
+
+/*
+ * Set up BM-DMA capability (PnP BIOS should have done this)
+ */
+int ide_pci_set_master(struct pci_dev *dev, const char *name)
+{
+ u16 pcicmd;
+
+ pci_read_config_word(dev, PCI_COMMAND, &pcicmd);
+
+ if ((pcicmd & PCI_COMMAND_MASTER) == 0) {
+ pci_set_master(dev);
+
+ if (pci_read_config_word(dev, PCI_COMMAND, &pcicmd) ||
+ (pcicmd & PCI_COMMAND_MASTER) == 0) {
+ printk(KERN_ERR "%s %s: error updating PCICMD\n",
+ name, pci_name(dev));
+ return -EIO;
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ide_pci_set_master);
+#endif /* CONFIG_BLK_DEV_IDEDMA_PCI */
+
+void ide_setup_pci_noise(struct pci_dev *dev, const struct ide_port_info *d)
+{
+ printk(KERN_INFO "%s %s: IDE controller (0x%04x:0x%04x rev 0x%02x)\n",
+ d->name, pci_name(dev),
+ dev->vendor, dev->device, dev->revision);
+}
+EXPORT_SYMBOL_GPL(ide_setup_pci_noise);
+
+
+/**
+ * ide_pci_enable - do PCI enables
+ * @dev: PCI device
+ * @d: IDE port info
+ *
+ * Enable the IDE PCI device. We attempt to enable the device in full
+ * but if that fails then we only need IO space. The PCI code should
+ * have set up the proper resources for us already for controllers in
+ * legacy mode.
+ *
+ * Returns zero on success or an error code
+ */
+
+static int ide_pci_enable(struct pci_dev *dev, const struct ide_port_info *d)
+{
+ int ret, bars;
+
+ if (pci_enable_device(dev)) {
+ ret = pci_enable_device_io(dev);
+ if (ret < 0) {
+ printk(KERN_WARNING "%s %s: couldn't enable device\n",
+ d->name, pci_name(dev));
+ goto out;
+ }
+ printk(KERN_WARNING "%s %s: BIOS configuration fixed\n",
+ d->name, pci_name(dev));
+ }
+
+ /*
+ * assume all devices can do 32-bit DMA for now, we can add
+ * a DMA mask field to the struct ide_port_info if we need it
+ * (or let lower level driver set the DMA mask)
+ */
+ ret = pci_set_dma_mask(dev, DMA_32BIT_MASK);
+ if (ret < 0) {
+ printk(KERN_ERR "%s %s: can't set DMA mask\n",
+ d->name, pci_name(dev));
+ goto out;
+ }
+
+ if (d->host_flags & IDE_HFLAG_SINGLE)
+ bars = (1 << 2) - 1;
+ else
+ bars = (1 << 4) - 1;
+
+ if ((d->host_flags & IDE_HFLAG_NO_DMA) == 0) {
+ if (d->host_flags & IDE_HFLAG_CS5520)
+ bars |= (1 << 2);
+ else
+ bars |= (1 << 4);
+ }
+
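+	/*
+	 * At this point 'bars' covers BARs 0-1 (IDE_HFLAG_SINGLE hosts) or
+	 * BARs 0-3, plus BAR 4 (or BAR 2 on CS5520) for the bus-master DMA
+	 * registers when DMA is not disabled.
+	 */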
+ ret = pci_request_selected_regions(dev, bars, d->name);
+ if (ret < 0)
+ printk(KERN_ERR "%s %s: can't reserve resources\n",
+ d->name, pci_name(dev));
+out:
+ return ret;
+}
+
+/**
+ * ide_pci_configure - configure an unconfigured device
+ * @dev: PCI device
+ * @d: IDE port info
+ *
+ * Enable and configure the PCI device we have been passed.
+ * Returns zero on success or an error code.
+ */
+
+static int ide_pci_configure(struct pci_dev *dev, const struct ide_port_info *d)
+{
+ u16 pcicmd = 0;
+ /*
+ * PnP BIOS was *supposed* to have setup this device, but we
+ * can do it ourselves, so long as the BIOS has assigned an IRQ
+ * (or possibly the device is using a "legacy header" for IRQs).
+ * Maybe the user deliberately *disabled* the device,
+ * but we'll eventually ignore it again if no drives respond.
+ */
+ if (ide_setup_pci_baseregs(dev, d->name) ||
+ pci_write_config_word(dev, PCI_COMMAND, pcicmd | PCI_COMMAND_IO)) {
+ printk(KERN_INFO "%s %s: device disabled (BIOS)\n",
+ d->name, pci_name(dev));
+ return -ENODEV;
+ }
+ if (pci_read_config_word(dev, PCI_COMMAND, &pcicmd)) {
+ printk(KERN_ERR "%s %s: error accessing PCI regs\n",
+ d->name, pci_name(dev));
+ return -EIO;
+ }
+ if (!(pcicmd & PCI_COMMAND_IO)) {
+ printk(KERN_ERR "%s %s: unable to enable IDE controller\n",
+ d->name, pci_name(dev));
+ return -ENXIO;
+ }
+ return 0;
+}
+
+/**
+ * ide_pci_check_iomem - check a register is I/O
+ * @dev: PCI device
+ * @d: IDE port info
+ * @bar: BAR number
+ *
+ * Check whether a BAR is configured and points to MMIO space. If so,
+ * return an error code; otherwise return 0.
+ */
+
+static int ide_pci_check_iomem(struct pci_dev *dev, const struct ide_port_info *d,
+ int bar)
+{
+ ulong flags = pci_resource_flags(dev, bar);
+
+ /* Unconfigured ? */
+ if (!flags || pci_resource_len(dev, bar) == 0)
+ return 0;
+
+ /* I/O space */
+ if (flags & IORESOURCE_IO)
+ return 0;
+
+ /* Bad */
+ return -EINVAL;
+}
+
+/**
+ * ide_hw_configure - configure a hw_regs_t instance
+ * @dev: PCI device holding interface
+ * @d: IDE port info
+ * @port: port number
+ * @irq: PCI IRQ
+ * @hw: hw_regs_t instance corresponding to this port
+ *
+ * Perform the initial set up for the hardware interface structure. This
+ * is done per interface port rather than per PCI device. There may be
+ * more than one port per device.
+ *
+ * Returns zero on success or an error code.
+ */
+
+static int ide_hw_configure(struct pci_dev *dev, const struct ide_port_info *d,
+ unsigned int port, int irq, hw_regs_t *hw)
+{
+ unsigned long ctl = 0, base = 0;
+
+ if ((d->host_flags & IDE_HFLAG_ISA_PORTS) == 0) {
+ if (ide_pci_check_iomem(dev, d, 2 * port) ||
+ ide_pci_check_iomem(dev, d, 2 * port + 1)) {
+ printk(KERN_ERR "%s %s: I/O baseregs (BIOS) are "
+ "reported as MEM for port %d!\n",
+ d->name, pci_name(dev), port);
+ return -EINVAL;
+ }
+
+ ctl = pci_resource_start(dev, 2*port+1);
+ base = pci_resource_start(dev, 2*port);
+ } else {
+ /* Use default values */
+ ctl = port ? 0x374 : 0x3f4;
+ base = port ? 0x170 : 0x1f0;
+ }
+
+ if (!base || !ctl) {
+ printk(KERN_ERR "%s %s: bad PCI BARs for port %d, skipping\n",
+ d->name, pci_name(dev), port);
+ return -EINVAL;
+ }
+
+ memset(hw, 0, sizeof(*hw));
+ hw->irq = irq;
+ hw->dev = &dev->dev;
+ hw->chipset = d->chipset ? d->chipset : ide_pci;
+ ide_std_init_ports(hw, base, ctl | 2);
+
+ return 0;
+}
+
+#ifdef CONFIG_BLK_DEV_IDEDMA_PCI
+/**
+ * ide_hwif_setup_dma - configure DMA interface
+ * @hwif: IDE interface
+ * @d: IDE port info
+ *
+ * Set up the DMA base for the interface. Enable the master bits as
+ * necessary and attempt to bring the device DMA into a ready to use
+ * necessary and attempt to bring the device DMA into a ready-to-use
+ * state.
+
+int ide_hwif_setup_dma(ide_hwif_t *hwif, const struct ide_port_info *d)
+{
+ struct pci_dev *dev = to_pci_dev(hwif->dev);
+
+ if ((d->host_flags & IDE_HFLAG_NO_AUTODMA) == 0 ||
+ ((dev->class >> 8) == PCI_CLASS_STORAGE_IDE &&
+ (dev->class & 0x80))) {
+ unsigned long base = ide_pci_dma_base(hwif, d);
+
+ if (base == 0)
+ return -1;
+
+ hwif->dma_base = base;
+
+ if (hwif->dma_ops == NULL)
+ hwif->dma_ops = &sff_dma_ops;
+
+ if (ide_pci_check_simplex(hwif, d) < 0)
+ return -1;
+
+ if (ide_pci_set_master(dev, d->name) < 0)
+ return -1;
+
+ if (hwif->host_flags & IDE_HFLAG_MMIO)
+ printk(KERN_INFO " %s: MMIO-DMA\n", hwif->name);
+ else
+ printk(KERN_INFO " %s: BM-DMA at 0x%04lx-0x%04lx\n",
+ hwif->name, base, base + 7);
+
+ hwif->extra_base = base + (hwif->channel ? 8 : 16);
+
+ if (ide_allocate_dma_engine(hwif))
+ return -1;
+ }
+
+ return 0;
+}
+#endif /* CONFIG_BLK_DEV_IDEDMA_PCI */
+
+/**
+ * ide_setup_pci_controller - set up IDE PCI
+ * @dev: PCI device
+ * @d: IDE port info
+ * @noisy: verbose flag
+ *
+ * Set up the PCI and controller side of the IDE interface. This brings
+ * up the PCI side of the device, checks that the device is enabled
+ * and enables it if need be
+ */
+
+static int ide_setup_pci_controller(struct pci_dev *dev,
+ const struct ide_port_info *d, int noisy)
+{
+ int ret;
+ u16 pcicmd;
+
+ if (noisy)
+ ide_setup_pci_noise(dev, d);
+
+ ret = ide_pci_enable(dev, d);
+ if (ret < 0)
+ goto out;
+
+ ret = pci_read_config_word(dev, PCI_COMMAND, &pcicmd);
+ if (ret < 0) {
+ printk(KERN_ERR "%s %s: error accessing PCI regs\n",
+ d->name, pci_name(dev));
+ goto out;
+ }
+ if (!(pcicmd & PCI_COMMAND_IO)) { /* is device disabled? */
+ ret = ide_pci_configure(dev, d);
+ if (ret < 0)
+ goto out;
+ printk(KERN_INFO "%s %s: device enabled (Linux)\n",
+ d->name, pci_name(dev));
+ }
+
+out:
+ return ret;
+}
+
+/**
+ * ide_pci_setup_ports - configure ports/devices on PCI IDE
+ * @dev: PCI device
+ * @d: IDE port info
+ * @pciirq: IRQ line
+ * @hw: hw_regs_t instances corresponding to this PCI IDE device
+ * @hws: hw_regs_t pointers table to update
+ *
+ * Scan the interfaces attached to this device and do any
+ * necessary per port setup. Attach the devices and ask the
+ * generic DMA layer to do its work for us.
+ *
+ * Normally called automatically from do_ide_setup_pci_device(),
+ * but is also used directly as a helper function by some controllers
+ * where the chipset setup is not the default PCI IDE one.
+ */
+
+void ide_pci_setup_ports(struct pci_dev *dev, const struct ide_port_info *d,
+ int pciirq, hw_regs_t *hw, hw_regs_t **hws)
+{
+ int channels = (d->host_flags & IDE_HFLAG_SINGLE) ? 1 : 2, port;
+ u8 tmp;
+
+ /*
+ * Set up the IDE ports
+ */
+
+ for (port = 0; port < channels; ++port) {
+ const struct ide_pci_enablebit *e = &d->enablebits[port];
+
+ if (e->reg && (pci_read_config_byte(dev, e->reg, &tmp) ||
+ (tmp & e->mask) != e->val)) {
+ printk(KERN_INFO "%s %s: IDE port disabled\n",
+ d->name, pci_name(dev));
+ continue; /* port not enabled */
+ }
+
+ if (ide_hw_configure(dev, d, port, pciirq, hw + port))
+ continue;
+
+ *(hws + port) = hw + port;
+ }
+}
+EXPORT_SYMBOL_GPL(ide_pci_setup_ports);
+
+/*
+ * ide_setup_pci_device() looks at the primary/secondary interfaces
+ * on a PCI IDE device and, if they are enabled, prepares the IDE driver
+ * for use with them. This generic code works for most PCI chipsets.
+ *
+ * One thing that is not standardized is the location of the
+ * primary/secondary interface "enable/disable" bits. For chipsets that
+ * we "know" about, this information is in the struct ide_port_info;
+ * for all other chipsets, we just assume both interfaces are enabled.
+ */
+static int do_ide_setup_pci_device(struct pci_dev *dev,
+ const struct ide_port_info *d,
+ u8 noisy)
+{
+ int pciirq, ret;
+
+ /*
+ * Can we trust the reported IRQ?
+ */
+ pciirq = dev->irq;
+
+ /*
+	 * This allows offboard ide-pci cards to enable a BIOS,
+	 * verify interrupt settings of split-mirror pci-config
+	 * space, place the chipset into init mode, and/or preserve
+	 * an interrupt if the card does not support native IDE.
+ */
+ ret = d->init_chipset ? d->init_chipset(dev) : 0;
+ if (ret < 0)
+ goto out;
+
+ if (ide_pci_is_in_compatibility_mode(dev)) {
+ if (noisy)
+ printk(KERN_INFO "%s %s: not 100%% native mode: will "
+ "probe irqs later\n", d->name, pci_name(dev));
+ pciirq = ret;
+ } else if (!pciirq && noisy) {
+ printk(KERN_WARNING "%s %s: bad irq (%d): will probe later\n",
+ d->name, pci_name(dev), pciirq);
+ } else if (noisy) {
+ printk(KERN_INFO "%s %s: 100%% native mode on irq %d\n",
+ d->name, pci_name(dev), pciirq);
+ }
+
+ ret = pciirq;
+out:
+ return ret;
+}
+
+int ide_pci_init_one(struct pci_dev *dev, const struct ide_port_info *d,
+ void *priv)
+{
+ struct ide_host *host;
+ hw_regs_t hw[4], *hws[] = { NULL, NULL, NULL, NULL };
+ int ret;
+
+ ret = ide_setup_pci_controller(dev, d, 1);
+ if (ret < 0)
+ goto out;
+
+ ide_pci_setup_ports(dev, d, 0, &hw[0], &hws[0]);
+
+ host = ide_host_alloc(d, hws);
+ if (host == NULL) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ host->dev[0] = &dev->dev;
+
+ host->host_priv = priv;
+
+ pci_set_drvdata(dev, host);
+
+ ret = do_ide_setup_pci_device(dev, d, 1);
+ if (ret < 0)
+ goto out;
+
+ /* fixup IRQ */
+ hw[1].irq = hw[0].irq = ret;
+
+ ret = ide_host_register(host, d, hws);
+ if (ret)
+ ide_host_free(host);
+out:
+ return ret;
+}
+EXPORT_SYMBOL_GPL(ide_pci_init_one);
+
+int ide_pci_init_two(struct pci_dev *dev1, struct pci_dev *dev2,
+ const struct ide_port_info *d, void *priv)
+{
+ struct pci_dev *pdev[] = { dev1, dev2 };
+ struct ide_host *host;
+ int ret, i;
+ hw_regs_t hw[4], *hws[] = { NULL, NULL, NULL, NULL };
+
+ for (i = 0; i < 2; i++) {
+ ret = ide_setup_pci_controller(pdev[i], d, !i);
+ if (ret < 0)
+ goto out;
+
+ ide_pci_setup_ports(pdev[i], d, 0, &hw[i*2], &hws[i*2]);
+ }
+
+ host = ide_host_alloc(d, hws);
+ if (host == NULL) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ host->dev[0] = &dev1->dev;
+ host->dev[1] = &dev2->dev;
+
+ host->host_priv = priv;
+
+ pci_set_drvdata(pdev[0], host);
+ pci_set_drvdata(pdev[1], host);
+
+ for (i = 0; i < 2; i++) {
+ ret = do_ide_setup_pci_device(pdev[i], d, !i);
+
+ /*
+		 * FIXME: there is no helper to undo do_ide_setup_pci_device()
+		 * on the first device if setting up the second one fails.
+ */
+ if (ret < 0)
+ goto out;
+
+ /* fixup IRQ */
+ hw[i*2 + 1].irq = hw[i*2].irq = ret;
+ }
+
+ ret = ide_host_register(host, d, hws);
+ if (ret)
+ ide_host_free(host);
+out:
+ return ret;
+}
+EXPORT_SYMBOL_GPL(ide_pci_init_two);
+
+void ide_pci_remove(struct pci_dev *dev)
+{
+ struct ide_host *host = pci_get_drvdata(dev);
+ struct pci_dev *dev2 = host->dev[1] ? to_pci_dev(host->dev[1]) : NULL;
+ int bars;
+
+ if (host->host_flags & IDE_HFLAG_SINGLE)
+ bars = (1 << 2) - 1;
+ else
+ bars = (1 << 4) - 1;
+
+ if ((host->host_flags & IDE_HFLAG_NO_DMA) == 0) {
+ if (host->host_flags & IDE_HFLAG_CS5520)
+ bars |= (1 << 2);
+ else
+ bars |= (1 << 4);
+ }
+
+ ide_host_remove(host);
+
+ if (dev2)
+ pci_release_selected_regions(dev2, bars);
+ pci_release_selected_regions(dev, bars);
+
+ if (dev2)
+ pci_disable_device(dev2);
+ pci_disable_device(dev);
+}
+EXPORT_SYMBOL_GPL(ide_pci_remove);
+
+#ifdef CONFIG_PM
+int ide_pci_suspend(struct pci_dev *dev, pm_message_t state)
+{
+ pci_save_state(dev);
+ pci_disable_device(dev);
+ pci_set_power_state(dev, pci_choose_state(dev, state));
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ide_pci_suspend);
+
+int ide_pci_resume(struct pci_dev *dev)
+{
+ struct ide_host *host = pci_get_drvdata(dev);
+ int rc;
+
+ pci_set_power_state(dev, PCI_D0);
+
+ rc = pci_enable_device(dev);
+ if (rc)
+ return rc;
+
+ pci_restore_state(dev);
+ pci_set_master(dev);
+
+ if (host->init_chipset)
+ host->init_chipset(dev);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ide_pci_resume);
+#endif